repo_name
stringlengths 4
116
| path
stringlengths 3
942
| size
stringlengths 1
7
| content
stringlengths 3
1.05M
| license
stringclasses 15
values |
---|---|---|---|---|
SpannaProject/SpannaAPI
|
src/main/java/org/spanna/material/Torch.java
|
1816
|
package org.spanna.material;
import org.spanna.block.BlockFace;
import org.spanna.Material;
/**
 * MaterialData for torches.
 *
 * <p>A torch's data value encodes the direction it faces, which is always the
 * opposite of the block face it is attached to.
 */
public class Torch extends SimpleAttachableMaterialData {

    public Torch() {
        super(Material.TORCH);
    }

    /**
     *
     * @deprecated Magic value
     */
    @Deprecated
    public Torch(final int type) {
        super(type);
    }

    public Torch(final Material type) {
        super(type);
    }

    /**
     *
     * @deprecated Magic value
     */
    @Deprecated
    public Torch(final int type, final byte data) {
        super(type, data);
    }

    /**
     *
     * @deprecated Magic value
     */
    @Deprecated
    public Torch(final Material type, final byte data) {
        super(type, data);
    }

    /**
     * Gets the face that this block is attached on
     *
     * @return BlockFace attached to
     */
    public BlockFace getAttachedFace() {
        // Data encodes the facing direction; the attached face is its opposite
        // (e.g. data 0x1 = facing EAST = attached on WEST).
        switch (getData()) {
            case 0x1:
                return BlockFace.WEST;
            case 0x2:
                return BlockFace.EAST;
            case 0x3:
                return BlockFace.NORTH;
            case 0x4:
                return BlockFace.SOUTH;
            case 0x5:
            default:
                // 0x5 is a floor torch; unknown values also fall back to DOWN.
                return BlockFace.DOWN;
        }
    }

    public void setFacingDirection(BlockFace face) {
        final byte data;
        switch (face) {
            case EAST:
                data = 0x1;
                break;
            case WEST:
                data = 0x2;
                break;
            case SOUTH:
                data = 0x3;
                break;
            case NORTH:
                data = 0x4;
                break;
            case UP:
            default:
                // UP (standing on the floor) doubles as the fallback direction.
                data = 0x5;
                break;
        }
        setData(data);
    }

    @Override
    public Torch clone() {
        return (Torch) super.clone();
    }
}
|
apache-2.0
|
PSDev/slf4j-android-logger
|
src/test/java/de/psdev/slf4j/android/logger/AndroidLoggerAdapterTest.java
|
11065
|
/*
* Copyright 2013 Philip Schiffer
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.psdev.slf4j.android.logger;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import org.hamcrest.CoreMatchers;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.robolectric.RobolectricTestRunner;
import org.robolectric.annotation.Config;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import android.util.Log;
@RunWith(RobolectricTestRunner.class)
@Config(manifest = Config.NONE, shadows = { EnhancedShadowLog.class })
public class AndroidLoggerAdapterTest {

    private Logger mLogger;

    @Before
    public void setUp() throws Exception {
        mLogger = LoggerFactory.getLogger(AndroidLoggerAdapterTest.class);
        EnhancedShadowLog.stream = System.out;
    }

    @Test
    public void testInitialization() throws Exception {
        assertEquals("should have read correct log tag from properties", "TestLogTag",
                AndroidLoggerAdapter.getLogTag());
        assertEquals("should have correct name", AndroidLoggerAdapterTest.class.getName(), mLogger.getName());
        assertEquals("should have correct log level", LogLevel.TRACE, AndroidLoggerAdapter.getLogLevel());
    }

    @Test
    public void testIsTraceEnabled() throws Exception {
        assertTrue("trace should be enabled", mLogger.isTraceEnabled());
    }

    @Test
    public void testTrace() throws Exception {
        mLogger.trace("test trace");
        assertLog(Log.VERBOSE, "test trace");
    }

    @Test
    public void testTraceWithArg() throws Exception {
        mLogger.trace("test trace {}", "argument");
        assertLog(Log.VERBOSE, "test trace argument");
    }

    @Test
    public void testTraceWithTwoArgs() throws Exception {
        mLogger.trace("test trace {} {}", "argument", "argument2");
        assertLog(Log.VERBOSE, "test trace argument argument2");
    }

    @Test
    public void testTraceWithVarArgs() throws Exception {
        mLogger.trace("test trace {} {} {}", "argument", "argument2", "argument3");
        assertLog(Log.VERBOSE, "test trace argument argument2 argument3");
    }

    @Test
    public void testTraceWithArgAndThrowable() throws Exception {
        final Exception exception = new Exception("test trace exception");
        mLogger.trace("test trace {} {} {}", "argument", "argument2", "argument3", exception);
        assertLog(Log.VERBOSE, "test trace argument argument2 argument3", exception);
    }

    @Test
    public void testTraceWithThrowable() throws Exception {
        final Exception exception = new Exception("test trace exception");
        mLogger.trace("test trace", exception);
        assertLog(Log.VERBOSE, "test trace", exception);
    }

    @Test
    public void testIsDebugEnabled() throws Exception {
        assertTrue("debug should be enabled", mLogger.isDebugEnabled());
    }

    @Test
    public void testDebug() throws Exception {
        mLogger.debug("test debug");
        assertLog(Log.DEBUG, "test debug");
    }

    @Test
    public void testDebugWithArg() throws Exception {
        mLogger.debug("test debug {}", "argument");
        assertLog(Log.DEBUG, "test debug argument");
    }

    @Test
    public void testDebugWithTwoArgs() throws Exception {
        mLogger.debug("test debug {} {}", "argument", "argument2");
        assertLog(Log.DEBUG, "test debug argument argument2");
    }

    @Test
    public void testDebugWithVarArgs() throws Exception {
        mLogger.debug("test debug {} {} {}", "argument", "argument2", "argument3");
        assertLog(Log.DEBUG, "test debug argument argument2 argument3");
    }

    @Test
    public void testDebugWithArgAndThrowable() throws Exception {
        final Exception exception = new Exception("test debug exception");
        mLogger.debug("test debug {} {} {}", "argument", "argument2", "argument3", exception);
        assertLog(Log.DEBUG, "test debug argument argument2 argument3", exception);
    }

    @Test
    public void testDebugWithThrowable() throws Exception {
        final Exception exception = new Exception("test debug exception");
        mLogger.debug("test debug", exception);
        assertLog(Log.DEBUG, "test debug", exception);
    }

    @Test
    public void testIsInfoEnabled() throws Exception {
        assertTrue("info should be enabled", mLogger.isInfoEnabled());
    }

    @Test
    public void testInfo() throws Exception {
        mLogger.info("test info");
        assertLog(Log.INFO, "test info");
    }

    @Test
    public void testInfoWithArg() throws Exception {
        mLogger.info("test info {}", "argument");
        assertLog(Log.INFO, "test info argument");
    }

    @Test
    public void testInfoWithTwoArgs() throws Exception {
        mLogger.info("test info {} {}", "argument", "argument2");
        assertLog(Log.INFO, "test info argument argument2");
    }

    @Test
    public void testInfoWithVarArgs() throws Exception {
        mLogger.info("test info {} {} {}", "argument", "argument2", "argument3");
        assertLog(Log.INFO, "test info argument argument2 argument3");
    }

    @Test
    public void testInfoWithArgAndThrowable() throws Exception {
        final Exception exception = new Exception("test info exception");
        mLogger.info("test info {} {} {}", "argument", "argument2", "argument3", exception);
        assertLog(Log.INFO, "test info argument argument2 argument3", exception);
    }

    @Test
    public void testInfoWithThrowable() throws Exception {
        final Exception exception = new Exception("test info exception");
        mLogger.info("test info", exception);
        assertLog(Log.INFO, "test info", exception);
    }

    @Test
    public void testIsWarnEnabled() throws Exception {
        assertTrue("warn should be enabled", mLogger.isWarnEnabled());
    }

    @Test
    public void testWarn() throws Exception {
        // Fixed copy-paste from the info tests: log and assert "test warn",
        // so a mix-up between the warn and info adapters cannot go unnoticed.
        mLogger.warn("test warn");
        assertLog(Log.WARN, "test warn");
    }

    @Test
    public void testWarnWithArg() throws Exception {
        mLogger.warn("test warn {}", "argument");
        assertLog(Log.WARN, "test warn argument");
    }

    @Test
    public void testWarnWithTwoArgs() throws Exception {
        mLogger.warn("test warn {} {}", "argument", "argument2");
        assertLog(Log.WARN, "test warn argument argument2");
    }

    @Test
    public void testWarnWithVarArgs() throws Exception {
        mLogger.warn("test warn {} {} {}", "argument", "argument2", "argument3");
        assertLog(Log.WARN, "test warn argument argument2 argument3");
    }

    @Test
    public void testWarnWithArgAndThrowable() throws Exception {
        final Exception exception = new Exception("test warn exception");
        mLogger.warn("test warn {} {} {}", "argument", "argument2", "argument3", exception);
        assertLog(Log.WARN, "test warn argument argument2 argument3", exception);
    }

    @Test
    public void testWarnWithThrowable() throws Exception {
        final Exception exception = new Exception("test warn exception");
        mLogger.warn("test warn", exception);
        assertLog(Log.WARN, "test warn", exception);
    }

    @Test
    public void testIsErrorEnabled() throws Exception {
        assertTrue("error should be enabled", mLogger.isErrorEnabled());
    }

    @Test
    public void testError() throws Exception {
        mLogger.error("test error");
        assertLog(Log.ERROR, "test error");
    }

    @Test
    public void testErrorWithArg() throws Exception {
        mLogger.error("test error {}", "argument");
        assertLog(Log.ERROR, "test error argument");
    }

    @Test
    public void testErrorWithTwoArgs() throws Exception {
        mLogger.error("test error {} {}", "argument", "argument2");
        assertLog(Log.ERROR, "test error argument argument2");
    }

    @Test
    public void testErrorWithVarArgs() throws Exception {
        mLogger.error("test error {} {} {}", "argument", "argument2", "argument3");
        assertLog(Log.ERROR, "test error argument argument2 argument3");
    }

    @Test
    public void testErrorWithArgAndThrowable() throws Exception {
        final Exception exception = new Exception("test error exception");
        mLogger.error("test error {} {} {}", "argument", "argument2", "argument3", exception);
        assertLog(Log.ERROR, "test error argument argument2 argument3", exception);
    }

    @Test
    public void testErrorWithThrowable() throws Exception {
        final Exception exception = new Exception("test error exception");
        mLogger.error("test error", exception);
        assertLog(Log.ERROR, "test error", exception);
    }

    @Test
    public void testInnerclassMatching() throws Exception {
        final InnerClassTest innerClassTest = new InnerClassTest();
        innerClassTest.doSomething();
        assertLog(Log.INFO, "inner class match");
        assertThat("should contain correct class name", EnhancedShadowLog.getLogs().get(0).msg,
                CoreMatchers.containsString("InnerClassTest"));
    }

    @After
    public void tearDown() throws Exception {
        EnhancedShadowLog.reset();
    }

    // Helper

    /** Asserts exactly one log entry was captured at the given level containing the given text. */
    private static void assertLog(final int expectedLogLevel, final String expectedContainedText) {
        assertLog(expectedLogLevel, expectedContainedText, null);
    }

    /**
     * Asserts exactly one captured log entry: level, contained text, the emitting class's simple
     * name, the configured tag, and (when non-null) the logged throwable.
     */
    private static void assertLog(final int expectedLogLevel, final String expectedContainedText,
            final Throwable expectedThrowable) {
        assertEquals("should have logged 1 message", 1L, EnhancedShadowLog.getLogs().size());
        final EnhancedShadowLog.LogItem logItem = EnhancedShadowLog.getLogs().get(0);
        assertEquals("should have correct type", expectedLogLevel, logItem.type);
        assertThat("should contain message", logItem.msg, CoreMatchers.containsString(expectedContainedText));
        assertThat("should contain class", logItem.msg, CoreMatchers.containsString(
                AndroidLoggerAdapterTest.class.getSimpleName()));
        assertEquals("should have correct log tag", "TestLogTag", logItem.tag);
        if (expectedThrowable != null) {
            assertEquals("should have logged the correct throwable", expectedThrowable, logItem.throwable);
        }
    }

    class InnerClassTest {
        public void doSomething() {
            mLogger.info("inner class match");
        }
    }
}
|
apache-2.0
|
ustbyjy/leetcode
|
src/main/java/com/yan/leetcode/RemoveDuplicatesFromSortedList.java
|
949
|
package com.yan.leetcode;
import org.junit.Test;
public class RemoveDuplicatesFromSortedList {

    @Test
    public void test() {
        ListNode head = new ListNode(1);
        head.next = new ListNode(1);
        // head.next.next = new ListNode(2);
        System.out.println(deleteDuplicates2(head));
    }

    /**
     * Removes duplicates from a sorted list in place by skipping successor
     * nodes whose value equals the current node's value.
     *
     * @param head head of a sorted linked list (may be null)
     * @return the head of the de-duplicated list
     */
    public ListNode deleteDuplicates(ListNode head) {
        if (head == null || head.next == null) {
            return head;
        }
        ListNode index = head;
        while (index.next != null) {
            if (index.next.val == index.val) {
                // Unlink the duplicate; stay on index so runs of duplicates collapse.
                index.next = index.next.next;
            } else {
                index = index.next;
            }
        }
        return head;
    }

    /**
     * Removes duplicates by rebuilding the chain behind a dummy head.
     *
     * <p>Fixed: the original used {@code Integer.MAX_VALUE} as a sentinel value in
     * the dummy node, which silently dropped a first node whose value was
     * {@code Integer.MAX_VALUE}. We now track "still at the dummy" explicitly
     * instead of relying on an unused sentinel value.
     *
     * @param head head of a sorted linked list (may be null)
     * @return the head of the de-duplicated list
     */
    public ListNode deleteDuplicates2(ListNode head) {
        if (head == null) {
            return head;
        }
        ListNode newHead = new ListNode(0);
        ListNode index = newHead;
        while (head != null) {
            // Accept the first real node unconditionally; afterwards accept only value changes.
            if (index == newHead || head.val != index.val) {
                index.next = head;
                index = index.next;
            }
            head = head.next;
        }
        // Terminate the rebuilt list in case trailing duplicates were skipped.
        index.next = null;
        return newHead.next;
    }
}
|
apache-2.0
|
Exorath/ExoHUD
|
src/main/java/com/exorath/exoHUD/SimpleHUDPackage.java
|
973
|
/*
* Copyright 2017 Exorath
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.exorath.exoHUD;
import java.util.List;
/**
 * Simple {@link HUDPackage} implementation that wraps a fixed list of texts.
 *
 * <p>Created by toonsev on 9/21/2016.
 */
public class SimpleHUDPackage implements HUDPackage {

    // Made final: the reference is assigned once in the constructor and never rebound.
    private final List<HUDText> texts;

    /**
     * @param texts the texts this package exposes; the list is stored as-is
     *              (not copied), so callers share mutations with this package
     */
    public SimpleHUDPackage(List<HUDText> texts) {
        this.texts = texts;
    }

    @Override
    public List<HUDText> getTexts() {
        return texts;
    }
}
|
apache-2.0
|
zhangkun83/grpc-java
|
core/src/main/java/io/grpc/EquivalentAddressGroup.java
|
4376
|
/*
* Copyright 2015 The gRPC Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.grpc;
import com.google.common.base.Preconditions;
import java.lang.annotation.Documented;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.net.SocketAddress;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
/**
 * A group of {@link SocketAddress}es that are considered equivalent when channel makes connections.
 *
 * <p>Usually the addresses are addresses resolved from the same host name, and connecting to any of
 * them is equally sufficient. They do have order. An address appears earlier on the list is likely
 * to be tried earlier.
 */
@ExperimentalApi("https://github.com/grpc/grpc-java/issues/1770")
public final class EquivalentAddressGroup {

    private final List<SocketAddress> addressList;
    private final Attributes attributes;

    /**
     * {@link SocketAddress} docs say that the addresses are immutable, so we cache the hashCode.
     */
    private final int memoizedHashCode;

    /**
     * List constructor without {@link Attributes}.
     */
    public EquivalentAddressGroup(List<SocketAddress> addrs) {
        this(addrs, Attributes.EMPTY);
    }

    /**
     * List constructor with {@link Attributes}.
     */
    public EquivalentAddressGroup(List<SocketAddress> addrs, @Attr Attributes attrs) {
        Preconditions.checkArgument(!addrs.isEmpty(), "addrs is empty");
        this.attributes = Preconditions.checkNotNull(attrs, "attrs");
        // Defensive copy wrapped as unmodifiable so the group cannot change after construction.
        this.addressList = Collections.unmodifiableList(new ArrayList<>(addrs));
        // Attributes may contain mutable objects, which means Attributes' hashCode may change over
        // time, thus we don't cache Attributes' hashCode.
        this.memoizedHashCode = this.addressList.hashCode();
    }

    /**
     * Singleton constructor without Attributes.
     */
    public EquivalentAddressGroup(SocketAddress addr) {
        this(addr, Attributes.EMPTY);
    }

    /**
     * Singleton constructor with Attributes.
     */
    public EquivalentAddressGroup(SocketAddress addr, @Attr Attributes attrs) {
        this(Collections.singletonList(addr), attrs);
    }

    /**
     * Returns an immutable list of the addresses.
     */
    public List<SocketAddress> getAddresses() {
        return addressList;
    }

    /**
     * Returns the attributes.
     */
    @Attr
    public Attributes getAttributes() {
        return attributes;
    }

    @Override
    public String toString() {
        // TODO(zpencer): Summarize return value if addr is very large
        return "[addrs=" + addressList + ", attrs=" + attributes + "]";
    }

    @Override
    public int hashCode() {
        // Avoids creating an iterator on the underlying array list.
        return memoizedHashCode;
    }

    /**
     * Returns true if the given object is also an {@link EquivalentAddressGroup} with an equal
     * address list and equal attribute values.
     *
     * <p>Note that if the attributes include mutable values, it is possible for two objects to be
     * considered equal at one point in time and not equal at another (due to concurrent mutation of
     * attribute values).
     */
    @Override
    public boolean equals(Object other) {
        if (!(other instanceof EquivalentAddressGroup)) {
            return false;
        }
        EquivalentAddressGroup otherGroup = (EquivalentAddressGroup) other;
        int size = addressList.size();
        if (size != otherGroup.addressList.size()) {
            return false;
        }
        // Indexed loop on purpose: avoids creating an iterator on the underlying array list.
        for (int i = 0; i < size; i++) {
            if (!addressList.get(i).equals(otherGroup.addressList.get(i))) {
                return false;
            }
        }
        return attributes.equals(otherGroup.attributes);
    }

    /**
     * Annotation for {@link EquivalentAddressGroup}'s attributes. It follows the annotation semantics
     * defined by {@link Attributes}.
     */
    @ExperimentalApi("https://github.com/grpc/grpc-java/issues/4972")
    @Retention(RetentionPolicy.SOURCE)
    @Documented
    public @interface Attr {}
}
|
apache-2.0
|
agolovatjuk/alexander4j
|
chapter_002/src/test/java/ru/job4j/tracker/package-info.java
|
133
|
/**
* Package for test tracker task.
*
* @author Alexander Golovatyuk
* @version $Id$
* @since 0.1
*/
package ru.job4j.tracker;
|
apache-2.0
|
guillaume-philippon/aquilon
|
tests/broker/test_del_user.py
|
1774
|
#!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2009,2010,2012,2013,2014,2015,2016 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the del user command."""
import pwd
import os
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
class TestDelUser(TestBrokerCommand):
    """Delete the current OS user via the broker and verify it is gone."""

    def test_100_del_current_user(self):
        # Deleting an existing user should produce no output.
        username = pwd.getpwuid(os.getuid()).pw_name
        self.noouttest(["del_user", "--username", username])

    def test_105_verify_gone(self):
        # After deletion, show_user must report the user as missing.
        username = pwd.getpwuid(os.getuid()).pw_name
        command = ["show_user", "--username", username]
        out = self.notfoundtest(command)
        self.matchoutput(out, "User %s not found." % username, command)

    def test_110_del_current_user_again(self):
        # Deleting a second time must fail with the same not-found message.
        username = pwd.getpwuid(os.getuid()).pw_name
        command = ["del_user", "--username", username]
        out = self.notfoundtest(command)
        self.matchoutput(out, "User %s not found." % username, command)
if __name__ == '__main__':
    # Run only this module's tests, mirroring the other broker test modules.
    loader = unittest.TestLoader()
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(loader.loadTestsFromTestCase(TestDelUser))
|
apache-2.0
|
WangGanxin/Codebase
|
app/src/main/java/com/ganxin/codebase/widgets/layout/SquareLaylout.java
|
1695
|
package com.ganxin.codebase.widgets.layout;
import android.annotation.SuppressLint;
import android.content.Context;
import android.util.AttributeSet;
import android.widget.RelativeLayout;
/**
 *
 * Description : Square Layout (forces height to equal width) <br/>
 * author : WangGanxin <br/>
 * date : 2016/9/4 <br/>
 * email : [email protected] <br/>
 */
public class SquareLaylout extends RelativeLayout{

    @SuppressLint("NewApi")
    public SquareLaylout(Context context, AttributeSet attrs, int defStyle) {
        super(context, attrs, defStyle);
    }

    public SquareLaylout(Context context, AttributeSet attrs) {
        super(context, attrs);
    }

    public SquareLaylout(Context context) {
        super(context);
    }

    /**
     * Measures the layout, then re-measures with the height spec forced to the
     * measured width so the layout is always square.
     */
    @Override
    protected void onMeasure(int widthMeasureSpec, int heightMeasureSpec) {
        // For simple implementation, or internal size is always 0.
        // We depend on the container to specify the layout size of
        // our view. We can't really know what it is since we will be
        // adding and removing different arbitrary views and do not
        // want the layout to change as this happens.
        setMeasuredDimension(getDefaultSize(0, widthMeasureSpec), getDefaultSize(0, heightMeasureSpec));
        // Children are just made to fill our space.
        // (Removed an unused childHeightSize local and the @SuppressWarnings("unused")
        // annotation that existed only to silence it.)
        int childWidthSize = getMeasuredWidth();
        // Height is made equal to the width.
        heightMeasureSpec = widthMeasureSpec = MeasureSpec.makeMeasureSpec(childWidthSize, MeasureSpec.EXACTLY);
        super.onMeasure(widthMeasureSpec, heightMeasureSpec);
    }
}
|
apache-2.0
|
andrhamm/Singularity
|
SingularityService/src/main/java/com/hubspot/singularity/scheduler/SingularityUsageHelper.java
|
1323
|
package com.hubspot.singularity.scheduler;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.hubspot.singularity.MachineState;
import com.hubspot.singularity.SingularitySlave;
import com.hubspot.singularity.data.SlaveManager;
@Singleton
public class SingularityUsageHelper {

    private final SlaveManager slaveManager;

    @Inject
    public SingularityUsageHelper(SlaveManager slaveManager) {
        this.slaveManager = slaveManager;
    }

    /**
     * Returns the ids of all slaves whose usage should be tracked.
     */
    public Set<String> getSlaveIdsToTrackUsageFor() {
        List<SingularitySlave> tracked = getSlavesToTrackUsageFor();
        Set<String> ids = new HashSet<>(tracked.size());
        for (SingularitySlave slave : tracked) {
            ids.add(slave.getId());
        }
        return ids;
    }

    /**
     * Returns all slaves that are neither inactive nor decommissioned.
     */
    public List<SingularitySlave> getSlavesToTrackUsageFor() {
        List<SingularitySlave> allSlaves = slaveManager.getObjects();
        List<SingularitySlave> tracked = new ArrayList<>(allSlaves.size());
        for (SingularitySlave slave : allSlaves) {
            MachineState state = slave.getCurrentState().getState();
            // Track only slaves that are active and not decommissioned.
            if (!state.isInactive() && state != MachineState.DECOMMISSIONED) {
                tracked.add(slave);
            }
        }
        return tracked;
    }
}
|
apache-2.0
|
logzio/camel
|
platforms/commands/commands-core/src/test/java/org/apache/camel/commands/catalog/CamelComponentCatalogTest.java
|
4257
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.commands.catalog;
import java.util.List;
import java.util.Set;
import org.apache.camel.catalog.CamelComponentCatalog;
import org.apache.camel.catalog.DefaultCamelComponentCatalog;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
public class CamelComponentCatalogTest {

    private static final Logger LOG = LoggerFactory.getLogger(CamelComponentCatalogTest.class);

    @Test
    public void testFindComponentNames() {
        CamelComponentCatalog catalog = new DefaultCamelComponentCatalog();
        List<String> names = catalog.findComponentNames();
        assertNotNull(names);
        LOG.info("Found {} names", names.size());
        assertTrue("Should find some components", names.size() > 0);
    }

    @Test
    public void testFindComponentNamesFilter() {
        assertFindsFilteredNames("testing", "Should find some testing components");
    }

    @Test
    public void testFindComponentNamesFilterWildcard() {
        assertFindsFilteredNames("t*", "Should find some t* components");
    }

    @Test
    public void testFindComponentNamesFilterTwo() {
        assertFindsFilteredNames("transformation", "Should find some transformation components");
    }

    @Test
    public void testFindComponentNamesFilterNoMatch() {
        CamelComponentCatalog catalog = new DefaultCamelComponentCatalog();
        List<String> names = catalog.findComponentNames("cannotmatchme");
        assertNotNull(names);
        assertTrue("Should not match any components", names.size() == 0);
    }

    @Test
    public void testCoreComponentJson() {
        assertComponentJson("bean", "Should find bean component");
    }

    @Test
    public void testFtpComponentJson() {
        assertComponentJson("ftp", "Should find ftp component");
    }

    @Test
    public void testLabels() {
        CamelComponentCatalog catalog = new DefaultCamelComponentCatalog();
        Set<String> labels = catalog.findComponentLabels();
        assertNotNull(labels);
        assertTrue("Should find labels", labels.size() > 0);
        assertTrue("Should find core label", labels.contains("core"));
        assertTrue("Should find testing label", labels.contains("testing"));
        assertTrue("Should find rest label", labels.contains("rest"));
    }

    // Helpers: the filter and JSON lookups all follow the same pattern, so the
    // shared assertions live here and each test supplies its own message.

    /** Asserts that a label filter matches at least one component name. */
    private void assertFindsFilteredNames(String filter, String message) {
        CamelComponentCatalog catalog = new DefaultCamelComponentCatalog();
        List<String> names = catalog.findComponentNames(filter);
        assertNotNull(names);
        LOG.info("Found {} names", names.size());
        assertTrue(message, names.size() > 0);
    }

    /** Asserts that a component's JSON schema exists and mentions the component name. */
    private void assertComponentJson(String componentName, String message) {
        CamelComponentCatalog catalog = new DefaultCamelComponentCatalog();
        String json = catalog.componentJSonSchema(componentName);
        assertNotNull(json);
        LOG.info(json);
        assertTrue(message, json.contains(componentName));
    }
}
|
apache-2.0
|
simerc/QuicksilverPlus
|
Sources/EPiServer.Reference.Commerce.Site/Features/Global/ProductRegistration/Controllers/ProductRegistrationController.cs
|
792
|
using System.Web.Mvc;
using EPiServer.Reference.Commerce.Site.Features.Global.ProductRegistration.Pages;
using EPiServer.Reference.Commerce.Site.Features.Global.ProductRegistration.ViewModels;
using EPiServer.Reference.Commerce.Site.Features.Global.Profile.Pages;
using EPiServer.Web.Mvc;
using EPiServer.Reference.Commerce.Site.Features.Global.Profile.ViewModels;
namespace EPiServer.Reference.Commerce.Site.Features.Global.ProductRegistration.Controllers
{
    // NOTE(review): the authorization attribute is commented out, so this page is
    // reachable anonymously — confirm that is intentional before shipping.
    //[Authorize]
    // NOTE(review): the file is named ProductRegistrationController.cs but the class
    // lacks the "Controller" suffix that ASP.NET MVC routing conventionally requires;
    // verify this page controller is actually resolved at runtime.
    public class ProductRegistration : PageController<ProductRegistrationPage>
    {
        // Renders the product registration page with a view model wrapping the current page.
        public ActionResult Index(ProductRegistrationPage currentPage)
        {
            var viewModel = new ProductRegistrationViewModel { CurrentPage = currentPage };
            return View(viewModel);
        }
    }
}
|
apache-2.0
|
hmrc/ct-calculations
|
src/main/scala/uk/gov/hmrc/ct/ct600/v3/B705.scala
|
1111
|
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600.v3
import uk.gov.hmrc.ct.box.{CtBoxIdentifier, CtOptionalInteger, Linked}
import uk.gov.hmrc.ct.computations.CP248
case class B705(value: Option[Int]) extends CtBoxIdentifier("Total capital allowances claimed in main pool") with CtOptionalInteger

object B705 extends Linked[CP248, B705] {
  // A claimed amount of exactly zero is treated as no claim at all, so
  // Some(0) collapses to None; any other value (or None) passes through.
  override def apply(source: CP248): B705 = B705(source.value.filter(_ != 0))
}
|
apache-2.0
|
tfisher1226/ARIES
|
bookshop2/bookshop2-supplier/bookshop2-supplier-service/src/test/java/bookshop2/supplier/incoming/queryBooks/QueryBooksHandlerUnitTest.java
|
5692
|
package bookshop2.supplier.incoming.queryBooks;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import org.aries.runtime.BeanContext;
import org.aries.runtime.RequestContext;
import org.aries.tx.AbstractHandlerUnitTest;
import org.aries.tx.Transactional;
import org.aries.util.FieldUtil;
import org.aries.validate.util.CheckpointManager;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.runners.MockitoJUnitRunner;
import bookshop2.QueryRequestMessage;
import bookshop2.supplier.SupplierProcess;
import bookshop2.util.Bookshop2Fixture;
@RunWith(MockitoJUnitRunner.class)
public class QueryBooksHandlerUnitTest extends AbstractHandlerUnitTest {
private QueryBooksHandlerImpl fixture;
private RequestContext mockRequestContext;
private SupplierProcess mockSupplierProcess;
public String getName() {
return "QueryBooks";
}
public String getDomain() {
return "bookshop2.supplier";
}
public Transactional getFixture() {
return fixture;
}
public SupplierProcess getMockServiceProcess() {
return mockSupplierProcess;
}
@Before
public void setUp() throws Exception {
mockRequestContext = mock(RequestContext.class);
mockSupplierProcess = mock(SupplierProcess.class);
CheckpointManager.setJAXBSessionCache(getJAXBSessionCache());
CheckpointManager.addConfiguration("bookshop2-supplier-service-checks.xml");
super.setUp();
}
@After
public void tearDown() throws Exception {
BeanContext.clear();
mockRequestContext = null;
mockSupplierProcess = null;
fixture = null;
super.tearDown();
}
protected QueryBooksHandlerImpl createFixture() throws Exception {
fixture = new QueryBooksHandlerImpl();
FieldUtil.setFieldValue(fixture, "requestContext", mockRequestContext);
FieldUtil.setFieldValue(fixture, "supplierProcess", mockSupplierProcess);
initialize(fixture);
return fixture;
}
@Test
public void testExecute_queryBooks_Success() throws Exception {
QueryRequestMessage queryRequestMessage = Bookshop2Fixture.create_QueryRequestMessage();
setupContext(expectedCorrelationId, expectedTransactionId);
setupMessage(queryRequestMessage);
runTestExecute_queryBooks(queryRequestMessage);
}
@Test
public void testExecute_queryBooks_NullRequest() throws Exception {
addExpectedServiceAbortedException("Incoming message is null");
expectedCorrelationId = null;
isRequestExpected = true;
runTestExecute_queryBooks(null);
}
@Test
public void testExecute_queryBooks_EmptyRequest() throws Exception {
addExpectedServiceAbortedException("QueryRequestMessage must include one or more Book(s)");
QueryRequestMessage queryRequestMessage = Bookshop2Fixture.createEmpty_QueryRequestMessage();
setupContext(expectedCorrelationId, expectedTransactionId);
setupMessage(queryRequestMessage);
isAbortExpected = true;
runTestExecute_queryBooks(queryRequestMessage);
}
@Test
public void testExecute_queryBooks_NullCorrelationId() throws Exception {
addExpectedServiceAbortedException("CorrelationId null");
QueryRequestMessage queryRequestMessage = Bookshop2Fixture.create_QueryRequestMessage();
expectedCorrelationId = null;
setupContext(expectedCorrelationId, expectedTransactionId);
setupMessage(queryRequestMessage);
isAbortExpected = true;
runTestExecute_queryBooks(queryRequestMessage);
}
@Test
public void testExecute_queryBooks_EmptyCorrelationId() throws Exception {
addExpectedServiceAbortedException("CorrelationId empty");
QueryRequestMessage queryRequestMessage = Bookshop2Fixture.create_QueryRequestMessage();
expectedCorrelationId = "";
setupContext(expectedCorrelationId, expectedTransactionId);
setupMessage(queryRequestMessage);
isAbortExpected = true;
runTestExecute_queryBooks(queryRequestMessage);
}
@Test
public void testExecute_queryBooks_NullTransactionId() throws Exception {
// A null transaction id is tolerated: execution proceeds without aborting.
QueryRequestMessage queryRequestMessage = Bookshop2Fixture.create_QueryRequestMessage();
expectedTransactionId = null;
setupContext(expectedCorrelationId, expectedTransactionId);
//setGlobalTransactionActive(true);
setupMessage(queryRequestMessage);
runTestExecute_queryBooks(queryRequestMessage);
}
@Test
public void testExecute_queryBooks_EmptyTransactionId() throws Exception {
// An empty transaction id is tolerated, mirroring the null-transaction case.
QueryRequestMessage queryRequestMessage = Bookshop2Fixture.create_QueryRequestMessage();
expectedTransactionId = "";
setupContext(expectedCorrelationId, expectedTransactionId);
//setGlobalTransactionActive(true);
setupMessage(queryRequestMessage);
runTestExecute_queryBooks(queryRequestMessage);
}
// Shared driver for all queryBooks tests: runs the call on a fresh fixture,
// validates the success path (transaction enrollment + process invocation),
// routes any throwable to validateAfterException, and always runs the
// notification and post-execution checks in the finally block.
public void runTestExecute_queryBooks(QueryRequestMessage queryRequestMessage) throws Exception {
try {
fixture = createFixture();
fixture.queryBooks(queryRequestMessage);
if (isGlobalTransactionActive())
validateEnrollTransaction(queryRequestMessage);
validateProcessInvocation(queryRequestMessage);
} catch (Throwable e) {
validateAfterException(e);
} finally {
validateProcessNotification();
validateAfterExecution();
}
}
// Unless an abort is expected, the request must have reached the supplier process.
protected void validateProcessInvocation(QueryRequestMessage queryRequestMessage) throws Exception {
if (!isAbortExpected)
verify(mockSupplierProcess).handle_QueryBooks_request(queryRequestMessage);
}
// Intentionally empty: the notification check is currently disabled.
protected void validateProcessNotification() throws Exception {
//verify(mockSupplierProcess).fireQueryBooksDone();
}
/**
 * When an abort was expected, verifies the supplier process was handed the
 * exception for the expected correlation id, then defers to the base-class
 * post-execution checks.
 */
@Override
protected void validateAfterExecution() throws Exception {
    if (isAbortExpected) {
        verify(mockSupplierProcess).handle_QueryBooks_request_exception(expectedCorrelationId, expectedException);
    }
    super.validateAfterExecution();
}
}
|
apache-2.0
|
greezeek/shelly
|
src/Shelly/Palette.php
|
2125
|
<?php
/**
* Created by IntelliJ IDEA.
* User: swarm
* Date: 14.10.15
* Time: 10:59
*/
namespace Shelly;
class Palette
{
    /**
     * Error message used when a colour alias cannot be resolved.
     * Kept under its historical (misspelled) name for backwards compatibility.
     */
    const MSG_GOLOR_NOT_EXISTS = 'Color does not exists';

    /** Correctly spelled alias of {@see Palette::MSG_GOLOR_NOT_EXISTS}. */
    const MSG_COLOR_NOT_EXISTS = self::MSG_GOLOR_NOT_EXISTS;

    /**
     * ANSI foreground colour codes keyed by alias.
     * 'normal' (0) is the SGR reset code.
     *
     * @var array
     */
    protected static $colors = [
        'black'   => 30,
        'red'     => 31,
        'green'   => 32,
        'yellow'  => 33,
        'blue'    => 34,
        'magenta' => 35,
        'cyan'    => 36,
        'white'   => 37,
        'normal'  => 0,
    ];

    /**
     * ANSI background colour codes keyed by alias.
     * All codes are non-zero, which printColourStamp() relies on.
     *
     * @var array
     */
    protected static $bgColors = [
        'black'   => 40,
        'red'     => 41,
        'green'   => 42,
        'yellow'  => 43,
        'blue'    => 44,
        'magenta' => 45,
        'cyan'    => 46,
        'white'   => 47,
    ];

    /**
     * Looks up a foreground colour code by alias.
     *
     * @param string $alias
     * @return int|false the ANSI code, or false when the alias is unknown
     */
    public function getColorByAlias($alias)
    {
        if (array_key_exists($alias, self::$colors)) {
            return self::$colors[$alias];
        }
        return false;
    }

    /**
     * Looks up a background colour code by alias.
     *
     * @param string $alias
     * @return int|false the ANSI code, or false when the alias is unknown
     */
    public function getBgColorByAlias($alias)
    {
        if (array_key_exists($alias, self::$bgColors)) {
            return self::$bgColors[$alias];
        }
        return false;
    }

    /**
     * Returns the ANSI escape sequence for a Unix shell, based on the passed
     * aliases, e.g. "\033[1;47;31m" for bold red on white.
     *
     * @param string      $fg   foreground alias (default 'normal' = reset)
     * @param string|null $bg   optional background alias
     * @param bool|null   $bold whether to emit the bold attribute
     * @return string
     * @throws \Exception when an alias cannot be resolved
     */
    public function printColourStamp($fg = 'normal', $bg = null, $bold = null)
    {
        // Strict false check: a valid code may be 0 ('normal'), which is falsy.
        if (!empty($bg) && (false === ($bg = $this->getBgColorByAlias($bg)))) {
            throw new \Exception(self::MSG_COLOR_NOT_EXISTS);
        }
        if (false === ($fg = $this->getColorByAlias($fg))) {
            throw new \Exception(self::MSG_COLOR_NOT_EXISTS);
        }

        $return = '';
        if (!empty($bold) && (bool)$bold) {
            $return .= '1;';
        }
        // Safe truthiness test: every background code is >= 40, never 0.
        if ($bg) {
            $return .= $bg . ';';
        }
        $return .= $fg;

        return "\033[{$return}m";
    }
}
|
apache-2.0
|
mettacrawler/gcsfs
|
src/base/logger.cc
|
1097
|
/*
* base/logger.cc
* -------------------------------------------------------------------------
* Definitions for s3::base::logger static members and init() method.
* -------------------------------------------------------------------------
*
* Copyright (c) 2012, Tarick Bedeir.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <limits.h>
#include "logger.h"
using s3::base::logger;
// log all messages unless instructed otherwise
int logger::s_max_level = INT_MAX;
// Records the level ceiling used for filtering and opens the process-wide
// syslog connection tagged with PACKAGE_NAME (default options and facility).
void logger::init(int max_level)
{
s_max_level = max_level;
openlog(PACKAGE_NAME, 0, 0);
}
|
apache-2.0
|
kevstessens/docnetrails
|
vendor/bundle/gems/chronic-0.9.1/lib/chronic/handlers.rb
|
18912
|
module Chronic
module Handlers
module_function
# Handle month/day
def handle_m_d(month, day, time_tokens, options)
month.start = self.now
span = month.this(options[:context])
year, month = span.begin.year, span.begin.month
day_start = Chronic.time_class.local(year, month, day)
day_or_time(day_start, time_tokens, options)
end
# Handle repeater-month-name/scalar-day
def handle_rmn_sd(tokens, options)
month = tokens[0].get_tag(RepeaterMonthName)
day = tokens[1].get_tag(ScalarDay).type
return if month_overflow?(self.now.year, month.index, day)
handle_m_d(month, day, tokens[2..tokens.size], options)
end
# Handle repeater-month-name/scalar-day with separator-on
def handle_rmn_sd_on(tokens, options)
if tokens.size > 3
month = tokens[2].get_tag(RepeaterMonthName)
day = tokens[3].get_tag(ScalarDay).type
token_range = 0..1
else
month = tokens[1].get_tag(RepeaterMonthName)
day = tokens[2].get_tag(ScalarDay).type
token_range = 0..0
end
return if month_overflow?(self.now.year, month.index, day)
handle_m_d(month, day, tokens[token_range], options)
end
# Handle repeater-month-name/ordinal-day
def handle_rmn_od(tokens, options)
month = tokens[0].get_tag(RepeaterMonthName)
day = tokens[1].get_tag(OrdinalDay).type
return if month_overflow?(self.now.year, month.index, day)
handle_m_d(month, day, tokens[2..tokens.size], options)
end
# Handle ordinal this month
def handle_od_rm(tokens, options)
day = tokens[0].get_tag(OrdinalDay).type
month = tokens[2].get_tag(RepeaterMonth)
handle_m_d(month, day, tokens[3..tokens.size], options)
end
# Handle ordinal-day/repeater-month-name
def handle_od_rmn(tokens, options)
month = tokens[1].get_tag(RepeaterMonthName)
day = tokens[0].get_tag(OrdinalDay).type
return if month_overflow?(self.now.year, month.index, day)
handle_m_d(month, day, tokens[2..tokens.size], options)
end
# Handle scalar-year/repeater-month-name/ordinal-day
def handle_sy_rmn_od(tokens, options)
year = tokens[0].get_tag(ScalarYear).type
month = tokens[1].get_tag(RepeaterMonthName).index
day = tokens[2].get_tag(OrdinalDay).type
time_tokens = tokens.last(tokens.size - 3)
return if month_overflow?(year, month, day)
begin
day_start = Chronic.time_class.local(year, month, day)
day_or_time(day_start, time_tokens, options)
rescue ArgumentError
nil
end
end
# Handle scalar-day/repeater-month-name
def handle_sd_rmn(tokens, options)
month = tokens[1].get_tag(RepeaterMonthName)
day = tokens[0].get_tag(ScalarDay).type
return if month_overflow?(self.now.year, month.index, day)
handle_m_d(month, day, tokens[2..tokens.size], options)
end
# Handle repeater-month-name/ordinal-day with separator-on
def handle_rmn_od_on(tokens, options)
if tokens.size > 3
month = tokens[2].get_tag(RepeaterMonthName)
day = tokens[3].get_tag(OrdinalDay).type
token_range = 0..1
else
month = tokens[1].get_tag(RepeaterMonthName)
day = tokens[2].get_tag(OrdinalDay).type
token_range = 0..0
end
return if month_overflow?(self.now.year, month.index, day)
handle_m_d(month, day, tokens[token_range], options)
end
# Handle repeater-month-name/scalar-year
def handle_rmn_sy(tokens, options)
month = tokens[0].get_tag(RepeaterMonthName).index
year = tokens[1].get_tag(ScalarYear).type
# NOTE(review): this duplicates handle_year_and_month below — candidate for reuse.
if month == 12
next_month_year = year + 1
next_month_month = 1
else
next_month_year = year
next_month_month = month + 1
end
begin
end_time = Chronic.time_class.local(next_month_year, next_month_month)
Span.new(Chronic.time_class.local(year, month), end_time)
rescue ArgumentError
nil
end
end
# Handle generic timestamp (ruby 1.8)
def handle_generic(tokens, options)
t = Chronic.time_class.parse(options[:text])
Span.new(t, t + 1)
end
# Handle repeater-month-name/scalar-day/scalar-year
def handle_rmn_sd_sy(tokens, options)
month = tokens[0].get_tag(RepeaterMonthName).index
day = tokens[1].get_tag(ScalarDay).type
year = tokens[2].get_tag(ScalarYear).type
time_tokens = tokens.last(tokens.size - 3)
return if month_overflow?(year, month, day)
begin
day_start = Chronic.time_class.local(year, month, day)
day_or_time(day_start, time_tokens, options)
rescue ArgumentError
nil
end
end
# Handle repeater-month-name/ordinal-day/scalar-year
def handle_rmn_od_sy(tokens, options)
month = tokens[0].get_tag(RepeaterMonthName).index
day = tokens[1].get_tag(OrdinalDay).type
year = tokens[2].get_tag(ScalarYear).type
time_tokens = tokens.last(tokens.size - 3)
return if month_overflow?(year, month, day)
begin
day_start = Chronic.time_class.local(year, month, day)
day_or_time(day_start, time_tokens, options)
rescue ArgumentError
nil
end
end
# Handle oridinal-day/repeater-month-name/scalar-year
def handle_od_rmn_sy(tokens, options)
day = tokens[0].get_tag(OrdinalDay).type
month = tokens[1].get_tag(RepeaterMonthName).index
year = tokens[2].get_tag(ScalarYear).type
time_tokens = tokens.last(tokens.size - 3)
return if month_overflow?(year, month, day)
begin
day_start = Chronic.time_class.local(year, month, day)
day_or_time(day_start, time_tokens, options)
rescue ArgumentError
nil
end
end
# Handle scalar-day/repeater-month-name/scalar-year
def handle_sd_rmn_sy(tokens, options)
# Reorder to month/day/year and delegate.
new_tokens = [tokens[1], tokens[0], tokens[2]]
time_tokens = tokens.last(tokens.size - 3)
handle_rmn_sd_sy(new_tokens + time_tokens, options)
end
# Handle scalar-month/scalar-day/scalar-year (endian middle)
def handle_sm_sd_sy(tokens, options)
month = tokens[0].get_tag(ScalarMonth).type
day = tokens[1].get_tag(ScalarDay).type
year = tokens[2].get_tag(ScalarYear).type
time_tokens = tokens.last(tokens.size - 3)
return if month_overflow?(year, month, day)
begin
day_start = Chronic.time_class.local(year, month, day)
day_or_time(day_start, time_tokens, options)
rescue ArgumentError
nil
end
end
# Handle scalar-day/scalar-month/scalar-year (endian little)
def handle_sd_sm_sy(tokens, options)
new_tokens = [tokens[1], tokens[0], tokens[2]]
time_tokens = tokens.last(tokens.size - 3)
handle_sm_sd_sy(new_tokens + time_tokens, options)
end
# Handle scalar-year/scalar-month/scalar-day
def handle_sy_sm_sd(tokens, options)
new_tokens = [tokens[1], tokens[2], tokens[0]]
time_tokens = tokens.last(tokens.size - 3)
handle_sm_sd_sy(new_tokens + time_tokens, options)
end
# Handle scalar-month/scalar-day
def handle_sm_sd(tokens, options)
month = tokens[0].get_tag(ScalarMonth).type
day = tokens[1].get_tag(ScalarDay).type
year = self.now.year
time_tokens = tokens.last(tokens.size - 2)
return if month_overflow?(year, month, day)
begin
day_start = Chronic.time_class.local(year, month, day)
# In :future context, a date already in the past rolls into next year.
day_start = Chronic.time_class.local(year + 1, month, day) if options[:context] == :future && day_start < now
day_or_time(day_start, time_tokens, options)
rescue ArgumentError
nil
end
end
# Handle scalar-day/scalar-month
def handle_sd_sm(tokens, options)
new_tokens = [tokens[1], tokens[0]]
time_tokens = tokens.last(tokens.size - 2)
handle_sm_sd(new_tokens + time_tokens, options)
end
# Build a month-long Span for the given year/month; nil on invalid dates.
def handle_year_and_month(year, month)
if month == 12
next_month_year = year + 1
next_month_month = 1
else
next_month_year = year
next_month_month = month + 1
end
begin
end_time = Chronic.time_class.local(next_month_year, next_month_month)
Span.new(Chronic.time_class.local(year, month), end_time)
rescue ArgumentError
nil
end
end
# Handle scalar-month/scalar-year
def handle_sm_sy(tokens, options)
month = tokens[0].get_tag(ScalarMonth).type
year = tokens[1].get_tag(ScalarYear).type
handle_year_and_month(year, month)
end
# Handle scalar-year/scalar-month
def handle_sy_sm(tokens, options)
year = tokens[0].get_tag(ScalarYear).type
month = tokens[1].get_tag(ScalarMonth).type
handle_year_and_month(year, month)
end
# Handle RepeaterDayName RepeaterMonthName OrdinalDay
def handle_rdn_rmn_od(tokens, options)
month = tokens[1].get_tag(RepeaterMonthName)
day = tokens[2].get_tag(OrdinalDay).type
time_tokens = tokens.last(tokens.size - 3)
year = self.now.year
return if month_overflow?(year, month.index, day)
begin
if time_tokens.empty?
start_time = Chronic.time_class.local(year, month.index, day)
end_time = time_with_rollover(year, month.index, day + 1)
Span.new(start_time, end_time)
else
day_start = Chronic.time_class.local(year, month.index, day)
day_or_time(day_start, time_tokens, options)
end
rescue ArgumentError
nil
end
end
# Handle RepeaterDayName OrdinalDay
def handle_rdn_od(tokens, options)
day = tokens[1].get_tag(OrdinalDay).type
time_tokens = tokens.last(tokens.size - 2)
year = self.now.year
month = self.now.month
if options[:context] == :future
self.now.day > day ? month += 1 : month
end
return if month_overflow?(year, month, day)
begin
if time_tokens.empty?
start_time = Chronic.time_class.local(year, month, day)
end_time = time_with_rollover(year, month, day + 1)
Span.new(start_time, end_time)
else
day_start = Chronic.time_class.local(year, month, day)
day_or_time(day_start, time_tokens, options)
end
rescue ArgumentError
nil
end
end
# Handle RepeaterDayName RepeaterMonthName ScalarDay
def handle_rdn_rmn_sd(tokens, options)
month = tokens[1].get_tag(RepeaterMonthName)
day = tokens[2].get_tag(ScalarDay).type
time_tokens = tokens.last(tokens.size - 3)
year = self.now.year
return if month_overflow?(year, month.index, day)
begin
if time_tokens.empty?
start_time = Chronic.time_class.local(year, month.index, day)
end_time = time_with_rollover(year, month.index, day + 1)
Span.new(start_time, end_time)
else
day_start = Chronic.time_class.local(year, month.index, day)
day_or_time(day_start, time_tokens, options)
end
rescue ArgumentError
nil
end
end
# Handle RepeaterDayName RepeaterMonthName ScalarDay ScalarYear
def handle_rdn_rmn_sd_sy(tokens, options)
month = tokens[1].get_tag(RepeaterMonthName)
day = tokens[2].get_tag(ScalarDay).type
year = tokens[3].get_tag(ScalarYear).type
return if month_overflow?(year, month.index, day)
begin
start_time = Chronic.time_class.local(year, month.index, day)
end_time = time_with_rollover(year, month.index, day + 1)
Span.new(start_time, end_time)
rescue ArgumentError
nil
end
end
# Handle day/repeater-month-name/scalar-year with an optional trailing time.
# NOTE(review): despite the "sm" in the name, tokens[0] is tagged ScalarDay here.
def handle_sm_rmn_sy(tokens, options)
day = tokens[0].get_tag(ScalarDay).type
month = tokens[1].get_tag(RepeaterMonthName).index
year = tokens[2].get_tag(ScalarYear).type
if tokens.size > 3
time = get_anchor([tokens.last], options).begin
h, m, s = time.hour, time.min, time.sec
time = Chronic.time_class.local(year, month, day, h, m, s)
end_time = Chronic.time_class.local(year, month, day + 1, h, m, s)
else
time = Chronic.time_class.local(year, month, day)
day += 1 unless day >= 31
end_time = Chronic.time_class.local(year, month, day)
end
Span.new(time, end_time)
end
# anchors
# Handle repeaters
def handle_r(tokens, options)
dd_tokens = dealias_and_disambiguate_times(tokens, options)
get_anchor(dd_tokens, options)
end
# Handle repeater/grabber/repeater
def handle_r_g_r(tokens, options)
new_tokens = [tokens[1], tokens[0], tokens[2]]
handle_r(new_tokens, options)
end
# arrows
# Handle scalar/repeater/pointer helper
def handle_srp(tokens, span, options)
distance = tokens[0].get_tag(Scalar).type
repeater = tokens[1].get_tag(Repeater)
pointer = tokens[2].get_tag(Pointer).type
repeater.offset(span, distance, pointer) if repeater.respond_to?(:offset)
end
# Handle scalar/repeater/pointer
def handle_s_r_p(tokens, options)
span = Span.new(self.now, self.now + 1)
handle_srp(tokens, span, options)
end
# Handle pointer/scalar/repeater
def handle_p_s_r(tokens, options)
new_tokens = [tokens[1], tokens[2], tokens[0]]
handle_s_r_p(new_tokens, options)
end
# Handle scalar/repeater/pointer/anchor
def handle_s_r_p_a(tokens, options)
anchor_span = get_anchor(tokens[3..tokens.size - 1], options)
handle_srp(tokens, anchor_span, options)
end
# Handle two chained scalar/repeater offsets against one anchor,
# e.g. "3 hours and 5 minutes before now": apply the first offset to the
# anchor, then the second offset to the resulting span.
def handle_s_r_a_s_r_p_a(tokens, options)
anchor_span = get_anchor(tokens[4..tokens.size - 1], options)
span = handle_srp(tokens[0..1]+tokens[4..6], anchor_span, options)
handle_srp(tokens[2..3]+tokens[4..6], span, options)
end
# narrows
# Handle oridinal repeaters
def handle_orr(tokens, outer_span, options)
repeater = tokens[1].get_tag(Repeater)
repeater.start = outer_span.begin - 1
ordinal = tokens[0].get_tag(Ordinal).type
span = nil
# Step the repeater forward `ordinal` times; nil if we leave the outer span.
ordinal.times do
span = repeater.next(:future)
if span.begin >= outer_span.end
span = nil
break
end
end
span
end
# Handle ordinal/repeater/separator/repeater
def handle_o_r_s_r(tokens, options)
outer_span = get_anchor([tokens[3]], options)
handle_orr(tokens[0..1], outer_span, options)
end
# Handle ordinal/repeater/grabber/repeater
def handle_o_r_g_r(tokens, options)
outer_span = get_anchor(tokens[2..3], options)
handle_orr(tokens[0..1], outer_span, options)
end
# support methods
# Return the full-day span for day_start, or narrow it using any time tokens.
def day_or_time(day_start, time_tokens, options)
outer_span = Span.new(day_start, day_start + (24 * 60 * 60))
if !time_tokens.empty?
self.now = outer_span.begin
get_anchor(dealias_and_disambiguate_times(time_tokens, options), options)
else
outer_span
end
end
# Resolve grabber/repeater tokens into a concrete Span anchored at self.now.
def get_anchor(tokens, options)
grabber = Grabber.new(:this)
pointer = :future
repeaters = get_repeaters(tokens)
repeaters.size.times { tokens.pop }
if tokens.first && tokens.first.get_tag(Grabber)
grabber = tokens.shift.get_tag(Grabber)
end
head = repeaters.shift
head.start = self.now
case grabber.type
when :last
outer_span = head.next(:past)
when :this
if options[:context] != :past and repeaters.size > 0
outer_span = head.this(:none)
else
outer_span = head.this(options[:context])
end
when :next
outer_span = head.next(:future)
else
raise "Invalid grabber"
end
if Chronic.debug
puts "Handler-class: #{head.class}"
puts "--#{outer_span}"
end
find_within(repeaters, outer_span, pointer)
end
# Collect the Repeater tags from the tokens, widest first.
def get_repeaters(tokens)
tokens.map { |token| token.get_tag(Repeater) }.compact.sort.reverse
end
# True when `day` does not exist in the given month (leap-year aware).
def month_overflow?(year, month, day)
if Date.leap?(year)
day > RepeaterMonth::MONTH_DAYS_LEAP[month - 1]
else
day > RepeaterMonth::MONTH_DAYS[month - 1]
end
rescue ArgumentError
false
end
# Recursively finds repeaters within other repeaters.
# Returns a Span representing the innermost time span
# or nil if no repeater union could be found
def find_within(tags, span, pointer)
puts "--#{span}" if Chronic.debug
return span if tags.empty?
head = tags.shift
head.start = (pointer == :future ? span.begin : span.end)
h = head.this(:none)
if span.cover?(h.begin) || span.cover?(h.end)
find_within(tags, h, pointer)
end
end
# Build a time, rolling an overflowing day into the first of the next
# month (or next year for December).
def time_with_rollover(year, month, day)
date_parts =
if month_overflow?(year, month, day)
if month == 12
[year + 1, 1, 1]
else
[year, month + 1, 1]
end
else
[year, month, day]
end
Chronic.time_class.local(*date_parts)
end
def dealias_and_disambiguate_times(tokens, options)
# handle aliases of am/pm
# 5:00 in the morning -> 5:00 am
# 7:00 in the evening -> 7:00 pm
day_portion_index = nil
tokens.each_with_index do |t, i|
if t.get_tag(RepeaterDayPortion)
day_portion_index = i
break
end
end
time_index = nil
tokens.each_with_index do |t, i|
if t.get_tag(RepeaterTime)
time_index = i
break
end
end
if day_portion_index && time_index
t1 = tokens[day_portion_index]
t1tag = t1.get_tag(RepeaterDayPortion)
case t1tag.type
when :morning
puts '--morning->am' if Chronic.debug
t1.untag(RepeaterDayPortion)
t1.tag(RepeaterDayPortion.new(:am))
when :afternoon, :evening, :night
puts "--#{t1tag.type}->pm" if Chronic.debug
t1.untag(RepeaterDayPortion)
t1.tag(RepeaterDayPortion.new(:pm))
end
end
# handle ambiguous times if :ambiguous_time_range is specified
if options[:ambiguous_time_range] != :none
ambiguous_tokens = []
tokens.each_with_index do |token, i|
ambiguous_tokens << token
next_token = tokens[i + 1]
if token.get_tag(RepeaterTime) && token.get_tag(RepeaterTime).type.ambiguous? && (!next_token || !next_token.get_tag(RepeaterDayPortion))
distoken = Token.new('disambiguator')
distoken.tag(RepeaterDayPortion.new(options[:ambiguous_time_range]))
ambiguous_tokens << distoken
end
end
tokens = ambiguous_tokens
end
tokens
end
end
end
|
apache-2.0
|
updownlife/campustore
|
seller.php
|
3528
|
<?php
// seller.php — lists the products offered by a single seller, with optional
// keyword search and prev/next pagination. Falls back to the index page when
// no seller id is supplied.
session_start();
include_once('includes/connection.php');
include_once('includes/product.php');
include_once('includes/user.php');

$page_title = "Products from this seller"; // BUGFIX: stray ".php" removed from title
include('includes/header.php');

$product = new Product();
$user = new user();

// Request parameters (untrusted input straight from the query string).
$seller_id = isset($_GET['seller']) ? $_GET['seller'] : ""; // getting seller id
// BUGFIX: $category_id was used below but never initialised; the pagination
// links pass it as "cate_id", so read it from that same parameter.
$category_id = isset($_GET['cate_id']) ? $_GET['cate_id'] : "";

// if seller id is not empty
if (!empty($seller_id)) {
    $seller_data = $user->fetch_user($seller_id);
    $page_num = isset($_GET['page_num']) ? (int)$_GET['page_num'] : 1; // cast: pagination arithmetic below
    if (isset($_GET['action']) and $_GET['action'] == 'search') {
        $keywords = $_GET['search_str'];
        $data = $product->search_seller($keywords, $page_num, $seller_id);
    } else {
        $data = $product->fetch_by_category_seller($category_id, $page_num, $seller_id);
    }
?>
<div class="box_center">
<h3>Products from seller: </h3> <?php echo htmlspecialchars($seller_data['first_name'] . " " . $seller_data['last_name']); /* escaped: XSS */ ?>
<table class="show_table">
<?php
if (empty($data)) {
    echo "<br><br><br><p>Nothing found!</p><br><br><br>";
}
$MAX_COLUMN = 4;
$column_count = 0;
foreach ($data as $item) {
    // Fall back to the default icon when the product has none on disk.
    $icon_path = "./uploads/icons/" . $item['icon'];
    if (!file_exists($icon_path) || is_dir($icon_path)) {
        $icon_path = "./uploads/icons/" . "default.png";
    }
    // Open a new table row every $MAX_COLUMN items.
    if ($column_count % $MAX_COLUMN == 0) {
        echo "<tr>";
    }
?>
<td>
<div class="item">
<a href="product_detail.php?id=<?php echo $item['id']; ?>"><img class="item_icon"
src=<?php echo $icon_path ?> alt="item
picture"><br><?php echo $item['name']; ?>
</a>
<br>
<?php
// BUGFIX: this used to be an HTML comment whose embedded PHP still executed
// and referenced the undefined $article variable. Kept fully commented out:
// date_default_timezone_set('America/Detroit');
// echo '<small>posted in ' . date('l jS', $item['create_date']) . '</small>';
?>
</div>
</td>
<?php
    $column_count++;
    if ($column_count % $MAX_COLUMN == 0) {
        echo "</tr>";
    }
}
?>
</table>
<div class="page_nav">
<?php
// "prev" link only when we are past the first page.
if ($page_num > 1) {
    echo "<a href='seller.php?cate_id=" . urlencode($category_id) . "&page_num=" . ($page_num - 1) . "&seller=" . urlencode($seller_id) . "'>prev </a>";
}
// Peek at the next page to decide whether a "next" link is needed.
if (isset($_GET['action']) and $_GET['action'] == 'search') {
    $next_data = $product->search_seller($keywords, $page_num + 1, $seller_id);
} else {
    $next_data = $product->fetch_by_category_seller($category_id, $page_num + 1, $seller_id);
}
if (count($next_data) > 0) {
    echo "<a href='seller.php?cate_id=" . urlencode($category_id) . "&page_num=" . ($page_num + 1) . "&seller=" . urlencode($seller_id) . "'>next</a>";
}
?>
</div>
</div>
<?php
    include('includes/footer.php');
} else { // if no seller id given go to index.php page
    include('includes/index.php');
}
?>
|
apache-2.0
|
nvdk/virtuoso-rpm-builder
|
README.md
|
1379
|
# virtuoso-rpm-builder
Virtuoso RPM builder will build an RPM for CentOS/Red Hat using the latest code from GitHub (the default is the develop/7 branch). The RPM should work on CentOS 7 and Red Hat Enterprise Linux 7. Use the following command to generate the RPM.
``` sh
docker run -v /local/path/to/rpm:/home/rpmbuild/RPMS tenforce/virtuoso-rpm-builder
```
Optionally a different branch can be selected, the following example will build an RPM of the latest stable.
``` sh
docker run -v /local/path/to/rpm:/home/rpmbuild/RPMS -e VIRT_BRANCH=stable/7 tenforce/virtuoso-rpm-builder
```
# contributing
1. Fork the repo on GitHub
2. Commit changes to a branch in your fork
3. Pull request "upstream" with your changes
NOTE: Be sure to merge the latest from "upstream" before making a pull request!
# copyright and licensing
Copyright 2015 TenForce bvba
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
|
apache-2.0
|
ydc201211/VirtualCampus
|
src/view/ImageControl.java
|
7455
|
package view;
import android.content.Context;
import android.graphics.Bitmap;
import android.graphics.Matrix;
import android.util.AttributeSet;
import android.util.FloatMath;
import android.view.MotionEvent;
import android.widget.ImageView;
/**
 * An {@link ImageView} that supports double-tap zoom, pinch zoom in/out and
 * drag-to-pan via a transformation {@link Matrix}. Touch events are expected
 * to be forwarded by the host through {@link #mouseDown}, {@link #mousePointDown},
 * {@link #mouseMove} and {@link #mouseUp}.
 *
 * The original (mojibake-encoded) comments were replaced with English ones
 * derived from how the fields are actually used.
 */
public class ImageControl extends ImageView {
    public ImageControl(Context context) {
        super(context);
    }

    public ImageControl(Context context, AttributeSet attrs) {
        super(context, attrs);
    }

    public ImageControl(Context context, AttributeSet attrs, int defStyle) {
        super(context, attrs, defStyle);
    }

    Matrix imgMatrix = null; // transformation matrix applied to the image

    static final int DOUBLE_CLICK_TIME_SPACE = 300; // max ms between taps to count as a double-tap
    static final int DOUBLE_POINT_DISTANCE = 10;    // min distance between two pointers for pinch handling
    static final int NONE = 0;
    static final int DRAG = 1; // single-pointer drag gesture
    static final int ZOOM = 2; // two-pointer zoom gesture
    private int mode = NONE;   // current gesture mode
    float bigScale = 3f;       // default zoom-in factor
    Boolean isBig = false;     // whether the image is currently zoomed in
    long lastClickTime = 0;    // timestamp of the previous tap
    float startDistance;       // pointer distance when a pinch starts
    float endDistance;         // pointer distance while a pinch moves
    float topHeight;           // status-bar plus title-bar height
    Bitmap primaryBitmap = null;
    float contentW;            // width of the on-screen content area
    float contentH;            // height of the on-screen content area
    float primaryW;            // original bitmap width
    float primaryH;            // original bitmap height
    float scale;               // fit-to-screen scale factor
    Boolean isMoveX = true;    // whether dragging along X is allowed
    Boolean isMoveY = true;    // whether dragging along Y is allowed
    float startX;
    float startY;
    float endX;
    float endY;
    float subX;                // horizontal margin when the image is fitted
    float subY;                // vertical margin when the image is fitted
    float limitX1;             // pan limits while zoomed in
    float limitX2;
    float limitY1;
    float limitY2;
    ICustomMethod mCustomMethod = null;

    /**
     * Initializes the control with the bitmap to display, scaled to fit the
     * content area and centered via the transformation matrix.
     *
     * @param bitmap        the image to show
     * @param contentW      width of the content area
     * @param contentH      height of the content area
     * @param topHeight     combined status-bar and title-bar height
     * @param iCustomMethod callback fired whenever the zoom state changes
     */
    public void imageInit(Bitmap bitmap, int contentW, int contentH,
            int topHeight, ICustomMethod iCustomMethod) {
        this.primaryBitmap = bitmap;
        this.contentW = contentW;
        this.contentH = contentH;
        this.topHeight = topHeight;
        mCustomMethod = iCustomMethod;
        primaryW = primaryBitmap.getWidth();
        primaryH = primaryBitmap.getHeight();
        float scaleX = (float) contentW / primaryW;
        float scaleY = (float) contentH / primaryH;
        scale = scaleX < scaleY ? scaleX : scaleY;
        // For images smaller than the screen, keep the zoom factor slightly
        // above the fit scale so double-tap still enlarges noticeably.
        if (scale < 1 && 1 / scale < bigScale) {
            bigScale = (float) (1 / scale + 0.5);
        }
        imgMatrix = new Matrix();
        subX = (contentW - primaryW * scale) / 2;
        subY = (contentH - primaryH * scale) / 2;
        this.setImageBitmap(primaryBitmap);
        this.setScaleType(ScaleType.MATRIX);
        imgMatrix.postScale(scale, scale);
        imgMatrix.postTranslate(subX, subY);
        this.setImageMatrix(imgMatrix);
    }

    /**
     * First-pointer press: detects double-taps (toggle zoom) and, when
     * already zoomed in, arms drag mode.
     *
     * @param event the touch event
     */
    public void mouseDown(MotionEvent event) {
        mode = NONE;
        startX = event.getRawX();
        startY = event.getRawY();
        if (event.getPointerCount() == 1) {
            // Two taps closer than DOUBLE_CLICK_TIME_SPACE count as a double-tap.
            if (event.getEventTime() - lastClickTime < DOUBLE_CLICK_TIME_SPACE) {
                changeSize(startX, startY);
            } else if (isBig) {
                mode = DRAG;
            }
        }
        lastClickTime = event.getEventTime();
    }

    /**
     * Secondary-pointer press: arms zoom mode when the pointers are far
     * enough apart to be a deliberate pinch.
     *
     * @param event the touch event
     */
    public void mousePointDown(MotionEvent event) {
        startDistance = getDistance(event);
        if (startDistance > DOUBLE_POINT_DISTANCE) {
            mode = ZOOM;
        } else {
            mode = NONE;
        }
    }

    /**
     * Pointer move: pans the image (clamped to the limit rectangle) in DRAG
     * mode, or toggles the zoom state in ZOOM mode once the pinch distance
     * change exceeds the threshold.
     *
     * @param event the touch event
     */
    public void mouseMove(MotionEvent event) {
        if ((mode == DRAG) && (isMoveX || isMoveY)) {
            float[] XY = getTranslateXY(imgMatrix);
            float transX = 0;
            float transY = 0;
            if (isMoveX) {
                endX = event.getRawX();
                transX = endX - startX;
                // Clamp the translation so the image edge never leaves the limits.
                if ((XY[0] + transX) <= limitX1) {
                    transX = limitX1 - XY[0];
                }
                if ((XY[0] + transX) >= limitX2) {
                    transX = limitX2 - XY[0];
                }
            }
            if (isMoveY) {
                endY = event.getRawY();
                transY = endY - startY;
                if ((XY[1] + transY) <= limitY1) {
                    transY = limitY1 - XY[1];
                }
                if ((XY[1] + transY) >= limitY2) {
                    transY = limitY2 - XY[1];
                }
            }
            imgMatrix.postTranslate(transX, transY);
            startX = endX;
            startY = endY;
            this.setImageMatrix(imgMatrix);
        } else if (mode == ZOOM && event.getPointerCount() > 1) {
            endDistance = getDistance(event);
            float dif = endDistance - startDistance;
            if (Math.abs(endDistance - startDistance) > DOUBLE_POINT_DISTANCE) {
                if (isBig) {
                    // Pinch-in while zoomed: restore the fitted view.
                    if (dif < 0) {
                        changeSize(0, 0);
                        mode = NONE;
                    }
                } else if (dif > 0) {
                    // Pinch-out while fitted: zoom in around the pinch midpoint.
                    float x = event.getX(0) / 2 + event.getX(1) / 2;
                    float y = event.getY(0) / 2 + event.getY(1) / 2;
                    changeSize(x, y);
                    mode = NONE;
                }
            }
        }
    }

    /**
     * Pointer release: ends the current gesture.
     */
    public void mouseUp() {
        mode = NONE;
    }

    /**
     * Toggles between the fitted view and the zoomed view, keeping the tapped
     * point visible and clamping so no blank margins appear.
     *
     * @param x tap X coordinate
     * @param y tap Y coordinate (raw; topHeight is subtracted internally)
     */
    private void changeSize(float x, float y) {
        if (isBig) {
            // Currently zoomed in: reset to the fitted, centered view.
            imgMatrix.reset();
            imgMatrix.postScale(scale, scale);
            imgMatrix.postTranslate(subX, subY);
            isBig = false;
        } else {
            imgMatrix.postScale(bigScale, bigScale); // scale up on top of the fit matrix
            float transX = -((bigScale - 1) * x);
            float transY = -((bigScale - 1) * (y - topHeight)); // (bigScale-1)(y-statusBarHeight-subY)+2*subY;
            float currentWidth = primaryW * scale * bigScale;   // zoomed image size
            float currentHeight = primaryH * scale * bigScale;
            // If the zoomed image overflows the screen, set up pan limits.
            if (currentHeight > contentH) {
                limitY1 = -(currentHeight - contentH); // pan limit
                limitY2 = 0;
                isMoveY = true; // allow vertical dragging
                float currentSubY = bigScale * subY; // current translation offset
                // Avoid a blank band at the top of the content area.
                if (-transY < currentSubY) {
                    transY = -currentSubY;
                }
                // Avoid a blank band at the bottom of the content area.
                if (currentSubY + transY < limitY1) {
                    transY = -(currentHeight + currentSubY - contentH);
                }
            } else {
                // Image still fits vertically: no vertical dragging.
                isMoveY = false;
            }
            if (currentWidth > contentW) {
                limitX1 = -(currentWidth - contentW);
                limitX2 = 0;
                isMoveX = true;
                float currentSubX = bigScale * subX;
                if (-transX < currentSubX) {
                    transX = -currentSubX;
                }
                if (currentSubX + transX < limitX1) {
                    transX = -(currentWidth + currentSubX - contentW);
                }
            } else {
                isMoveX = false;
            }
            imgMatrix.postTranslate(transX, transY);
            isBig = true;
        }
        this.setImageMatrix(imgMatrix);
        if (mCustomMethod != null) {
            mCustomMethod.customMethod(isBig);
        }
    }

    /**
     * Extracts the X/Y translation components from a transformation matrix.
     *
     * @param matrix the transformation matrix
     * @return a two-element array {transX, transY}
     */
    private float[] getTranslateXY(Matrix matrix) {
        float[] values = new float[9];
        matrix.getValues(values);
        float[] floats = new float[2];
        floats[0] = values[Matrix.MTRANS_X];
        floats[1] = values[Matrix.MTRANS_Y];
        return floats;
    }

    /**
     * Distance between the first two pointers of a touch event.
     *
     * @param event the touch event
     * @return the Euclidean distance in pixels
     */
    private float getDistance(MotionEvent event) {
        float x = event.getX(0) - event.getX(1);
        float y = event.getY(0) - event.getY(1);
        // BUGFIX: FloatMath.sqrt is deprecated and removed in API 23;
        // Math.sqrt yields the same value here.
        return (float) Math.sqrt(x * x + y * y);
    }

    /**
     * Callback notified whenever the zoom state toggles.
     */
    public interface ICustomMethod {
        public void customMethod(Boolean currentStatus);
    }
}
|
apache-2.0
|
tensorflow/tfhub.dev
|
assets/docs/deepmind/models/mmt/data_combined-dataset/1.md
|
11206
|
# Module deepmind/mmt/data_combined-dataset/1
A multimodal transformer which outputs a score indicating if image and text match.
<!-- asset-path: internal -->
<!-- task: text-language-model -->
<!-- fine-tunable: true -->
<!-- format: hub -->
## Overview
A multimodal transformer model as described in "Decoupling the Role of Data,
Attention, and Losses in Multimodal Transformers". This model can be used to
score if an image-text pair match. Please see our paper for more details and
[colab](https://github.com/deepmind/multimodal_transformers) for example use.
This model is our `data_combined-dataset` model. Please see the table below for details on
model specifications (including this model as well as other released models).
Name | Training Dataset | ITM | MRM | MLM | Heads | Layers | Att. Type | FineTuned | Notes
------------------------------------ | ----------------------------------- | -------------- | --- | --- | ----- | ------ | --------------------- | --------- | -----
data_cc (base) | Conceptual Captions | Classification | Y | Y | 12 | 6 | Merged | N |
data_sbu | SBU | Classification | Y | Y | 12 | 6 | Merged | N |
data_vg | Visual Genome | Classification | Y | Y | 12 | 6 | Merged | N |
data_mscoco | MSCOCO | Classification | Y | Y | 12 | 6 | Merged | N |
data_mscoco-narratives | MSCOCO Narratives | Classification | Y | Y | 12 | 6 | Merged | N |
data_oi-narratives | OI Narratives | Classification | Y | Y | 12 | 6 | Merged | N |
data_combined-instance | All (instance sampling) | Classification | Y | Y | 12 | 6 | Merged | N |
data_combined-dataset | All (dataset sampling) | Classification | Y | Y | 12 | 6 | Merged | N |
data_uniter-instance | Uniter datasets (instance sampling) | Classification | Y | Y | 12 | 6 | Merged | N |
data_uniter-dataset | Uniter datasets (dataset sampling) | Classification | Y | Y | 12 | 6 | Merged | N |
data_cc-with-bert | Conceptual Captions | Classification | Y | Y | 12 | 6 | Merged | N | Language initialised with BERT
loss_itm_mrm | Conceptual Captions | Classification | Y | N | 12 | 6 | Merged | N |
loss_itm_mlm | Conceptual Captions | Classification | N | Y | 12 | 6 | Merged | N |
loss_single-modality-contrastive32 | Conceptual Captions | Contrastive | Y | Y | 12 | 6 | Sing. Modality | N |
loss_single-modality-contrastive1024 | Conceptual Captions | Contrastive | Y | Y | 12 | 6 | Sing. Modality | N |
loss_v1-contrastive32 | Conceptual Captions | Contrastive | Y | Y | 12 | 1 | Merged | N |
architecture_heads1-768 | Conceptual Captions | Classification | Y | Y | 1 | 6 | Merged | N |
architecture_heads3-256 | Conceptual Captions | Classification | Y | Y | 3 | 6 | Merged | N |
architecture_heads6-64 | Conceptual Captions | Classification | Y | Y | 6 | 6 | Merged | N |
architecture_heads18-64 | Conceptual Captions | Classification | Y | Y | 18 | 6 | Merged | N |
architecture_vilbert-1block | Conceptual Captions | Classification | Y | Y | 12 | 1 | Merged | N |
architecture_vilbert-2block | Conceptual Captions | Classification | Y | Y | 12 | 2 | Merged | N |
architecture_vilbert-4block | Conceptual Captions | Classification | Y | Y | 12 | 4 | Merged | N |
architecture_vilbert-12block | Conceptual Captions | Classification | Y | Y | 12 | 12 | Merged | N |
architecture_single-modality | Conceptual Captions | Classification | Y | Y | 12 | 6 | Sing. Modality | N |
architecture_mixed-modality | Conceptual Captions | Classification | Y | Y | 12 | 6 | Mix Modality | N | 5 single modality layers and 1 merged layer
architecture_single-stream | Conceptual Captions | Classification | Y | Y | 12 | 6 | Single Stream | N |
architecture_language-q-12 | Conceptual Captions | Classification | Y | Y | 12 | 6 | Asymmetric (language) | N |
architecture_image-q-12 | Conceptual Captions | Classification | Y | Y | 12 | 6 | Asymmetric (image) | N |
architecture_language-q-24 | Conceptual Captions | Classification | Y | Y | 24 | 6 | Asymmetric (language) | N |
architecture_image-q-24 | Conceptual Captions | Classification | Y | Y | 24 | 6 | Asymmetric (image) | N |
architecture_single-modality-hloss | Conceptual Captions | Classification | Y | Y | 12 | 6 | Single modality | N | Includes ITM loss after every layer
data-ft_sbu | SBU | Classification | Y | Y | 12 | 6 | Merged | Y |
data-ft_vg | Visual Genome | Classification | Y | Y | 12 | 6 | Merged | Y |
data-ft_mscoco | MSCOCO | Classification | Y | Y | 12 | 6 | Merged | Y |
data-ft_mscoco-narratives | MSCOCO Narratives | Classification | Y | Y | 12 | 6 | Merged | Y |
data-ft_oi-narratives | OI Narratives | Classification | Y | Y | 12 | 6 | Merged | Y |
data-ft_cc | Conceptual Captions | Classification | Y | Y | 12 | 6 | Merged | Y |
data-ft_combined-instance | All (instance sampling) | Classification | Y | Y | 12 | 6 | Merged | Y |
data-ft_combined-dataset | All (dataset sampling) | Classification | Y | Y | 12 | 6 | Merged | Y |
data-ft_uniter-instance | Uniter datasets (instance sampling) | Classification | Y | Y | 12 | 6 | Merged | Y |
data-ft_uniter-dataset | Uniter datasets (dataset sampling) | Classification | Y | Y | 12 | 6 | Merged | Y |
architecture-ft_single-modality | Conceptual Captions | Classification | Y | Y | 12 | 6 | Sing. Modality | Y |
architecture-ft_single-stream | Conceptual Captions | Classification | Y | Y | 12 | 6 | Single Stream | Y |
architecture-ft_language-q-12 | Conceptual Captions | Classification | Y | Y | 12 | 6 | Asymmetric (language) | Y |
architecture-ft_image-q-12 | Conceptual Captions | Classification | Y | Y | 12 | 6 | Asymmetric (image) | Y |
architecture-ft_language-q-24 | Conceptual Captions | Classification | Y | Y | 24 | 6 | Asymmetric (language) | Y |
architecture-ft_image-q-24 | Conceptual Captions | Classification | Y | Y | 24 | 6 | Asymmetric (image) | Y |
In addition to our transformer models, we also release our baseline models. See details of our baseline models in the chart below:
| Name | ITM | Bert Initialisation | FineTuned |
|---------------------------------------|----------------|---------------------|-----------|
| baseline_baseline | Contrastive | Yes | N |
| baseline_baseline-cls | Classification | No | N |
| baseline_baseline-no-bert-transfer | Contrastive | No | N |
| baseline-ft_baseline | Contrastive | Yes | Y |
| baseline-ft_baseline-cls | Classification | No | Y |
| baseline-ft_baseline-no-bert-transfer | Contrastive | No | Y |
### Example use
You can run an image and text pair through our module and see if the image and
text pair match.
```python
import tensorflow.compat.v1 as tf
import tensorflow_hub as hub

model = hub.Module('https://tfhub.dev/deepmind/mmt/data_combined-dataset/1')
```
Inference:
```python
output = model.signatures['default'](**inputs)
score = tf.nn.softmax(output['output']).numpy()[0]
```
where `score` indicates whether an image-text pair matches (`1` indicates a
perfect match). `inputs` is a dictionary with the following keys:
* `image/bboxes`: Coordinates of detected image bounding boxes.
* `image/detection_features`: Features from image detector.
* `image/padding_mask`: Indicator if image features are padded.
* `masked_tokens`: Text tokens
* `text/segment_ids`: Indicates the sentence segment. (Since we train with one sentence this will always be 0.)
* `text/token_ids`: Indicates which word each token belongs to. (We use a tokenizer which can break one word into multiple tokens.)
* `text/padding_mask`: Indicator if text features are padded.
Please see the colab linked in our arxiv paper for details on pre-processing.
Note you will need to use our detector to see good results (instructions in colab).
## References
If you use this model in your research please cite:
[1] Lisa Anne Hendricks, John Mellor, Rosalia Schneider, Jean-Baptiste Alayrac,
and Aida Nematzadeh.
[Decoupling the Role of Data, Attention, and Losses
in Multimodal Transformers](https://arxiv.org/pdf/2102.00529.pdf),
TACL 2021.
|
apache-2.0
|
realityforge/buildr-osgi-assembler
|
lib/buildr/osgi/ordered_hash.rb
|
4530
|
# AUTHOR
# jan molic /mig/at/1984/dot/cz/
#
# DESCRIPTION
# Hash with preserved order and some array-like extensions
# Public domain.
#
# THANKS
# Andrew Johnson for his suggestions and fixes of Hash[],
# merge, to_a, inspect and shift
module Buildr
  module OSGi
    # Hash subclass that preserves key insertion order (pre-1.9 Ruby hashes
    # were unordered) and adds array-like operations (push/pop/shift/unshift).
    # The key order is tracked in the parallel @order array, which every
    # mutating method keeps in sync with the underlying Hash storage.
    class OrderedHash < ::Hash
      # Keys in insertion order; mutators below keep this in sync.
      attr_accessor :order

      class << self
        # Hash[]-style constructor: accepts either a Hash to copy or a flat
        # list of key/value pairs (raises on an odd-length list).
        def [] *args
          hsh = OrderedHash.new
          if Hash === args[0]
            hsh.replace args[0]
          elsif (args.size % 2) != 0
            raise ArgumentError, "odd number of elements for Hash"
          else
            0.step(args.size - 1, 2) do |a|
              b = a + 1
              hsh[args[a]] = args[b]
            end
          end
          hsh
        end
      end

      def initialize(*a, &b)
        super
        @order = []
      end

      # Thin wrapper over store (dispatches to the order-tracking store below).
      def store_only a, b
        store a, b
      end

      # Original Hash#store, used by unshift/push to bypass order tracking.
      alias orig_store store

      # Stores a value and records the key in @order on first insertion.
      def store a, b
        @order.push a unless has_key? a
        super a, b
      end
      alias []= store

      # Equality is stricter than Hash#==: key order must match as well.
      def == hsh2
        return false if @order != hsh2.order
        super hsh2
      end

      def clear
        @order = []
        super
      end

      def delete key
        @order.delete key
        super
      end

      # Iteration methods walk @order so yields happen in insertion order.
      def each_key
        @order.each { |k| yield k }
        self
      end

      def each_value
        @order.each { |k| yield self[k] }
        self
      end

      def each
        @order.each { |k| yield k, self[k] }
        self
      end
      alias each_pair each

      def delete_if
        # Iterate over a clone: delete() mutates @order during the walk.
        @order.clone.each { |k|
          delete k if yield(k)
        }
        self
      end

      def values
        ary = []
        @order.each { |k| ary.push self[k] }
        ary
      end

      def keys
        @order
      end

      # First/last insertion as a single-pair Hash.
      def first
        {@order.first => self[@order.first]}
      end

      def last
        {@order.last => self[@order.last]}
      end

      def invert
        hsh2 = Hash.new
        @order.each { |k| hsh2[self[k]] = k }
        hsh2
      end

      def reject &block
        self.dup.delete_if &block
      end

      # Returns nil when nothing was rejected, per Hash#reject! convention.
      def reject! &block
        hsh2 = reject &block
        self == hsh2 ? nil : hsh2
      end

      def replace hsh2
        @order = hsh2.keys
        super hsh2
      end

      # Removes and returns the oldest [key, value] pair.
      def shift
        key = @order.first
        key ? [key, delete(key)] : super
      end

      # Prepends a pair; returns false if the key already exists.
      def unshift k, v
        unless self.include? k
          @order.unshift k
          orig_store(k, v)
          true
        else
          false
        end
      end

      # Appends a pair; returns false if the key already exists.
      def push k, v
        unless self.include? k
          @order.push k
          orig_store(k, v)
          true
        else
          false
        end
      end

      # Removes and returns the newest [key, value] pair, or nil when empty.
      def pop
        key = @order.last
        key ? [key, delete(key)] : nil
      end

      def to_a
        ary = []
        each { |k, v| ary << [k, v] }
        ary
      end

      def to_s
        self.to_a.to_s
      end

      def inspect
        ary = []
        each {|k, v| ary << k.inspect + "=>" + v.inspect}
        '{' + ary.join(", ") + '}'
      end

      def update hsh2
        hsh2.each { |k, v| self[k] = v }
        self
      end
      alias :merge! update

      def merge hsh2
        self.dup.update(hsh2)
      end

      # Like Hash#select but returns [key, value] pairs in insertion order.
      def select
        ary = []
        each { |k, v| ary << [k, v] if yield k, v }
        ary
      end

      # Masquerades as Hash so type checks (x.class == Hash) still pass;
      # use __class__ to discover the real class.
      def class
        Hash
      end

      def __class__
        OrderedHash
      end

      attr_accessor "to_yaml_style"

      # Switches YAML emission to inline ({ k: v }) style. On YAML engines
      # that support to_yaml_style the flag is set directly; otherwise the
      # singleton's to_yaml is patched with a quick-emit lambda.
      # NOTE(review): the fallback targets a legacy YAML API — confirm it is
      # still exercised on the supported Ruby versions.
      def yaml_inline= bool
        if respond_to?("to_yaml_style")
          self.to_yaml_style = :inline
        else
          unless defined? @__yaml_inline_meth
            @__yaml_inline_meth =
              lambda {|opts|
                YAML::quick_emit(object_id, opts) {|emitter|
                  emitter << '{ ' << map{|kv| kv.join ': '}.join(', ') << ' }'
                }
              }
            class << self
              def to_yaml opts = {}
                begin
                  @__yaml_inline ? @__yaml_inline_meth[ opts ] : super
                rescue
                  @to_yaml_style = :inline
                  super
                end
              end
            end
          end
        end
        @__yaml_inline = bool
      end

      def yaml_inline!()
        self.yaml_inline = true
      end

      # Yields key, value and the insertion index for each pair.
      def each_with_index
        @order.each_with_index { |k, index| yield k, self[k], index }
        self
      end
    end # class OrderedHash
  end
end
|
apache-2.0
|
joshholl/intellij-csharp
|
gen/com/github/joshholl/intellij/csharp/lang/psi/CSharpThisAccess.java
|
271
|
// This is a generated file. Not intended for manual editing.
package com.github.joshholl.intellij.csharp.lang.psi;
import java.util.List;
import org.jetbrains.annotations.*;
import com.intellij.psi.PsiElement;
/**
 * PSI marker interface for a C# {@code this}-access node.
 * Generated from the grammar (see file header) — do not edit manually.
 */
public interface CSharpThisAccess extends PsiElement {
}
|
apache-2.0
|
beav/netty-ant
|
doc/xref/org/jboss/netty/example/portunification/package-summary.html
|
2295
|
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "DTD/xhtml1-transitional.dtd">
<html xml:lang="en" lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=UTF-8" />
<title>The Netty Project Source Xref (3.3.1.Final) Package org.jboss.netty.example.portunification</title>
<link rel="stylesheet" type="text/css" href="../../../../../stylesheet.css" title="style" />
</head>
<body>
<div class="overview">
<ul>
<li>
<a href="../../../../../overview-summary.html">Overview</a>
</li>
<li class="selected">Package</li>
</ul>
</div>
<div class="framenoframe">
<ul>
<li>
<a href="../../../../../index.html" target="_top">FRAMES</a>
</li>
<li>
<a href="package-summary.html" target="_top">NO FRAMES</a>
</li>
</ul>
</div>
<h2>Package org.jboss.netty.example.portunification</h2>
<table class="summary">
<thead>
<tr>
<th>Class Summary</th>
</tr>
</thead>
<tbody>
<tr>
<td>
<a href="PortUnificationServer.html" target="classFrame">PortUnificationServer</a>
</td>
</tr>
<tr>
<td>
<a href="PortUnificationServerHandler.html" target="classFrame">PortUnificationServerHandler</a>
</td>
</tr>
</tbody>
</table>
<div class="overview">
<ul>
<li>
<a href="../../../../../overview-summary.html">Overview</a>
</li>
<li class="selected">Package</li>
</ul>
</div>
<div class="framenoframe">
<ul>
<li>
<a href="../../../../../index.html" target="_top">FRAMES</a>
</li>
<li>
<a href="package-summary.html" target="_top">NO FRAMES</a>
</li>
</ul>
</div>
<hr />
Copyright © 2008-2012 The Netty Project. All Rights Reserved.
</body>
</html>
|
apache-2.0
|
linkedin/WhereHows
|
li-utils/src/test/java/com/linkedin/common/util/RecordUtilsTest.java
|
16237
|
package com.linkedin.common.util;
import com.datahub.test.testing.AspectBar;
import com.datahub.test.testing.AspectBaz;
import com.datahub.test.testing.AspectFoo;
import com.datahub.test.testing.AspectFooArray;
import com.datahub.test.testing.AspectInvalid;
import com.datahub.test.testing.EntitySnapshot;
import com.datahub.test.testing.EntityValueArray;
import com.datahub.test.testing.MixedRecord;
import com.datahub.test.testing.StringUnion;
import com.datahub.test.testing.StringUnionArray;
import com.datahub.test.testing.singleaspectentity.EntityValue;
import com.datahub.test.testing.urn.FooUrn;
import com.datahub.util.ModelUtils;
import com.datahub.util.RecordUtils;
import com.datahub.util.exception.InvalidSchemaException;
import com.datahub.util.exception.ModelConversionException;
import com.datahub.util.validator.ValidationUtils;
import com.linkedin.common.urn.Urn;
import com.linkedin.data.schema.PathSpec;
import com.linkedin.data.schema.RecordDataSchema;
import com.linkedin.data.template.RecordTemplate;
import com.linkedin.data.template.StringArray;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.Optional;
import org.apache.commons.io.IOUtils;
import org.testng.annotations.Test;
import static com.datahub.utils.TestUtils.*;
import static org.testng.Assert.*;
/**
 * Unit tests for {@link RecordUtils} (plus two {@link ModelUtils}
 * snapshot-class lookups): JSON (de)serialization of {@link RecordTemplate}s,
 * record-schema field lookup, reflective get/set of primitive, complex and
 * union fields, and {@code getFieldValue} PathSpec traversal over primitives,
 * typerefs, nested records, arrays and unions of the generated test models.
 */
public class RecordUtilsTest {
  @Test
  public void testToJsonString() throws IOException {
    AspectFoo foo = new AspectFoo().setValue("foo");
    // Strip all whitespace/newlines so formatting of the fixture doesn't matter.
    String expected =
        loadJsonFromResource("foo.json").replaceAll("\\s+", "").replaceAll("\\n", "").replaceAll("\\r", "");
    String actual = RecordUtils.toJsonString(foo);
    assertEquals(actual, expected);
  }

  @Test
  public void testToRecordTemplate() throws IOException {
    AspectFoo expected = new AspectFoo().setValue("foo");
    String jsonString = loadJsonFromResource("foo.json");
    AspectFoo actual = RecordUtils.toRecordTemplate(AspectFoo.class, jsonString);
    assertEquals(actual, expected);
    // Also cover the overload that resolves the class from its name.
    RecordTemplate actual2 = RecordUtils.toRecordTemplate(AspectFoo.class.getCanonicalName(), expected.data());
    assertEquals(actual2.getClass(), AspectFoo.class);
    assertEquals(actual2, expected);
  }

  @Test(expectedExceptions = ModelConversionException.class)
  public void testToRecordTemplateFromInvalidString() {
    RecordUtils.toRecordTemplate(AspectFoo.class, "invalid_json");
  }

  @Test
  public void testGetValidRecordDataSchemaField() {
    RecordDataSchema schema = ValidationUtils.getRecordSchema(AspectFoo.class);
    RecordDataSchema.Field expected = schema.getField("value");
    assertEquals(RecordUtils.getRecordDataSchemaField(new AspectFoo().setValue("foo"), "value"), expected);
  }

  @Test(expectedExceptions = InvalidSchemaException.class)
  public void testGetInvalidRecordDataSchemaField() {
    RecordUtils.getRecordDataSchemaField(new AspectFoo().setValue("foo"), "non-existing-field");
  }

  @Test
  public void testSetRecordTemplatePrimitiveField() {
    AspectBaz baz = new AspectBaz();
    RecordUtils.setRecordTemplatePrimitiveField(baz, "boolField", Boolean.FALSE);
    RecordUtils.setRecordTemplatePrimitiveField(baz, "stringField", "baz");
    RecordUtils.setRecordTemplatePrimitiveField(baz, "longField", Long.valueOf(1234L));
    assertFalse(baz.isBoolField());
    assertEquals(baz.getStringField(), "baz");
    assertEquals(baz.getLongField(), Long.valueOf(1234L));
  }

  @Test
  public void testSetRecordTemplateComplexField() throws IOException {
    AspectBaz baz = new AspectBaz();
    StringArray stringArray = new StringArray(Arrays.asList("1", "2", "3"));
    RecordUtils.setRecordTemplateComplexField(baz, "arrayField", stringArray);
    AspectFoo foo = new AspectFoo().setValue("foo");
    RecordUtils.setRecordTemplateComplexField(baz, "recordField", foo);
    assertEquals(baz.getArrayField(), stringArray);
    assertEquals(baz.getRecordField(), foo);
  }

  @Test
  public void testGetRecordTemplatePrimitiveField() throws IOException {
    AspectBaz baz = loadAspectBaz("baz.json");
    assertTrue(RecordUtils.getRecordTemplateField(baz, "boolField", Boolean.class));
    assertEquals(RecordUtils.getRecordTemplateField(baz, "stringField", String.class), "baz");
    assertEquals(RecordUtils.getRecordTemplateField(baz, "longField", Long.class), Long.valueOf(1234L));
  }

  @Test
  public void testGetRecordTemplateUrnField() {
    Urn urn = makeUrn(1);
    EntitySnapshot snapshot = new EntitySnapshot().setUrn(urn);
    assertEquals(RecordUtils.getRecordTemplateField(snapshot, "urn", Urn.class), urn);
  }

  @Test
  public void testGetRecordTemplateWrappedField() throws IOException {
    AspectBaz baz = loadAspectBaz("baz.json");
    StringArray stringArray = RecordUtils.getRecordTemplateWrappedField(baz, "arrayField", StringArray.class);
    assertEquals(stringArray.toArray(), new String[]{"1", "2", "3"});
  }

  @Test
  public void testGetSelectedRecordTemplateFromUnion() throws IOException {
    AspectBaz baz = new AspectBaz();
    baz.setUnionField(new AspectBaz.UnionField());
    baz.getUnionField().setAspectFoo(new AspectFoo().setValue("foo"));
    RecordTemplate selected = RecordUtils.getSelectedRecordTemplateFromUnion(baz.getUnionField());
    assertEquals(selected.getClass(), AspectFoo.class);
  }

  @Test
  public void testSetSelectedRecordTemplateInUnion() throws IOException {
    AspectBaz baz = new AspectBaz();
    baz.setUnionField(new AspectBaz.UnionField());
    AspectFoo expected = new AspectFoo().setValue("foo");
    RecordUtils.setSelectedRecordTemplateInUnion(baz.getUnionField(), expected);
    assertEquals(baz.getUnionField().getAspectFoo(), expected);
  }

  @Test
  public void testGetValidMetadataSnapshotClassFromName() {
    Class<? extends RecordTemplate> actualClass =
        ModelUtils.getMetadataSnapshotClassFromName(EntitySnapshot.class.getCanonicalName());
    assertEquals(actualClass, EntitySnapshot.class);
  }

  @Test(expectedExceptions = InvalidSchemaException.class)
  public void testGetInvalidMetadataSnapshotClassFromName() {
    ModelUtils.getMetadataSnapshotClassFromName(AspectInvalid.class.getCanonicalName());
  }

  @Test
  public void testExtractAspectFromSingleAspectEntity() {
    String field1 = "foo";
    EntityValue value = new EntityValue();
    value.setValue(field1);
    AspectBar aspect = new AspectBar();
    aspect.setValue(field1);
    assertEquals(RecordUtils.extractAspectFromSingleAspectEntity(value, AspectBar.class), aspect);
  }

  @Test(description = "Test getFieldValue() when RecordTemplate has primitive fields")
  public void testGetFieldValuePrimitive() {
    // case 1: string field set, bool field isn't set, default field should return default value
    final MixedRecord mixedRecord1 = new MixedRecord().setValue("fooVal1");
    PathSpec ps1 = MixedRecord.fields().value();
    PathSpec ps2 = MixedRecord.fields().flag();
    PathSpec ps3 = MixedRecord.fields().defaultField();
    Optional<Object> o1 = RecordUtils.getFieldValue(mixedRecord1, ps1);
    Optional<Object> o2 = RecordUtils.getFieldValue(mixedRecord1, ps2);
    Optional<Object> o3 = RecordUtils.getFieldValue(mixedRecord1, ps3);
    assertEquals(o1.get(), "fooVal1");
    assertFalse(o2.isPresent());
    assertEquals(o3.get(), "defaultVal");
    assertEquals(ps1.toString(), "/value");
    assertEquals(ps2.toString(), "/flag");
    assertEquals(ps3.toString(), "/defaultField");
    // case 2: string and bool field both set
    final MixedRecord mixedRecord2 = new MixedRecord().setValue("fooVal2").setFlag(true);
    Object o4 = RecordUtils.getFieldValue(mixedRecord2, MixedRecord.fields().value()).get();
    Object o5 = RecordUtils.getFieldValue(mixedRecord2, MixedRecord.fields().flag()).get();
    assertEquals(o4, "fooVal2");
    assertEquals(o5, true);
    // case 3: similar to case1, just that pegasus path as string is used as input
    Object o6 = RecordUtils.getFieldValue(mixedRecord1, "/value");
    assertEquals(o6, o1);
  }

  @Test(description = "Test getFieldValue() when RecordTemplate has TypeRef field")
  public void testGetFieldValueTypeRef() {
    // case 1: Urn as the TypeRef
    FooUrn urn = makeFooUrn(1);
    final MixedRecord mixedRecord1 = new MixedRecord().setFooUrn(urn);
    PathSpec ps1 = MixedRecord.fields().fooUrn();
    Object o1 = RecordUtils.getFieldValue(mixedRecord1, ps1).get();
    assertEquals(o1, urn);
    assertEquals(ps1.toString(), "/fooUrn");
    // case 2: TypeRef defined in the same pdl
    final MixedRecord mixedRecord2 = new MixedRecord().setIntTypeRef(2);
    PathSpec ps2 = MixedRecord.fields().intTypeRef();
    Object o2 = RecordUtils.getFieldValue(mixedRecord2, ps2).get();
    assertEquals(o2, 2);
    assertEquals(ps2.toString(), "/intTypeRef");
    // case 3: TypeRef for Record field reference
    AspectFoo aspectFoo = new AspectFoo().setValue("fooVal");
    PathSpec ps3 = MixedRecord.fields().recordTypeRef().value();
    final MixedRecord mixedRecord3 = new MixedRecord().setRecordTypeRef(aspectFoo);
    Object o3 = RecordUtils.getFieldValue(mixedRecord3, ps3).get();
    assertEquals(o3, "fooVal");
    assertEquals(ps3.toString(), "/recordTypeRef/value");
  }

  @Test(description = "Test getFieldValue() when RecordTemplate has another field of Record type")
  public void testGetFieldValueRecordType() {
    // case 1: referencing a field inside a RecordTemplate, one level deep.
    AspectFoo foo1 = new AspectFoo().setValue("fooVal1");
    MixedRecord mixedRecord1 = new MixedRecord().setRecordField(foo1);
    PathSpec ps1f1 = MixedRecord.fields().recordField().value();
    PathSpec ps1f2 =
        MixedRecord.fields().nestedRecordField().foo().value(); // referencing a nullable record template field
    Optional<Object> o1f1 = RecordUtils.getFieldValue(mixedRecord1, ps1f1);
    Optional<Object> o1f2 = RecordUtils.getFieldValue(mixedRecord1, ps1f2);
    assertEquals(o1f1.get(), "fooVal1");
    assertEquals(ps1f1.toString(), "/recordField/value");
    assertFalse(o1f2.isPresent());
    assertEquals(ps1f2.toString(), "/nestedRecordField/foo/value");
    // case 2: referencing a field inside a RecordTemplate, two levels deep i.e. nested field
    AspectFoo foo2 = new AspectFoo().setValue("fooVal2");
    com.datahub.test.testing.EntityValue entityValue = new com.datahub.test.testing.EntityValue().setFoo(foo2);
    MixedRecord mixedRecord2 = new MixedRecord().setNestedRecordField(entityValue);
    PathSpec ps2 = MixedRecord.fields().nestedRecordField().foo().value();
    Object o2 = RecordUtils.getFieldValue(mixedRecord2, ps2).get();
    assertEquals(o2, "fooVal2");
    assertEquals(ps2.toString(), "/nestedRecordField/foo/value");
  }

  @Test(description = "Test getFieldValue() when RecordTemplate has field of type array")
  public void testGetFieldValueArray() {
    // case 1: array of strings
    final MixedRecord mixedRecord1 =
        new MixedRecord().setStringArray(new StringArray(Arrays.asList("val1", "val2", "val3", "val4")));
    PathSpec ps1 = MixedRecord.fields().stringArray();
    Object o1 = RecordUtils.getFieldValue(mixedRecord1, ps1).get();
    assertEquals(o1, new StringArray(Arrays.asList("val1", "val2", "val3", "val4")));
    assertEquals(ps1.toString(), "/stringArray");
    // case 2: wildcard on array of records
    AspectFoo aspectFoo1 = new AspectFoo().setValue("fooVal1");
    AspectFoo aspectFoo2 = new AspectFoo().setValue("fooVal2");
    AspectFoo aspectFoo3 = new AspectFoo().setValue("fooVal3");
    AspectFoo aspectFoo4 = new AspectFoo().setValue("fooVal4");
    final AspectFooArray aspectFooArray =
        new AspectFooArray(Arrays.asList(aspectFoo1, aspectFoo2, aspectFoo3, aspectFoo4));
    final MixedRecord mixedRecord2 = new MixedRecord().setRecordArray(aspectFooArray);
    PathSpec ps2 = MixedRecord.fields().recordArray().items().value();
    Object o2 = RecordUtils.getFieldValue(mixedRecord2, ps2).get();
    assertEquals(o2, new StringArray(Arrays.asList("fooVal1", "fooVal2", "fooVal3", "fooVal4")));
    assertEquals(ps2.toString(), "/recordArray/*/value");
    // case 3: array of records is empty
    final MixedRecord mixedRecord3 = new MixedRecord().setRecordArray(new AspectFooArray());
    Object o3 = RecordUtils.getFieldValue(mixedRecord3, MixedRecord.fields().recordArray().items().value()).get();
    assertEquals(o3, new StringArray());
    // case 4: referencing an index of array is not supported
    final MixedRecord mixedRecord4 = new MixedRecord().setRecordArray(aspectFooArray);
    assertThrows(UnsupportedOperationException.class,
        () -> RecordUtils.getFieldValue(mixedRecord4, "/recordArray/0/value"));
    // case 5: referencing nested field inside array of records, field being 2 levels deep
    AspectFoo f1 = new AspectFoo().setValue("val1");
    AspectFoo f2 = new AspectFoo().setValue("val2");
    com.datahub.test.testing.EntityValue val1 = new com.datahub.test.testing.EntityValue().setFoo(f1);
    com.datahub.test.testing.EntityValue val2 = new com.datahub.test.testing.EntityValue().setFoo(f2);
    EntityValueArray entityValues = new EntityValueArray(Arrays.asList(val1, val2));
    final MixedRecord mixedRecord5 = new MixedRecord().setNestedRecordArray(entityValues);
    PathSpec psFoo5 = MixedRecord.fields().nestedRecordArray().items().foo().value();
    PathSpec psBar5 = MixedRecord.fields().nestedRecordArray().items().bar().value();
    Optional<Object> oFoo5 = RecordUtils.getFieldValue(mixedRecord5, psFoo5);
    Optional<Object> oBar5 = RecordUtils.getFieldValue(mixedRecord5, psBar5);
    assertEquals(oFoo5.get(), new StringArray("val1", "val2"));
    assertEquals(psFoo5.toString(), "/nestedRecordArray/*/foo/value");
    assertEquals(oBar5.get(), new StringArray());
    assertEquals(psBar5.toString(), "/nestedRecordArray/*/bar/value");
    // case 6: optional field containing array of strings is not set
    final MixedRecord mixedRecord6 = new MixedRecord();
    PathSpec ps6 = MixedRecord.fields().stringArray();
    Optional<Object> o6 = RecordUtils.getFieldValue(mixedRecord6, ps6);
    assertFalse(o6.isPresent());
    // case 7: optional field containing array of records is not set
    final MixedRecord mixedRecord7 = new MixedRecord();
    PathSpec ps7 = MixedRecord.fields().recordArray().items().value();
    Optional<Object> o7 = RecordUtils.getFieldValue(mixedRecord7, ps7);
    assertFalse(o7.isPresent());
  }

  @Test(description = "Test getFieldValue() when RecordTemplate has field of type array of primitive unions")
  public void testGetFieldValueArrayOfPrimitiveUnions() {
    // case 1: array of unions of strings
    final MixedRecord mixedRecord1 =
        new MixedRecord().setUnionArray(new StringUnionArray(Arrays.asList(
            StringUnion.create("val1"),
            StringUnion.create("val2"),
            StringUnion.create("val3"),
            StringUnion.create("val4")
        )));
    PathSpec ps1 = MixedRecord.fields().unionArray();
    Object o1 = RecordUtils.getFieldValue(mixedRecord1, ps1).get();
    PathSpec ps2 = MixedRecord.fields().unionArray().items();
    Object o2 = RecordUtils.getFieldValue(mixedRecord1, ps2).get();
    assertEquals(o1, new StringUnionArray(Arrays.asList(
        StringUnion.create("val1"),
        StringUnion.create("val2"),
        StringUnion.create("val3"),
        StringUnion.create("val4")
    )));
    assertEquals(ps1.toString(), "/unionArray");
    assertEquals(o2, new StringUnionArray(Arrays.asList(
        StringUnion.create("val1"),
        StringUnion.create("val2"),
        StringUnion.create("val3"),
        StringUnion.create("val4")
    )));
    assertEquals(ps2.toString(), "/unionArray/*");
  }

  @Test
  public void testCapitalizeFirst() {
    String s = "field1";
    assertEquals(RecordUtils.capitalizeFirst(s), "Field1");
    s = "t";
    assertEquals(RecordUtils.capitalizeFirst(s), "T");
    s = "";
    assertEquals(RecordUtils.capitalizeFirst(s), "");
  }

  // Loads the named JSON fixture from the classpath and parses it into an AspectBaz.
  private AspectBaz loadAspectBaz(String resourceName) throws IOException {
    return RecordUtils.toRecordTemplate(AspectBaz.class,
        IOUtils.toString(ClassLoader.getSystemResourceAsStream(resourceName), StandardCharsets.UTF_8));
  }
}
|
apache-2.0
|
freeVM/freeVM
|
enhanced/buildtest/tests/vts/vm/src/test/vm/jvmti/funcs/GetErrorName/GetErrorName0102/GetErrorName0102.java
|
877
|
/*
Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package org.apache.harmony.vts.test.vm.jvmti;
/**
* @author Valentin Al. Sitnick
* @version $Revision: 1.1 $
*
*/
/**
 * Java entry point for the JVMTI GetErrorName0102 test case. The method body
 * is intentionally empty — presumably the actual checks run in the native
 * JVMTI agent attached to the VM (TODO confirm against the harness).
 */
public class GetErrorName0102 {
    // Idiomatic modifier order (public static) and array syntax (String[] args);
    // the redundant bare return is dropped.
    public static void main(String[] args) {
    }
}
|
apache-2.0
|
googleads/google-ads-perl
|
examples/billing/add_account_budget_proposal.pl
|
5283
|
#!/usr/bin/perl -w
#
# Copyright 2019, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This example creates an account budget proposal using the 'CREATE' operation.
# To get account budget proposals, run get_account_budget_proposals.pl.
use strict;
use warnings;
use utf8;
use FindBin qw($Bin);
use lib "$Bin/../../lib";
use Google::Ads::GoogleAds::Client;
use Google::Ads::GoogleAds::Utils::GoogleAdsHelper;
use Google::Ads::GoogleAds::V10::Resources::AccountBudgetProposal;
use Google::Ads::GoogleAds::V10::Enums::AccountBudgetProposalTypeEnum
qw(CREATE);
use Google::Ads::GoogleAds::V10::Enums::TimeTypeEnum qw(NOW FOREVER);
use
Google::Ads::GoogleAds::V10::Services::AccountBudgetProposalService::AccountBudgetProposalOperation;
use Google::Ads::GoogleAds::V10::Utils::ResourceNames;
use Getopt::Long qw(:config auto_help);
use Pod::Usage;
use Cwd qw(abs_path);
# The following parameter(s) should be provided to run the example. You can
# either specify these by changing the INSERT_XXX_ID_HERE values below, or on
# the command line.
#
# Parameters passed on the command line will override any parameters set in
# code.
#
# Running the example with -h will print the command line usage.
my $customer_id = "INSERT_CUSTOMER_ID_HERE";
my $billing_setup_id = "INSERT_BILLING_SETUP_ID_HERE";
# [START add_account_budget_proposal]
# Creates an account budget proposal using the 'CREATE' operation for the
# given customer/billing setup: starts immediately, runs forever, with a
# 0.01 (in account currency) spending limit.
#
# Args:
#   $api_client       - an initialized Google::Ads::GoogleAds::Client.
#   $customer_id      - the Google Ads customer ID (digits only).
#   $billing_setup_id - ID of the billing setup the budget is proposed for.
# Returns 1 on success; the client dies on API faults by default.
sub add_account_budget_proposal {
  my ($api_client, $customer_id, $billing_setup_id) = @_;

  # Create an account budget proposal.
  my $account_budget_proposal =
    Google::Ads::GoogleAds::V10::Resources::AccountBudgetProposal->new({
      billingSetup =>
        Google::Ads::GoogleAds::V10::Utils::ResourceNames::billing_setup(
        $customer_id, $billing_setup_id
        ),
      proposalType => CREATE,
      proposedName => "Account Budget (example)",
      # Specify that the account budget starts immediately.
      proposedStartTimeType => NOW,
      # Alternatively you can specify a specific start time. Refer to the
      # AccountBudgetProposal class for allowed formats.
      #
      # proposedStartDateTime => "2020-01-02 03:04:05",
      # Specify that the account budget runs forever.
      # BUGFIX: FOREVER is a TimeType enum value, so it belongs in the
      # proposedEndTimeType field; proposedEndDateTime takes a date string,
      # as the commented alternative below shows.
      proposedEndTimeType => FOREVER,
      # Alternatively you can specify a specific end time. Allowed formats are as below.
      # proposedEndDateTime => "2021-02-03 04:05:06",
      # Optional: set notes for the budget. These are free text and do not effect budget
      # delivery.
      # proposedNotes => "Received prepayment of $0.01",
      # Optional: set PO number for record keeping. This value is at the user's
      # discretion, and has no effect on Google Billing & Payments.
      # proposedPurchaseOrderNumber => "PO number 12345",
      # Set the spending limit to 0.01, measured in the Google Ads account currency.
      proposedSpendingLimitMicros => 10000
    });

  # Create an account budget proposal operation.
  my $account_budget_proposal_operation =
    Google::Ads::GoogleAds::V10::Services::AccountBudgetProposalService::AccountBudgetProposalOperation
    ->new({
      create => $account_budget_proposal
    });

  # Add the account budget proposal.
  my $account_budget_proposal_response =
    $api_client->AccountBudgetProposalService()->mutate({
      customerId => $customer_id,
      operation  => $account_budget_proposal_operation
    });

  printf "Created account budget proposal '%s'.\n",
    $account_budget_proposal_response->{result}{resourceName};

  return 1;
}
# [END add_account_budget_proposal]
# Don't run the example if the file is being included.
if (abs_path($0) ne abs_path(__FILE__)) {
return 1;
}
# Get Google Ads Client, credentials will be read from ~/googleads.properties.
my $api_client = Google::Ads::GoogleAds::Client->new();
# By default examples are set to die on any server returned fault.
$api_client->set_die_on_faults(1);
# Parameters passed on the command line will override any parameters set in code.
GetOptions(
"customer_id=s" => \$customer_id,
"billing_setup_id=i" => \$billing_setup_id,
);
# Print the help message if the parameters are not initialized in the code nor
# in the command line.
pod2usage(2) if not check_params($customer_id, $billing_setup_id);
# Call the example.
add_account_budget_proposal($api_client, $customer_id =~ s/-//gr,
$billing_setup_id);
=pod
=head1 NAME
add_account_budget_proposal
=head1 DESCRIPTION
This example creates an account budget proposal using the 'CREATE' operation. To get
account budget proposals, run get_account_budget_proposals.pl.
=head1 SYNOPSIS
add_account_budget_proposal.pl [options]
-help Show the help message.
-customer_id The Google Ads customer ID.
-billing_setup_id The billing setup ID.
=cut
|
apache-2.0
|
veltri/DLV2
|
tests/parser/grounding.7.test.py
|
559
|
# DLV2 parser/grounding regression fixture: a blocks-world encoding with a
# disjunctive guess rule ("on(B,L,0) | -on(B,L,0)").  The harness feeds
# `input` to the grounder and compares against `output`; here the program is
# expected to be echoed back unchanged, so the two strings are identical.

# ASP program fed to the parser/grounder under test.
input = """
supp(B) :- on(B,table,0).
supp(B) :- on(B,B1,0), supp(B1).
on(b0,table,0) :- true.
on(b1,b0,0) :- true.
on(B,L,0) | -on(B,L,0) :- block(B), location(L).
true.
location(L) :- block(L).
location(table) :- true.
block(b0).
block(b1).
block(b2).
"""
# Expected output — byte-identical to the input program.
output = """
supp(B) :- on(B,table,0).
supp(B) :- on(B,B1,0), supp(B1).
on(b0,table,0) :- true.
on(b1,b0,0) :- true.
on(B,L,0) | -on(B,L,0) :- block(B), location(L).
true.
location(L) :- block(L).
location(table) :- true.
block(b0).
block(b1).
block(b2).
"""
|
apache-2.0
|
nkeddie/draft-it
|
DraftIt.Web/Angular/Views/createDraft.html
|
1277
|
<form class="form-horizontal" ng-submit="create()">
<fieldset>
<legend>Create Draft</legend>
<div class="form-group">
<label class="col-lg-2" for="inputId">Id</label>
<div class="col-lg-10">
<input type="text" class="form-control" id="inputId" ng-model="id"/>
</div>
</div>
<div class="form-group">
<label class="col-lg-2" for="inputName">Name</label>
<div class="col-lg-10">
<input type="text" class="form-control" id="inputName" ng-model="name"/>
</div>
</div>
<div class="form-group">
<label class="col-lg-2" for="inputTemplate">Template</label>
<div class="col-lg-10">
<select ng-model="template">
<option ng-repeat="template in templates" value="{{template.id}}">{{template.name}}</option>
</select>
<a class="btn btn-link" ui-sref="createTemplate">Add template</a>
</div>
</div>
<div class="form-group">
<div class="col-lg-10 col-lg-offset-2">
<button class="btn btn-primary" type="submit">Submit</button>
</div>
</div>
</fieldset>
</form>
|
apache-2.0
|
cassandra-project/disaggregation
|
src/eu/cassandra/utils/Utils.java
|
26163
|
/*
Copyright 2011-2013 The Cassandra Consortium (cassandra-fp7.eu)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package eu.cassandra.utils;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Map;
import java.util.Scanner;
import java.util.TreeMap;
import org.apache.log4j.Logger;
import org.jfree.chart.ChartFactory;
import org.jfree.chart.ChartUtilities;
import org.jfree.chart.JFreeChart;
import org.jfree.chart.plot.PlotOrientation;
import org.jfree.data.xy.XYSeries;
import org.jfree.data.xy.XYSeriesCollection;
import weka.clusterers.SimpleKMeans;
import weka.core.Attribute;
import weka.core.DenseInstance;
import weka.core.Instance;
import weka.core.Instances;
import weka.filters.Filter;
import weka.filters.unsupervised.attribute.AddCluster;
import eu.cassandra.appliance.Appliance;
import eu.cassandra.event.Event;
/**
* This class contains static functions that are used for general purposes
* throughout the Disaggregation Module.
*
* @author Antonios Chrysopoulos
* @version 0.9, Date: 29.07.2013
*/
public class Utils
{
  // Class-wide log4j logger shared by all utility methods below.
  static Logger log = Logger.getLogger(Utils.class);

  /** Loading a library for integer programming. */
  static {
    // Native JNI constraint-solver backend (presumably Google or-tools'
    // jniconstraintsolver — TODO confirm); must be on java.library.path.
    System.loadLibrary("jniconstraintsolver");
  }
/**
* This function is estimating the absolute euclidean distance of the active
* and reactive power vector distance of two points of interest in the form of
* arrays.
*
* @param a1
* The first array of values
* @param a2
* The second array of values
*
* @return the estimated absolute euclidean distance.
*/
public static double absoluteEuclideanDistance (double[] a1, double[] a2)
{
return Math.sqrt(Math.pow(a1[0] - a2[0], 2) + Math.pow(a1[1] - a2[1], 2));
}
/**
* This function is estimating the percentage euclidean distance of the active
* and reactive power vector distance of two points of interest in the form of
* arrays.
*
* @param a1
* The first array of values
*
* @param a2
* The second array of values
*
* @return the estimated percentage euclidean distance.
*/
public static double percentageEuclideanDistance (double[] a1, double[] a2)
{
return 100
* Math.sqrt(Math.pow(a1[0] - a2[0], 2) + Math.pow(a1[1] - a2[1], 2))
/ norm(a1);
}
/**
* This function is estimating the euclidean length (or norm) of an array of
* two values
*
* @param poi
* The point of interest's array of values
* @return the euclidean length of the array.
*/
public static double norm (double[] poi)
{
return Math.sqrt(Math.pow(poi[0], 2) + Math.pow(poi[1], 2));
}
/**
* This function is used in order to check if a certain appliance is within
* the permitted limits
*
* @param trueValue
* The value under examination
* @param limit
* The limit value that is used as threshold
* @return true if it is within limits, false otherwise
*/
public static boolean checkLimit (double trueValue, double limit)
{
double lowerLimit = (1 - Constants.ERROR_FRINGE) * limit;
return (trueValue > lowerLimit);
}
  /**
   * Checks whether a refrigerator end-use duration falls inside the accepted
   * window around the reference duration {@code limit}.  In loose-coupling
   * mode the window is an absolute fringe; otherwise it is a percentage
   * fringe around the limit.
   *
   * @param trueValue
   *          The duration value under examination
   * @param limit
   *          The reference duration used as threshold
   * @return true if it is within limits, false otherwise
   */
  public static boolean checkLimitFridge (double trueValue, double limit)
  {
    double lowerLimit = 0, upperLimit = 0;
    if (Constants.REF_LOOSE_COUPLING) {
      // NOTE(review): this centers the window on REF_DURATION_FRINGE with
      // half-width `limit`, not on `limit` with half-width
      // REF_DURATION_FRINGE — looks like the operands may be swapped; confirm
      // against the intended semantics of REF_DURATION_FRINGE.
      lowerLimit = Constants.REF_DURATION_FRINGE - limit;
      upperLimit = Constants.REF_DURATION_FRINGE + limit;
    }
    else {
      // Strict mode: symmetric percentage window around the reference limit.
      lowerLimit = (1 - Constants.STRICT_REF_DURATION_FRINGE) * limit;
      upperLimit = (1 + Constants.STRICT_REF_DURATION_FRINGE) * limit;
    }
    log.debug("True Value: " + trueValue + " Limit: " + limit + " UpperLimit: "
              + upperLimit + " Lower Limit: " + lowerLimit);
    // Strictly inside the open interval (lowerLimit, upperLimit).
    return (trueValue < upperLimit && trueValue > lowerLimit);
  }
/**
* This function is used in order to check if a certain appliance is within
* the permitted limits
*
* @param trueValue
* The value under examination
* @param limit
* The limit value that is used as threshold
* @return true if it is within limits, false otherwise
*/
public static double pairingLimit (double trueValue)
{
double upperLimit = (1 + Constants.PAIR_ERROR_FRINGE) * trueValue;
return upperLimit;
}
  /**
   * This function is used for the detection of reduction points following a
   * rising point, so that there is a possibility they can be connected as a
   * pair.
   *
   * @param index
   *          The index of the rising point of interest.
   * @param pois
   *          The list of points of interest under examination.
   * @return an array of indices that contain possible combinatorial reduction
   *         points of interest.
   */
  public static Integer[] findRedPoints (int index,
                                         ArrayList<PointOfInterest> pois)
  {
    ArrayList<Integer> temp = new ArrayList<Integer>();
    // Largest reduction magnitude this rising point could plausibly pair with.
    double limit = pairingLimit(pois.get(index).getPDiff());
    // Without the simple-time-complexity heuristic, any later point qualifies.
    double timeLimit = Double.POSITIVE_INFINITY;
    if (Constants.SIMPLE_TIME_COMPLEXITY == true)
      timeLimit = pois.get(index).getMinute() + Constants.TEMPORAL_THRESHOLD;
    // Candidates: strictly later, falling edges within the time window whose
    // (positive) drop magnitude is under the tolerance limit.
    for (int i = index + 1; i < pois.size(); i++)
      if (pois.get(i).getRising() == false
          && pois.get(i).getMinute() <= timeLimit
          && limit > -pois.get(i).getPDiff())
        temp.add(i);
    // Copy to a fixed-size Integer array for the caller.
    Integer[] result = new Integer[temp.size()];
    for (int i = 0; i < temp.size(); i++)
      result[i] = temp.get(i);
    return result;
  }
  /**
   * This function is used for the detection of rising points preceding a
   * reduction point, so that there is a possibility they can be connected as a
   * pair.
   *
   * @param index
   *          The index of the reduction point of interest.
   * @param pois
   *          The list of points of interest under examination.
   * @return an array of indices that contain possible combinatorial rising
   *         points of interest.
   */
  public static Integer[] findRisPoints (int index,
                                         ArrayList<PointOfInterest> pois)
  {
    ArrayList<Integer> temp = new ArrayList<Integer>();
    // PDiff of a reduction point is negative, so negate to get a positive cap.
    double limit = -pairingLimit(pois.get(index).getPDiff());
    // Without the simple-time-complexity heuristic, any earlier point counts.
    double timeLimit = Double.NEGATIVE_INFINITY;
    if (Constants.SIMPLE_TIME_COMPLEXITY == true)
      timeLimit = pois.get(index).getMinute() - Constants.TEMPORAL_THRESHOLD;
    // Candidates: strictly earlier rising edges inside the time window whose
    // rise stays under the tolerance cap.
    for (int i = 0; i < index; i++)
      if (pois.get(i).getRising() && pois.get(i).getMinute() >= timeLimit
          && limit > pois.get(i).getPDiff())
        temp.add(i);
    // Copy to a fixed-size Integer array for the caller.
    Integer[] result = new Integer[temp.size()];
    for (int i = 0; i < temp.size(); i++)
      result[i] = temp.get(i);
    return result;
  }
/**
* This is an auxiliary function used to estimate the mean values of a pair of
* points of interest.
*
* @param pois
* The pair of points of interest under examination.
* @return an array of the mean values of active and reactive power.
*/
public static double[] meanValues (PointOfInterest[] pois)
{
double[] result =
{ (pois[0].getPDiff() - pois[1].getPDiff()) / 2,
(pois[0].getQDiff() - pois[1].getQDiff()) / 2 };
return result;
}
  /**
   * This function is used for the creation of final matching pairs of points
   * of interest from the solutions that the integer programming solver has
   * provided.
   *
   * @param pois
   *          The list of points of interest under examination.
   * @param array
   *          An array of 0-1 that shows which points of interest are included
   *          in the solution.
   * @return a list of pairs of points of interest, each pair as
   *         [rising, reduction].
   */
  public static ArrayList<PointOfInterest[]>
    createFinalPairs (ArrayList<PointOfInterest> pois, int[] array)
  {
    // Initializing the auxiliary variables.
    ArrayList<PointOfInterest[]> result = new ArrayList<PointOfInterest[]>();
    ArrayList<PointOfInterest> rising = new ArrayList<PointOfInterest>();
    ArrayList<PointOfInterest> reduction = new ArrayList<PointOfInterest>();
    // Split the selected points (value 1) into rising and reduction sets.
    for (int i = 0; i < array.length; i++) {
      if (array[i] == 1) {
        if (pois.get(i).getRising())
          rising.add(pois.get(i));
        else
          reduction.add(pois.get(i));
      }
    }
    // Case 1: exactly one of each type — a single straightforward pair.
    if (rising.size() == 1 && reduction.size() == 1) {
      PointOfInterest[] temp = { rising.get(0), reduction.get(0) };
      result.add(temp);
    }
    // Case 2: one rising against many reductions — synthesize a matching
    // rising point (mirrored P/Q) at the original rise minute for each pair.
    else if (rising.size() == 1) {
      for (PointOfInterest red: reduction) {
        PointOfInterest start =
          new PointOfInterest(rising.get(0).getMinute(), true, -red.getPDiff(),
                              -red.getQDiff());
        PointOfInterest[] temp = { start, red };
        result.add(temp);
      }
    }
    // Case 3: many risings against one reduction — synthesize a matching
    // reduction point at the original end minute for each pair.
    // NOTE(review): assumes the solver never selects multiple points of both
    // types at once — confirm against the IP formulation.
    else {
      for (PointOfInterest rise: rising) {
        PointOfInterest end =
          new PointOfInterest(reduction.get(0).getMinute(), false,
                              -rise.getPDiff(), -rise.getQDiff());
        PointOfInterest[] temp = { rise, end };
        result.add(temp);
      }
    }
    return result;
  }
/**
* This function is used to extract the file name from a path of a file,
* excluding the file extension.
*
* @param filename
* The full name and path of the file of interest.
* @return The name of the file without the file extension.
*/
public static String getFileName (String filename)
{
return filename.substring(0, filename.length() - 4);
}
  /**
   * This function is used in order to create clusters of points of interest
   * based on the active power difference they have, using Weka's k-means.
   *
   * @param pois
   *          The list of points of interest that will be clustered.
   * @param bias
   *          Extra clusters added on top of the size-derived estimate.
   * @return The newly created clusters with the points that are comprising
   *         them, each cluster sorted by minute, clusters ordered by comp5.
   * @throws Exception
   *           propagated from the Weka clusterer/filter.
   */
  public static ArrayList<ArrayList<PointOfInterest>>
    clusterPoints (ArrayList<PointOfInterest> pois, int bias) throws Exception
  {
    // Initialize the auxiliary variables
    ArrayList<ArrayList<PointOfInterest>> result =
      new ArrayList<ArrayList<PointOfInterest>>();
    // Estimating the number of clusters that will be created:
    // ceil(|pois| / MAX_POINTS_OF_INTEREST) plus the caller-provided bias.
    int numberOfClusters =
      (int) (Math.ceil((double) pois.size()
                       / (double) Constants.MAX_POINTS_OF_INTEREST))
              + bias;
    log.info("Clusters: " + pois.size() + " / "
             + Constants.MAX_POINTS_OF_INTEREST + " + " + bias + " = "
             + numberOfClusters);
    // Create a new empty list of points for each cluster
    for (int i = 0; i < numberOfClusters; i++)
      result.add(new ArrayList<PointOfInterest>());
    // Initializing auxiliary variables namely the attributes of the data set:
    // the point's index and its absolute active-power difference.
    Attribute id = new Attribute("id");
    Attribute pDiffRise = new Attribute("pDiff");
    ArrayList<Attribute> attr = new ArrayList<Attribute>();
    attr.add(id);
    attr.add(pDiffRise);
    Instances instances = new Instances("Points of Interest", attr, 0);
    // Each event is translated to an instance with the above attributes
    for (int i = 0; i < pois.size(); i++) {
      Instance inst = new DenseInstance(2);
      inst.setValue(id, i);
      inst.setValue(pDiffRise, Math.abs(pois.get(i).getPDiff()));
      instances.add(inst);
    }
    // System.out.println(instances.toString());
    Instances newInst = null;
    log.debug("Instances: " + instances.toSummaryString());
    // Create the addcluster filter of Weka and the set up the k-means
    // clusterer; the id attribute (index 1) is excluded from clustering.
    AddCluster addcluster = new AddCluster();
    SimpleKMeans kmeans = new SimpleKMeans();
    kmeans.setSeed(numberOfClusters);
    // This is the important parameter to set
    kmeans.setPreserveInstancesOrder(true);
    kmeans.setNumClusters(numberOfClusters);
    kmeans.buildClusterer(instances);
    addcluster.setClusterer(kmeans);
    addcluster.setInputFormat(instances);
    addcluster.setIgnoredAttributeIndices("1");
    // Cluster data set
    newInst = Filter.useFilter(instances, addcluster);
    // System.out.println(newInst.toString());
    // Parse through the dataset to see where each point is placed in the
    // clusters; Weka labels them "cluster1".."clusterN" (1-based).
    for (int i = 0; i < newInst.size(); i++) {
      String cluster = newInst.get(i).stringValue(newInst.attribute(2));
      cluster = cluster.replace("cluster", "");
      log.debug("Point of Interest: " + i + " Cluster: " + cluster);
      result.get(Integer.parseInt(cluster) - 1).add(pois.get(i));
    }
    // Drop empty clusters; sort the remaining cluster points by minute.
    for (int i = result.size() - 1; i >= 0; i--) {
      if (result.get(i).size() == 0)
        result.remove(i);
      else
        Collections.sort(result.get(i), Constants.comp);
    }
    // Sorting the all clusters by their active power.
    Collections.sort(result, Constants.comp5);
    return result;
  }
  /**
   * This function is utilized for the extraction of the points that are not
   * combined with other ones in order to create the final pairs of operation.
   *
   * @param pois
   *          The list of all the points of interest.
   * @param solution
   *          This is the list that contains the solution vectors for the
   *          points of interest.
   * @param solutionArray
   *          This array contains the indices of the points of interest
   *          participating in each solution.
   * @return the points not covered by any selected solution, or null when
   *         every point was used (callers must null-check).
   */
  public static ArrayList<PointOfInterest>
    extractRemainingPoints (ArrayList<PointOfInterest> pois,
                            ArrayList<Integer> solution, int[][] solutionArray)
  {
    ArrayList<PointOfInterest> result = new ArrayList<PointOfInterest>();
    // Mark every point index that appears in at least one chosen solution.
    int[] tempArray = new int[solutionArray[0].length];
    for (Integer index: solution)
      for (int i = 0; i < solutionArray[index].length; i++)
        if (solutionArray[index][i] == 1)
          tempArray[i] = 1;
    // System.out.println("TempArray:" + Arrays.toString(tempArray));
    // Collect the unmarked points — those left uncovered by the solver.
    for (int i = 0; i < tempArray.length; i++)
      if (tempArray[i] == 0)
        result.add(pois.get(i));
    // Legacy contract: an empty remainder is signalled with null.
    if (result.size() == 0)
      result = null;
    return result;
  }
/**
* This function is used to remove the smallest points of interest from a list
* in order to make its size viable to estimate the pairs.
*
* @param pois
* The list of points of interest.
* @return The list of points of interest with a percentage of the points
* removed.
*/
public static ArrayList<PointOfInterest>
removePoints (ArrayList<PointOfInterest> pois)
{
ArrayList<PointOfInterest> result = new ArrayList<PointOfInterest>();
int number = pois.size() - Constants.REMOVAL_MAX_POINTS;
log.debug("Initial Size: " + pois.size() + " Removing: " + number);
Collections.sort(pois, Constants.comp4);
log.debug("Initial POIS: " + pois.toString());
Collections.sort(result, Constants.comp4);
for (int i = 0; i < number; i++)
result.add(pois.remove(pois.size() - 1));
log.debug("Removed POIS: " + result.toString());
return result;
}
/**
* This function is used in order to find the maximum value from an array.
*
* @param matrix
* @return
*/
public static double findMax (double[] matrix)
{
double result = Double.NEGATIVE_INFINITY;
for (int i = 0; i < matrix.length; i++)
if (result < matrix[i])
result = matrix[i];
return result;
}
/**
* This function is used in order to find the maximum value from an array.
*
* @param matrix
* @return
*/
public static double findMax (ArrayList<Double> matrix)
{
double result = Double.NEGATIVE_INFINITY;
for (int i = 0; i < matrix.size(); i++)
if (result < matrix.get(i))
result = matrix.get(i);
return result;
}
/**
* This function is used when the user has already tracked the electrical
* appliances installed in the installation. He can used them as a base case
* and extend it with any additional ones that may be found during the later
* stages of analysis of the consumption.
*
* @param filename
* The filename of the file containing the appliances.
* @return
* A list of appliances
* @throws FileNotFoundException
*/
public static ArrayList<Appliance> appliancesFromFile (String filename)
throws FileNotFoundException
{
// Read appliance file and start appliance parsing
File file = new File(filename);
Scanner input = new Scanner(file);
ArrayList<Appliance> appliances = new ArrayList<Appliance>();
String nextLine;
String[] line;
while (input.hasNextLine()) {
nextLine = input.nextLine();
line = nextLine.split(",");
String name = line[0];
String activity = line[1];
if (activity.contains("Standby") == false
&& activity.contains("Refrigeration") == false) {
double p = Double.parseDouble(line[2]);
double q = Double.parseDouble(line[3]);
// For each appliance found in the file, an temporary Appliance
// Entity is created.
appliances.add(new Appliance(name, activity, p, q, 0, 100));
}
}
System.out.println("Appliances:" + appliances.size());
input.close();
return appliances;
}
  /**
   * This is an auxiliary function used to check if the distance in time and
   * space of a pair is close to this appliance, meaning that it belongs to
   * this appliance.
   *
   * @param mean
   *          The mean active and reactive power measurements.
   * @param duration
   *          The duration of the end-use.
   * @param metrics
   *          The base-level metrics: {meanP, meanQ, referenceDuration}.
   * @return true if it is close, false otherwise.
   */
  public static boolean isCloseRef (double[] mean, int duration,
                                    double[] metrics)
  {
    // Power match: percentage distance to the reference (P, Q) must be under
    // REF_THRESHOLD; duration match is delegated to checkLimitFridge.
    double[] meanValues = { metrics[0], metrics[1] };
    return ((Utils.percentageEuclideanDistance(mean, meanValues) < Constants.REF_THRESHOLD) && (Utils
      .checkLimitFridge(duration, metrics[2])));
  }
/**
* This function is called when the temporary files must be removed from the
* temporary folder used to store the csv and xls used to create the entity
* models during the procedure of training and disaggregation. It is done when
* the program starts, when the program ends and when the reset button is
* pressed by the user.
*/
public static void cleanFiles ()
{
File directory = new File("TempFiles");
File files[] = directory.listFiles();
String extension = "";
for (int index = 0; index < files.length; index++) {
{
extension =
files[index].getAbsolutePath().substring(files[index]
.getAbsolutePath()
.length() - 3,
files[index]
.getAbsolutePath()
.length());
if (extension.equalsIgnoreCase("csv")) {
boolean wasDeleted = files[index].delete();
if (!wasDeleted) {
System.out.println("Not Deleted File " + files[index].toString());
}
}
}
}
}
  /**
   * Estimates a power threshold from a minute-resolution consumption series
   * by collecting the minimum of each day (1440 minutes) and aggregating the
   * daily minima with either the median or the mean.
   *
   * @param power
   *          Minute-by-minute power measurements.
   * @param median
   *          true to aggregate the daily minima with the median, false for
   *          the mean.
   * @return the estimated threshold.
   */
  public static double estimateThreshold (double[] power, boolean median)
  {
    double result = 0;
    ArrayList<Double> minimums = new ArrayList<Double>();
    double min = Double.POSITIVE_INFINITY;
    // Track the running minimum, flushing it every 1440 samples (one day).
    for (int i = 0; i < power.length; i++) {
      if (min > power[i])
        min = power[i];
      if (i % 1440 == 0 && i != 0) {
        minimums.add(min);
        min = Double.POSITIVE_INFINITY;
      }
    }
    // Series shorter than a day: fall back to its overall minimum.
    if (minimums.size() == 0)
      minimums.add(min);
    log.debug("================THRESHOLD SETTING================");
    log.debug("Minimums: " + minimums.toString());
    log.debug("Median:" + median);
    if (median)
      result = Utils.estimateMedian(minimums);
    else
      result = Utils.estimateMean(minimums);
    log.debug("Resulting threshold: " + result);
    log.debug("");
    log.debug("");
    return result;
  }
public static double estimateMedian (ArrayList<Double> values)
{
double result = 0.0;
int index = -1;
Collections.sort(values);
log.info("Values: " + values);
if (values.size() == 2)
index = 0;
else
index = values.size() / 2;
if (values.size() % 2 == 0)
result = (values.get(index) + values.get(index + 1)) / 2;
else
result = values.get(index);
log.info("Result:" + result);
return result;
}
public static double estimateMean (ArrayList<Double> values)
{
double result = 0.0;
double sum = 0.0;
for (double minimum: values)
sum += minimum;
result = sum / values.size();
return result;
}
public static double estimateStd (ArrayList<Double> values, double mean)
{
double result = 0.0;
double sum = 0;
for (double value: values)
sum += Math.pow((value - mean), 2);
sum /= values.size();
result = Math.sqrt(sum);
return result;
}
/**
* This is an auxiliary function used for checking if all the points of
* interest are of the same type.
*
* @param pois
* A list of points of interest
* @return true if they are all of the same type, false otherwise.
*/
public static boolean allSamePoints (ArrayList<PointOfInterest> pois)
{
// Initializing the auxiliary variables
boolean flag = true;
boolean start = pois.get(0).getRising();
for (PointOfInterest poi: pois)
if (start != poi.getRising()) {
flag = false;
break;
}
return flag;
}
  // NOTE(review): this returns an all-zero array of the same length as the
  // event's reactive-power series — it looks like an unfinished stub rather
  // than an actual normalization; confirm intent before relying on it.
  public static double[] normalizeReactive (Event event)
  {
    double[] result = new double[event.getReactivePowerConsumptions().length];
    return result;
  }
/**
* This function is used for the visualization of a Line Diagram.
*
* @param title
* The title of the chart.
* @param x
* The unit on the X axis of the chart.
* @param y
* The unit on the Y axis of the chart.
* @param data
* The array of values.
* @return a chart panel with the graphical representation.
*/
public static void createLineDiagram (String title, String x, String y,
ArrayList<Double> data)
{
XYSeries series1 = new XYSeries("Active Power");
for (int i = 0; i < data.size(); i++) {
series1.add(i, data.get(i));
}
XYSeriesCollection dataset = new XYSeriesCollection();
dataset.addSeries(series1);
PlotOrientation orientation = PlotOrientation.VERTICAL;
boolean show = true;
boolean toolTips = false;
boolean urls = false;
JFreeChart chart =
ChartFactory.createXYLineChart(title, x, y, dataset, orientation, show,
toolTips, urls);
int width = 1024;
int height = 768;
try {
ChartUtilities.saveChartAsPNG(new File(Constants.chartFolder + title
+ ".PNG"), chart, width, height);
}
catch (IOException e) {
}
}
public static double countPoints (int[] points)
{
int counter = 0;
for (int i = 0; i < points.length; i++)
if (points[i] == 1)
counter++;
return counter;
}
  /**
   * Diagnostic pass over a list of events: logs the sorted distribution of
   * event durations and counts how many events span more than one day.
   *
   * @param events
   *          The events to inspect (not modified).
   */
  public static void durationCheck (ArrayList<Event> events)
  {
    log.info("====================DURATIONS========================");
    ArrayList<Integer> durations = new ArrayList<Integer>();
    int start = -1, end = -1, counter = 0;
    int duration = -1;
    for (Event event: events) {
      start = event.getStartMinute();
      end = event.getEndMinute();
      duration = end - start;
      if (duration > Constants.MINUTES_PER_DAY) {
        counter++;
        // (The "+ +start" is a harmless unary plus, not a typo with effect.)
        log.info("Start:" + +start + " End: " + end + " Duration:" + duration);
      }
      durations.add(duration);
    }
    Collections.sort(durations);
    log.info("Durations:" + durations.toString());
    log.info("Events over a day: " + counter);
  }
public static void
removePoints (ArrayList<PointOfInterest> points, int minute)
{
int i = 0;
for (i = 0; i < points.size(); i++)
if (points.get(i).getMinute() == minute)
break;
points.remove(i);
}
  /**
   * Fits a normal distribution (mean, population std) to the dataset and maps
   * each distinct value to its upper-tail probability 1 - Phi(value) under
   * that fit, using the project's Gaussian helper.
   *
   * @param dataset
   *          The values to evaluate; must be non-empty.
   * @return a sorted map from distinct value to its complementary CDF.
   */
  public static Map<Double, Double>
    estimateCumulativeValues (ArrayList<Double> dataset)
  {
    log.info("============ESTIMATE CUMULATIVE VALUES==================");
    // TreeMap keeps the result ordered by value.
    Map<Double, Double> result = new TreeMap<Double, Double>();
    double mean = estimateMean(dataset);
    double std = estimateStd(dataset, mean);
    log.info("Mean: " + mean);
    log.info("Standard Deviation: " + std);
    // Each distinct value is evaluated once; duplicates are skipped.
    for (Double value: dataset)
      if (result.containsKey(value) == false)
        result.put(value, 1 - Gaussian.bigPhi(value, mean, std));
    // System.out.println(result.toString());
    return result;
  }
}
|
apache-2.0
|
Digaku/closure-library
|
closure/goog/net/tmpnetwork.js
|
4070
|
// Copyright 2006 The Closure Library Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS-IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/**
* @fileoverview tmpnetwork.js contains some temporary networking functions
* for browserchannel which will be moved at a later date.
*/
/**
* Namespace for BrowserChannel
*/
goog.provide('goog.net.tmpnetwork');
goog.require('goog.Uri');
goog.require('goog.net.ChannelDebug');
/**
* Default timeout to allow for google.com pings.
* @type {number}
*/
goog.net.tmpnetwork.GOOGLECOM_TIMEOUT = 10000;
/**
 * Tests basic network connectivity by loading a tiny image from google.com
 * (or a caller-supplied image URI) with the default google.com timeout.
 * @param {Function} callback Function invoked with a boolean success flag.
 * @param {goog.Uri=} opt_imageUri Optional image URI to probe instead of the
 *     default cleardot.gif.
 */
goog.net.tmpnetwork.testGoogleCom = function(callback, opt_imageUri) {
  // We need to add a 'rand' to make sure the response is not fulfilled
  // by browser cache.
  var uri = opt_imageUri;
  if (!uri) {
    // Protocol-relative so the probe works from both http and https pages.
    uri = new goog.Uri('//www.google.com/images/cleardot.gif');
    uri.makeUnique();
  }
  goog.net.tmpnetwork.testLoadImage(uri.toString(),
      goog.net.tmpnetwork.GOOGLECOM_TIMEOUT, callback);
};
/**
 * Test loading the given image, retrying if necessary.
 * @param {string} url URL to the image.
 * @param {number} timeout Milliseconds before giving up.
 * @param {Function} callback Function to call with results.
 * @param {number} retries The number of times to retry.
 * @param {number=} opt_pauseBetweenRetriesMS Optional number of milliseconds
 *     between retries - defaults to 0.
 */
goog.net.tmpnetwork.testLoadImageWithRetries = function(url, timeout, callback,
    retries, opt_pauseBetweenRetriesMS) {
  var channelDebug = new goog.net.ChannelDebug();
  channelDebug.debug('TestLoadImageWithRetries: ' + opt_pauseBetweenRetriesMS);
  if (retries == 0) {
    // no more retries, give up
    callback(false);
    return;
  }
  var pauseBetweenRetries = opt_pauseBetweenRetriesMS || 0;
  retries--;
  goog.net.tmpnetwork.testLoadImage(url, timeout, function(succeeded) {
    if (succeeded) {
      callback(true);
    } else {
      // try again after the configured pause, recursing with one fewer retry
      goog.global.setTimeout(function() {
        goog.net.tmpnetwork.testLoadImageWithRetries(url, timeout, callback,
            retries, pauseBetweenRetries);
      }, pauseBetweenRetries);
    }
  });
};
/**
 * Test loading the given image.
 * @param {string} url URL to the image.
 * @param {number} timeout Milliseconds before giving up.
 * @param {Function} callback Function to call with a boolean result.
 */
goog.net.tmpnetwork.testLoadImage = function(url, timeout, callback) {
  var channelDebug = new goog.net.ChannelDebug();
  channelDebug.debug('TestLoadImage: loading ' + url);
  var img = new Image();
  var timer = null;
  // BUGFIX: createHandler was previously assigned without var, leaking an
  // implicit global (and breaking in strict mode).
  var createHandler = function(result, message) {
    return function() {
      try {
        channelDebug.debug('TestLoadImage: ' + message);
        // Detach all handlers and cancel the timer so the callback fires
        // exactly once.
        goog.net.tmpnetwork.clearImageCallbacks_(img);
        goog.global.clearTimeout(timer);
        callback(result);
      } catch (e) {
        channelDebug.dumpException(e);
      }
    };
  };
  img.onload = createHandler(true, 'loaded');
  img.onerror = createHandler(false, 'error');
  img.onabort = createHandler(false, 'abort');
  // ontimeout is not a native Image event; it is fired manually by the timer.
  img.ontimeout = createHandler(false, 'timeout');
  timer = goog.global.setTimeout(function() {
    if (img.ontimeout) {
      img.ontimeout();
    }
  }, timeout);
  img.src = url;
};
/**
 * Clear handlers to avoid memory leaks.
 * @param {Image} img The image to clear handlers from.
 * @private
 */
goog.net.tmpnetwork.clearImageCallbacks_ = function(img) {
  // NOTE(user): Nullified individually to avoid compiler warnings
  // (BUG 658126)
  img.onload = null;
  img.onerror = null;
  img.onabort = null;
  img.ontimeout = null;
};
|
apache-2.0
|
RyanTech/admin-v5
|
js/page/Publisher.js
|
1341
|
/**
* Created by chensheng on 15/8/3.
*/
'use strict';
(function (ns) {
  // Backbone-style admin view for editing a publisher: toggles the
  // personal/corporate field groups and keeps the city dropdown in sync with
  // the selected province.
  ns.Publisher = tp.view.Loader.extend({
    events: {
      'change [name=publisher_type]': 'publisherType_changeHandler',
      'change [name=province]': 'province_changeHandler'
    },
    initialize: function (options) {
      tp.view.Loader.prototype.initialize.call(this, options);
      // Compiled once: renders <option> elements for a province's city list.
      this.optionTemplate = Handlebars.compile('{{#each cities}}<option value="{{.}}">{{.}}</option>{{/each}}');
    },
    render: function () {
      tp.view.Loader.prototype.render.call(this);
      // Restore the stored province, then populate its cities by index.
      var province = this.model.get('province');
      this.$('[name=province]').val(province);
      this.renderCities(this.model.options.provinces.indexOf(province));
    },
    // Rebuilds the city <select> for the province at the given index.
    renderCities: function (province) {
      var cities = this.model.options.cities;
      this.$('[name=city]').html(this.optionTemplate({cities: cities[province]}));
    },
    province_changeHandler: function (event) {
      // selectedIndex is used directly as the index into the cities table.
      var province = event.target.selectedIndex;
      this.renderCities(province);
    },
    publisherType_changeHandler: function (event) {
      // Show only the field group matching the chosen type; hide the other.
      var className = $(event.currentTarget).data('class');
      this.$('.personal, .corp').not('.' + className).addClass('hide');
      this.$('.' + className).removeClass('hide');
    }
  });
}(Nervenet.createNameSpace('admin.page')));
|
apache-2.0
|
tcnghia/kubernetes
|
test/e2e/apimachinery/resource_quota.go
|
88449
|
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apimachinery
import (
"context"
"fmt"
"strconv"
"time"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
schedulingv1 "k8s.io/api/scheduling/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/quota/v1/evaluator/core"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/utils/crd"
imageutils "k8s.io/kubernetes/test/utils/image"
"github.com/onsi/ginkgo"
)
const (
	// how long to wait for a resource quota update to occur
	resourceQuotaTimeout = 30 * time.Second
	// podName is a shared default pod name for tests in this file.
	// NOTE(review): its usage is not visible in this section — confirm
	// against the rest of the file before relying on this description.
	podName = "pfpod"
)

// classGold is the StorageClass name used by the storage-class-scoped
// PVC quota tests (e.g. gold.storageclass.../requests.storage accounting).
var classGold = "gold"

// extendedResourceName is an example extended resource used to exercise
// quota accounting for requests.<extended-resource> entries.
var extendedResourceName = "example.com/dongle"
var _ = SIGDescribe("ResourceQuota", func() {
f := framework.NewDefaultFramework("resourcequota")
/*
Release: v1.16
Testname: ResourceQuota, object count quota, resourcequotas
Description: Create a ResourceQuota. Creation MUST be successful and its ResourceQuotaStatus MUST match to expected used and total allowed resource quota count within namespace.
*/
framework.ConformanceIt("should create a ResourceQuota and ensure its status is promptly calculated.", func() {
ginkgo.By("Counting existing ResourceQuota")
c, err := countResourceQuota(f.ClientSet, f.Namespace.Name)
framework.ExpectNoError(err)
ginkgo.By("Creating a ResourceQuota")
quotaName := "test-quota"
resourceQuota := newTestResourceQuota(quotaName)
_, err = createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota)
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status is calculated")
usedResources := v1.ResourceList{}
usedResources[v1.ResourceQuotas] = resource.MustParse(strconv.Itoa(c + 1))
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
framework.ExpectNoError(err)
})
/*
Release: v1.16
Testname: ResourceQuota, object count quota, service
Description: Create a ResourceQuota. Creation MUST be successful and its ResourceQuotaStatus MUST match to expected used and total allowed resource quota count within namespace.
Create a Service. Its creation MUST be successful and resource usage count against the Service object and resourceQuota object MUST be captured in ResourceQuotaStatus of the ResourceQuota.
Delete the Service. Deletion MUST succeed and resource usage count against the Service object MUST be released from ResourceQuotaStatus of the ResourceQuota.
*/
framework.ConformanceIt("should create a ResourceQuota and capture the life of a service.", func() {
ginkgo.By("Counting existing ResourceQuota")
c, err := countResourceQuota(f.ClientSet, f.Namespace.Name)
framework.ExpectNoError(err)
ginkgo.By("Creating a ResourceQuota")
quotaName := "test-quota"
resourceQuota := newTestResourceQuota(quotaName)
_, err = createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota)
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status is calculated")
usedResources := v1.ResourceList{}
usedResources[v1.ResourceQuotas] = resource.MustParse(strconv.Itoa(c + 1))
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
framework.ExpectNoError(err)
ginkgo.By("Creating a Service")
service := newTestServiceForQuota("test-service", v1.ServiceTypeClusterIP)
service, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), service, metav1.CreateOptions{})
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status captures service creation")
usedResources = v1.ResourceList{}
usedResources[v1.ResourceQuotas] = resource.MustParse(strconv.Itoa(c + 1))
usedResources[v1.ResourceServices] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
framework.ExpectNoError(err)
ginkgo.By("Deleting a Service")
err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), service.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status released usage")
usedResources[v1.ResourceServices] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
framework.ExpectNoError(err)
})
/*
Release: v1.16
Testname: ResourceQuota, object count quota, secret
Description: Create a ResourceQuota. Creation MUST be successful and its ResourceQuotaStatus MUST match to expected used and total allowed resource quota count within namespace.
Create a Secret. Its creation MUST be successful and resource usage count against the Secret object and resourceQuota object MUST be captured in ResourceQuotaStatus of the ResourceQuota.
Delete the Secret. Deletion MUST succeed and resource usage count against the Secret object MUST be released from ResourceQuotaStatus of the ResourceQuota.
*/
framework.ConformanceIt("should create a ResourceQuota and capture the life of a secret.", func() {
ginkgo.By("Discovering how many secrets are in namespace by default")
found, unchanged := 0, 0
// On contended servers the service account controller can slow down, leading to the count changing during a run.
// Wait up to 5s for the count to stabilize, assuming that updates come at a consistent rate, and are not held indefinitely.
wait.Poll(1*time.Second, 30*time.Second, func() (bool, error) {
secrets, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err)
if len(secrets.Items) == found {
// loop until the number of secrets has stabilized for 5 seconds
unchanged++
return unchanged > 4, nil
}
unchanged = 0
found = len(secrets.Items)
return false, nil
})
defaultSecrets := fmt.Sprintf("%d", found)
hardSecrets := fmt.Sprintf("%d", found+1)
ginkgo.By("Counting existing ResourceQuota")
c, err := countResourceQuota(f.ClientSet, f.Namespace.Name)
framework.ExpectNoError(err)
ginkgo.By("Creating a ResourceQuota")
quotaName := "test-quota"
resourceQuota := newTestResourceQuota(quotaName)
resourceQuota.Spec.Hard[v1.ResourceSecrets] = resource.MustParse(hardSecrets)
_, err = createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota)
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status is calculated")
usedResources := v1.ResourceList{}
usedResources[v1.ResourceQuotas] = resource.MustParse(strconv.Itoa(c + 1))
usedResources[v1.ResourceSecrets] = resource.MustParse(defaultSecrets)
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
framework.ExpectNoError(err)
ginkgo.By("Creating a Secret")
secret := newTestSecretForQuota("test-secret")
secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{})
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status captures secret creation")
usedResources = v1.ResourceList{}
usedResources[v1.ResourceSecrets] = resource.MustParse(hardSecrets)
// we expect there to be two secrets because each namespace will receive
// a service account token secret by default
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
framework.ExpectNoError(err)
ginkgo.By("Deleting a secret")
err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(context.TODO(), secret.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status released usage")
usedResources[v1.ResourceSecrets] = resource.MustParse(defaultSecrets)
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
framework.ExpectNoError(err)
})
/*
Release: v1.16
Testname: ResourceQuota, object count quota, pod
Description: Create a ResourceQuota. Creation MUST be successful and its ResourceQuotaStatus MUST match to expected used and total allowed resource quota count within namespace.
Create a Pod with resource request count for CPU, Memory, EphemeralStorage and ExtendedResourceName. Pod creation MUST be successful and respective resource usage count MUST be captured in ResourceQuotaStatus of the ResourceQuota.
Create another Pod with resource request exceeding remaining quota. Pod creation MUST fail as the request exceeds ResourceQuota limits.
Update the successfully created pod's resource requests. Updation MUST fail as a Pod can not dynamically update its resource requirements.
Delete the successfully created Pod. Pod Deletion MUST be scuccessful and it MUST release the allocated resource counts from ResourceQuotaStatus of the ResourceQuota.
*/
framework.ConformanceIt("should create a ResourceQuota and capture the life of a pod.", func() {
ginkgo.By("Counting existing ResourceQuota")
c, err := countResourceQuota(f.ClientSet, f.Namespace.Name)
framework.ExpectNoError(err)
ginkgo.By("Creating a ResourceQuota")
quotaName := "test-quota"
resourceQuota := newTestResourceQuota(quotaName)
_, err = createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota)
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status is calculated")
usedResources := v1.ResourceList{}
usedResources[v1.ResourceQuotas] = resource.MustParse(strconv.Itoa(c + 1))
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
framework.ExpectNoError(err)
ginkgo.By("Creating a Pod that fits quota")
podName := "test-pod"
requests := v1.ResourceList{}
limits := v1.ResourceList{}
requests[v1.ResourceCPU] = resource.MustParse("500m")
requests[v1.ResourceMemory] = resource.MustParse("252Mi")
requests[v1.ResourceEphemeralStorage] = resource.MustParse("30Gi")
requests[v1.ResourceName(extendedResourceName)] = resource.MustParse("2")
limits[v1.ResourceName(extendedResourceName)] = resource.MustParse("2")
pod := newTestPodForQuota(f, podName, requests, limits)
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err)
podToUpdate := pod
ginkgo.By("Ensuring ResourceQuota status captures the pod usage")
usedResources[v1.ResourceQuotas] = resource.MustParse(strconv.Itoa(c + 1))
usedResources[v1.ResourcePods] = resource.MustParse("1")
usedResources[v1.ResourceCPU] = requests[v1.ResourceCPU]
usedResources[v1.ResourceMemory] = requests[v1.ResourceMemory]
usedResources[v1.ResourceEphemeralStorage] = requests[v1.ResourceEphemeralStorage]
usedResources[v1.ResourceName(v1.DefaultResourceRequestsPrefix+extendedResourceName)] = requests[v1.ResourceName(extendedResourceName)]
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
framework.ExpectNoError(err)
ginkgo.By("Not allowing a pod to be created that exceeds remaining quota")
requests = v1.ResourceList{}
requests[v1.ResourceCPU] = resource.MustParse("600m")
requests[v1.ResourceMemory] = resource.MustParse("100Mi")
pod = newTestPodForQuota(f, "fail-pod", requests, v1.ResourceList{})
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectError(err)
ginkgo.By("Not allowing a pod to be created that exceeds remaining quota(validation on extended resources)")
requests = v1.ResourceList{}
limits = v1.ResourceList{}
requests[v1.ResourceCPU] = resource.MustParse("500m")
requests[v1.ResourceMemory] = resource.MustParse("100Mi")
requests[v1.ResourceEphemeralStorage] = resource.MustParse("30Gi")
requests[v1.ResourceName(extendedResourceName)] = resource.MustParse("2")
limits[v1.ResourceName(extendedResourceName)] = resource.MustParse("2")
pod = newTestPodForQuota(f, "fail-pod-for-extended-resource", requests, limits)
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectError(err)
ginkgo.By("Ensuring a pod cannot update its resource requirements")
// a pod cannot dynamically update its resource requirements.
requests = v1.ResourceList{}
requests[v1.ResourceCPU] = resource.MustParse("100m")
requests[v1.ResourceMemory] = resource.MustParse("100Mi")
requests[v1.ResourceEphemeralStorage] = resource.MustParse("10Gi")
podToUpdate.Spec.Containers[0].Resources.Requests = requests
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(context.TODO(), podToUpdate, metav1.UpdateOptions{})
framework.ExpectError(err)
ginkgo.By("Ensuring attempts to update pod resource requirements did not change quota usage")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
framework.ExpectNoError(err)
ginkgo.By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, *metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status released the pod usage")
usedResources[v1.ResourceQuotas] = resource.MustParse(strconv.Itoa(c + 1))
usedResources[v1.ResourcePods] = resource.MustParse("0")
usedResources[v1.ResourceCPU] = resource.MustParse("0")
usedResources[v1.ResourceMemory] = resource.MustParse("0")
usedResources[v1.ResourceEphemeralStorage] = resource.MustParse("0")
usedResources[v1.ResourceName(v1.DefaultResourceRequestsPrefix+extendedResourceName)] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
framework.ExpectNoError(err)
})
/*
Release: v1.16
Testname: ResourceQuota, object count quota, configmap
Description: Create a ResourceQuota. Creation MUST be successful and its ResourceQuotaStatus MUST match to expected used and total allowed resource quota count within namespace.
Create a ConfigMap. Its creation MUST be successful and resource usage count against the ConfigMap object MUST be captured in ResourceQuotaStatus of the ResourceQuota.
Delete the ConfigMap. Deletion MUST succeed and resource usage count against the ConfigMap object MUST be released from ResourceQuotaStatus of the ResourceQuota.
*/
framework.ConformanceIt("should create a ResourceQuota and capture the life of a configMap.", func() {
found, unchanged := 0, 0
// On contended servers the service account controller can slow down, leading to the count changing during a run.
// Wait up to 15s for the count to stabilize, assuming that updates come at a consistent rate, and are not held indefinitely.
wait.Poll(1*time.Second, time.Minute, func() (bool, error) {
configmaps, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err)
if len(configmaps.Items) == found {
// loop until the number of configmaps has stabilized for 15 seconds
unchanged++
return unchanged > 15, nil
}
unchanged = 0
found = len(configmaps.Items)
return false, nil
})
defaultConfigMaps := fmt.Sprintf("%d", found)
hardConfigMaps := fmt.Sprintf("%d", found+1)
ginkgo.By("Counting existing ResourceQuota")
c, err := countResourceQuota(f.ClientSet, f.Namespace.Name)
framework.ExpectNoError(err)
ginkgo.By("Creating a ResourceQuota")
quotaName := "test-quota"
resourceQuota := newTestResourceQuota(quotaName)
_, err = createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota)
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status is calculated")
usedResources := v1.ResourceList{}
usedResources[v1.ResourceQuotas] = resource.MustParse(strconv.Itoa(c + 1))
usedResources[v1.ResourceConfigMaps] = resource.MustParse(defaultConfigMaps)
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
framework.ExpectNoError(err)
ginkgo.By("Creating a ConfigMap")
configMap := newTestConfigMapForQuota("test-configmap")
configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{})
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status captures configMap creation")
usedResources = v1.ResourceList{}
usedResources[v1.ResourceQuotas] = resource.MustParse(strconv.Itoa(c + 1))
// we expect there to be two configmaps because each namespace will receive
// a ca.crt configmap by default.
// ref:https://github.com/kubernetes/kubernetes/pull/68812
usedResources[v1.ResourceConfigMaps] = resource.MustParse(hardConfigMaps)
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
framework.ExpectNoError(err)
ginkgo.By("Deleting a ConfigMap")
err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), configMap.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status released usage")
usedResources[v1.ResourceConfigMaps] = resource.MustParse(defaultConfigMaps)
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
framework.ExpectNoError(err)
})
/*
Release: v1.16
Testname: ResourceQuota, object count quota, replicationController
Description: Create a ResourceQuota. Creation MUST be successful and its ResourceQuotaStatus MUST match to expected used and total allowed resource quota count within namespace.
Create a ReplicationController. Its creation MUST be successful and resource usage count against the ReplicationController object MUST be captured in ResourceQuotaStatus of the ResourceQuota.
Delete the ReplicationController. Deletion MUST succeed and resource usage count against the ReplicationController object MUST be released from ResourceQuotaStatus of the ResourceQuota.
*/
framework.ConformanceIt("should create a ResourceQuota and capture the life of a replication controller.", func() {
ginkgo.By("Counting existing ResourceQuota")
c, err := countResourceQuota(f.ClientSet, f.Namespace.Name)
framework.ExpectNoError(err)
ginkgo.By("Creating a ResourceQuota")
quotaName := "test-quota"
resourceQuota := newTestResourceQuota(quotaName)
_, err = createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota)
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status is calculated")
usedResources := v1.ResourceList{}
usedResources[v1.ResourceQuotas] = resource.MustParse(strconv.Itoa(c + 1))
usedResources[v1.ResourceReplicationControllers] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
framework.ExpectNoError(err)
ginkgo.By("Creating a ReplicationController")
replicationController := newTestReplicationControllerForQuota("test-rc", "nginx", 0)
replicationController, err = f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(context.TODO(), replicationController, metav1.CreateOptions{})
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status captures replication controller creation")
usedResources = v1.ResourceList{}
usedResources[v1.ResourceReplicationControllers] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
framework.ExpectNoError(err)
ginkgo.By("Deleting a ReplicationController")
// Without the delete options, the object isn't actually
// removed until the GC verifies that all children have been
// detached. ReplicationControllers default to "orphan", which
// is different from most resources. (Why? To preserve a common
// workflow from prior to the GC's introduction.)
err = f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Delete(context.TODO(), replicationController.Name, metav1.DeleteOptions{
PropagationPolicy: func() *metav1.DeletionPropagation {
p := metav1.DeletePropagationBackground
return &p
}(),
})
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status released usage")
usedResources[v1.ResourceReplicationControllers] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
framework.ExpectNoError(err)
})
/*
Release: v1.16
Testname: ResourceQuota, object count quota, replicaSet
Description: Create a ResourceQuota. Creation MUST be successful and its ResourceQuotaStatus MUST match to expected used and total allowed resource quota count within namespace.
Create a ReplicaSet. Its creation MUST be successful and resource usage count against the ReplicaSet object MUST be captured in ResourceQuotaStatus of the ResourceQuota.
Delete the ReplicaSet. Deletion MUST succeed and resource usage count against the ReplicaSet object MUST be released from ResourceQuotaStatus of the ResourceQuota.
*/
framework.ConformanceIt("should create a ResourceQuota and capture the life of a replica set.", func() {
ginkgo.By("Counting existing ResourceQuota")
c, err := countResourceQuota(f.ClientSet, f.Namespace.Name)
framework.ExpectNoError(err)
ginkgo.By("Creating a ResourceQuota")
quotaName := "test-quota"
resourceQuota := newTestResourceQuota(quotaName)
_, err = createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota)
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status is calculated")
usedResources := v1.ResourceList{}
usedResources[v1.ResourceQuotas] = resource.MustParse(strconv.Itoa(c + 1))
usedResources[v1.ResourceName("count/replicasets.apps")] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
framework.ExpectNoError(err)
ginkgo.By("Creating a ReplicaSet")
replicaSet := newTestReplicaSetForQuota("test-rs", "nginx", 0)
replicaSet, err = f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(context.TODO(), replicaSet, metav1.CreateOptions{})
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status captures replicaset creation")
usedResources = v1.ResourceList{}
usedResources[v1.ResourceName("count/replicasets.apps")] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
framework.ExpectNoError(err)
ginkgo.By("Deleting a ReplicaSet")
err = f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Delete(context.TODO(), replicaSet.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status released usage")
usedResources[v1.ResourceName("count/replicasets.apps")] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
framework.ExpectNoError(err)
})
/*
Release: v1.16
Testname: ResourceQuota, object count quota, pvc
Description: Create a ResourceQuota. Creation MUST be successful and its ResourceQuotaStatus MUST match to expected used and total allowed resource quota count within namespace.
Create PersistentVolumeClaim (PVC) to request storage capacity of 1G. PVC creation MUST be successful and resource usage count against the PVC and storage object MUST be captured in ResourceQuotaStatus of the ResourceQuota.
Delete the PVC. Deletion MUST succeed and resource usage count against its PVC and storage object MUST be released from ResourceQuotaStatus of the ResourceQuota.
[NotConformancePromotable] as test suite do not have any e2e at this moment which are explicitly verifying PV and PVC behaviour.
*/
ginkgo.It("should create a ResourceQuota and capture the life of a persistent volume claim. [sig-storage]", func() {
ginkgo.By("Counting existing ResourceQuota")
c, err := countResourceQuota(f.ClientSet, f.Namespace.Name)
framework.ExpectNoError(err)
ginkgo.By("Creating a ResourceQuota")
quotaName := "test-quota"
resourceQuota := newTestResourceQuota(quotaName)
_, err = createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota)
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status is calculated")
usedResources := v1.ResourceList{}
usedResources[v1.ResourceQuotas] = resource.MustParse(strconv.Itoa(c + 1))
usedResources[v1.ResourcePersistentVolumeClaims] = resource.MustParse("0")
usedResources[v1.ResourceRequestsStorage] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
framework.ExpectNoError(err)
ginkgo.By("Creating a PersistentVolumeClaim")
pvc := newTestPersistentVolumeClaimForQuota("test-claim")
pvc, err = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Create(context.TODO(), pvc, metav1.CreateOptions{})
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status captures persistent volume claim creation")
usedResources = v1.ResourceList{}
usedResources[v1.ResourcePersistentVolumeClaims] = resource.MustParse("1")
usedResources[v1.ResourceRequestsStorage] = resource.MustParse("1Gi")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
framework.ExpectNoError(err)
ginkgo.By("Deleting a PersistentVolumeClaim")
err = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Delete(context.TODO(), pvc.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status released usage")
usedResources[v1.ResourcePersistentVolumeClaims] = resource.MustParse("0")
usedResources[v1.ResourceRequestsStorage] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
framework.ExpectNoError(err)
})
/*
Release: v1.16
Testname: ResourceQuota, object count quota, storageClass
Description: Create a ResourceQuota. Creation MUST be successful and its ResourceQuotaStatus MUST match to expected used and total allowed resource quota count within namespace.
Create PersistentVolumeClaim (PVC) with specified storageClass to request storage capacity of 1G. PVC creation MUST be successful and resource usage count against PVC, storageClass and storage object MUST be captured in ResourceQuotaStatus of the ResourceQuota.
Delete the PVC. Deletion MUST succeed and resource usage count against PVC, storageClass and storage object MUST be released from ResourceQuotaStatus of the ResourceQuota.
[NotConformancePromotable] as test suite do not have any e2e at this moment which are explicitly verifying PV and PVC behaviour.
*/
ginkgo.It("should create a ResourceQuota and capture the life of a persistent volume claim with a storage class. [sig-storage]", func() {
ginkgo.By("Counting existing ResourceQuota")
c, err := countResourceQuota(f.ClientSet, f.Namespace.Name)
framework.ExpectNoError(err)
ginkgo.By("Creating a ResourceQuota")
quotaName := "test-quota"
resourceQuota := newTestResourceQuota(quotaName)
_, err = createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota)
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status is calculated")
usedResources := v1.ResourceList{}
usedResources[v1.ResourceQuotas] = resource.MustParse(strconv.Itoa(c + 1))
usedResources[v1.ResourcePersistentVolumeClaims] = resource.MustParse("0")
usedResources[v1.ResourceRequestsStorage] = resource.MustParse("0")
usedResources[core.V1ResourceByStorageClass(classGold, v1.ResourcePersistentVolumeClaims)] = resource.MustParse("0")
usedResources[core.V1ResourceByStorageClass(classGold, v1.ResourceRequestsStorage)] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
framework.ExpectNoError(err)
ginkgo.By("Creating a PersistentVolumeClaim with storage class")
pvc := newTestPersistentVolumeClaimForQuota("test-claim")
pvc.Spec.StorageClassName = &classGold
pvc, err = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Create(context.TODO(), pvc, metav1.CreateOptions{})
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status captures persistent volume claim creation")
usedResources = v1.ResourceList{}
usedResources[v1.ResourcePersistentVolumeClaims] = resource.MustParse("1")
usedResources[v1.ResourceRequestsStorage] = resource.MustParse("1Gi")
usedResources[core.V1ResourceByStorageClass(classGold, v1.ResourcePersistentVolumeClaims)] = resource.MustParse("1")
usedResources[core.V1ResourceByStorageClass(classGold, v1.ResourceRequestsStorage)] = resource.MustParse("1Gi")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
framework.ExpectNoError(err)
ginkgo.By("Deleting a PersistentVolumeClaim")
err = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Delete(context.TODO(), pvc.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status released usage")
usedResources[v1.ResourcePersistentVolumeClaims] = resource.MustParse("0")
usedResources[v1.ResourceRequestsStorage] = resource.MustParse("0")
usedResources[core.V1ResourceByStorageClass(classGold, v1.ResourcePersistentVolumeClaims)] = resource.MustParse("0")
usedResources[core.V1ResourceByStorageClass(classGold, v1.ResourceRequestsStorage)] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
framework.ExpectNoError(err)
})
ginkgo.It("should create a ResourceQuota and capture the life of a custom resource.", func() {
ginkgo.By("Creating a Custom Resource Definition")
testcrd, err := crd.CreateTestCRD(f)
framework.ExpectNoError(err)
defer testcrd.CleanUp()
countResourceName := "count/" + testcrd.Crd.Spec.Names.Plural + "." + testcrd.Crd.Spec.Group
// resourcequota controller needs to take 30 seconds at most to detect the new custom resource.
// in order to make sure the resourcequota controller knows this resource, we create one test
// resourcequota object, and triggering updates on it until the status is updated.
quotaName := "quota-for-" + testcrd.Crd.Spec.Names.Plural
_, err = createResourceQuota(f.ClientSet, f.Namespace.Name, &v1.ResourceQuota{
ObjectMeta: metav1.ObjectMeta{Name: quotaName},
Spec: v1.ResourceQuotaSpec{
Hard: v1.ResourceList{
v1.ResourceName(countResourceName): resource.MustParse("0"),
},
},
})
framework.ExpectNoError(err)
err = updateResourceQuotaUntilUsageAppears(f.ClientSet, f.Namespace.Name, quotaName, v1.ResourceName(countResourceName))
framework.ExpectNoError(err)
err = f.ClientSet.CoreV1().ResourceQuotas(f.Namespace.Name).Delete(context.TODO(), quotaName, metav1.DeleteOptions{})
framework.ExpectNoError(err)
ginkgo.By("Counting existing ResourceQuota")
c, err := countResourceQuota(f.ClientSet, f.Namespace.Name)
framework.ExpectNoError(err)
ginkgo.By("Creating a ResourceQuota")
quotaName = "test-quota"
resourceQuota := newTestResourceQuota(quotaName)
resourceQuota.Spec.Hard[v1.ResourceName(countResourceName)] = resource.MustParse("1")
_, err = createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota)
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status is calculated")
usedResources := v1.ResourceList{}
usedResources[v1.ResourceQuotas] = resource.MustParse(strconv.Itoa(c + 1))
usedResources[v1.ResourceName(countResourceName)] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
framework.ExpectNoError(err)
ginkgo.By("Creating a custom resource")
resourceClient := testcrd.DynamicClients["v1"]
testcr, err := instantiateCustomResource(&unstructured.Unstructured{
Object: map[string]interface{}{
"apiVersion": testcrd.Crd.Spec.Group + "/" + testcrd.Crd.Spec.Versions[0].Name,
"kind": testcrd.Crd.Spec.Names.Kind,
"metadata": map[string]interface{}{
"name": "test-cr-1",
},
},
}, resourceClient, testcrd.Crd)
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status captures custom resource creation")
usedResources = v1.ResourceList{}
usedResources[v1.ResourceName(countResourceName)] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
framework.ExpectNoError(err)
ginkgo.By("Creating a second custom resource")
_, err = instantiateCustomResource(&unstructured.Unstructured{
Object: map[string]interface{}{
"apiVersion": testcrd.Crd.Spec.Group + "/" + testcrd.Crd.Spec.Versions[0].Name,
"kind": testcrd.Crd.Spec.Names.Kind,
"metadata": map[string]interface{}{
"name": "test-cr-2",
},
},
}, resourceClient, testcrd.Crd)
// since we only give one quota, this creation should fail.
framework.ExpectError(err)
ginkgo.By("Deleting a custom resource")
err = deleteCustomResource(resourceClient, testcr.GetName())
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status released usage")
usedResources[v1.ResourceName(countResourceName)] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
framework.ExpectNoError(err)
})
/*
Release: v1.16
Testname: ResourceQuota, quota scope, Terminating and NotTerminating scope
Description: Create two ResourceQuotas, one with 'Terminating' scope and another 'NotTerminating' scope. Request and the limit counts for CPU and Memory resources are set for the ResourceQuota. Creation MUST be successful and their ResourceQuotaStatus MUST match to expected used and total allowed resource quota count within namespace.
Create a Pod with specified CPU and Memory ResourceRequirements fall within quota limits. Pod creation MUST be successful and usage count MUST be captured in ResourceQuotaStatus of 'NotTerminating' scoped ResourceQuota but MUST NOT in 'Terminating' scoped ResourceQuota.
Delete the Pod. Pod deletion MUST succeed and Pod resource usage count MUST be released from ResourceQuotaStatus of 'NotTerminating' scoped ResourceQuota.
Create a pod with specified activeDeadlineSeconds and resourceRequirements for CPU and Memory fall within quota limits. Pod creation MUST be successful and usage count MUST be captured in ResourceQuotaStatus of 'Terminating' scoped ResourceQuota but MUST NOT in 'NotTerminating' scoped ResourceQuota.
Delete the Pod. Pod deletion MUST succeed and Pod resource usage count MUST be released from ResourceQuotaStatus of 'Terminating' scoped ResourceQuota.
*/
framework.ConformanceIt("should verify ResourceQuota with terminating scopes.", func() {
	// Create a quota that (per its scope) should only track terminating pods,
	// and verify its status converges to zero usage before any pods exist.
	ginkgo.By("Creating a ResourceQuota with terminating scope")
	quotaTerminatingName := "quota-terminating"
	resourceQuotaTerminating, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScope(quotaTerminatingName, v1.ResourceQuotaScopeTerminating))
	framework.ExpectNoError(err)
	ginkgo.By("Ensuring ResourceQuota status is calculated")
	usedResources := v1.ResourceList{}
	usedResources[v1.ResourcePods] = resource.MustParse("0")
	err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources)
	framework.ExpectNoError(err)
	// Second quota with the complementary NotTerminating scope; starts at zero too.
	ginkgo.By("Creating a ResourceQuota with not terminating scope")
	quotaNotTerminatingName := "quota-not-terminating"
	resourceQuotaNotTerminating, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScope(quotaNotTerminatingName, v1.ResourceQuotaScopeNotTerminating))
	framework.ExpectNoError(err)
	ginkgo.By("Ensuring ResourceQuota status is calculated")
	err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources)
	framework.ExpectNoError(err)
	// A pod WITHOUT activeDeadlineSeconds: expected to be charged only to the
	// NotTerminating quota.
	ginkgo.By("Creating a long running pod")
	podName := "test-pod"
	requests := v1.ResourceList{}
	requests[v1.ResourceCPU] = resource.MustParse("500m")
	requests[v1.ResourceMemory] = resource.MustParse("200Mi")
	limits := v1.ResourceList{}
	limits[v1.ResourceCPU] = resource.MustParse("1")
	limits[v1.ResourceMemory] = resource.MustParse("400Mi")
	pod := newTestPodForQuota(f, podName, requests, limits)
	_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
	framework.ExpectNoError(err)
	// The NotTerminating quota must now reflect the pod's count, requests and limits.
	ginkgo.By("Ensuring resource quota with not terminating scope captures the pod usage")
	usedResources[v1.ResourcePods] = resource.MustParse("1")
	usedResources[v1.ResourceRequestsCPU] = requests[v1.ResourceCPU]
	usedResources[v1.ResourceRequestsMemory] = requests[v1.ResourceMemory]
	usedResources[v1.ResourceLimitsCPU] = limits[v1.ResourceCPU]
	usedResources[v1.ResourceLimitsMemory] = limits[v1.ResourceMemory]
	err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources)
	framework.ExpectNoError(err)
	// The same usedResources map is reused: reset every entry to zero before
	// asserting that the Terminating quota saw nothing.
	ginkgo.By("Ensuring resource quota with terminating scope ignored the pod usage")
	usedResources[v1.ResourcePods] = resource.MustParse("0")
	usedResources[v1.ResourceRequestsCPU] = resource.MustParse("0")
	usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0")
	usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0")
	usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0")
	err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources)
	framework.ExpectNoError(err)
	// Grace period 0 for an immediate delete, then wait for usage release.
	ginkgo.By("Deleting the pod")
	err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, *metav1.NewDeleteOptions(0))
	framework.ExpectNoError(err)
	ginkgo.By("Ensuring resource quota status released the pod usage")
	usedResources[v1.ResourcePods] = resource.MustParse("0")
	usedResources[v1.ResourceRequestsCPU] = resource.MustParse("0")
	usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0")
	usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0")
	usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0")
	err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources)
	framework.ExpectNoError(err)
	// A pod WITH activeDeadlineSeconds set: expected to be charged only to the
	// Terminating quota (mirror image of the first half of the test).
	ginkgo.By("Creating a terminating pod")
	podName = "terminating-pod"
	pod = newTestPodForQuota(f, podName, requests, limits)
	activeDeadlineSeconds := int64(3600)
	pod.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds
	_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
	framework.ExpectNoError(err)
	ginkgo.By("Ensuring resource quota with terminating scope captures the pod usage")
	usedResources[v1.ResourcePods] = resource.MustParse("1")
	usedResources[v1.ResourceRequestsCPU] = requests[v1.ResourceCPU]
	usedResources[v1.ResourceRequestsMemory] = requests[v1.ResourceMemory]
	usedResources[v1.ResourceLimitsCPU] = limits[v1.ResourceCPU]
	usedResources[v1.ResourceLimitsMemory] = limits[v1.ResourceMemory]
	err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources)
	framework.ExpectNoError(err)
	ginkgo.By("Ensuring resource quota with not terminating scope ignored the pod usage")
	usedResources[v1.ResourcePods] = resource.MustParse("0")
	usedResources[v1.ResourceRequestsCPU] = resource.MustParse("0")
	usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0")
	usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0")
	usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0")
	err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources)
	framework.ExpectNoError(err)
	ginkgo.By("Deleting the pod")
	err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, *metav1.NewDeleteOptions(0))
	framework.ExpectNoError(err)
	ginkgo.By("Ensuring resource quota status released the pod usage")
	usedResources[v1.ResourcePods] = resource.MustParse("0")
	usedResources[v1.ResourceRequestsCPU] = resource.MustParse("0")
	usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0")
	usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0")
	usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0")
	err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources)
	framework.ExpectNoError(err)
})
/*
Release: v1.16
Testname: ResourceQuota, quota scope, BestEffort and NotBestEffort scope
Description: Create two ResourceQuotas, one with 'BestEffort' scope and another with 'NotBestEffort' scope. Creation MUST be successful and their ResourceQuotaStatus MUST match to expected used and total allowed resource quota count within namespace.
Create a 'BestEffort' Pod by not explicitly specifying resource limits and requests. Pod creation MUST be successful and usage count MUST be captured in ResourceQuotaStatus of 'BestEffort' scoped ResourceQuota but MUST NOT in 'NotBestEffort' scoped ResourceQuota.
Delete the Pod. Pod deletion MUST succeed and Pod resource usage count MUST be released from ResourceQuotaStatus of 'BestEffort' scoped ResourceQuota.
Create a 'NotBestEffort' Pod by explicitly specifying resource limits and requests. Pod creation MUST be successful and usage count MUST be captured in ResourceQuotaStatus of 'NotBestEffort' scoped ResourceQuota but MUST NOT in 'BestEffort' scoped ResourceQuota.
Delete the Pod. Pod deletion MUST succeed and Pod resource usage count MUST be released from ResourceQuotaStatus of 'NotBestEffort' scoped ResourceQuota.
*/
framework.ConformanceIt("should verify ResourceQuota with best effort scope.", func() {
	// Expected "used" pod counts, shared across all the status checks below.
	zeroPods := v1.ResourceList{v1.ResourcePods: resource.MustParse("0")}
	onePod := v1.ResourceList{v1.ResourcePods: resource.MustParse("1")}

	// One quota per scope; both must settle at zero usage before any pod exists.
	ginkgo.By("Creating a ResourceQuota with best effort scope")
	bestEffortQuota, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScope("quota-besteffort", v1.ResourceQuotaScopeBestEffort))
	framework.ExpectNoError(err)
	ginkgo.By("Ensuring ResourceQuota status is calculated")
	framework.ExpectNoError(waitForResourceQuota(f.ClientSet, f.Namespace.Name, bestEffortQuota.Name, zeroPods))

	ginkgo.By("Creating a ResourceQuota with not best effort scope")
	notBestEffortQuota, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScope("quota-not-besteffort", v1.ResourceQuotaScopeNotBestEffort))
	framework.ExpectNoError(err)
	ginkgo.By("Ensuring ResourceQuota status is calculated")
	framework.ExpectNoError(waitForResourceQuota(f.ClientSet, f.Namespace.Name, notBestEffortQuota.Name, zeroPods))

	// Empty requests and limits: the pod should be counted by the best-effort
	// quota only.
	ginkgo.By("Creating a best-effort pod")
	pod := newTestPodForQuota(f, podName, v1.ResourceList{}, v1.ResourceList{})
	pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
	framework.ExpectNoError(err)
	ginkgo.By("Ensuring resource quota with best effort scope captures the pod usage")
	framework.ExpectNoError(waitForResourceQuota(f.ClientSet, f.Namespace.Name, bestEffortQuota.Name, onePod))
	ginkgo.By("Ensuring resource quota with not best effort ignored the pod usage")
	framework.ExpectNoError(waitForResourceQuota(f.ClientSet, f.Namespace.Name, notBestEffortQuota.Name, zeroPods))

	// Immediate delete (grace period 0), then usage must be released again.
	ginkgo.By("Deleting the pod")
	framework.ExpectNoError(f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0)))
	ginkgo.By("Ensuring resource quota status released the pod usage")
	framework.ExpectNoError(waitForResourceQuota(f.ClientSet, f.Namespace.Name, bestEffortQuota.Name, zeroPods))

	// Explicit requests/limits: the pod should be counted by the
	// not-best-effort quota only.
	ginkgo.By("Creating a not best-effort pod")
	requests := v1.ResourceList{
		v1.ResourceCPU:    resource.MustParse("500m"),
		v1.ResourceMemory: resource.MustParse("200Mi"),
	}
	limits := v1.ResourceList{
		v1.ResourceCPU:    resource.MustParse("1"),
		v1.ResourceMemory: resource.MustParse("400Mi"),
	}
	pod = newTestPodForQuota(f, "burstable-pod", requests, limits)
	pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
	framework.ExpectNoError(err)
	ginkgo.By("Ensuring resource quota with not best effort scope captures the pod usage")
	framework.ExpectNoError(waitForResourceQuota(f.ClientSet, f.Namespace.Name, notBestEffortQuota.Name, onePod))
	ginkgo.By("Ensuring resource quota with best effort scope ignored the pod usage")
	framework.ExpectNoError(waitForResourceQuota(f.ClientSet, f.Namespace.Name, bestEffortQuota.Name, zeroPods))

	ginkgo.By("Deleting the pod")
	framework.ExpectNoError(f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0)))
	ginkgo.By("Ensuring resource quota status released the pod usage")
	framework.ExpectNoError(waitForResourceQuota(f.ClientSet, f.Namespace.Name, notBestEffortQuota.Name, zeroPods))
})
/*
Release: v1.16
Testname: ResourceQuota, update and delete
Description: Create a ResourceQuota for CPU and Memory quota limits. Creation MUST be successful.
When ResourceQuota is updated to modify CPU and Memory quota limits, update MUST succeed with updated values for CPU and Memory limits.
When ResourceQuota is deleted, it MUST not be available in the namespace.
*/
framework.ConformanceIt("should be able to update and delete ResourceQuota.", func() {
	client := f.ClientSet
	ns := f.Namespace.Name

	// Build the quota object in one literal: name plus the initial hard limits.
	ginkgo.By("Creating a ResourceQuota")
	quotaName := "test-quota"
	quota := &v1.ResourceQuota{
		ObjectMeta: metav1.ObjectMeta{Name: quotaName},
		Spec: v1.ResourceQuotaSpec{
			Hard: v1.ResourceList{
				v1.ResourceCPU:    resource.MustParse("1"),
				v1.ResourceMemory: resource.MustParse("500Mi"),
			},
		},
	}
	_, err := createResourceQuota(client, ns, quota)
	framework.ExpectNoError(err)

	// Read it back and confirm the server stored the limits we asked for.
	ginkgo.By("Getting a ResourceQuota")
	got, err := client.CoreV1().ResourceQuotas(ns).Get(context.TODO(), quotaName, metav1.GetOptions{})
	framework.ExpectNoError(err)
	framework.ExpectEqual(got.Spec.Hard[v1.ResourceCPU], resource.MustParse("1"))
	framework.ExpectEqual(got.Spec.Hard[v1.ResourceMemory], resource.MustParse("500Mi"))

	// Bump both limits on the local object and push the update; the response
	// must already carry the new values.
	ginkgo.By("Updating a ResourceQuota")
	quota.Spec.Hard[v1.ResourceCPU] = resource.MustParse("2")
	quota.Spec.Hard[v1.ResourceMemory] = resource.MustParse("1Gi")
	got, err = client.CoreV1().ResourceQuotas(ns).Update(context.TODO(), quota, metav1.UpdateOptions{})
	framework.ExpectNoError(err)
	framework.ExpectEqual(got.Spec.Hard[v1.ResourceCPU], resource.MustParse("2"))
	framework.ExpectEqual(got.Spec.Hard[v1.ResourceMemory], resource.MustParse("1Gi"))

	// A fresh Get must show the modification was persisted.
	ginkgo.By("Verifying a ResourceQuota was modified")
	got, err = client.CoreV1().ResourceQuotas(ns).Get(context.TODO(), quotaName, metav1.GetOptions{})
	framework.ExpectNoError(err)
	framework.ExpectEqual(got.Spec.Hard[v1.ResourceCPU], resource.MustParse("2"))
	framework.ExpectEqual(got.Spec.Hard[v1.ResourceMemory], resource.MustParse("1Gi"))

	// Delete, then confirm the quota is gone via a NotFound error.
	ginkgo.By("Deleting a ResourceQuota")
	framework.ExpectNoError(deleteResourceQuota(client, ns, quotaName))
	ginkgo.By("Verifying the deleted ResourceQuota")
	_, err = client.CoreV1().ResourceQuotas(ns).Get(context.TODO(), quotaName, metav1.GetOptions{})
	framework.ExpectEqual(apierrors.IsNotFound(err), true)
})
})
// These tests mirror the scope-based conformance tests above, but build the
// quotas with newTestResourceQuotaWithScopeSelector (scopeSelector in the spec)
// rather than newTestResourceQuotaWithScope.
var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() {
	f := framework.NewDefaultFramework("scope-selectors")
	ginkgo.It("should verify ResourceQuota with best effort scope using scope-selectors.", func() {
		// Create one quota per scope (via scope selectors) and wait for both to
		// report zero pod usage.
		ginkgo.By("Creating a ResourceQuota with best effort scope")
		resourceQuotaBestEffort, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeSelector("quota-besteffort", v1.ResourceQuotaScopeBestEffort))
		framework.ExpectNoError(err)
		ginkgo.By("Ensuring ResourceQuota status is calculated")
		usedResources := v1.ResourceList{}
		usedResources[v1.ResourcePods] = resource.MustParse("0")
		err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources)
		framework.ExpectNoError(err)
		ginkgo.By("Creating a ResourceQuota with not best effort scope")
		resourceQuotaNotBestEffort, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeSelector("quota-not-besteffort", v1.ResourceQuotaScopeNotBestEffort))
		framework.ExpectNoError(err)
		ginkgo.By("Ensuring ResourceQuota status is calculated")
		err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources)
		framework.ExpectNoError(err)
		// Empty requests/limits make the pod best-effort; only the best-effort
		// quota should charge it. NOTE(review): podName is not declared in this
		// closure — presumably a package-level name defined elsewhere in the file.
		ginkgo.By("Creating a best-effort pod")
		pod := newTestPodForQuota(f, podName, v1.ResourceList{}, v1.ResourceList{})
		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
		framework.ExpectNoError(err)
		ginkgo.By("Ensuring resource quota with best effort scope captures the pod usage")
		usedResources[v1.ResourcePods] = resource.MustParse("1")
		err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources)
		framework.ExpectNoError(err)
		ginkgo.By("Ensuring resource quota with not best effort ignored the pod usage")
		usedResources[v1.ResourcePods] = resource.MustParse("0")
		err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources)
		framework.ExpectNoError(err)
		// Immediate delete (grace period 0), then the charge must be released.
		ginkgo.By("Deleting the pod")
		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
		framework.ExpectNoError(err)
		ginkgo.By("Ensuring resource quota status released the pod usage")
		usedResources[v1.ResourcePods] = resource.MustParse("0")
		err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources)
		framework.ExpectNoError(err)
		// Explicit requests/limits make the pod not-best-effort; only the
		// not-best-effort quota should charge it.
		ginkgo.By("Creating a not best-effort pod")
		requests := v1.ResourceList{}
		requests[v1.ResourceCPU] = resource.MustParse("500m")
		requests[v1.ResourceMemory] = resource.MustParse("200Mi")
		limits := v1.ResourceList{}
		limits[v1.ResourceCPU] = resource.MustParse("1")
		limits[v1.ResourceMemory] = resource.MustParse("400Mi")
		pod = newTestPodForQuota(f, "burstable-pod", requests, limits)
		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
		framework.ExpectNoError(err)
		ginkgo.By("Ensuring resource quota with not best effort scope captures the pod usage")
		usedResources[v1.ResourcePods] = resource.MustParse("1")
		err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources)
		framework.ExpectNoError(err)
		ginkgo.By("Ensuring resource quota with best effort scope ignored the pod usage")
		usedResources[v1.ResourcePods] = resource.MustParse("0")
		err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources)
		framework.ExpectNoError(err)
		ginkgo.By("Deleting the pod")
		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
		framework.ExpectNoError(err)
		ginkgo.By("Ensuring resource quota status released the pod usage")
		usedResources[v1.ResourcePods] = resource.MustParse("0")
		err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources)
		framework.ExpectNoError(err)
	})
	ginkgo.It("should verify ResourceQuota with terminating scopes through scope selectors.", func() {
		// Terminating / NotTerminating quotas built via scope selectors; both
		// must converge to zero usage first.
		ginkgo.By("Creating a ResourceQuota with terminating scope")
		quotaTerminatingName := "quota-terminating"
		resourceQuotaTerminating, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeSelector(quotaTerminatingName, v1.ResourceQuotaScopeTerminating))
		framework.ExpectNoError(err)
		ginkgo.By("Ensuring ResourceQuota status is calculated")
		usedResources := v1.ResourceList{}
		usedResources[v1.ResourcePods] = resource.MustParse("0")
		err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources)
		framework.ExpectNoError(err)
		ginkgo.By("Creating a ResourceQuota with not terminating scope")
		quotaNotTerminatingName := "quota-not-terminating"
		resourceQuotaNotTerminating, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeSelector(quotaNotTerminatingName, v1.ResourceQuotaScopeNotTerminating))
		framework.ExpectNoError(err)
		ginkgo.By("Ensuring ResourceQuota status is calculated")
		err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources)
		framework.ExpectNoError(err)
		// Pod without activeDeadlineSeconds: should be charged only to the
		// NotTerminating quota.
		ginkgo.By("Creating a long running pod")
		podName := "test-pod"
		requests := v1.ResourceList{}
		requests[v1.ResourceCPU] = resource.MustParse("500m")
		requests[v1.ResourceMemory] = resource.MustParse("200Mi")
		limits := v1.ResourceList{}
		limits[v1.ResourceCPU] = resource.MustParse("1")
		limits[v1.ResourceMemory] = resource.MustParse("400Mi")
		pod := newTestPodForQuota(f, podName, requests, limits)
		_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
		framework.ExpectNoError(err)
		ginkgo.By("Ensuring resource quota with not terminating scope captures the pod usage")
		usedResources[v1.ResourcePods] = resource.MustParse("1")
		usedResources[v1.ResourceRequestsCPU] = requests[v1.ResourceCPU]
		usedResources[v1.ResourceRequestsMemory] = requests[v1.ResourceMemory]
		usedResources[v1.ResourceLimitsCPU] = limits[v1.ResourceCPU]
		usedResources[v1.ResourceLimitsMemory] = limits[v1.ResourceMemory]
		err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources)
		framework.ExpectNoError(err)
		// Reset the shared map to zero before asserting that the Terminating
		// quota ignored the pod.
		ginkgo.By("Ensuring resource quota with terminating scope ignored the pod usage")
		usedResources[v1.ResourcePods] = resource.MustParse("0")
		usedResources[v1.ResourceRequestsCPU] = resource.MustParse("0")
		usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0")
		usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0")
		usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0")
		err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources)
		framework.ExpectNoError(err)
		ginkgo.By("Deleting the pod")
		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, *metav1.NewDeleteOptions(0))
		framework.ExpectNoError(err)
		ginkgo.By("Ensuring resource quota status released the pod usage")
		usedResources[v1.ResourcePods] = resource.MustParse("0")
		usedResources[v1.ResourceRequestsCPU] = resource.MustParse("0")
		usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0")
		usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0")
		usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0")
		err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources)
		framework.ExpectNoError(err)
		// Pod with activeDeadlineSeconds set: should be charged only to the
		// Terminating quota.
		ginkgo.By("Creating a terminating pod")
		podName = "terminating-pod"
		pod = newTestPodForQuota(f, podName, requests, limits)
		activeDeadlineSeconds := int64(3600)
		pod.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds
		_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
		framework.ExpectNoError(err)
		ginkgo.By("Ensuring resource quota with terminating scope captures the pod usage")
		usedResources[v1.ResourcePods] = resource.MustParse("1")
		usedResources[v1.ResourceRequestsCPU] = requests[v1.ResourceCPU]
		usedResources[v1.ResourceRequestsMemory] = requests[v1.ResourceMemory]
		usedResources[v1.ResourceLimitsCPU] = limits[v1.ResourceCPU]
		usedResources[v1.ResourceLimitsMemory] = limits[v1.ResourceMemory]
		err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources)
		framework.ExpectNoError(err)
		ginkgo.By("Ensuring resource quota with not terminating scope ignored the pod usage")
		usedResources[v1.ResourcePods] = resource.MustParse("0")
		usedResources[v1.ResourceRequestsCPU] = resource.MustParse("0")
		usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0")
		usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0")
		usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0")
		err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources)
		framework.ExpectNoError(err)
		ginkgo.By("Deleting the pod")
		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, *metav1.NewDeleteOptions(0))
		framework.ExpectNoError(err)
		ginkgo.By("Ensuring resource quota status released the pod usage")
		usedResources[v1.ResourcePods] = resource.MustParse("0")
		usedResources[v1.ResourceRequestsCPU] = resource.MustParse("0")
		usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0")
		usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0")
		usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0")
		err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources)
		framework.ExpectNoError(err)
	})
})
// Verifies that a ResourceQuota scoped to a priority class (via
// spec.scopeSelector) charges only pods whose priorityClassName matches the
// selector, covering the In, NotIn and Exists scope-selector operators.
var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
	f := framework.NewDefaultFramework("resourcequota-priorityclass")

	// OpIn, single class: a pod carrying the matching class is counted.
	ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with same priority class.", func() {
		_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass1"}, Value: int32(1000)}, metav1.CreateOptions{})
		// PriorityClasses are cluster-scoped; tolerate AlreadyExists so reruns pass.
		framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)

		hard := v1.ResourceList{}
		hard[v1.ResourcePods] = resource.MustParse("1")

		ginkgo.By("Creating a ResourceQuota with priority class scope")
		resourceQuotaPriorityClass, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeForPriorityClass("quota-priorityclass", hard, v1.ScopeSelectorOpIn, []string{"pclass1"}))
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring ResourceQuota status is calculated")
		usedResources := v1.ResourceList{}
		usedResources[v1.ResourcePods] = resource.MustParse("0")
		err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
		framework.ExpectNoError(err)

		ginkgo.By("Creating a pod with priority class")
		podName := "testpod-pclass1"
		pod := newTestPodForQuotaWithPriority(f, podName, v1.ResourceList{}, v1.ResourceList{}, "pclass1")
		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota with priority class scope captures the pod usage")
		usedResources[v1.ResourcePods] = resource.MustParse("1")
		err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
		framework.ExpectNoError(err)

		ginkgo.By("Deleting the pod")
		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota status released the pod usage")
		usedResources[v1.ResourcePods] = resource.MustParse("0")
		err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
		framework.ExpectNoError(err)
	})

	// OpIn, pod count 1: the second matching pod must be rejected by quota admission.
	ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against 2 pods with same priority class.", func() {
		_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass2"}, Value: int32(1000)}, metav1.CreateOptions{})
		framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)

		hard := v1.ResourceList{}
		hard[v1.ResourcePods] = resource.MustParse("1")

		ginkgo.By("Creating a ResourceQuota with priority class scope")
		resourceQuotaPriorityClass, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeForPriorityClass("quota-priorityclass", hard, v1.ScopeSelectorOpIn, []string{"pclass2"}))
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring ResourceQuota status is calculated")
		usedResources := v1.ResourceList{}
		usedResources[v1.ResourcePods] = resource.MustParse("0")
		err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
		framework.ExpectNoError(err)

		ginkgo.By("Creating first pod with priority class should pass")
		podName := "testpod-pclass2-1"
		pod := newTestPodForQuotaWithPriority(f, podName, v1.ResourceList{}, v1.ResourceList{}, "pclass2")
		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota with priority class scope captures the pod usage")
		usedResources[v1.ResourcePods] = resource.MustParse("1")
		err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
		framework.ExpectNoError(err)

		ginkgo.By("Creating 2nd pod with priority class should fail")
		podName2 := "testpod-pclass2-2"
		pod2 := newTestPodForQuotaWithPriority(f, podName2, v1.ResourceList{}, v1.ResourceList{}, "pclass2")
		// The quota is already fully used, so this create must be denied.
		_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod2, metav1.CreateOptions{})
		framework.ExpectError(err)

		ginkgo.By("Deleting first pod")
		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota status released the pod usage")
		usedResources[v1.ResourcePods] = resource.MustParse("0")
		err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
		framework.ExpectNoError(err)
	})

	// OpIn, non-matching class: the quota selects "pclass4" on purpose while both
	// pods run with "pclass3", so neither pod may ever be charged to the quota.
	ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against 2 pods with different priority class.", func() {
		_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass3"}, Value: int32(1000)}, metav1.CreateOptions{})
		framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)

		hard := v1.ResourceList{}
		hard[v1.ResourcePods] = resource.MustParse("1")

		ginkgo.By("Creating a ResourceQuota with priority class scope")
		// Deliberately scoped to "pclass4", which no pod in this test uses.
		resourceQuotaPriorityClass, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeForPriorityClass("quota-priorityclass", hard, v1.ScopeSelectorOpIn, []string{"pclass4"}))
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring ResourceQuota status is calculated")
		usedResources := v1.ResourceList{}
		usedResources[v1.ResourcePods] = resource.MustParse("0")
		err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
		framework.ExpectNoError(err)

		ginkgo.By("Creating a pod with priority class with pclass3")
		podName := "testpod-pclass3-1"
		pod := newTestPodForQuotaWithPriority(f, podName, v1.ResourceList{}, v1.ResourceList{}, "pclass3")
		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota with priority class scope remains same")
		usedResources[v1.ResourcePods] = resource.MustParse("0")
		err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
		framework.ExpectNoError(err)

		ginkgo.By("Creating a 2nd pod with priority class pclass3")
		// NOTE(review): podName2 reuses the "testpod-pclass2-2" literal;
		// "testpod-pclass3-2" was probably intended (cosmetic only — the name
		// is unique within this namespace, so the test still works).
		podName2 := "testpod-pclass2-2"
		pod2 := newTestPodForQuotaWithPriority(f, podName2, v1.ResourceList{}, v1.ResourceList{}, "pclass3")
		pod2, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod2, metav1.CreateOptions{})
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota with priority class scope remains same")
		usedResources[v1.ResourcePods] = resource.MustParse("0")
		err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
		framework.ExpectNoError(err)

		ginkgo.By("Deleting both pods")
		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
		framework.ExpectNoError(err)
		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod2.Name, *metav1.NewDeleteOptions(0))
		framework.ExpectNoError(err)
	})

	// OpIn with two classes: pods of either class accumulate into the same quota.
	ginkgo.It("should verify ResourceQuota's multiple priority class scope (quota set to pod count: 2) against 2 pods with same priority classes.", func() {
		_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass5"}, Value: int32(1000)}, metav1.CreateOptions{})
		framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)
		_, err = f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass6"}, Value: int32(1000)}, metav1.CreateOptions{})
		framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)

		hard := v1.ResourceList{}
		hard[v1.ResourcePods] = resource.MustParse("2")

		ginkgo.By("Creating a ResourceQuota with priority class scope")
		resourceQuotaPriorityClass, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeForPriorityClass("quota-priorityclass", hard, v1.ScopeSelectorOpIn, []string{"pclass5", "pclass6"}))
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring ResourceQuota status is calculated")
		usedResources := v1.ResourceList{}
		usedResources[v1.ResourcePods] = resource.MustParse("0")
		err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
		framework.ExpectNoError(err)

		ginkgo.By("Creating a pod with priority class pclass5")
		podName := "testpod-pclass5"
		pod := newTestPodForQuotaWithPriority(f, podName, v1.ResourceList{}, v1.ResourceList{}, "pclass5")
		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota with priority class is updated with the pod usage")
		usedResources[v1.ResourcePods] = resource.MustParse("1")
		err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
		framework.ExpectNoError(err)

		ginkgo.By("Creating 2nd pod with priority class pclass6")
		podName2 := "testpod-pclass6"
		pod2 := newTestPodForQuotaWithPriority(f, podName2, v1.ResourceList{}, v1.ResourceList{}, "pclass6")
		pod2, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod2, metav1.CreateOptions{})
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota with priority class scope is updated with the pod usage")
		usedResources[v1.ResourcePods] = resource.MustParse("2")
		err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
		framework.ExpectNoError(err)

		ginkgo.By("Deleting both pods")
		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
		framework.ExpectNoError(err)
		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod2.Name, *metav1.NewDeleteOptions(0))
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota status released the pod usage")
		usedResources[v1.ResourcePods] = resource.MustParse("0")
		err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
		framework.ExpectNoError(err)
	})

	// OpNotIn: a pod whose class IS in the excluded set must not be charged.
	ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with different priority class (ScopeSelectorOpNotIn).", func() {
		_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass7"}, Value: int32(1000)}, metav1.CreateOptions{})
		framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)

		hard := v1.ResourceList{}
		hard[v1.ResourcePods] = resource.MustParse("1")

		ginkgo.By("Creating a ResourceQuota with priority class scope")
		resourceQuotaPriorityClass, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeForPriorityClass("quota-priorityclass", hard, v1.ScopeSelectorOpNotIn, []string{"pclass7"}))
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring ResourceQuota status is calculated")
		usedResources := v1.ResourceList{}
		usedResources[v1.ResourcePods] = resource.MustParse("0")
		err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
		framework.ExpectNoError(err)

		ginkgo.By("Creating a pod with priority class pclass7")
		podName := "testpod-pclass7"
		pod := newTestPodForQuotaWithPriority(f, podName, v1.ResourceList{}, v1.ResourceList{}, "pclass7")
		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota with priority class is not used")
		usedResources[v1.ResourcePods] = resource.MustParse("0")
		err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
		framework.ExpectNoError(err)

		ginkgo.By("Deleting the pod")
		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
		framework.ExpectNoError(err)
	})

	// OpExists with an empty value list: any pod that has a priority class is charged.
	ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with different priority class (ScopeSelectorOpExists).", func() {
		_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass8"}, Value: int32(1000)}, metav1.CreateOptions{})
		framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)

		hard := v1.ResourceList{}
		hard[v1.ResourcePods] = resource.MustParse("1")

		ginkgo.By("Creating a ResourceQuota with priority class scope")
		resourceQuotaPriorityClass, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeForPriorityClass("quota-priorityclass", hard, v1.ScopeSelectorOpExists, []string{}))
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring ResourceQuota status is calculated")
		usedResources := v1.ResourceList{}
		usedResources[v1.ResourcePods] = resource.MustParse("0")
		err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
		framework.ExpectNoError(err)

		ginkgo.By("Creating a pod with priority class pclass8")
		podName := "testpod-pclass8"
		pod := newTestPodForQuotaWithPriority(f, podName, v1.ResourceList{}, v1.ResourceList{}, "pclass8")
		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota with priority class is updated with the pod usage")
		usedResources[v1.ResourcePods] = resource.MustParse("1")
		err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
		framework.ExpectNoError(err)

		ginkgo.By("Deleting the pod")
		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota status released the pod usage")
		usedResources[v1.ResourcePods] = resource.MustParse("0")
		err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
		framework.ExpectNoError(err)
	})

	// Beyond pod count: request/limit totals for cpu and memory are also tracked
	// under the priority-class scope.
	ginkgo.It("should verify ResourceQuota's priority class scope (cpu, memory quota set) against a pod with same priority class.", func() {
		_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass9"}, Value: int32(1000)}, metav1.CreateOptions{})
		framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)

		hard := v1.ResourceList{}
		hard[v1.ResourcePods] = resource.MustParse("1")
		hard[v1.ResourceRequestsCPU] = resource.MustParse("1")
		hard[v1.ResourceRequestsMemory] = resource.MustParse("1Gi")
		hard[v1.ResourceLimitsCPU] = resource.MustParse("3")
		hard[v1.ResourceLimitsMemory] = resource.MustParse("3Gi")

		ginkgo.By("Creating a ResourceQuota with priority class scope")
		resourceQuotaPriorityClass, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeForPriorityClass("quota-priorityclass", hard, v1.ScopeSelectorOpIn, []string{"pclass9"}))
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring ResourceQuota status is calculated")
		usedResources := v1.ResourceList{}
		usedResources[v1.ResourcePods] = resource.MustParse("0")
		usedResources[v1.ResourceRequestsCPU] = resource.MustParse("0")
		usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0Gi")
		usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0")
		usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0Gi")
		err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
		framework.ExpectNoError(err)

		ginkgo.By("Creating a pod with priority class")
		podName := "testpod-pclass9"
		request := v1.ResourceList{}
		request[v1.ResourceCPU] = resource.MustParse("1")
		request[v1.ResourceMemory] = resource.MustParse("1Gi")
		limit := v1.ResourceList{}
		limit[v1.ResourceCPU] = resource.MustParse("2")
		limit[v1.ResourceMemory] = resource.MustParse("2Gi")
		pod := newTestPodForQuotaWithPriority(f, podName, request, limit, "pclass9")
		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota with priority class scope captures the pod usage")
		usedResources[v1.ResourcePods] = resource.MustParse("1")
		usedResources[v1.ResourceRequestsCPU] = resource.MustParse("1")
		usedResources[v1.ResourceRequestsMemory] = resource.MustParse("1Gi")
		usedResources[v1.ResourceLimitsCPU] = resource.MustParse("2")
		usedResources[v1.ResourceLimitsMemory] = resource.MustParse("2Gi")
		err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
		framework.ExpectNoError(err)

		ginkgo.By("Deleting the pod")
		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota status released the pod usage")
		usedResources[v1.ResourcePods] = resource.MustParse("0")
		usedResources[v1.ResourceRequestsCPU] = resource.MustParse("0")
		usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0Gi")
		usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0")
		usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0Gi")
		err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
		framework.ExpectNoError(err)
	})
})
// newTestResourceQuotaWithScopeSelector returns a quota that enforces default
// constraints for testing, attached to the given scope via spec.scopeSelector
// with the Exists operator.
func newTestResourceQuotaWithScopeSelector(name string, scope v1.ResourceQuotaScope) *v1.ResourceQuota {
	// Base constraint: cap the number of pods.
	hard := v1.ResourceList{
		v1.ResourcePods: resource.MustParse("5"),
	}
	// Terminating / NotTerminating scopes additionally constrain compute usage.
	if scope == v1.ResourceQuotaScopeTerminating || scope == v1.ResourceQuotaScopeNotTerminating {
		hard[v1.ResourceRequestsCPU] = resource.MustParse("1")
		hard[v1.ResourceRequestsMemory] = resource.MustParse("500Mi")
		hard[v1.ResourceLimitsCPU] = resource.MustParse("2")
		hard[v1.ResourceLimitsMemory] = resource.MustParse("1Gi")
	}
	selector := &v1.ScopeSelector{
		MatchExpressions: []v1.ScopedResourceSelectorRequirement{
			{ScopeName: scope, Operator: v1.ScopeSelectorOpExists},
		},
	}
	return &v1.ResourceQuota{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec:       v1.ResourceQuotaSpec{Hard: hard, ScopeSelector: selector},
	}
}
// newTestResourceQuotaWithScope returns a quota that enforces default
// constraints for testing, attached to the given scope via the legacy
// spec.scopes list.
func newTestResourceQuotaWithScope(name string, scope v1.ResourceQuotaScope) *v1.ResourceQuota {
	// Base constraint: cap the number of pods.
	hard := v1.ResourceList{
		v1.ResourcePods: resource.MustParse("5"),
	}
	// Terminating / NotTerminating scopes additionally constrain compute usage.
	if scope == v1.ResourceQuotaScopeTerminating || scope == v1.ResourceQuotaScopeNotTerminating {
		hard[v1.ResourceRequestsCPU] = resource.MustParse("1")
		hard[v1.ResourceRequestsMemory] = resource.MustParse("500Mi")
		hard[v1.ResourceLimitsCPU] = resource.MustParse("2")
		hard[v1.ResourceLimitsMemory] = resource.MustParse("1Gi")
	}
	return &v1.ResourceQuota{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec: v1.ResourceQuotaSpec{
			Hard:   hard,
			Scopes: []v1.ResourceQuotaScope{scope},
		},
	}
}
// newTestResourceQuotaWithScopeForPriorityClass returns a quota with the given
// hard limits, scoped to pods whose priority class matches (op, values) under
// the PriorityClass scope selector.
func newTestResourceQuotaWithScopeForPriorityClass(name string, hard v1.ResourceList, op v1.ScopeSelectorOperator, values []string) *v1.ResourceQuota {
	requirement := v1.ScopedResourceSelectorRequirement{
		ScopeName: v1.ResourceQuotaScopePriorityClass,
		Operator:  op,
		Values:    values,
	}
	spec := v1.ResourceQuotaSpec{
		Hard: hard,
		ScopeSelector: &v1.ScopeSelector{
			MatchExpressions: []v1.ScopedResourceSelectorRequirement{requirement},
		},
	}
	return &v1.ResourceQuota{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec:       spec,
	}
}
// newTestResourceQuota returns a quota that enforces default constraints for
// testing: object-count limits, compute limits, storage limits (including a
// per-storage-class breakdown), a discovered resource type and an extended
// resource.
func newTestResourceQuota(name string) *v1.ResourceQuota {
	hard := v1.ResourceList{
		v1.ResourcePods:                   resource.MustParse("5"),
		v1.ResourceServices:               resource.MustParse("10"),
		v1.ResourceServicesNodePorts:      resource.MustParse("1"),
		v1.ResourceServicesLoadBalancers:  resource.MustParse("1"),
		v1.ResourceReplicationControllers: resource.MustParse("10"),
		v1.ResourceQuotas:                 resource.MustParse("1"),
		v1.ResourceCPU:                    resource.MustParse("1"),
		v1.ResourceMemory:                 resource.MustParse("500Mi"),
		v1.ResourceConfigMaps:             resource.MustParse("2"),
		v1.ResourceSecrets:                resource.MustParse("10"),
		v1.ResourcePersistentVolumeClaims: resource.MustParse("10"),
		v1.ResourceRequestsStorage:        resource.MustParse("10Gi"),
		v1.ResourceEphemeralStorage:       resource.MustParse("50Gi"),
	}
	// Per-storage-class limits for the "gold" class.
	hard[core.V1ResourceByStorageClass(classGold, v1.ResourcePersistentVolumeClaims)] = resource.MustParse("10")
	hard[core.V1ResourceByStorageClass(classGold, v1.ResourceRequestsStorage)] = resource.MustParse("10Gi")
	// test quota on discovered resource type
	hard[v1.ResourceName("count/replicasets.apps")] = resource.MustParse("5")
	// test quota on extended resource
	hard[v1.ResourceName(v1.DefaultResourceRequestsPrefix+extendedResourceName)] = resource.MustParse("3")
	return &v1.ResourceQuota{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec:       v1.ResourceQuotaSpec{Hard: hard},
	}
}
// newTestPodForQuota returns a single-container pause pod carrying the
// specified resource requests and limits.
func newTestPodForQuota(f *framework.Framework, name string, requests v1.ResourceList, limits v1.ResourceList) *v1.Pod {
	container := v1.Container{
		Name:  "pause",
		Image: imageutils.GetPauseImageName(),
		Resources: v1.ResourceRequirements{
			Requests: requests,
			Limits:   limits,
		},
	}
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec: v1.PodSpec{
			// prevent disruption to other test workloads in parallel test runs
			// by ensuring the quota test pods don't get scheduled onto a node
			NodeSelector: map[string]string{
				"x-test.k8s.io/unsatisfiable": "not-schedulable",
			},
			Containers: []v1.Container{container},
		},
	}
}
// newTestPodForQuotaWithPriority returns a single-container pause pod carrying
// the specified resource requests/limits and the given priority class name.
func newTestPodForQuotaWithPriority(f *framework.Framework, name string, requests v1.ResourceList, limits v1.ResourceList, pclass string) *v1.Pod {
	container := v1.Container{
		Name:  "pause",
		Image: imageutils.GetPauseImageName(),
		Resources: v1.ResourceRequirements{
			Requests: requests,
			Limits:   limits,
		},
	}
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec: v1.PodSpec{
			// prevent disruption to other test workloads in parallel test runs
			// by ensuring the quota test pods don't get scheduled onto a node
			NodeSelector: map[string]string{
				"x-test.k8s.io/unsatisfiable": "not-schedulable",
			},
			Containers:        []v1.Container{container},
			PriorityClassName: pclass,
		},
	}
}
// newTestPersistentVolumeClaimForQuota returns a simple 1Gi persistent volume
// claim that allows all three access modes.
func newTestPersistentVolumeClaimForQuota(name string) *v1.PersistentVolumeClaim {
	accessModes := []v1.PersistentVolumeAccessMode{
		v1.ReadWriteOnce,
		v1.ReadOnlyMany,
		v1.ReadWriteMany,
	}
	storageRequest := v1.ResourceList{
		v1.ResourceName(v1.ResourceStorage): resource.MustParse("1Gi"),
	}
	return &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec: v1.PersistentVolumeClaimSpec{
			AccessModes: accessModes,
			Resources:   v1.ResourceRequirements{Requests: storageRequest},
		},
	}
}
// newTestReplicationControllerForQuota returns a simple replication controller
// running `replicas` copies of the given image, selected by a "name" label.
func newTestReplicationControllerForQuota(name, image string, replicas int32) *v1.ReplicationController {
	// Take the address of a local copy; Spec.Replicas wants *int32.
	count := replicas
	labels := map[string]string{"name": name}
	return &v1.ReplicationController{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec: v1.ReplicationControllerSpec{
			Replicas: &count,
			Selector: labels,
			Template: &v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{Labels: labels},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{Name: name, Image: image},
					},
				},
			},
		},
	}
}
// newTestReplicaSetForQuota returns a simple replica set running `replicas`
// copies of the given image with immediate pod termination (grace period 0).
func newTestReplicaSetForQuota(name, image string, replicas int32) *appsv1.ReplicaSet {
	gracePeriod := int64(0)
	labels := map[string]string{"name": name}
	return &appsv1.ReplicaSet{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec: appsv1.ReplicaSetSpec{
			Replicas: &replicas,
			Selector: &metav1.LabelSelector{MatchLabels: labels},
			Template: v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{Labels: labels},
				Spec: v1.PodSpec{
					TerminationGracePeriodSeconds: &gracePeriod,
					Containers: []v1.Container{
						{Name: name, Image: image},
					},
				},
			},
		},
	}
}
// newTestServiceForQuota returns a simple service of the given type exposing
// port 80.
func newTestServiceForQuota(name string, serviceType v1.ServiceType) *v1.Service {
	port := v1.ServicePort{
		Port:       80,
		TargetPort: intstr.FromInt(80),
	}
	return &v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec: v1.ServiceSpec{
			Type:  serviceType,
			Ports: []v1.ServicePort{port},
		},
	}
}
// newTestConfigMapForQuota returns a minimal config map with one entry,
// used only to exercise the configmaps object-count quota.
func newTestConfigMapForQuota(name string) *v1.ConfigMap {
	data := map[string]string{"a": "b"}
	return &v1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Data:       data,
	}
}
// newTestSecretForQuota returns a minimal secret with three opaque entries,
// used only to exercise the secrets object-count quota.
func newTestSecretForQuota(name string) *v1.Secret {
	data := map[string][]byte{
		"data-1": []byte("value-1\n"),
		"data-2": []byte("value-2\n"),
		"data-3": []byte("value-3\n"),
	}
	return &v1.Secret{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Data:       data,
	}
}
// createResourceQuota creates the given ResourceQuota in the specified
// namespace and returns the server's copy of the object.
func createResourceQuota(c clientset.Interface, namespace string, resourceQuota *v1.ResourceQuota) (*v1.ResourceQuota, error) {
	return c.CoreV1().ResourceQuotas(namespace).Create(context.TODO(), resourceQuota, metav1.CreateOptions{})
}
// deleteResourceQuota deletes the ResourceQuota with the specified name from
// the given namespace, using default deletion options.
func deleteResourceQuota(c clientset.Interface, namespace, name string) error {
	return c.CoreV1().ResourceQuotas(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{})
}
// countResourceQuota counts the number of ResourceQuota in the specified namespace
// On contended servers the service account controller can slow down, leading to the count changing during a run.
// Wait up to 5s for the count to stabilize, assuming that updates come at a consistent rate, and are not held indefinitely.
func countResourceQuota(c clientset.Interface, namespace string) (int, error) {
	found, unchanged := 0, 0
	// The closure mutates `found`; the returned count is whatever was observed
	// when polling stopped. NOTE(review): `return found, wait.Poll(...)` relies
	// on `found` being read after the Poll call completes — this works with the
	// gc toolchain, but the Go spec does not pin down this evaluation order;
	// confirm before porting.
	return found, wait.Poll(1*time.Second, 30*time.Second, func() (bool, error) {
		resourceQuotas, err := c.CoreV1().ResourceQuotas(namespace).List(context.TODO(), metav1.ListOptions{})
		framework.ExpectNoError(err)
		if len(resourceQuotas.Items) == found {
			// loop until the number of resource quotas has stabilized for 5 seconds
			unchanged++
			return unchanged > 4, nil
		}
		// Count changed: remember the new value and restart the stability window.
		unchanged = 0
		found = len(resourceQuotas.Items)
		return false, nil
	})
}
// waitForResourceQuota polls until the named quota's status.used matches every
// entry of `used` exactly (Cmp == 0), or the resourceQuotaTimeout elapses.
// Extra keys in status.used that are absent from `used` are ignored.
func waitForResourceQuota(c clientset.Interface, ns, quotaName string, used v1.ResourceList) error {
	return wait.Poll(framework.Poll, resourceQuotaTimeout, func() (bool, error) {
		resourceQuota, err := c.CoreV1().ResourceQuotas(ns).Get(context.TODO(), quotaName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		// used may not yet be calculated
		if resourceQuota.Status.Used == nil {
			return false, nil
		}
		// verify that the quota shows the expected used resource values
		for k, v := range used {
			if actualValue, found := resourceQuota.Status.Used[k]; !found || (actualValue.Cmp(v) != 0) {
				// When the key is missing, actualValue is the zero Quantity and logs as "0".
				framework.Logf("resource %s, expected %s, actual %s", k, v.String(), actualValue.String())
				return false, nil
			}
		}
		return true, nil
	})
}
// updateResourceQuotaUntilUsageAppears updates the resource quota object until the usage is populated
// for the specific resource name.
//
// Each round bumps spec.hard[resourceName] by 1 to force the quota controller
// to resync, then re-checks whether status.used has an entry for the resource.
// Gives up after one minute.
func updateResourceQuotaUntilUsageAppears(c clientset.Interface, ns, quotaName string, resourceName v1.ResourceName) error {
	return wait.Poll(framework.Poll, 1*time.Minute, func() (bool, error) {
		resourceQuota, err := c.CoreV1().ResourceQuotas(ns).Get(context.TODO(), quotaName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		// verify that the quota shows the expected used resource values
		_, ok := resourceQuota.Status.Used[resourceName]
		if ok {
			return true, nil
		}
		// Usage not populated yet: nudge the spec to trigger a controller resync.
		current := resourceQuota.Spec.Hard[resourceName]
		current.Add(resource.MustParse("1"))
		resourceQuota.Spec.Hard[resourceName] = current
		_, err = c.CoreV1().ResourceQuotas(ns).Update(context.TODO(), resourceQuota, metav1.UpdateOptions{})
		// ignoring conflicts since someone else may already updated it.
		if apierrors.IsConflict(err) {
			return false, nil
		}
		return false, err
	})
}
|
apache-2.0
|
rgooch/Dominator
|
lib/connpool/usage_test.go
|
1703
|
package connpool
import (
"net"
"testing"
"github.com/Cloud-Foundations/Dominator/lib/resourcepool"
)
var serverAddress string
// init reserves an ephemeral localhost TCP port so the tests have a real
// address to dial. The listener is intentionally never closed for the life of
// the test binary, and serving is disabled (the http.Serve call is commented
// out) — NOTE(review): presumably the kernel's listen backlog is enough for
// the pooled dials to succeed; confirm if connections must be accepted.
func init() {
	listener, err := net.Listen("tcp", "localhost:")
	if err != nil {
		panic(err)
	}
	serverAddress = listener.Addr().String()
	//go http.Serve(listener, nil)
}
// TestGetUsePut exercises the happy path: acquire a connection from the pool,
// use it while held, and return it with Put.
func TestGetUsePut(t *testing.T) {
	pool := New("tcp", serverAddress)
	c, err := pool.Get(resourcepool.MakeImmediateCanceler(), 0)
	if err != nil {
		t.Error(err)
		return
	}
	c.LocalAddr() // any use while held must be legal
	c.Put()
}
// TestGetClosePut verifies that a connection may be explicitly closed while
// held and then still be returned to the pool with Put.
func TestGetClosePut(t *testing.T) {
	pool := New("tcp", serverAddress)
	c, err := pool.Get(resourcepool.MakeImmediateCanceler(), 0)
	if err != nil {
		t.Error(err)
		return
	}
	if closeErr := c.Close(); closeErr != nil {
		t.Error(closeErr)
	}
	c.Put()
}
// TestGetPutPut verifies that returning the same connection twice is a
// programming error: the second Put must panic.
func TestGetPutPut(t *testing.T) {
	pool := New("tcp", serverAddress)
	c, err := pool.Get(resourcepool.MakeImmediateCanceler(), 0)
	if err != nil {
		t.Error(err)
		return
	}
	c.Put()
	defer func() {
		if recover() == nil {
			t.Errorf("Multiple Put() did not panic")
		}
	}()
	c.Put() // must panic
}
// TestUseAfterPut verifies that a connection must not be touched after it has
// been returned to the pool: any method call after Put must panic.
func TestUseAfterPut(t *testing.T) {
	pool := New("tcp", serverAddress)
	c, err := pool.Get(resourcepool.MakeImmediateCanceler(), 0)
	if err != nil {
		t.Error(err)
		return
	}
	c.Put()
	defer func() {
		if recover() == nil {
			t.Errorf("Use after Put() did not panic")
		}
	}()
	c.LocalAddr() // must panic
}
// TestUseAfterClose verifies that a connection must not be touched after it
// has been explicitly closed: any method call after Close must panic.
func TestUseAfterClose(t *testing.T) {
	cr := New("tcp", serverAddress)
	conn, err := cr.Get(resourcepool.MakeImmediateCanceler(), 0)
	if err != nil {
		t.Error(err)
		return
	}
	// Report a Close failure instead of silently discarding it (consistent
	// with TestGetClosePut); the panic expectation below still applies.
	if err := conn.Close(); err != nil {
		t.Error(err)
	}
	defer func() {
		if err := recover(); err == nil {
			t.Errorf("Use after Close() did not panic")
		}
	}()
	conn.LocalAddr()
}
|
apache-2.0
|
wildfly-swarm/wildfly-swarm-javadocs
|
2017.5.0/apidocs/org/wildfly/swarm/config/logging/RootLoggerSupplier.html
|
9214
|
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="en">
<head>
<!-- Generated by javadoc (1.8.0_112) on Mon May 01 08:43:49 MST 2017 -->
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<title>RootLoggerSupplier (Public javadocs 2017.5.0 API)</title>
<meta name="date" content="2017-05-01">
<link rel="stylesheet" type="text/css" href="../../../../../stylesheet.css" title="Style">
<script type="text/javascript" src="../../../../../script.js"></script>
</head>
<body>
<script type="text/javascript"><!--
try {
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="RootLoggerSupplier (Public javadocs 2017.5.0 API)";
}
}
catch(err) {
}
//-->
var methods = {"i0":6};
var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],4:["t3","Abstract Methods"]};
var altColor = "altColor";
var rowColor = "rowColor";
var tableTab = "tableTab";
var activeTableTab = "activeTableTab";
</script>
<noscript>
<div>JavaScript is disabled on your browser.</div>
</noscript>
<!-- ========= START OF TOP NAVBAR ======= -->
<div class="topNav"><a name="navbar.top">
<!-- -->
</a>
<div class="skipNav"><a href="#skip.navbar.top" title="Skip navigation links">Skip navigation links</a></div>
<a name="navbar.top.firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../overview-summary.html">Overview</a></li>
<li><a href="package-summary.html">Package</a></li>
<li class="navBarCell1Rev">Class</li>
<li><a href="class-use/RootLoggerSupplier.html">Use</a></li>
<li><a href="package-tree.html">Tree</a></li>
<li><a href="../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../help-doc.html">Help</a></li>
</ul>
<div class="aboutLanguage">WildFly Swarm API, 2017.5.0</div>
</div>
<div class="subNav">
<ul class="navList">
<li><a href="../../../../../org/wildfly/swarm/config/logging/RootLoggerConsumer.html" title="interface in org.wildfly.swarm.config.logging"><span class="typeNameLink">Prev Class</span></a></li>
<li><a href="../../../../../org/wildfly/swarm/config/logging/SizeRotatingFileHandler.html" title="class in org.wildfly.swarm.config.logging"><span class="typeNameLink">Next Class</span></a></li>
</ul>
<ul class="navList">
<li><a href="../../../../../index.html?org/wildfly/swarm/config/logging/RootLoggerSupplier.html" target="_top">Frames</a></li>
<li><a href="RootLoggerSupplier.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_top">
<li><a href="../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_top");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<div>
<ul class="subNavList">
<li>Summary: </li>
<li>Nested | </li>
<li>Field | </li>
<li>Constr | </li>
<li><a href="#method.summary">Method</a></li>
</ul>
<ul class="subNavList">
<li>Detail: </li>
<li>Field | </li>
<li>Constr | </li>
<li><a href="#method.detail">Method</a></li>
</ul>
</div>
<a name="skip.navbar.top">
<!-- -->
</a></div>
<!-- ========= END OF TOP NAVBAR ========= -->
<!-- ======== START OF CLASS DATA ======== -->
<div class="header">
<div class="subTitle">org.wildfly.swarm.config.logging</div>
<h2 title="Interface RootLoggerSupplier" class="title">Interface RootLoggerSupplier<T extends <a href="../../../../../org/wildfly/swarm/config/logging/RootLogger.html" title="class in org.wildfly.swarm.config.logging">RootLogger</a>></h2>
</div>
<div class="contentContainer">
<div class="description">
<ul class="blockList">
<li class="blockList">
<dl>
<dt>Functional Interface:</dt>
<dd>This is a functional interface and can therefore be used as the assignment target for a lambda expression or method reference.</dd>
</dl>
<hr>
<br>
<pre><a href="http://docs.oracle.com/javase/8/docs/api/java/lang/FunctionalInterface.html?is-external=true" title="class or interface in java.lang">@FunctionalInterface</a>
public interface <span class="typeNameLabel">RootLoggerSupplier<T extends <a href="../../../../../org/wildfly/swarm/config/logging/RootLogger.html" title="class in org.wildfly.swarm.config.logging">RootLogger</a>></span></pre>
</li>
</ul>
</div>
<div class="summary">
<ul class="blockList">
<li class="blockList">
<!-- ========== METHOD SUMMARY =========== -->
<ul class="blockList">
<li class="blockList"><a name="method.summary">
<!-- -->
</a>
<h3>Method Summary</h3>
<table class="memberSummary" border="0" cellpadding="3" cellspacing="0" summary="Method Summary table, listing methods, and an explanation">
<caption><span id="t0" class="activeTableTab"><span>All Methods</span><span class="tabEnd"> </span></span><span id="t2" class="tableTab"><span><a href="javascript:show(2);">Instance Methods</a></span><span class="tabEnd"> </span></span><span id="t3" class="tableTab"><span><a href="javascript:show(4);">Abstract Methods</a></span><span class="tabEnd"> </span></span></caption>
<tr>
<th class="colFirst" scope="col">Modifier and Type</th>
<th class="colLast" scope="col">Method and Description</th>
</tr>
<tr id="i0" class="altColor">
<td class="colFirst"><code><a href="../../../../../org/wildfly/swarm/config/logging/RootLogger.html" title="class in org.wildfly.swarm.config.logging">RootLogger</a></code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/wildfly/swarm/config/logging/RootLoggerSupplier.html#get--">get</a></span>()</code>
<div class="block">Constructed instance of RootLogger resource</div>
</td>
</tr>
</table>
</li>
</ul>
</li>
</ul>
</div>
<div class="details">
<ul class="blockList">
<li class="blockList">
<!-- ============ METHOD DETAIL ========== -->
<ul class="blockList">
<li class="blockList"><a name="method.detail">
<!-- -->
</a>
<h3>Method Detail</h3>
<a name="get--">
<!-- -->
</a>
<ul class="blockListLast">
<li class="blockList">
<h4>get</h4>
<pre><a href="../../../../../org/wildfly/swarm/config/logging/RootLogger.html" title="class in org.wildfly.swarm.config.logging">RootLogger</a> get()</pre>
<div class="block">Constructed instance of RootLogger resource</div>
<dl>
<dt><span class="returnLabel">Returns:</span></dt>
<dd>The instance</dd>
</dl>
</li>
</ul>
</li>
</ul>
</li>
</ul>
</div>
</div>
<!-- ========= END OF CLASS DATA ========= -->
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<div class="bottomNav"><a name="navbar.bottom">
<!-- -->
</a>
<div class="skipNav"><a href="#skip.navbar.bottom" title="Skip navigation links">Skip navigation links</a></div>
<a name="navbar.bottom.firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../overview-summary.html">Overview</a></li>
<li><a href="package-summary.html">Package</a></li>
<li class="navBarCell1Rev">Class</li>
<li><a href="class-use/RootLoggerSupplier.html">Use</a></li>
<li><a href="package-tree.html">Tree</a></li>
<li><a href="../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../help-doc.html">Help</a></li>
</ul>
<div class="aboutLanguage">WildFly Swarm API, 2017.5.0</div>
</div>
<div class="subNav">
<ul class="navList">
<li><a href="../../../../../org/wildfly/swarm/config/logging/RootLoggerConsumer.html" title="interface in org.wildfly.swarm.config.logging"><span class="typeNameLink">Prev Class</span></a></li>
<li><a href="../../../../../org/wildfly/swarm/config/logging/SizeRotatingFileHandler.html" title="class in org.wildfly.swarm.config.logging"><span class="typeNameLink">Next Class</span></a></li>
</ul>
<ul class="navList">
<li><a href="../../../../../index.html?org/wildfly/swarm/config/logging/RootLoggerSupplier.html" target="_top">Frames</a></li>
<li><a href="RootLoggerSupplier.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_bottom">
<li><a href="../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_bottom");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<div>
<ul class="subNavList">
<li>Summary: </li>
<li>Nested | </li>
<li>Field | </li>
<li>Constr | </li>
<li><a href="#method.summary">Method</a></li>
</ul>
<ul class="subNavList">
<li>Detail: </li>
<li>Field | </li>
<li>Constr | </li>
<li><a href="#method.detail">Method</a></li>
</ul>
</div>
<a name="skip.navbar.bottom">
<!-- -->
</a></div>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
<p class="legalCopy"><small>Copyright © 2017 <a href="http://www.jboss.org">JBoss by Red Hat</a>. All rights reserved.</small></p>
</body>
</html>
|
apache-2.0
|
wildfly-swarm/wildfly-swarm-javadocs
|
2.6.1.Final-SNAPSHOT/apidocs/org/wildfly/swarm/microprofile/jwtauth/class-use/MicroProfileJWTAuthFraction.html
|
5191
|
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="en">
<head>
<!-- Generated by javadoc (1.8.0_151) on Wed Jun 10 10:20:18 MST 2020 -->
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<title>Uses of Class org.wildfly.swarm.microprofile.jwtauth.MicroProfileJWTAuthFraction (BOM: * : All 2.6.1.Final-SNAPSHOT API)</title>
<meta name="date" content="2020-06-10">
<link rel="stylesheet" type="text/css" href="../../../../../../stylesheet.css" title="Style">
<script type="text/javascript" src="../../../../../../script.js"></script>
</head>
<body>
<script type="text/javascript"><!--
try {
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="Uses of Class org.wildfly.swarm.microprofile.jwtauth.MicroProfileJWTAuthFraction (BOM: * : All 2.6.1.Final-SNAPSHOT API)";
}
}
catch(err) {
}
//-->
</script>
<noscript>
<div>JavaScript is disabled on your browser.</div>
</noscript>
<!-- ========= START OF TOP NAVBAR ======= -->
<div class="topNav"><a name="navbar.top">
<!-- -->
</a>
<div class="skipNav"><a href="#skip.navbar.top" title="Skip navigation links">Skip navigation links</a></div>
<a name="navbar.top.firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../../overview-summary.html">Overview</a></li>
<li><a href="../package-summary.html">Package</a></li>
<li><a href="../../../../../../org/wildfly/swarm/microprofile/jwtauth/MicroProfileJWTAuthFraction.html" title="class in org.wildfly.swarm.microprofile.jwtauth">Class</a></li>
<li class="navBarCell1Rev">Use</li>
<li><a href="../../../../../../overview-tree.html">Tree</a></li>
<li><a href="../../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../../help-doc.html">Help</a></li>
</ul>
<div class="aboutLanguage">Thorntail API, 2.6.1.Final-SNAPSHOT</div>
</div>
<div class="subNav">
<ul class="navList">
<li>Prev</li>
<li>Next</li>
</ul>
<ul class="navList">
<li><a href="../../../../../../index.html?org/wildfly/swarm/microprofile/jwtauth/class-use/MicroProfileJWTAuthFraction.html" target="_top">Frames</a></li>
<li><a href="MicroProfileJWTAuthFraction.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_top">
<li><a href="../../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_top");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip.navbar.top">
<!-- -->
</a></div>
<!-- ========= END OF TOP NAVBAR ========= -->
<div class="header">
<h2 title="Uses of Class org.wildfly.swarm.microprofile.jwtauth.MicroProfileJWTAuthFraction" class="title">Uses of Class<br>org.wildfly.swarm.microprofile.jwtauth.MicroProfileJWTAuthFraction</h2>
</div>
<div class="classUseContainer">No usage of org.wildfly.swarm.microprofile.jwtauth.MicroProfileJWTAuthFraction</div>
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<div class="bottomNav"><a name="navbar.bottom">
<!-- -->
</a>
<div class="skipNav"><a href="#skip.navbar.bottom" title="Skip navigation links">Skip navigation links</a></div>
<a name="navbar.bottom.firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../../overview-summary.html">Overview</a></li>
<li><a href="../package-summary.html">Package</a></li>
<li><a href="../../../../../../org/wildfly/swarm/microprofile/jwtauth/MicroProfileJWTAuthFraction.html" title="class in org.wildfly.swarm.microprofile.jwtauth">Class</a></li>
<li class="navBarCell1Rev">Use</li>
<li><a href="../../../../../../overview-tree.html">Tree</a></li>
<li><a href="../../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../../help-doc.html">Help</a></li>
</ul>
<div class="aboutLanguage">Thorntail API, 2.6.1.Final-SNAPSHOT</div>
</div>
<div class="subNav">
<ul class="navList">
<li>Prev</li>
<li>Next</li>
</ul>
<ul class="navList">
<li><a href="../../../../../../index.html?org/wildfly/swarm/microprofile/jwtauth/class-use/MicroProfileJWTAuthFraction.html" target="_top">Frames</a></li>
<li><a href="MicroProfileJWTAuthFraction.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_bottom">
<li><a href="../../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_bottom");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip.navbar.bottom">
<!-- -->
</a></div>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
<p class="legalCopy"><small>Copyright © 2020 <a href="http://www.jboss.org">JBoss by Red Hat</a>. All rights reserved.</small></p>
</body>
</html>
|
apache-2.0
|
cglib/cglib
|
README.md
|
1087
|
cglib [](https://travis-ci.org/cglib/cglib)
================
***IMPORTANT NOTE: cglib is unmaintained and does not work well (or possibly at all?) in newer JDKs, particularly JDK17+. If you need to support newer JDKs, we will accept well-tested well-thought-out patches... but you'll probably have better luck migrating to something like [ByteBuddy](https://bytebuddy.net).***
Byte Code Generation Library is high level API to generate and transform JAVA byte code.
It is used by AOP, testing, data access frameworks to generate dynamic proxy objects and intercept field access.
https://github.com/cglib/cglib/wiki
How To: https://github.com/cglib/cglib/wiki/How-To
Latest Release: https://github.com/cglib/cglib/releases/latest
All Releases: https://github.com/cglib/cglib/releases
cglib-#.#_#.jar binary distribution, CGLIB classes only,
it must be used to extend cglib classes dependant on ASM API
cglib-nodep-#.#_#.jar binary distribution, CGLIB and renamed ASM classes,
not extendable
|
apache-2.0
|
pawel-nn/proj_app_bd_sem7
|
ProjAppBD/src/main/java/app/dataTransportObject/OrderDTO.java
|
144
|
package app.dataTransportObject;
import app.viewObject.OrderVO;
/**
 * Data-transport object carrying order data between the view layer and the
 * application layer.
 */
public class OrderDTO {

    /**
     * Creates an OrderDTO from the given order view object.
     *
     * <p>NOTE(review): {@code orderVo} is currently ignored — no fields are
     * read from or stored off it. Confirm whether order fields should be
     * copied into this DTO.
     *
     * @param orderVo source view object for this DTO
     */
    public OrderDTO(OrderVO orderVo) {
        // The redundant explicit super() call was removed; java.lang.Object's
        // no-arg constructor is invoked implicitly.
    }
}
|
apache-2.0
|
kubostech/KubOS
|
hal/kubos-hal/source/i2c.c
|
2962
|
/*
* KubOS HAL
* Copyright (C) 2016 Kubos Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#if (defined YOTTA_CFG_HARDWARE_I2C) && (YOTTA_CFG_HARDWARE_I2C_COUNT > 0)
#include "kubos-hal/i2c.h"
#include <string.h>
static KI2C k_i2cs[K_NUM_I2CS];
/**
 * Initialize the specified I2C bus with the given configuration.
 *
 * If the bus descriptor is currently unused (K_I2C_NO_BUS), the
 * configuration is copied in, the bus lock is created and the low-level
 * device init is run. An already-initialized bus is left untouched.
 *
 * @param i2c  bus number (1..K_NUM_I2CS)
 * @param conf configuration to copy into the bus descriptor
 * @return result of kprv_i2c_dev_init(), I2C_OK if already initialized,
 *         or I2C_ERROR for an invalid bus number or NULL conf
 */
KI2CStatus k_i2c_init(KI2CNum i2c, KI2CConf *conf)
{
    KI2C *k_i2c = kprv_i2c_get(i2c);
    /* kprv_i2c_get() returns NULL for an out-of-range bus number; also
     * guard conf so the memcpy below cannot dereference NULL. */
    if ((k_i2c == NULL) || (conf == NULL))
    {
        return I2C_ERROR;
    }
    if (k_i2c->bus_num == K_I2C_NO_BUS)
    {
        memcpy(&k_i2c->conf, conf, sizeof(KI2CConf));
        k_i2c->bus_num = i2c;
        csp_mutex_create(&(k_i2c->i2c_lock));
        return kprv_i2c_dev_init(i2c);
    }
    return I2C_OK;
}
/**
 * Terminate the specified I2C bus: run the low-level teardown, remove the
 * bus lock, and mark the descriptor unused so it can be re-initialized.
 *
 * Terminating an invalid or never-initialized bus is a no-op.
 *
 * @param i2c bus number (1..K_NUM_I2CS)
 */
void k_i2c_terminate(KI2CNum i2c)
{
    KI2C *k_i2c = kprv_i2c_get(i2c);
    /* kprv_i2c_get() returns NULL for an out-of-range bus number */
    if (k_i2c == NULL)
    {
        return;
    }
    if (k_i2c->bus_num != K_I2C_NO_BUS)
    {
        kprv_i2c_dev_terminate(i2c);
        csp_mutex_remove(&(k_i2c->i2c_lock));
        k_i2c->bus_num = K_I2C_NO_BUS;
    }
}
/*
 * Build a KI2CConf filled with the project-configured (yotta config)
 * default addressing mode, bus role and clock speed.
 */
KI2CConf k_i2c_conf_defaults(void)
{
    return (KI2CConf) {
        .addressing_mode = YOTTA_CFG_HARDWARE_I2C_DEFAULTS_ADDRESSINGMODE,
        .role = YOTTA_CFG_HARDWARE_I2C_DEFAULTS_ROLE,
        .clock_speed = YOTTA_CFG_HARDWARE_I2C_DEFAULTS_CLOCKSPEED
    };
}
/* Initialize the default I2C bus (DEFAULT_I2C) using the project-default
 * configuration from k_i2c_conf_defaults(). */
void k_i2c_default_init()
{
    KI2CConf defaults = k_i2c_conf_defaults();
    k_i2c_init(DEFAULT_I2C, &defaults);
}
/* Initialize the given I2C bus using the project-default configuration
 * from k_i2c_conf_defaults(). */
void k_i2c_default_dev_init(KI2CNum i2c)
{
    KI2CConf defaults = k_i2c_conf_defaults();
    k_i2c_init(i2c, &defaults);
}
/**
 * Write a buffer to a slave device on the specified I2C bus.
 *
 * The bus lock is taken for the duration of the transfer (currently this
 * blocks indefinitely on the mutex).
 *
 * @param i2c  bus number (1..K_NUM_I2CS)
 * @param addr slave address to write to
 * @param ptr  data to send; must be non-NULL
 * @param len  number of bytes to send
 * @return result of kprv_i2c_master_write(), or I2C_ERROR for an invalid
 *         bus number, uninitialized bus, NULL buffer, or lock failure
 */
KI2CStatus k_i2c_write(KI2CNum i2c, uint16_t addr, uint8_t* ptr, int len)
{
    KI2C * ki2c = kprv_i2c_get(i2c);
    KI2CStatus ret = I2C_ERROR;
    /* Guard against an out-of-range bus (kprv_i2c_get() returns NULL)
     * before dereferencing ki2c. */
    if ((ki2c != NULL) && (ki2c->bus_num != K_I2C_NO_BUS) && (ptr != NULL))
    {
        // Today...block indefinitely on the bus lock
        if (csp_mutex_lock(&(ki2c->i2c_lock), CSP_MAX_DELAY) == CSP_SEMAPHORE_OK)
        {
            ret = kprv_i2c_master_write(i2c, addr, ptr, len);
            csp_mutex_unlock(&(ki2c->i2c_lock));
        }
    }
    return ret;
}
/**
 * Read from a slave device on the specified I2C bus into a buffer.
 *
 * The bus lock is taken for the duration of the transfer (currently this
 * blocks indefinitely on the mutex).
 *
 * @param i2c  bus number (1..K_NUM_I2CS)
 * @param addr slave address to read from
 * @param ptr  destination buffer; must be non-NULL
 * @param len  number of bytes to read
 * @return result of kprv_i2c_master_read(), or I2C_ERROR for an invalid
 *         bus number, uninitialized bus, NULL buffer, or lock failure
 */
KI2CStatus k_i2c_read(KI2CNum i2c, uint16_t addr, uint8_t* ptr, int len)
{
    KI2C * ki2c = kprv_i2c_get(i2c);
    KI2CStatus ret = I2C_ERROR;
    /* Guard against an out-of-range bus (kprv_i2c_get() returns NULL)
     * before dereferencing ki2c. */
    if ((ki2c != NULL) && (ki2c->bus_num != K_I2C_NO_BUS) && (ptr != NULL))
    {
        // Today...block indefinitely on the bus lock
        if (csp_mutex_lock(&(ki2c->i2c_lock), CSP_MAX_DELAY) == CSP_SEMAPHORE_OK)
        {
            ret = kprv_i2c_master_read(i2c, addr, ptr, len);
            csp_mutex_unlock(&(ki2c->i2c_lock));
        }
    }
    return ret;
}
/**
 * Map a bus number to its KI2C descriptor.
 *
 * Buses are numbered 1..K_NUM_I2CS and stored at k_i2cs[i2c - 1]; the
 * original code only checked the upper bound, so i2c == 0 would index
 * k_i2cs[-1]. Both bounds are now validated.
 *
 * @param i2c bus number (1..K_NUM_I2CS)
 * @return pointer to the bus descriptor, or NULL if i2c is out of range
 */
KI2C* kprv_i2c_get(KI2CNum i2c)
{
    //Validate I2C number (1-based; 0 would underflow the array below)
    if ((i2c < 1) || (i2c > (K_NUM_I2CS)))
    {
        return NULL;
    }
    return &k_i2cs[i2c - 1];
}
#endif
|
apache-2.0
|
filestack/filestack-php
|
docs/classes/Filestack_FilestackClient/getTags.html
|
3701
|
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE html SYSTEM "about:legacy-compat">
<html xmlns="http://www.w3.org/1999/xhtml" lang="en">
<head>
<title>phpDox - Filestack\FilestackClient::getTags</title>
<link rel="stylesheet" type="text/css" href="../../css/style.css" media="screen"/>
<meta http-equiv="content-type" content="text/html; charset=utf-8"/>
</head>
<body>
<nav class="topnav">
<ul>
<li>
<div class="logo"><span>/**</span>phpDox</div>
</li>
<li class="separator">
<a href="../../index.html">Overview</a>
</li>
<li class="separator">
<a href="../../namespaces.html">Namespaces</a>
</li>
<li>
<a href="../../classes.html">Classes</a>
</li>
<li>
<a href="../../traits.html">Traits</a>
</li>
<li class="separator">
<a href="../../source/index.html">Source</a>
</li>
</ul>
</nav>
<div id="mainstage">
<div class="box">
<ul class="breadcrumb">
<li>
<a href="../../index.html">Overview</a>
</li>
<li class="separator">
<a href="../../classes.html">Classes</a>
</li>
<li class="separator">
<a href="../../classes.html#Filestack">Filestack</a>
</li>
<li class="separator">
<a title="Filestack\FilestackClient" href="../../classes/Filestack_FilestackClient.html">FilestackClient</a>
</li>
<li class="separator">getTags</li>
</ul>
</div>
<nav class="box">
<ul>
<li>
<a href="#introduction">Introduction</a>
</li>
<li>
<a href="#synopsis">Synopsis</a>
</li>
<li>
<a href="#parameter">Parameter</a>
</li>
<li>
<a href="#return">Return</a>
</li>
<li>
<a href="#throws">Throws</a>
</li>
<li>
<a href="#tests">Tests</a>
</li>
<li>
<a href="../../source/FilestackClient.php.html#line149">Source</a>
</li>
</ul>
</nav>
<section>
<h1><small>Filestack\FilestackClient::</small>getTags</h1>
<h4>Get tags of a filelink</h4>
<p/>
<ul/>
<h2 id="signature">Signature</h2>
<div class="styled synopsis">
<code>public function getTags(string
$handle )
</code>
</div>
<h2 id="parameterlist">Parameters</h2>
<dl class="styled">
<dt><code>$handle</code>
—
string</dt>
<dd>Filestack filelink handle</dd>
</dl>
<h2 id="return">Returns</h2>
<dl class="styled">
<dt>json</dt>
<dd><br/>
</dd>
</dl>
<h2 id="throws">Errors/Exceptions</h2>
<dl class="styled">
<dt>
<code>
<a title="Filestack\FilestackException" href="../../classes/Filestack_FilestackException.html">FilestackException</a>
</code>
</dt>
<dd>if API call fails</dd>
</dl>
<h2 id="tests">Test Coverage</h2>
<div class="styled">
<h3>Information</h3>
<ul class="styled">
<li>Coverage: / Lines (%)</li>
<li>Tests: 0</li>
<li>Passed: 0 (0%)</li>
</ul>
</div>
</section>
</div>
<footer>
<span>Generated using phpDox 0.12.0 - Copyright (C) 2010 - 2020 by Arne Blankerts and Contributors</span>
</footer>
</body>
</html>
|
apache-2.0
|
mslocrian/cuview
|
apis/parameterhandlers.go
|
1535
|
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apis
import (
"fmt"
"reflect"
)
// ParameterHandler is a stateless shell type that groups the
// per-endpoint query-parameter processing hooks (Get*Params methods)
// used by CallParameterHandlerFunc.
type ParameterHandler struct {
}
// GetInterfacesParams will apply the request parameters in p to the
// interfaces payload. It is currently a passthrough: s is returned
// unmodified.
func (h *ParameterHandler) GetInterfacesParams(p map[string][]string, s []byte) []byte {
	// TODO: honour the query parameters; until then, hand the payload
	// straight back.
	return s
}
// GetBgpv4NeighborsParams will apply the request parameters in p to the
// BGPv4-neighbors payload. It is currently a passthrough: s is returned
// unmodified.
func (h *ParameterHandler) GetBgpv4NeighborsParams(p map[string][]string, s []byte) []byte {
	// TODO: honour the query parameters; until then, hand the payload
	// straight back.
	return s
}
// GetIpv4RoutesParams will apply the request parameters in p to the
// IPv4-routes payload. It is currently a passthrough: s is returned
// unmodified.
func (h *ParameterHandler) GetIpv4RoutesParams(p map[string][]string, s []byte) []byte {
	// TODO: honour the query parameters; until then, hand the payload
	// straight back.
	return s
}
func CallParameterHandlerFunc(c interface{}, funcName string, params ...interface{}) (out []reflect.Value, err error) {
function := reflect.ValueOf(c)
m := function.MethodByName(funcName)
if !m.IsValid() {
return make([]reflect.Value, 0), fmt.Errorf("Method not found \"%s\"\n", funcName)
}
in := make([]reflect.Value, len(params))
for i, param := range params {
in[i] = reflect.ValueOf(param)
}
out = m.Call(in)
return
}
|
apache-2.0
|
play2-maven-plugin/play2-maven-plugin.github.io
|
play2-maven-plugin/1.0.0-beta3/play2-providers/modules.html
|
8871
|
<!DOCTYPE html>
<!--
| Generated by Apache Maven Doxia at 2015-06-27
| Rendered using Apache Maven Fluido Skin 1.4
-->
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<meta name="Date-Revision-yyyymmdd" content="20150627" />
<meta http-equiv="Content-Language" content="en" />
<title>Play! 2.x Providers – Project Modules</title>
<link rel="stylesheet" href="./css/apache-maven-fluido-1.4.min.css" />
<link rel="stylesheet" href="./css/site.css" />
<link rel="stylesheet" href="./css/print.css" media="print" />
<script type="text/javascript" src="./js/apache-maven-fluido-1.4.min.js"></script>
<link rel="stylesheet" href="./css/site.css" type="text/css"/>
<!-- Google Analytics -->
<script type="text/javascript">
var _gaq = _gaq || [];
_gaq.push(['_setAccount', 'UA-17472708-2']);
_gaq.push(['_trackPageview']);
(function() {
var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
})();
</script>
</head>
<body class="topBarDisabled">
<div class="container-fluid">
<div id="banner">
<div class="pull-left">
<div id="bannerLeft">
<h2>Play! 2.x Providers</h2>
</div>
</div>
<div class="pull-right"> </div>
<div class="clear"><hr/></div>
</div>
<div id="breadcrumbs">
<ul class="breadcrumb">
<li id="publishDate">Last Published: 2015-06-27
<span class="divider">|</span>
</li>
<li id="projectVersion">Version: 1.0.0-beta3
</li>
</ul>
</div>
<div class="row-fluid">
<div id="leftColumn" class="span2">
<div class="well sidebar-nav">
<ul class="nav nav-list">
<li class="nav-header">Parent Project</li>
<li>
<a href="../index.html" title="Play! 2.x">
<span class="none"></span>
Play! 2.x</a>
</li>
<li class="nav-header">Modules</li>
<li>
<a href="play2-provider-play21/index.html" title="Play! 2.x Provider for Play! 2.1.x">
<span class="none"></span>
Play! 2.x Provider for Play! 2.1.x</a>
</li>
<li>
<a href="play2-provider-play22/index.html" title="Play! 2.x Provider for Play! 2.2.x">
<span class="none"></span>
Play! 2.x Provider for Play! 2.2.x</a>
</li>
<li>
<a href="play2-provider-play23/index.html" title="Play! 2.x Provider for Play! 2.3.x">
<span class="none"></span>
Play! 2.x Provider for Play! 2.3.x</a>
</li>
<li>
<a href="play2-provider-play24/index.html" title="Play! 2.x Provider for Play! 2.4.x">
<span class="none"></span>
Play! 2.x Provider for Play! 2.4.x</a>
</li>
<li class="nav-header">Project Documentation</li>
<li>
<a href="project-info.html" title="Project Information">
<span class="icon-chevron-down"></span>
Project Information</a>
<ul class="nav nav-list">
<li>
<a href="dependencies.html" title="Dependencies">
<span class="none"></span>
Dependencies</a>
</li>
<li>
<a href="dependency-info.html" title="Dependency Information">
<span class="none"></span>
Dependency Information</a>
</li>
<li>
<a href="distribution-management.html" title="Distribution Management">
<span class="none"></span>
Distribution Management</a>
</li>
<li>
<a href="index.html" title="About">
<span class="none"></span>
About</a>
</li>
<li>
<a href="issue-tracking.html" title="Issue Tracking">
<span class="none"></span>
Issue Tracking</a>
</li>
<li>
<a href="license.html" title="Project License">
<span class="none"></span>
Project License</a>
</li>
<li class="active">
<a href="#"><span class="none"></span>Project Modules</a>
</li>
<li>
<a href="plugin-management.html" title="Plugin Management">
<span class="none"></span>
Plugin Management</a>
</li>
<li>
<a href="plugins.html" title="Project Plugins">
<span class="none"></span>
Project Plugins</a>
</li>
<li>
<a href="team-list.html" title="Project Team">
<span class="none"></span>
Project Team</a>
</li>
<li>
<a href="source-repository.html" title="Source Repository">
<span class="none"></span>
Source Repository</a>
</li>
<li>
<a href="project-summary.html" title="Project Summary">
<span class="none"></span>
Project Summary</a>
</li>
</ul>
</li>
</ul>
<hr />
<div id="poweredBy">
<div class="clear"></div>
<div class="clear"></div>
<div class="clear"></div>
<div class="clear"></div>
<a href="http://maven.apache.org/" title="Built by Maven" class="poweredBy">
<img class="builtBy" alt="Built by Maven" src="./images/logos/maven-feather.png" />
</a>
</div>
</div>
</div>
<div id="bodyColumn" class="span10" >
<div class="section">
<h2><a name="Project_Modules"></a>Project Modules</h2><a name="Project_Modules"></a>
<p>This project has declared the following modules:</p>
<table border="0" class="table table-striped">
<tr class="a">
<th>Name</th>
<th>Description</th></tr>
<tr class="b">
<td><a href="play2-provider-play21/index.html">Play! 2.x Provider for Play! 2.1.x</a></td>
<td>Play! 2.x Provider for Play! 2.1.x</td></tr>
<tr class="a">
<td><a href="play2-provider-play22/index.html">Play! 2.x Provider for Play! 2.2.x</a></td>
<td>Play! 2.x Provider for Play! 2.2.x</td></tr>
<tr class="b">
<td><a href="play2-provider-play23/index.html">Play! 2.x Provider for Play! 2.3.x</a></td>
<td>Play! 2.x Provider for Play! 2.3.x</td></tr>
<tr class="a">
<td><a href="play2-provider-play24/index.html">Play! 2.x Provider for Play! 2.4.x</a></td>
<td>Play! 2.x Provider for Play! 2.4.x</td></tr></table></div>
</div>
</div>
</div>
<hr/>
<footer>
<div class="container-fluid">
<div class="row-fluid">
<p >Copyright © 2013–2015.
All rights reserved.
</p>
</div>
</div>
</footer>
</body>
</html>
|
apache-2.0
|
guswns0528/TizenRT
|
apps/examples/hello/Makefile
|
4537
|
###########################################################################
#
# Copyright 2016 Samsung Electronics All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
###########################################################################
############################################################################
# apps/examples/hello/Makefile
#
# Copyright (C) 2008, 2010-2013 Gregory Nutt. All rights reserved.
# Author: Gregory Nutt <[email protected]>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# 3. Neither the name NuttX nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
############################################################################
# Pull in the board configuration and the common application build macros.
-include $(TOPDIR)/.config
-include $(TOPDIR)/Make.defs
include $(APPDIR)/Make.defs
# Hello, World! built-in application info
APPNAME = hello
THREADEXEC = TASH_EXECMD_ASYNC
# Hello, World! Example source lists (no assembly or extra C sources).
ASRCS =
CSRCS =
MAINSRC = hello_main.c
AOBJS = $(ASRCS:.S=$(OBJEXT))
COBJS = $(CSRCS:.c=$(OBJEXT))
MAINOBJ = $(MAINSRC:.c=$(OBJEXT))
SRCS = $(ASRCS) $(CSRCS) $(MAINSRC)
OBJS = $(AOBJS) $(COBJS)
# In flat (non-kernel) builds the main object is archived into libapps too.
ifneq ($(CONFIG_BUILD_KERNEL),y)
OBJS += $(MAINOBJ)
endif
# Path to the libapps archive, spelled per host/toolchain path style.
ifeq ($(CONFIG_WINDOWS_NATIVE),y)
BIN = ..\..\libapps$(LIBEXT)
else
ifeq ($(WINTOOL),y)
BIN = ..\\..\\libapps$(LIBEXT)
else
BIN = ../../libapps$(LIBEXT)
endif
endif
ifeq ($(WINTOOL),y)
INSTALL_DIR = "${shell cygpath -w $(BIN_DIR)}"
else
INSTALL_DIR = $(BIN_DIR)
endif
CONFIG_EXAMPLES_HELLO_PROGNAME ?= hello$(EXEEXT)
PROGNAME = $(CONFIG_EXAMPLES_HELLO_PROGNAME)
ROOTDEPPATH = --dep-path .
# Common build
VPATH =
all: .built
.PHONY: clean depend distclean
$(AOBJS): %$(OBJEXT): %.S
	$(call ASSEMBLE, $<, $@)
$(COBJS) $(MAINOBJ): %$(OBJEXT): %.c
	$(call COMPILE, $<, $@)
# Archive all objects into libapps and stamp the build as complete.
.built: $(OBJS)
	$(call ARCHIVE, $(BIN), $(OBJS))
	@touch .built
# Kernel builds link a standalone ELF program and install it; flat builds
# have nothing to install.
ifeq ($(CONFIG_BUILD_KERNEL),y)
$(BIN_DIR)$(DELIM)$(PROGNAME): $(OBJS) $(MAINOBJ)
	@echo "LD: $(PROGNAME)"
	$(Q) $(LD) $(LDELFFLAGS) $(LDLIBPATH) -o $(INSTALL_DIR)$(DELIM)$(PROGNAME) $(ARCHCRT0OBJ) $(MAINOBJ) $(LDLIBS)
	$(Q) $(NM) -u $(INSTALL_DIR)$(DELIM)$(PROGNAME)
install: $(BIN_DIR)$(DELIM)$(PROGNAME)
else
install:
endif
# Register hello_main as a TASH built-in command when both options are set.
ifeq ($(CONFIG_BUILTIN_APPS)$(CONFIG_EXAMPLES_HELLO),yy)
$(BUILTIN_REGISTRY)$(DELIM)$(APPNAME)_main.bdat: $(DEPCONFIG) Makefile
	$(call REGISTER,$(APPNAME),$(APPNAME)_main,$(THREADEXEC))
context: $(BUILTIN_REGISTRY)$(DELIM)$(APPNAME)_main.bdat
else
context:
endif
.depend: Makefile $(SRCS)
	@$(MKDEP) $(ROOTDEPPATH) "$(CC)" -- $(CFLAGS) -- $(SRCS) >Make.dep
	@touch $@
depend: .depend
clean:
	$(call DELFILE, .built)
	$(call CLEAN)
distclean: clean
	$(call DELFILE, Make.dep)
	$(call DELFILE, .depend)
-include Make.dep
.PHONY: preconfig
preconfig:
|
apache-2.0
|
WU-ARL/RLI
|
HardwareGraphic.java
|
32220
|
/*
* Copyright (c) 2005-2013 Jyoti Parwatikar
* and Washington University in St. Louis
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/*
* File: HardwareGraphic.java
* Author: Jyoti Parwatikar
* Email: [email protected]
* Organization: Washington University
*
* Derived from: none
*
* Date Created: 1/14/2008
*
* Description:
*
* Modification History:
*
*/
import java.awt.*;
import java.awt.geom.*;
import java.lang.Math;
import java.lang.String;
import java.awt.event.*;
import java.beans.PropertyChangeEvent;
import javax.swing.event.*;
import javax.swing.*;//JComponent;
//import javax.swing.JLabel;
import java.util.Vector;
import javax.xml.stream.*;
/**
 * Circular on-screen representation of a hardware node: a central "main"
 * button (MBUTTON_FRACTION of the diameter) surrounded by pie-slice port
 * buttons, plus a small draggable "spinner" dot that rotates the port
 * numbering around the circle.  Labels above/below show the component and
 * user-assigned names.
 */
public class HardwareGraphic extends ONLGraphic
{
    protected static final int TEST_HWGRAPHIC = 1; // debug print level used by ExpCoordinator.print calls in this class
    protected static final int MAIN_BUTTON = -1; // pseudo-index distinguishing the main button from port indices
    protected static final int OFFSET = 5; //space between the inner and outer border of the component
    protected static final int D_OFFSET = 12;//12; //2*OFFSET - the offset for the diameter
    private MainButton mainButton = null; //circular button 1/3 diameter of switch representation
    private Area mButtonArea = null; //area that defines mainButton space used for computing the area of the portButton
    private PortButton portButtons[] = null; // one clickable slice per port, indexed by port id
    private PortButton.PArea portAreas[] = null; //pie segments
    private int numPorts = 8; // overwritten from the Hardware component in the constructor
    private boolean dimensionChange = true; // NOTE(review): appears unused in this class — confirm before removing
    private boolean selected = false;
    private Point2D.Double startOfDrag;
    private TopButtonListener pButtonListener = null; // forwards port-slice mouse events (also handles spinner drags)
    private TopButtonListener mButtonListener = null; // forwards main-button/label mouse events
    protected static final double MBUTTON_FRACTION = 0.4; // main button diameter as a fraction of the full diameter
    private ComponentLabel componentLabel = null; // system-assigned label, drawn below the circle
    private ComponentLabel userLabel = null; // user-assigned label, drawn above the circle
    private int originalW = 0; // last width passed to setSize, replayed when the user label changes
    private int originalH = 0;
    //private double diameter = 0;
    private Ellipse2D.Double borderEllipse = null; // outer circle of the graphic
    private double radius = 0;
    private double labelRadius = 0; //radius used to calculate where to draw the label on the port button
    private double CPRadius = 0; //radius used to calculate where to put the port point on the cp port button
    private SpinnerButton spinner = null; // draggable dot marking port 0's slice
    private boolean graphicSelected = false; // toggled by setSelected()
    /**
     * Common contract for the clickable regions of the graphic — the central
     * main button and the per-port pie slices.  "Pressed" state is purely
     * visual (darkened fill while the mouse is down).
     */
    public static interface HWButton
    {
        public boolean isPortButton(); // true for a port slice
        public boolean isMainButton(); // true for the central button
        public void setPressed(boolean b) ;
        public boolean isPressed();
    }
    /**
     * Small centered text label tied to an ONLComponent.  Tracks the text
     * length (numChars) so the enclosing graphic can widen itself to fit
     * long labels — see HardwareGraphic.setSize(int,int).
     */
    protected static class ComponentLabel extends JLabel implements ONLComponentButton
    {
        private ONLComponent component = null; // component this label describes
        private int numChars = 0; // length of the current text, cached for width estimates
        public ComponentLabel(String lbl, ONLComponent c)
        {
            super(lbl);
            component = c;
            setFont(new Font("Dialog", Font.PLAIN, 9));
            setHorizontalAlignment(SwingConstants.CENTER);
            setHorizontalTextPosition(SwingConstants.CENTER);
            setForeground(Color.black);
            numChars = lbl.length();
        }
        public ONLComponent getONLComponent() { return component;}
        // Keep numChars in sync with every text change.
        public void setText(String s) { numChars = s.length(); super.setText(s);}
        public int getNumChars() { return numChars;}
    }//end class HardwareGraphic.CLabel
    /**
     * The small draggable dot that marks the slice currently holding port 0.
     * Dragging it (see SpinnerListener) rotates all port indices; setIndex
     * computes the rotation delta and spins the enclosing graphic.
     */
    protected static class SpinnerButton extends JComponent
    {
        private int curIndex = 0; // slice index the spinner currently sits on
        private boolean selected = false; // true while the user is dragging it
        private HardwareGraphic routerGraphic;
        private Ellipse2D.Double ellipse = null; // the dot's on-screen shape
        private static final double SRADIUS = 4; //radius of spinner
        public SpinnerButton(HardwareGraphic nspg)
        {
            super();
            setDoubleBuffered(true);
            routerGraphic = nspg;
            ellipse = new Ellipse2D.Double();
            setCenter();
            setOpaque(false);
            setVisible(true);
        }
        public boolean contains(int dx, int dy)
        {
            return (ellipse.contains(dx,dy));
        }
        public int getIndex() { return curIndex;}
        /**
         * Moves the spinner to slice ndx, rotating every port button by the
         * same delta.  Direction is chosen so the shorter consistent spin
         * (clockwise vs counter-clockwise) is applied.
         */
        public void setIndex(int ndx)
        {
            int diff = HardwareGraphic.mod((ndx - curIndex), routerGraphic.numPorts);
            if (diff > 0)
            {
                //ExpCoordinator.printer.print(new String("SpinnerButton.setIndex from " + curIndex + " to " + ndx), 10);
                if ((HardwareGraphic.mod((curIndex + diff), routerGraphic.numPorts)) == ndx) routerGraphic.spinClockwise(diff);
                else
                {
                    routerGraphic.spinCClockwise(diff);
                }
                if (curIndex != ndx)
                {
                    curIndex = ndx;
                    setCenter();
                }
                routerGraphic.revalidate();
                routerGraphic.repaint();
            }
        }
        /** Re-centers the dot on the connector point of its current slice. */
        public void setCenter()
        {
            if ((curIndex < routerGraphic.numPorts) && (curIndex >= 0))
            {
                //ExpCoordinator.printer.print("SpinnerButton::setCenter " + curIndex);
                Point2D.Double cen_pnt = routerGraphic.getPortArea(curIndex).getConnectorPoint();
                //Point2D.Double cen_pnt = (Point2D.Double)routerGraphic.getPortButton(curIndex).getCenter();
                Point2D tmp_pnt = new Point2D.Double();
                tmp_pnt.setLocation((cen_pnt.getX() - SRADIUS), (cen_pnt.getY() - SRADIUS));
                ellipse.setFrameFromCenter(cen_pnt,tmp_pnt);
            }
        }
        public boolean isSelected() { return selected;}
        public void setSelected(boolean b)
        {
            if (selected != b)
            {
                selected = b;
                resetColor();
            }
        }
        /** Red while dragging, pink otherwise; colors run through the graphic's converter. */
        public void resetColor()
        {
            if (selected) setBackground(routerGraphic.convertColor(Color.red));
            else setBackground(routerGraphic.convertColor(Color.pink));
            setCenter();
        }
        public void paintComponent(Graphics g)
        {
            super.paintComponent(g);
            //ExpCoordinator.printer.print("SpinnerButton::paintComponent");
            //setCenter();
            Graphics2D g2 = (Graphics2D)g;
            Color oldColor = g2.getColor();
            g2.setColor(getBackground());
            g2.fill(ellipse);
            g2.setColor(routerGraphic.getForeground());
            g2.draw(ellipse);
            g2.setColor(oldColor); // restore caller's color
        }
    }// inner class Spinner
    /**
     * Mouse listener that implements spinner dragging.  Events that do not
     * hit the spinner dot fall through to the normal TopButtonListener
     * behavior via the super calls.
     */
    private class SpinnerListener extends TopButtonListener //MouseInputAdapter
    {
        private HardwareGraphic routerGraphic = null;
        private boolean selected = false; // true between press-on-spinner and release
        private Point2D.Double startOfDrag = null; // last drag position
        private SpinnerButton spinner = null;
        public SpinnerListener(HardwareGraphic nsp_g)
        {
            super();
            routerGraphic = nsp_g;
            spinner = nsp_g.spinner;
        }
        /** Starts a spinner drag if the press landed on the dot; otherwise normal handling. */
        public void mousePressed(java.awt.event.MouseEvent e)
        {
            //ExpCoordinator.print(new String("HardwareGraphic.SpinnerListener.mousePressed point=" + e.getPoint() + " spinner=" + spinner.getLocation()), TEST_HWGRAPHIC);
            if (spinner.contains(e.getPoint()))
            {
                selected = true;
                startOfDrag = new Point2D.Double(e.getX(), e.getY());
                spinner.setSelected(true);
                routerGraphic.revalidate();
                routerGraphic.repaint();
                //spinner.repaint();
            }
            else super.mousePressed(e);
        }
        /** Ends an in-progress spinner drag; otherwise normal handling. */
        public void mouseReleased(java.awt.event.MouseEvent e)
        {
            if (selected)
            {
                selected = false;
                startOfDrag = null;
                //ExpCoordinator.printer.print("SpinnerReleased");
                spinner.setSelected(false);
                routerGraphic.revalidate();
                routerGraphic.repaint();
                //routerGraphic.printPorts();
            }
            else super.mouseReleased(e);
        }
        /**
         * While dragging, finds which slice the pointer is over and moves the
         * spinner there (which rotates all port indices).
         */
        public void mouseDragged(java.awt.event.MouseEvent e)
        {
            //if not in this component do nothing
            if (selected)
            {
                //convert x,y coords into double
                double dx = (double)e.getX();
                double dy = (double)e.getY();
                //use for spinner
                //if spinner moves to a new section clockwise or counter clockwise update picture
                int spin_ndx = spinner.getIndex();
                int tmp_ndx = spin_ndx;
                // Last matching slice wins (no break) — kept as in the original.
                for (int i = 0; i < numPorts; ++i)
                {
                    if (routerGraphic.getPortArea(i).containsSpinner(dx,dy))
                    {
                        tmp_ndx = i;
                        if (tmp_ndx != spin_ndx) ExpCoordinator.print(new String("HardwareGraphic.SpinnerListener.mouseDragged index change from " + spin_ndx + " to " + tmp_ndx));
                        //break;
                    }
                }
                spinner.setIndex(tmp_ndx); //should set spinners new location
                startOfDrag.setLocation(dx, dy);
                routerGraphic.revalidate();
                routerGraphic.repaint();
            }
            else super.mouseDragged(e);
        }
    } //inner class SpinnerListener
    /**
     * The circular button at the center of the graphic.  Pressing it toggles
     * the graphic's selection (wired in the HardwareGraphic constructor).
     * Paints only its outline; the label drawing helper exists but is not
     * invoked from paintComponent in this version.
     */
    public static class MainButton extends JComponent implements HWButton, ONLComponentButton //Ellipse2D.Double
    {
        private HardwareGraphic routerGraphic = null;
        private String mlabel; // component label text (drawn by drawLabel when called)
        private int labelOffsetX = -1;
        private int labelOffsetY = -1;
        private double labelX = 0; // label anchor, recomputed in setSize
        private double labelY = 0;
        private Ellipse2D.Double ellipse = null; // button outline
        private boolean pressed = false;
        private Point2D.Double center = null; // center in this component's own coordinates
        public MainButton(HardwareGraphic nspg, String lbl)
        {
            super();
            setDoubleBuffered(true);
            //ExpCoordinator.printer.print("MainButton::MainButton for " + lbl);
            routerGraphic = nspg;
            mlabel = lbl;
            ellipse = new Ellipse2D.Double();
            center = new Point2D.Double();
            setOpaque(false);
            setVisible(true);
            setFont(new Font("Dialog", Font.PLAIN, 8));
            // Track visual pressed state directly on this component.
            addMouseListener(new MouseAdapter()
            {
                public void mousePressed(MouseEvent e)
                {
                    setPressed(true);
                }
                public void mouseReleased(MouseEvent e)
                {
                    setPressed(false);
                }
            });
        }
        /** Resizes to a d x d square and recomputes the ellipse, center and label anchor. */
        public void setSize(double d)
        {
            this.setSize((int)d,(int)d);
            Point p = getLocation();
            ellipse.setFrame(p.getX(), p.getY(), d, d);
            center.setLocation(d/2, d/2);
            labelX = ellipse.getCenterX() - 9; // empirically-chosen text offsets
            labelY = ellipse.getCenterY() + 2;
        }
        public void paintComponent(Graphics g)
        {
            super.paintComponent(g);
            //ExpCoordinator.printer.print("MainButton::paintComponent");
            Graphics2D g2 = (Graphics2D)g;
            Color oldColor = g2.getColor();
            Font oldFont = g2.getFont();
            g2.setFont(routerGraphic.getFont());
            //if (pressed)
            //{
            //  g2.setColor(routerGraphic.getBackground().darker());
            //  g2.fill(ellipse);
            //}
            g2.setColor(routerGraphic.getForeground());
            g2.draw(ellipse);
            //drawLabel(g2);
            g2.setColor(oldColor); // restore caller's graphics state
            g2.setFont(oldFont);
        }
        /** Draws mlabel centered in the button (currently not called from paintComponent). */
        public void drawLabel(Graphics2D g2)
        {
            //place label of switch id in middle of button
            float fsize = 8;
            g2.setFont(getFont());
            g2.setColor(routerGraphic.getForeground());
            g2.drawString(mlabel, (int)labelX, (int)labelY);
        }
        public Point2D getCenter()
        {
            //Point2D.Double rtn = new Point2D.Double( ellipse.getCenterX(), ellipse.getCenterY());
            return center;
        }
        //interface HWButton
        public boolean isPortButton() { return false;}
        public boolean isMainButton() { return true;}
        public void setPressed(boolean b)
        {
            if (pressed != b)
            {
                pressed = b;
                routerGraphic.repaint();
            }
        }
        public boolean isPressed() { return pressed;}
        //end interface HWButton
        public ONLComponent getONLComponent() { return routerGraphic.getONLComponent();}
    }//inner class MainButton
    /**
     * One pie-slice of the circle, representing a hardware port.  The slice
     * geometry lives in the nested PArea (an Area = pie arc minus the main
     * button's disk); the PortButton itself handles painting, pressed state
     * and the index/rotation bookkeeping used by the spinner.
     */
    public static class PortButton extends ONLGraphic implements HWButton //Arc2D.Double
    {
        private static final double CONNRADIUS = 3; //radius of port connector graphic
        private String plabel; // port number text
        private Ellipse2D.Double pconnector; // small filled dot where links attach
        private boolean pressed = false;
        private HardwareGraphic routerGraphic = null;
        private PArea parea = null; // current slice geometry (changes when ports rotate)
        private int index; // slice position, 0..numPorts-1 (distinct from portID after spinning)
        private int numPorts;
        private int portID = 0; // fixed hardware port number
        private int labelX = 0; // label anchor, derived from parea
        private int labelY = 0;
        /**
         * Geometry of one slice position: the pie arc, plus precomputed
         * points — ppPoint (outer-rim connector), cpPoint (inner-rim point,
         * used for port 0 / the CP port), lblPoint (mid-radius label anchor).
         * labelTheta is the angle bisecting the slice.
         */
        protected static class PArea extends Area
        {
            private int index;
            private int numPorts;
            private Arc2D.Double arc = null;
            private Area area = null;
            private Point2D.Double ppPoint = null;
            private Point2D.Double cpPoint = null;
            private Point2D.Double lblPoint = null;
            private double labelTheta = 0; //angle needed to draw points
            private HardwareGraphic routerGraphic = null;
            public PArea(HardwareGraphic routerg, int ndx)
            {
                super();
                index = ndx;
                numPorts = routerg.numPorts;
                arc = new Arc2D.Double(Arc2D.PIE);
                numPorts = routerg.numPorts;
                routerGraphic = routerg;
                cpPoint = new Point2D.Double();
                ppPoint = new Point2D.Double();
                lblPoint = new Point2D.Double();
                // Slices start at 12 o'clock (90 degrees) and proceed clockwise.
                double ext = 360/numPorts;
                arc.setAngleExtent(ext);
                double ang_strt = 90 - (index * ext);// + 90;
                arc.setAngleStart(ang_strt);
                double labelDegrees = ang_strt + (ext/2);
                labelTheta = Math.toRadians(labelDegrees);
            }
            /** Recomputes the three anchor points from the circle's center (x,y) and radius rad. */
            public void setPoints(double x, double y, double rad)
            {
                //ExpCoordinator.printer.print("PortButton " + index + "::SetPoints center = (" + x + ", " + y + ") r = " + rad);
                double tmp_x =(Math.cos(labelTheta)*rad) + x;
                double tmp_y = y - (Math.sin(labelTheta)*rad);
                ppPoint.setLocation(tmp_x, tmp_y);
                //ExpCoordinator.printer.print(" ppPoint = (" + tmp_x + ", " + tmp_y);
                double tmp_r = rad * MBUTTON_FRACTION; // inner rim (main button edge)
                tmp_x = (Math.cos(labelTheta)*tmp_r) + x;
                tmp_y = y - (Math.sin(labelTheta)*tmp_r);
                cpPoint.setLocation(tmp_x, tmp_y);
                //ExpCoordinator.printer.print(" cpPoint = (" + tmp_x + ", " + tmp_y);
                tmp_r = (rad * (MBUTTON_FRACTION + 1))/2; // halfway between inner and outer rim
                tmp_x = (Math.cos(labelTheta)*tmp_r) + x;
                tmp_y = y - (Math.sin(labelTheta)*tmp_r);
                lblPoint.setLocation(tmp_x, tmp_y);
                //ExpCoordinator.printer.print(" lblPoint = (" + tmp_x + ", " + tmp_y);
            }
            /** Rebuilds the slice area as (pie arc of diameter d at loc) minus the main button disk. */
            public void setSize(double d, Point loc)
            {
                Point p = loc;
                ExpCoordinator.print(new String("PortButton.PArea setSize " + d + " location:(" + p.getX() + ", " + p.getY() + ") index = " + index), TEST_HWGRAPHIC);
                //arc.setFrame((p.getX() + HardwareGraphic.OFFSET), (p.getY() + HardwareGraphic.OFFSET), d, d);
                arc.setFrame(p.getX(), p.getY(), d, d);
                reset();
                add(new Area(arc));
                subtract(routerGraphic.getMainButtonArea());
                setPoints(arc.getCenterX(), arc.getCenterY(), (d/2));
            }
            /** Connector point translated into the parent container's coordinates (for link drawing). */
            public Point2D getLinkPoint()
            {
                Point loc = routerGraphic.getLocation();
                Point tmp_p = new Point((int)ppPoint.getX(), (int)ppPoint.getY());
                tmp_p.translate((int)loc.getX(), ((int)loc.getY() + D_OFFSET));
                //ExpCoordinator.printer.print(" link point = ( " + tmp_p.getX() + ", " + tmp_p.getY() + ")");
                return tmp_p;
            }
            /**
             * Hit test used during spinner drags: does (x,y) fall in a small
             * rectangle spanning this slice's outer chord (inflated by the
             * spinner radius when degenerate)?
             */
            protected boolean containsSpinner(double x, double y)
            {
                //boolean rtn = outerArc.contains(x,y);
                Rectangle2D.Double rect = new Rectangle2D.Double();
                Point2D endPoint = arc.getEndPoint();
                Point2D strPoint = arc.getStartPoint();
                rect.setFrameFromDiagonal(strPoint, endPoint);
                if (rect.getHeight() < 1) rect.setRect(rect.getX(), (rect.getY()-SpinnerButton.SRADIUS), rect.getWidth(), (rect.getHeight() + (2*SpinnerButton.SRADIUS)));
                if (rect.getWidth() < 1) rect.setRect((rect.getX()-SpinnerButton.SRADIUS), rect.getY(), (rect.getWidth() + (2*SpinnerButton.SRADIUS)), rect.getHeight());
                boolean rtn = rect.contains(x,y);
                if (index == 3)
                    ExpCoordinator.print(new String("HardwareGraphic.PortButton.PArea(" + index + ").containsSpinner (" + x + ", " + y + ")" + " start=" + strPoint + " end=" + endPoint + " rtn=" + rtn), TEST_HWGRAPHIC);
                /*if (rtn)
                  {
                  ExpCoordinator.printer.print("PortButton (index,port) (" + index + ", " + port + ")::containsSpinner (" + x + ", " + y + ") " + rtn);
                  //ExpCoordinator.printer.print(" intersecting rect (" + tmp_x + ", " + tmp_y + ", " + tmp_w + ", " + tmp_h +")");
                  ExpCoordinator.printer.print(" with rect (" + rect.getX() + ", " + rect.getY() + ", " + rect.getWidth() + ", " + rect.getHeight() +")");
                  ExpCoordinator.printer.print(" ppPoint = (" + ppPoint.getX() + ", " + ppPoint.getY());
                  ExpCoordinator.printer.print(" cpPoint = (" + cpPoint.getX() + ", " + cpPoint.getY());
                  ExpCoordinator.printer.print(" lblPoint = (" + lblPoint.getX() + ", " + lblPoint.getY());
                  ExpCoordinator.printer.print(" startPoint = (" + strPoint.getX() + ", " + strPoint.getY());
                  ExpCoordinator.printer.print(" endPoint = (" + endPoint.getX() + ", " + endPoint.getY());
                  }*/
                return rtn;
            }
            public Point2D.Double getConnectorPoint() { return (ppPoint);}
        }
        //end PortButton.PArea inner class
        public PortButton(HardwareGraphic nspg, Hardware.Port p, int ndx)
        {
            super(p);
            setDoubleBuffered(true);
            portID = p.getID();
            plabel = String.valueOf(portID);
            numPorts = nspg.numPorts;
            routerGraphic = nspg;
            index = ndx;
            //parea = routerGraphic.getPortArea(index);
            pconnector = new Ellipse2D.Double();
            //ExpCoordinator.printer.print("PortButton::PortButton " + ndx + " angleStart:" + ang_strt + " ext:" + ext + " labelDegrees:" + labelDegrees);
            setOpaque(false);
            setVisible(true);
            // Pressed-state tracking; presses on the spinner dot are ignored here
            // so spinner drags do not also "press" the underlying slice.
            addMouseListener(new MouseAdapter()
            {
                public void mousePressed(MouseEvent e)
                {
                    if (!routerGraphic.spinner.contains(e.getPoint())) setPressed(true);
                }
                public void mouseReleased(MouseEvent e)
                {
                    setPressed(false);
                }
            });
        }
        public Point2D.Double getConnectorPoint() { return (parea.ppPoint);}
        public double getConnectorTheta() { return (parea.labelTheta);}
        public Point2D getLinkPoint() { return (parea.getLinkPoint());}
        public boolean contains(int x, int y) { return (parea.contains(x,y));}
        // Rotation helpers used by spinClockwise/spinCClockwise (mod numPorts).
        public void incrementIndex(int i)
        {
            setIndex(HardwareGraphic.mod((index + i), numPorts));
        }
        public void decrementIndex(int i)
        {
            setIndex(HardwareGraphic.mod((index - i), numPorts));
        }
        public Hardware.Port getPort() { return ((Hardware.Port)getONLComponent());}
        protected int getIndex() { return index;}
        protected void setIndex(int ndx)
        {
            //if (index != ndx || parea == null)
            // {
            index = ndx;
            setPArea();
        }
        /** Rebinds this button to the PArea of its current index and refreshes label/connector geometry. */
        public void setPArea()
        {
            parea = routerGraphic.getPortArea(index);
            labelX = (int)(parea.lblPoint.getX() - 3);
            labelY = (int)(parea.lblPoint.getY() + 7);
            Point2D cen_pnt = parea.ppPoint;
            Point2D tmp_pnt = new Point2D.Double();
            if (portID == 0) cen_pnt = parea.cpPoint; // port 0 (CP port) attaches at the inner rim
            tmp_pnt.setLocation((cen_pnt.getX() - CONNRADIUS), (cen_pnt.getY() - CONNRADIUS));
            pconnector.setFrameFromCenter(cen_pnt,tmp_pnt);
        }
        public void paintComponent(Graphics g)
        {
            super.paintComponent(g);
            if (parea == null) setPArea(); // lazy init: PArea may not be bound until first paint
            //ExpCoordinator.printer.print("PortButton::paintComponent index " + index);
            Graphics2D g2 = (Graphics2D)g;
            Color oldColor = g2.getColor();
            Font oldFont = g2.getFont();
            g2.setFont(routerGraphic.getFont());
            if (pressed)
            {
                g2.setColor(routerGraphic.getConvertedBG().darker());
                g2.fill(parea);
            }
            g2.setColor(routerGraphic.getForeground());
            g2.draw(parea);
            drawLabel(g2);
            drawPortConnector(g2);
            if (portID == 0) routerGraphic.spinner.paintComponent(g); // draw the spinner on top of port 0's slice
            g2.setColor(oldColor);
            g2.setFont(oldFont);
        }
        public void drawLabel(Graphics2D g2)
        {
            g2.setColor(routerGraphic.getForeground());
            g2.drawString(plabel, labelX, labelY);
        }
        public void drawPortConnector(Graphics2D g2)
        {
            g2.setColor(Color.black);//routerGraphic.getForeground());
            g2.fill(pconnector);
        }
        public Point2D getCenter() { return (parea.lblPoint);}
        //interface HWButton
        public boolean isPortButton() { return true;}
        public boolean isMainButton() { return false;}
        public void setPressed(boolean b)
        {
            if (pressed != b)
            {
                //ExpCoordinator.printer.print("PortButton (index,port) (" + index + ", " + port + ")::setPressed");
                pressed = b;
                this.repaint();
            }
        }
        public boolean isPressed() { return pressed;}
        //end interface HWButton
        public void addDragListener(MouseInputListener dListener){} // ports are not draggable; intentional no-op
        // Component listeners are mirrored onto the enclosing graphic so
        // move/resize notifications of the whole node reach port observers.
        public void addComponentListener(ComponentListener cl)
        {
            super.addComponentListener(cl);
            routerGraphic.addComponentListener(cl);
        }
        public void removeComponentListener(ComponentListener cl)
        {
            super.removeComponentListener(cl);
            routerGraphic.removeComponentListener(cl);
        }
        private PArea getPArea()
        {
            return (parea);
        }
        protected void setPArea(PArea p) { parea = p;}
        public int getScreenX() {
            //ExpCoordinator.printer.print("PortButton::getScreenX " + routerGraphic.getLocation().getX());
            return ((int)routerGraphic.getLocation().getX());}
        public int getScreenY() { return ((int)routerGraphic.getLocation().getY());}
    }
    //inner class PortButton
    /** Convenience constructor: builds the graphic and immediately sizes it to d x d. */
    public HardwareGraphic(Hardware sd, Color bcolor, double d)
    {
        this(sd, bcolor);
        setSize(d);
    }
    /**
     * Builds the full component tree: port slice areas and buttons, the main
     * button, the spinner, and the two labels, wiring mouse listeners so
     * clicks toggle selection and spinner drags rotate the ports.
     * Z-order matters: spinner is added first (topmost), then ports, then
     * the main button, then the labels.
     */
    public HardwareGraphic(Hardware sd, Color bcolor)
    {
        super(sd);
        setDoubleBuffered(true);
        numPorts = sd.getNumPorts();
        ExpCoordinator.print(new String("HardwareGraphic::HardwareGraphic numPorts = " + numPorts), 2);
        mButtonArea = new Area();
        setForeground(Color.black);
        setBackground(bcolor);
        portButtons = new PortButton[numPorts];
        portAreas = new PortButton.PArea[numPorts];
        mainButton = new MainButton(this, onlComponent.getLabel());
        int i = 0;
        // Clicking the main button toggles this graphic's selection state.
        mainButton.addMouseListener(new MouseAdapter()
        {
            public void mousePressed(MouseEvent e)
            {
                setSelected();
            }
        });
        mButtonListener = new TopButtonListener();
        mButtonListener.setEnabled(true);
        //mainButton.addMouseListener(getActivateListener());
        mainButton.addMouseListener(mButtonListener);
        mainButton.addMouseMotionListener(mButtonListener);
        Hardware router = (Hardware)getONLComponent();
        // Create slice geometry first; PortButtons bind to it via setPArea.
        for (i = 0; i < numPorts; ++i)
        {
            portAreas[i] = new PortButton.PArea(this, i);
        }
        PortButton tmp_pb = null;
        for (i = 0; i < numPorts; ++i)
        {
            ExpCoordinator.print(("HardwareGraphic.port " + i), 5);
            tmp_pb = new PortButton(this, router.getPort(i), i);
            //tmp_pb.setLocation(OFFSET, OFFSET);
            //tmp_pb.addMouseListener(pButtonListener);
            //tmp_pb.addMouseMotionListener(pButtonListener);
            //add(tmp_pb);
            portButtons[i] = tmp_pb;
        }
        //printPorts();
        spinner = new SpinnerButton(this);
        add(spinner,0);//add at the second position so it will be a top level component. i.e. the user can get to it
        pButtonListener = new SpinnerListener(this);//TopButtonListener();
        pButtonListener.setEnabled(true);
        //SpinnerListener sl = new SpinnerListener(this);
        //spinner.addMouseListener(sl);
        //spinner.addMouseMotionListener(sl);
        //addMouseListener(sl);
        //addMouseMotionListener(sl);
        for (i = 0; i < numPorts; ++i)
        {
            tmp_pb = portButtons[i];
            //tmp_pb.addMouseListener(sl);
            //tmp_pb.addMouseMotionListener(sl);
            tmp_pb.addMouseListener(pButtonListener);
            tmp_pb.addMouseMotionListener(pButtonListener);
            add(tmp_pb);
        }
        add(mainButton,1);
        borderEllipse = new Ellipse2D.Double();
        setOpaque(false);
        setFont(new Font("Dialog", Font.PLAIN, 11));
        // System label (component name) — clicks on it behave like main-button clicks.
        componentLabel = new ComponentLabel(sd.getLabel(),router);
        componentLabel.addMouseListener(mButtonListener);
        componentLabel.addMouseMotionListener(mButtonListener);
        componentLabel.addMouseListener(new MouseAdapter()
        {
            public void mousePressed(MouseEvent e)
            {
                setSelected();
            }
        });
        add(componentLabel);
        // User-assigned label, bold, drawn above the circle.
        userLabel = new ComponentLabel(sd.getUserLabel(),router);
        userLabel.addMouseListener(mButtonListener);
        userLabel.addMouseMotionListener(mButtonListener);
        userLabel.setFont(new Font("Dialog", Font.BOLD, 10));
        userLabel.addMouseListener(new MouseAdapter()
        {
            public void mousePressed(MouseEvent e)
            {
                setSelected();
            }
        });
        add(userLabel);
        repaint();
    }
    /** Sizes the graphic as a d x d square. */
    public void setSize(double d)
    {
        setSize((int)d, (int)d);
    }
    /**
     * Lays out every sub-component for the requested size: the border
     * ellipse, main button, each port slice, the spinner and both labels.
     * If either label is wider than w (estimated at ~9px per character),
     * the component widens itself to fit the label.
     */
    public void setSize(int w, int h)
    {
        originalW = w; // remembered so setUserLabel can re-run this layout
        originalH = h;
        int h2 = h - (2*D_OFFSET); // vertical space left after the two label strips
        int d = w - D_OFFSET;
        if (h2 < w) d = h2;//h2;//h - (2*D_OFFSET);
        borderEllipse.setFrame(0, D_OFFSET, d, d);
        int d3 = (int)(d/2 *(1 - MBUTTON_FRACTION)); // top-left offset of the centered main button
        userLabel.setLocation(0, 0);
        //userLabel.setHorizontalAlignment(SwingConstants.LEFT);
        int ulbl_w = userLabel.getNumChars() * 9; // rough width estimate: 9px per character
        int lbl_w = componentLabel.getNumChars() * 9;
        if (ulbl_w > lbl_w) lbl_w = ulbl_w;
        if (lbl_w > w)
        {
            super.setSize(lbl_w,h);
            //float g_loc = (lbl_w - w)/8;
            //ExpCoordinator.print("HostGraphic.setSize(" + w + "," + h + ") lbl_w:" + lbl_w + " g_loc:" + g_loc);
            userLabel.setSize(lbl_w,D_OFFSET);
            componentLabel.setSize(lbl_w,D_OFFSET);
        }
        else
        {
            super.setSize(w,h);
            userLabel.setSize(d,D_OFFSET);
            componentLabel.setSize(d,D_OFFSET);
        }
        ExpCoordinator.print(new String("HardwareGraphic::setSize " + w + " " + h +" d=" + d + " d3=" + d3), TEST_HWGRAPHIC);
        //mainButton.setLocation((d3 + OFFSET), (d3 + OFFSET));
        mainButton.setLocation(d3, (d3 + D_OFFSET));
        mainButton.setSize(MBUTTON_FRACTION*d);
        // Rebuild the main-button exclusion area (shifted up by D_OFFSET to
        // match the port areas' local coordinate space).
        mButtonArea.reset();
        Ellipse2D.Double mb_ellipse = new Ellipse2D.Double(mainButton.ellipse.getX(), (mainButton.ellipse.getY()-D_OFFSET), mainButton.ellipse.getWidth(), mainButton.ellipse.getHeight());
        mButtonArea.add(new Area(mb_ellipse));//mainButton.ellipse));
        PortButton elem = null;
        for (int i = 0; i < numPorts; ++i)
        {
            elem = portButtons[i];
            elem.setLocation(0, D_OFFSET);
            Point elem_loc = new Point(0,0);//elem.getX(), (elem.getY() + D_OFFSET));
            //Point elem_loc = elem.getLocation();
            //elem.setLocation(elem_loc);
            portAreas[i].setSize(d, elem_loc);
            elem.setSize(d, d);
            elem.setIndex(i); // note: resets any spin rotation back to identity
        }
        spinner.setCenter();
        spinner.revalidate();
        componentLabel.setLocation(0, (d + D_OFFSET));
        //componentLabel.setHorizontalAlignment(SwingConstants.LEFT);
        revalidate();
        repaint();
    }
    /** Updates the user-assigned label and re-runs layout with the remembered size. */
    public void setUserLabel(String s)
    {
        userLabel.setText(s);
        setSize(originalW, originalH);
    }
/*original setSize without adjustment for label size*/
/*
public void setSize(int w, int h)
{
super.setSize(w,h);
int h2 = h - (2*D_OFFSET);
int d = w - D_OFFSET;
if (h2 < w) d = h2;//h2;//h - (2*D_OFFSET);
borderEllipse.setFrame(0, D_OFFSET, d, d);
int d3 = (int)(d/2 *(1 - MBUTTON_FRACTION));
userLabel.setLocation(0, 0);
userLabel.setSize(d,D_OFFSET);
ExpCoordinator.print(new String("HardwareGraphic::setSize " + w + " " + h +" d=" + d + " d3=" + d3), TEST_HWGRAPHIC);
//mainButton.setLocation((d3 + OFFSET), (d3 + OFFSET));
mainButton.setLocation(d3, (d3 + D_OFFSET));
mainButton.setSize(MBUTTON_FRACTION*d);
mButtonArea.reset();
Ellipse2D.Double mb_ellipse = new Ellipse2D.Double(mainButton.ellipse.getX(), (mainButton.ellipse.getY()-D_OFFSET), mainButton.ellipse.getWidth(), mainButton.ellipse.getHeight());
mButtonArea.add(new Area(mb_ellipse));//mainButton.ellipse));
PortButton elem = null;
for (int i = 0; i < numPorts; ++i)
{
elem = portButtons[i];
elem.setLocation(0, D_OFFSET);
Point elem_loc = new Point(0,0);//elem.getX(), (elem.getY() + D_OFFSET));
//Point elem_loc = elem.getLocation();
//elem.setLocation(elem_loc);
portAreas[i].setSize(d, elem_loc);
elem.setSize(d, d);
elem.setIndex(i);
}
spinner.setCenter();
spinner.revalidate();
componentLabel.setLocation(0, (d + D_OFFSET));
componentLabel.setSize(d,D_OFFSET);
revalidate();
repaint();
}*/
    /** Slice geometry for slice position i (0..numPorts-1). */
    private HardwareGraphic.PortButton.PArea getPortArea(int i)
    {
        return (portAreas[i]);
    }
    /** Port button stored at array position i. */
    public HardwareGraphic.PortButton getPortButton(int i)
    {
        return (portButtons[i]);
    }
    /** Port button for a Hardware.Port, looked up by its id (assumes id indexes the array). */
    protected PortButton getPortButton(Hardware.Port p)
    {
        //ExpCoordinator.print(new String("HardwareGraphic.getPortButton " + p.getID()), 2);
        return (portButtons[p.getID()]);}
    public void setSize(Dimension dim)
    {
        setSize((int)dim.getWidth(), (int)dim.getHeight());
    }
    /** Hit test against the outer circle only (not the bounding rectangle). */
    public boolean contains(int x, int y)
    {
        return (borderEllipse.contains(x, y));
    }
public void spinCClockwise(int c)
{
PortButton elem = null;
//ExpCoordinator.printer.print(" spinCCLock " + c);
for (int i = 0; i < numPorts; ++i)
{
elem = portButtons[i];
//elem.incrementIndex(c);
elem.decrementIndex(c);
}
revalidate();
}
public void spinClockwise(int c)
{
PortButton elem = null;
//ExpCoordinator.printer.print(" spinCLock " + c);
for (int i = 0; i < numPorts; ++i)
{
elem = portButtons[i];
//elem.decrementIndex(c);
elem.incrementIndex(c);
}
revalidate();
}
    /**
     * Paints the filled border circle and delegates the main button's
     * outline; child components (ports, spinner, labels) paint themselves.
     * Saves and restores the caller's color and font.
     */
    public void paintComponent(Graphics g)
    {
        //ExpCoordinator.printer.print("HardwareGraphic::paintComponent", 2);
        Graphics2D g2 = (Graphics2D)g;
        Color oldColor = g2.getColor();
        Font oldFont = g2.getFont();
        g2.setFont(getFont());
        //if (graphicSelected) g2.setColor(getBackground().darker());
        //else g2.setColor(getBackgound());
        g2.setColor(getConvertedBG());
        g2.fill(borderEllipse);
        g2.setColor(getForeground());
        g2.draw(borderEllipse);
        g2.setColor(oldColor);
        g2.setFont(oldFont);
        mainButton.paintComponent(g);
        super.paintComponent(g);
    }
    /** Registers an action to fire on port-slice clicks (routed through the spinner-aware listener). */
    public void addPortListener(ONLGraphic.ButtonListener l)
    {
        pButtonListener.addAction(l);
    }
    public void removePortListener(ONLGraphic.ButtonListener l)
    {
        pButtonListener.removeAction(l);
    }
    /** Registers an action to fire on main-button / label clicks. */
    public void addNodeListener(ONLGraphic.ButtonListener l)
    {
        //ExpCoordinator.print(new String("HardwareGraphic(" + onlComponent.getLabel() + ").addNodeListener " + l.toString()), ExpCompare.TEST_CMP);
        mButtonListener.addAction(l);
    }
    public void removeNodeListener(ONLGraphic.ButtonListener l) { mButtonListener.removeAction(l);}
    /** Area covered by the main button, subtracted from each port slice's geometry. */
    protected Area getMainButtonArea() { return (mButtonArea);}
    /** True while the user is dragging the spinner dot. */
    public boolean isSpinning() { return spinner.isSelected();}
    /** Current slice position of port 0 (persisted via writeXML). */
    public int getSpinnerPosition() { return (spinner.getIndex());}
    public void setSpinnerPosition(int i) { spinner.setIndex(i);}
public static int mod(int x, int m)
{
if (x < m)
{
if (x >= 0) return x;
else
{
return (mod((x+m), m));
}
}
else
{
if (x == 0 && m == 0) return 0;
else return (mod((x-m),m));
}
}
    /** Debug helper: dumps each port button's (slice index, port) pairing. */
    public void printPorts()
    {
        for (int i = 0; i < numPorts; ++i)
        {
            ExpCoordinator.printer.print("PortButton (index,port) (" + portButtons[i].getIndex() + ", " + portButtons[i].getPort() + ")");
        }
    }
    /**
     * Revalidates this component and all children.  The null checks guard
     * against calls that arrive during construction, before the child
     * components have been created.
     */
    public void revalidate()
    {
        super.revalidate();
        if ((mainButton != null) &&
            (portButtons != null) &&
            (spinner != null))
        {
            for (int i = 0; i < numPorts; ++i)
            {
                portButtons[i].revalidate();
            }
            mainButton.revalidate();
            spinner.revalidate();
        }
        if (componentLabel != null) componentLabel.revalidate();
    }
public void setSelected()
{
if (graphicSelected) graphicSelected = false;
else graphicSelected = true;
//if (onlComponent != null && onlComponent instanceof VirtualTopology.VNode)
//{
//ONLComponent real_component = ((VirtualTopology.VNode)onlComponent).getPhysicalComponent();
//if (real_component != null) real_component.setSelected(graphicSelected);
//}
}
    /** Also mirrors the listener onto the spinner so it tracks node moves/resizes. */
    public void addComponentListener(ComponentListener cl)
    {
        super.addComponentListener(cl);
        spinner.addComponentListener(cl);
    }
    /**
     * Installs a drag listener on the draggable parts of the node: the main
     * button and the component label (ports and spinner are not draggable).
     */
    public void addDragListener(ONLGraphic.ButtonListener dListener)
    {
        mainButton.addMouseListener(dListener);
        mainButton.addMouseMotionListener(dListener);
        componentLabel.addMouseListener(dListener);
        componentLabel.addMouseMotionListener(dListener);
        //addNodeListener(dListener);
    }
    /** Reverses addDragListener. */
    public void removeDragListener(ONLGraphic.ButtonListener dListener)
    {
        mainButton.removeMouseListener(dListener);
        mainButton.removeMouseMotionListener(dListener);
        componentLabel.removeMouseListener(dListener);
        componentLabel.removeMouseMotionListener(dListener);
        //removeNodeListener(dListener);
    }
    /**
     * Enables or disables mouse interaction based on the component's state:
     * clickable only in the listed "live" states.  Also refreshes the
     * spinner's color (which depends on the graphic's color conversion).
     */
    protected void setStateChange(String st) //override from ONLGraphic to disable clicks and change colors
    {
        boolean b = (st.equals(ONLComponent.ACTIVE) ||
                     st.equals(ONLComponent.WAITING) ||
                     st.equals(ONLComponent.IN1) ||
                     st.equals(ONLComponent.INBOTH) ||
                     st.equals(ONLComponent.IN2));
        //ExpCoordinator.printer.print("HardwareGraphic::setStateChange " + st + " " + b);
        mButtonListener.setEnabled(b);
        pButtonListener.setEnabled(b);
        spinner.resetColor();
    }
    //public Color getBackground() { return (getConvertedBG());}
    /** Serializes base state plus the spinner position so the rotation survives save/load. */
    public void writeXML(XMLStreamWriter xmlWrtr) throws XMLStreamException
    {
        super.writeXML(xmlWrtr);
        xmlWrtr.writeStartElement(ExperimentXML.SPINNER);
        xmlWrtr.writeCharacters(String.valueOf(getSpinnerPosition()));
        xmlWrtr.writeEndElement();
    }
    /** Reacts to component property changes; keeps the user label text in sync. */
    public void propertyChange(PropertyChangeEvent e)
    {
        if (e.getSource() == onlComponent)
        {
            if (e.getPropertyName().equals(ExperimentXML.USER_LABEL))
            {
                userLabel.setText((String)e.getNewValue());
                //setSize(getWidth(), getHeight());
            }
            super.propertyChange(e);
            revalidate();
        }
    }
}
|
apache-2.0
|
syphr42/libmythtv-java
|
db/src/main/java/org/syphr/mythtv/db/schema/impl/_0_24/RecordedProgramId1264.java
|
3214
|
/*
* Copyright 2011-2012 Gregory P. Moyer
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.syphr.mythtv.db.schema.impl._0_24;
import java.util.Date;
import javax.persistence.Column;
import javax.persistence.Embeddable;
import org.syphr.mythtv.db.schema.RecordedProgramId;
/**
 * Composite primary key for the MythTV 0.24 {@code recordedprogram} table:
 * (chanid, starttime, manualid).
 *
 * <p>Fix over the original: {@link Date} is mutable, and the original stored
 * and returned the caller's/its own Date instance directly, so external code
 * could silently mutate this id after construction (breaking equals/hashCode
 * stability in hash-based collections). The accessors now make defensive
 * copies. JPA maps the fields directly (annotations are on the fields), so
 * persistence behavior is unchanged.
 */
@Embeddable
public class RecordedProgramId1264 implements RecordedProgramId
{
    /**
     * Serialization ID
     */
    private static final long serialVersionUID = 1L;

    @Column(name = "chanid", nullable = false)
    private int chanid;

    @Column(name = "starttime", nullable = false, length = 19)
    private Date starttime;

    @Column(name = "manualid", nullable = false)
    private int manualid;

    @Override
    public int getChanid()
    {
        return this.chanid;
    }

    @Override
    public void setChanid(int chanid)
    {
        this.chanid = chanid;
    }

    /**
     * @return a defensive copy of the start time (never exposes the internal
     *         mutable Date), or {@code null} if unset
     */
    @Override
    public Date getStarttime()
    {
        return this.starttime == null ? null : new Date(this.starttime.getTime());
    }

    /**
     * Stores a defensive copy of the given start time.
     *
     * @param starttime the recording start time; may be {@code null}
     */
    @Override
    public void setStarttime(Date starttime)
    {
        this.starttime = starttime == null ? null : new Date(starttime.getTime());
    }

    @Override
    public int getManualid()
    {
        return this.manualid;
    }

    @Override
    public void setManualid(int manualid)
    {
        this.manualid = manualid;
    }

    /**
     * Value equality over all three key components; null-safe on starttime.
     * Semantics match the original implementation.
     */
    @Override
    public boolean equals(Object other)
    {
        if ((this == other))
        {
            return true;
        }
        if ((other == null))
        {
            return false;
        }
        if (!(other instanceof RecordedProgramId1264))
        {
            return false;
        }
        RecordedProgramId castOther = (RecordedProgramId)other;
        return (this.getChanid() == castOther.getChanid())
                && ((this.getStarttime() == castOther.getStarttime()) || (this.getStarttime() != null
                        && castOther.getStarttime() != null && this.getStarttime().equals(castOther.getStarttime())))
                && (this.getManualid() == castOther.getManualid());
    }

    /** 37-based hash over the same components as equals (formula unchanged). */
    @Override
    public int hashCode()
    {
        int result = 17;
        result = 37 * result + this.getChanid();
        result = 37 * result + (getStarttime() == null ? 0 : this.getStarttime().hashCode());
        result = 37 * result + this.getManualid();
        return result;
    }

    @Override
    public String toString()
    {
        StringBuilder builder = new StringBuilder();
        builder.append("RecordedProgramId1264 [chanid=");
        builder.append(chanid);
        builder.append(", starttime=");
        builder.append(starttime);
        builder.append(", manualid=");
        builder.append(manualid);
        builder.append("]");
        return builder.toString();
    }
}
|
apache-2.0
|
OpenSourceConsulting/athena-meerkat
|
console/app/controller/footerController.js
|
1951
|
/*
* File: app/controller/footerController.js
*/
Ext.define('webapp.controller.footerController', {
    extend: 'Ext.app.Controller',

    refs: {
        footerLabel2: '#footerLabel2'
    },

    onLaunch: function() {
        // BUG FIX: `mapwin` was previously declared INSIDE the click handler, so it
        // was always undefined on entry and a brand-new window was created on every
        // click, defeating both the `if (mapwin)` reuse branch and `closeAction: 'hide'`.
        // Declaring it here (closed over by the handler) makes the cache actually work:
        // the window is created on the first click and re-shown on subsequent clicks.
        var mapwin;

        /**
         * address Label click event를 catch 하도록 설정
         */
        this.getFooterLabel2().getEl().on('click', function() {
            if (mapwin) {
                mapwin.show();
            } else {
                mapwin = Ext.create('Ext.window.Window', {
                    autoShow: true,
                    layout: 'fit',
                    title: 'OSCI Location',
                    closeAction: 'hide',
                    width: 600,
                    height: 500,
                    border: true,
                    x: 40,
                    y: 60,
                    items: {
                        xtype: 'gmappanel',
                        center: {
                            geoCodeAddr: '서울특별시 서초구 서초2동 1337'
                        },
                        markers: [{
                            lat: 37.492359,
                            lng: 127.028590,
                            title: 'Gangnam Mirae Tower 805, Saimdang-ro 174(Seocho-dong), Seocho-gu, Seoul, Korea',
                            listeners: {
                                click: function(e){
                                    Ext.Msg.alert('Address', 'Gangnam Mirae Tower 805, Saimdang-ro 174(Seocho-dong), Seocho-gu, Seoul, Korea');
                                }
                            }
                        }]
                    }
                });
            }
        });
        // Add below script to index.html manually
        // <script type="text/javascript" src="http://maps.google.com/maps/api/js?sensor=false"></script>
    }
});
|
apache-2.0
|
blackcathacker/kc.preclean
|
coeus-code/src/test/java/org/kuali/kra/irb/actions/notification/ProtocolNotificationTemplateRuleTest.java
|
4135
|
/*
* Copyright 2005-2014 The Kuali Foundation
*
* Licensed under the Educational Community License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.opensource.org/licenses/ecl1.php
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kuali.kra.irb.actions.notification;
import org.apache.struts.upload.FormFile;
import org.jmock.Expectations;
import org.jmock.Mockery;
import org.jmock.integration.junit4.JUnit4Mockery;
import org.jmock.lib.concurrent.Synchroniser;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.kuali.kra.infrastructure.Constants;
import org.kuali.rice.krad.util.GlobalVariables;
import org.kuali.rice.krad.util.MessageMap;
import java.io.IOException;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
/**
 * Unit tests for {@link ProtocolNotificationTemplateRule}, exercising the
 * replace-template business rule with a valid and an invalid uploaded file.
 * Uses JMock to stub the Struts {@link FormFile} upload.
 */
public class ProtocolNotificationTemplateRuleTest {
    Mockery context = new JUnit4Mockery() {{ setThreadingPolicy(new Synchroniser()); }};
    FormFile mockedFile = null;
    @Before
    public void setUp() throws Exception {
        mockedFile = this.context.mock(FormFile.class);
        // Clear any error messages that may have been created in prior tests.
        MessageMap messageMap = GlobalVariables.getMessageMap();
        messageMap.clearErrorMessages();
    }
    @After
    public void tearDown() throws Exception {
        mockedFile = null;
    }
    /**
     *
     * This test simulates replacing a correspondence template with a file whose
     * content type is acceptable; the rule should pass and report no errors.
     *
     * @throws Exception
     */
    @SuppressWarnings("unchecked")
    @Test
    public void testReplaceNotificationTemplateOK() throws Exception {
        simulateValidMockedFileBehavior(Constants.CORRESPONDENCE_TEMPLATE_CONTENT_TYPE_1);
        ProtocolNotificationTemplate template = new ProtocolNotificationTemplate();
        template.setActionTypeCode("116");
        template.setFileName("notifyirb.xsl");
        template.setNotificationTemplate(new byte[] { (byte) 1, (byte) 2, (byte) 3 });
        template.setTemplateFile(mockedFile);
        int index = 2;
        boolean rulePassed = new ProtocolNotificationTemplateRule()
            .processReplaceProtocolNotificationTemplateRules(template, index);
        assertTrue(rulePassed);
        /*
         * There should be no errors.
         */
        MessageMap messageMap = GlobalVariables.getMessageMap();
        assertEquals(0, messageMap.getErrorCount());
    }
    /**
     * Replacing a template with an unacceptable content type ("pdf") must fail
     * the rule and record exactly one error in the global message map.
     */
    @SuppressWarnings("unchecked")
    @Test
    public void testReplaceNotificationTemplateNotOK() throws Exception {
        simulateValidMockedFileBehavior("pdf");
        ProtocolNotificationTemplate template = new ProtocolNotificationTemplate();
        template.setActionTypeCode("116");
        template.setFileName("test.pdf");
        template.setNotificationTemplate(new byte[] {});
        template.setTemplateFile(mockedFile);
        int index = 2;
        boolean rulePassed = new ProtocolNotificationTemplateRule()
            .processReplaceProtocolNotificationTemplateRules(template, index);
        Assert.assertFalse(rulePassed);
        /*
         * Exactly one error is expected: the invalid content type.
         * (The original comment here said "no errors", which contradicted the assertion.)
         */
        MessageMap messageMap = GlobalVariables.getMessageMap();
        assertEquals(1, messageMap.getErrorCount());
    }
    // Stubs the mocked upload to report the given content type and a small payload.
    private void simulateValidMockedFileBehavior(final String contentType) throws IOException {
        this.context.checking(new Expectations() {
            {
                allowing(mockedFile).getContentType();
                will(returnValue(contentType));
                allowing(mockedFile).getFileData();
                will(returnValue(new byte[] { (byte) 1, (byte) 2, (byte) 3 }));
            }
        });
    }
}
|
apache-2.0
|
blackcathacker/kc.preclean
|
coeus-code/src/main/java/org/kuali/coeus/common/budget/framework/core/BudgetAction.java
|
38863
|
/*
* Copyright 2005-2014 The Kuali Foundation
*
* Licensed under the Educational Community License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.osedu.org/licenses/ECL-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kuali.coeus.common.budget.framework.core;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.struts.action.ActionForm;
import org.apache.struts.action.ActionForward;
import org.apache.struts.action.ActionMapping;
import org.kuali.coeus.common.budget.framework.personnel.*;
import org.kuali.coeus.common.framework.rolodex.PersonRolodex;
import org.kuali.coeus.common.framework.version.VersionStatus;
import org.kuali.coeus.common.framework.version.history.VersionHistory;
import org.kuali.coeus.common.framework.version.history.VersionHistoryService;
import org.kuali.coeus.propdev.impl.budget.ProposalBudgetStatusService;
import org.kuali.coeus.propdev.impl.core.ProposalDevelopmentDocument;
import org.kuali.coeus.sys.framework.controller.StrutsConfirmation;
import org.kuali.coeus.sys.framework.service.KcServiceLocator;
import org.kuali.coeus.sys.framework.workflow.KcDocumentRejectionService;
import org.kuali.kra.award.document.AwardDocument;
import org.kuali.kra.award.home.Award;
import org.kuali.kra.award.home.ContactRole;
import org.kuali.coeus.common.budget.framework.calculator.BudgetCalculationService;
import org.kuali.coeus.common.budget.framework.distribution.BudgetDistributionService;
import org.kuali.coeus.common.budget.framework.core.category.BudgetCategoryTypeValuesFinder;
import org.kuali.coeus.common.budget.framework.nonpersonnel.BudgetLineItem;
import org.kuali.coeus.common.budget.framework.nonpersonnel.BudgetLineItemCalculatedAmount;
import org.kuali.coeus.common.budget.framework.period.BudgetPeriod;
import org.kuali.coeus.common.budget.framework.rate.BudgetRatesService;
import org.kuali.coeus.common.budget.framework.lock.BudgetLockService;
import org.kuali.coeus.common.budget.framework.summary.BudgetSummaryService;
import org.kuali.coeus.common.budget.framework.version.BudgetDocumentVersion;
import org.kuali.coeus.common.budget.framework.version.BudgetVersionOverview;
import org.kuali.kra.infrastructure.Constants;
import org.kuali.kra.infrastructure.KeyConstants;
import org.kuali.coeus.common.framework.print.AttachmentDataSource;
import org.kuali.coeus.propdev.impl.core.DevelopmentProposal;
import org.kuali.coeus.propdev.impl.budget.modular.BudgetModularService;
import org.kuali.coeus.common.budget.framework.print.BudgetPrintService;
import org.kuali.coeus.propdev.impl.budget.subaward.PropDevBudgetSubAwardService;
import org.kuali.coeus.propdev.impl.hierarchy.ProposalHierarcyActionHelper;
import org.kuali.rice.core.api.util.KeyValue;
import org.kuali.rice.coreservice.framework.parameter.ParameterService;
import org.kuali.rice.kew.api.KewApiConstants;
import org.kuali.rice.kew.api.exception.WorkflowException;
import org.kuali.rice.kns.authorization.AuthorizationConstants;
import org.kuali.rice.kns.datadictionary.HeaderNavigation;
import org.kuali.rice.kns.datadictionary.KNSDocumentEntry;
import org.kuali.rice.kns.question.ConfirmationQuestion;
import org.kuali.rice.kns.service.DataDictionaryService;
import org.kuali.rice.kns.util.KNSGlobalVariables;
import org.kuali.rice.kns.util.WebUtils;
import org.kuali.rice.kns.web.struts.form.KualiDocumentFormBase;
import org.kuali.rice.kns.web.struts.form.KualiForm;
import org.kuali.rice.krad.rules.rule.event.DocumentAuditEvent;
import org.kuali.rice.krad.service.DocumentService;
import org.kuali.rice.krad.service.KualiRuleService;
import org.kuali.rice.krad.service.PessimisticLockService;
import org.kuali.rice.krad.util.GlobalVariables;
import org.kuali.rice.krad.util.KRADConstants;
import org.kuali.rice.krad.util.ObjectUtils;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
public class BudgetAction extends BudgetActionBase {
    private static final Log LOG = LogFactory.getLog(BudgetAction.class);
    // Question ID used by the document-rejection confirmation dialog.
    private static final String DOCUMENT_REJECT_QUESTION="DocReject";
    // Method-to-call constants for the budget-rate synchronization question.
    protected static final String CONFIRM_SYNCH_BUDGET_RATE = "confirmSynchBudgetRate";
    protected static final String NO_SYNCH_BUDGET_RATE = "noSynchBudgetRate";
    // Method-to-call constants for the award-rate synchronization question.
    protected static final String CONFIRM_SYNCH_AWARD_RATES = "confirmSynchAwardRates";
    protected static final String NO_SYNCH_AWARD_RATES = "noSynchAwardRates";
    // Lazily created by getHierarchyHelper(); do not access directly.
    private ProposalHierarcyActionHelper hierarchyHelper;
    /**
     * Entry point when the budget document is opened from workflow.
     * Initializes the document (full init on INITIATE, lighter form init otherwise),
     * short-circuits to the "deleted" forward for deleted budgets, resolves a
     * placeholder activity type, records previous OH/UR rate-class codes on the form,
     * optionally re-syncs award budget rates (when syncBudgetRate=Y), and lands on
     * the Personnel page if that was the active panel.
     */
    @Override
    public ActionForward docHandler(ActionMapping mapping, ActionForm form, HttpServletRequest request, HttpServletResponse response) throws Exception {
        ActionForward forward = super.docHandler(mapping, form, request, response);
        BudgetForm budgetForm = (BudgetForm) form;
        if (KewApiConstants.INITIATE_COMMAND.equals(budgetForm.getCommand())) {
            budgetForm.getBudgetDocument().initialize();
        }else{
            budgetForm.initialize();
        }
        BudgetDocument budgetDocument = budgetForm.getBudgetDocument();
        if (budgetDocument.isBudgetDeleted()) {
            // Deleted budgets are not shown; route to the dedicated page instead.
            return mapping.findForward("deleted");
        }
        Budget budget = budgetDocument.getBudget();
        copyLineItemToPersonnelDetails(budgetDocument);
        // "x" is a placeholder activity type; resolve the real one from the parent.
        if (budget.getActivityTypeCode().equals("x")) {
            budget.setActivityTypeCode(KcServiceLocator.getService(BudgetService.class).getActivityTypeForBudget(budgetDocument));
        }
        // Remember current OH/UR rate-class codes so later edits can detect a change.
        if(budget.getOhRateClassCode()!=null && ((BudgetForm)KNSGlobalVariables.getKualiForm())!=null){
            ((BudgetForm)KNSGlobalVariables.getKualiForm()).setOhRateClassCodePrevValue(budget.getOhRateClassCode());
        }
        if(budget.getUrRateClassCode()!=null && ((BudgetForm)KNSGlobalVariables.getKualiForm())!=null){
            ((BudgetForm)KNSGlobalVariables.getKualiForm()).setUrRateClassCodePrevValue(budget.getUrRateClassCode());
        }
        // syncBudgetRate=Y is appended to the URL by synchAwardBudgetRate(); honor it here.
        if (isAwardBudget(budgetDocument) && StringUtils.isNotBlank(budgetForm.getSyncBudgetRate()) && budgetForm.getSyncBudgetRate().equals("Y")) {
            getBudgetRatesService().syncParentDocumentRates(budget);
            getBudgetCommonService(budget.getBudgetParent()).recalculateBudget(budget);
        }
        reconcileBudgetStatus(budgetForm);
        if ("Personnel".equals(budgetForm.getActivePanelName())) {
            forward = personnel(mapping, budgetForm, request, response);
        }
        return forward;
    }
protected StrutsConfirmation syncAwardBudgetRateConfirmationQuestion(ActionMapping mapping, ActionForm form,
HttpServletRequest request, HttpServletResponse response, String message) throws Exception {
return buildParameterizedConfirmationQuestion(mapping, form, request, response, CONFIRM_SYNCH_AWARD_RATES,
message, "");
}
public ActionForward confirmSynchAwardRates(ActionMapping mapping, ActionForm form, HttpServletRequest request, HttpServletResponse response) throws Exception {
return synchAwardBudgetRate(mapping, form, request, response, true);
}
public ActionForward noSynchAwardRates(ActionMapping mapping, ActionForm form, HttpServletRequest request, HttpServletResponse response) throws Exception {
return synchAwardBudgetRate(mapping, form, request, response, false);
}
private ActionForward synchAwardBudgetRate(ActionMapping mapping, ActionForm form, HttpServletRequest request, HttpServletResponse response, boolean confirm) throws Exception {
BudgetForm budgetForm = (BudgetForm) form;
BudgetDocument budgetDoc = budgetForm.getBudgetDocument();
String routeHeaderId = budgetDoc.getDocumentHeader().getWorkflowDocument().getDocumentId();
String forward = buildForwardUrl(routeHeaderId);
if (confirm) {
forward = forward.replace("awardBudgetParameters.do?", "awardBudgetParameters.do?syncBudgetRate=Y&");
}
return new ActionForward(forward, true);
}
/**
* This method returns true if the BudgetDocument is an AwardBudgetDocument instance
* @param budgetDocument
* @return
*/
protected boolean isAwardBudget(BudgetDocument budgetDocument) {
return !Boolean.parseBoolean(budgetDocument.getBudget().getBudgetParent().getDocument().getProposalBudgetFlag());
}
private BudgetRatesService<BudgetParent> getBudgetRatesService() {
return KcServiceLocator.getService(BudgetRatesService.class);
}
public List<HeaderNavigation> getBudgetHeaderNavigatorList(){
DataDictionaryService dataDictionaryService = (DataDictionaryService) KcServiceLocator.getService(Constants.DATA_DICTIONARY_SERVICE_NAME);
KNSDocumentEntry docEntry = (KNSDocumentEntry) dataDictionaryService.getDataDictionary().getDocumentEntry(BudgetDocument.class.getName());
return docEntry.getHeaderNavigationList();
}
/**
* Need to suppress buttons here when 'Totals' tab is clicked.
*/
@Override
public ActionForward execute(ActionMapping mapping, ActionForm form, HttpServletRequest request, HttpServletResponse response) throws Exception {
final BudgetForm budgetForm = (BudgetForm) form;
if(budgetForm.getMethodToCall().equals("close")){
setupDocumentExit();
}
ActionForward actionForward = null;
actionForward = super.execute(mapping, budgetForm, request, response);
if (actionForward != null) {
if ("summaryTotals".equals(actionForward.getName())) {
budgetForm.suppressButtonsForTotalPage();
}
}
// check if audit rule check is done from PD
if (budgetForm.isAuditActivated() && !"route".equals(((KualiForm)form).getMethodToCall())) {
KcServiceLocator.getService(KualiRuleService.class).applyRules(new DocumentAuditEvent(budgetForm.getBudgetDocument()));
}
return actionForward;
}
@Override
public ActionForward save(ActionMapping mapping, ActionForm form, HttpServletRequest request, HttpServletResponse response) throws Exception {
BudgetForm budgetForm = (BudgetForm) form;
final BudgetDocument budgetDoc = budgetForm.getBudgetDocument();
Budget budget = budgetDoc.getBudget();
getBudgetCommonService(budget.getBudgetParent()).calculateBudgetOnSave(budget);
ActionForward forward = super.save(mapping, form, request, response);
BudgetForm savedBudgetForm = (BudgetForm) form;
BudgetDocument savedBudgetDoc = savedBudgetForm.getBudgetDocument();
final BudgetTDCValidator tdcValidator = new BudgetTDCValidator(request);
if (budgetForm.toBudgetVersionsPage()
|| "BudgetVersionsAction".equals(budgetForm.getActionName())) {
GlobalVariables.getMessageMap().addToErrorPath(KRADConstants.DOCUMENT_PROPERTY_NAME + ".proposal");
tdcValidator.validateGeneratingErrorsAndWarnings(budgetDoc.getBudget().getBudgetParent().getDocument());
} else {
tdcValidator.validateGeneratingWarnings(budgetDoc.getBudget().getBudgetParent().getDocument());
}
if (budgetForm.getMethodToCall().equals("save") && budgetForm.isAuditActivated()) {
forward = mapping.findForward("budgetActions");
}
return forward;
}
protected BudgetSummaryService getBudgetSummaryService() {
return KcServiceLocator.getService(BudgetSummaryService.class);
}
@Override
public ActionForward reload(ActionMapping mapping, ActionForm form,
HttpServletRequest request, HttpServletResponse response)
throws Exception {
final ActionForward forward = super.reload(mapping, form, request, response);
updateBudgetAttributes(form, request);
return forward;
}
@Override
public ActionForward reloadWithoutWarning(ActionMapping mapping, ActionForm form,
HttpServletRequest request, HttpServletResponse response)
throws Exception {
final ActionForward forward = super.reloadWithoutWarning(mapping, form, request, response);
updateBudgetAttributes(form, request);
return forward;
}
    /**
     * Refreshes form-level budget state after a (re)load: the final budget version,
     * per-version budget statuses, TDC warnings, and print forms.
     */
    @SuppressWarnings("rawtypes")
    protected void updateBudgetAttributes(ActionForm form, HttpServletRequest request) {
        final BudgetForm budgetForm = (BudgetForm) form;
        BudgetDocument budgetDocument = budgetForm.getBudgetDocument();
        BudgetParentDocument parentDocument = budgetDocument.getBudget().getBudgetParent().getDocument();
        budgetForm.setFinalBudgetVersion(getFinalBudgetVersion(parentDocument.getBudgetDocumentVersions()));
        setBudgetStatuses(budgetDocument.getBudget().getBudgetParent());
        // TDC validation after reload is advisory only: warnings, not errors.
        final BudgetTDCValidator tdcValidator = new BudgetTDCValidator(request);
        tdcValidator.validateGeneratingWarnings(budgetDocument.getBudget().getBudgetParent().getDocument());
        populateBudgetPrintForms(budgetDocument.getBudget());
    }
public ActionForward versions(ActionMapping mapping, ActionForm form, HttpServletRequest request, HttpServletResponse response) {
BudgetForm budgetForm = (BudgetForm) form;
BudgetDocument budgetDocument = budgetForm.getBudgetDocument();
BudgetParentDocument parentDocument = budgetDocument.getBudget().getBudgetParent().getDocument();
budgetForm.setFinalBudgetVersion(getFinalBudgetVersion(parentDocument.getBudgetDocumentVersions()));
setBudgetStatuses(budgetDocument.getBudget().getBudgetParent());
return mapping.findForward(Constants.BUDGET_VERSIONS_PAGE);
}
public ActionForward parameters(ActionMapping mapping, ActionForm form, HttpServletRequest request, HttpServletResponse response) {
reconcileBudgetStatus((BudgetForm) form);
BudgetDocument budgetDocument = ((BudgetForm)form).getBudgetDocument();
getBudgetSummaryService().setupOldStartEndDate(budgetDocument.getBudget(),false);
return mapping.findForward(Constants.BUDGET_PERIOD_PAGE);
}
    /**
     * Prepares and shows the Personnel page: populates hierarchy summaries and
     * personnel category codes, syncs budget persons from the proposal when the list
     * is empty, reconciles roles, then eagerly refreshes lazily-loaded references
     * (budgetPerson, calculated-amount rate classes) so the JSP can render them.
     */
    public ActionForward personnel(ActionMapping mapping, ActionForm form, HttpServletRequest request, HttpServletResponse response) {
        BudgetForm budgetForm = (BudgetForm) form;
        populatePersonnelHierarchySummary(budgetForm);
        populatePersonnelCategoryTypeCodes(budgetForm);
        // First visit with no persons yet: seed them from the proposal.
        if (budgetForm.getBudgetDocument().getBudget().getBudgetPersons().isEmpty()) {
            KcServiceLocator.getService(BudgetPersonService.class).synchBudgetPersonsToProposal(budgetForm.getBudgetDocument().getBudget());
        }
        reconcilePersonnelRoles(budgetForm.getBudgetDocument());
        BudgetDocument budgetDocument = budgetForm.getBudgetDocument();
        Budget budget = budgetDocument.getBudget();
        // Walk every period/line item and force-load references the page needs.
        for(BudgetPeriod period : budget.getBudgetPeriods()) {
            for(BudgetLineItem lineItem : period.getBudgetLineItems()) {
                for(BudgetPersonnelDetails budgetPersonnelDetails : lineItem.getBudgetPersonnelDetailsList()) {
                    budgetPersonnelDetails.refreshReferenceObject("budgetPerson");
                    ObjectUtils.materializeObjects(budgetPersonnelDetails.getBudgetPersonnelCalculatedAmounts());
                    for(BudgetPersonnelCalculatedAmount budgetPersonnelCalculatedAmount:budgetPersonnelDetails.getBudgetPersonnelCalculatedAmounts()){
                        if(budgetPersonnelCalculatedAmount.getRateClass() == null) {
                            budgetPersonnelCalculatedAmount.refreshReferenceObject("rateClass");
                        }
                    }
                }
                for(BudgetLineItemCalculatedAmount lineItemCalculatedAmount:lineItem.getBudgetLineItemCalculatedAmounts()){
                    if(lineItemCalculatedAmount.getRateClass() == null) {
                        lineItemCalculatedAmount.refreshReferenceObject("rateClass");
                    }
                }
            }
        }
        ParameterService parameterService = KcServiceLocator.getService(ParameterService.class);
        String enableBudgetSalaryByPeriod = parameterService.getParameterValueAsString(ProposalDevelopmentDocument.class, Constants.ENABLE_BUDGET_CALCULATED_SALARY);
        budgetForm.setEnableBudgetSalaryByPeriod(enableBudgetSalaryByPeriod);
        return mapping.findForward(Constants.BUDGET_PERSONNEL_PAGE);
    }
    /**
     * For proposal budgets, loads hierarchy personnel summaries onto the form and
     * reconciles personnel roles.
     */
    protected void populatePersonnelHierarchySummary(BudgetForm budgetForm) {
        if (budgetForm.getBudgetDocument().getBudget().isProposalBudget()) {
            DevelopmentProposal parent = (DevelopmentProposal) budgetForm.getBudgetDocument().getBudget().getBudgetParent();
            String proposalNumber = parent.getProposalNumber();
            budgetForm.setHierarchyPersonnelSummaries(getHierarchyHelper().getHierarchyPersonnelSummaries(proposalNumber));
            // NOTE(review): the loop variable `budget` is never used — every iteration
            // reconciles the SAME current document rather than each hierarchy budget.
            // Looks like a latent bug; the intended per-hierarchy-budget call cannot be
            // determined from this file alone, so behavior is preserved. TODO confirm.
            for (HierarchyPersonnelSummary hierarchyPersonnelSummary : budgetForm.getHierarchyPersonnelSummaries()) {
                for (Budget budget : hierarchyPersonnelSummary.getHierarchyBudgets()) {
                    reconcilePersonnelRoles(budgetForm.getBudgetDocument());
                }
            }
        }
    }
private String getPersonnelBudgetCategoryTypeCode() {
return this.getParameterService().getParameterValueAsString(BudgetDocument.class, Constants.BUDGET_CATEGORY_TYPE_PERSONNEL);
}
protected void populatePersonnelCategoryTypeCodes(BudgetForm budgetForm) {
BudgetDocument budgetDocument = budgetForm.getBudgetDocument();
Budget budget = budgetDocument.getBudget();
BudgetCategoryTypeValuesFinder budgetCategoryTypeValuesFinder = new BudgetCategoryTypeValuesFinder();
List<KeyValue> budgetCategoryTypes = new ArrayList<KeyValue>();
String personnelBudgetCategoryTypeCode = getPersonnelBudgetCategoryTypeCode();
for(KeyValue budgetCategoryType: budgetCategoryTypeValuesFinder.getKeyValues()){
String budgetCategoryTypeCode = (String) budgetCategoryType.getKey();
if(StringUtils.isNotBlank(budgetCategoryTypeCode) && StringUtils.equalsIgnoreCase(budgetCategoryTypeCode, personnelBudgetCategoryTypeCode)) {
budgetCategoryTypes.add(budgetCategoryType);
BudgetLineItem newBudgetLineItem = budget.getNewBudgetLineItem();
if (budgetForm.getNewBudgetLineItems() == null) {
budgetForm.setNewBudgetLineItems(new ArrayList<BudgetLineItem>());
}
budgetForm.getNewBudgetLineItems().add(newBudgetLineItem);
}
}
budget.setBudgetCategoryTypeCodes(budgetCategoryTypes);
}
protected void populateNonPersonnelCategoryTypeCodes(BudgetForm budgetForm) {
BudgetDocument budgetDocument = budgetForm.getBudgetDocument();
Budget budget = budgetDocument.getBudget();
BudgetCategoryTypeValuesFinder budgetCategoryTypeValuesFinder = new BudgetCategoryTypeValuesFinder();
List<KeyValue> budgetCategoryTypes = new ArrayList<KeyValue>();
String personnelBudgetCategoryTypeCode = getPersonnelBudgetCategoryTypeCode();
for(KeyValue budgetCategoryType: budgetCategoryTypeValuesFinder.getKeyValues()){
String budgetCategoryTypeCode = (String) budgetCategoryType.getKey();
if(StringUtils.isNotBlank(budgetCategoryTypeCode) && !StringUtils.equalsIgnoreCase(budgetCategoryTypeCode, personnelBudgetCategoryTypeCode)) {
budgetCategoryTypes.add(budgetCategoryType);
BudgetLineItem newBudgetLineItem = budget.getNewBudgetLineItem();
budgetForm.getNewBudgetLineItems().add(newBudgetLineItem);
}
}
budget.setBudgetCategoryTypeCodes(budgetCategoryTypes);
}
public ActionForward expenses(ActionMapping mapping, ActionForm form, HttpServletRequest request, HttpServletResponse response) {
BudgetForm budgetForm = (BudgetForm) form;
populateNonPersonnelCategoryTypeCodes(budgetForm);
BudgetDocument budgetDocument = budgetForm.getBudgetDocument();
Budget budget = budgetDocument.getBudget();
budget.refreshReferenceObject("budgetPeriods");
return mapping.findForward(Constants.BUDGET_EXPENSES_PAGE);
}
public ActionForward rates(ActionMapping mapping, ActionForm form, HttpServletRequest request, HttpServletResponse response) {
return mapping.findForward(Constants.BUDGET_RATES_PAGE);
}
public ActionForward distributionAndIncome(ActionMapping mapping, ActionForm form, HttpServletRequest request, HttpServletResponse response) {
BudgetDistributionService budgetDistributionService = KcServiceLocator.getService(BudgetDistributionService.class);
budgetDistributionService.initializeCollectionDefaults(((BudgetForm) form).getBudgetDocument().getBudget());
return mapping.findForward(Constants.BUDGET_DIST_AND_INCOME_PAGE);
}
public ActionForward modularBudget(ActionMapping mapping, ActionForm form, HttpServletRequest request, HttpServletResponse response) {
BudgetForm budgetForm = (BudgetForm) form;
BudgetModularService budgetModularService = KcServiceLocator.getService(BudgetModularService.class);
budgetForm.setBudgetModularSummary(budgetModularService.generateModularSummary(budgetForm.getBudgetDocument().getBudget()));
return mapping.findForward(Constants.BUDGET_MODULAR_PAGE);
}
protected void populatePersonnelRoles(BudgetDocument budgetDocument) {
BudgetParent budgetParent = budgetDocument.getBudget().getBudgetParent().getDocument().getBudgetParent();
List<BudgetPerson> budgetPersons = budgetDocument.getBudget().getBudgetPersons();
for (BudgetPerson budgetPerson: budgetPersons) {
String roleDesc = "";
if (budgetPerson.getRolodexId() != null) {
PersonRolodex person = budgetParent.getProposalNonEmployee(budgetPerson.getRolodexId());
ContactRole role = budgetParent.getProposalNonEmployeeRole(budgetPerson.getRolodexId());
if (role != null) {
roleDesc = person.getInvestigatorRoleDescription();
if(person != null && StringUtils.equals(Constants.KEY_PERSON_ROLE, role.getRoleCode()) && StringUtils.isNotEmpty(person.getProjectRole())) {
roleDesc = person.getProjectRole();
}
}
} else if (budgetPerson.getPersonId() != null) {
PersonRolodex person = budgetParent.getProposalEmployee(budgetPerson.getPersonId());
ContactRole role = budgetParent.getProposalEmployeeRole(budgetPerson.getPersonId());
if (role != null) {
roleDesc = person.getInvestigatorRoleDescription();
if(person != null && StringUtils.equals(Constants.KEY_PERSON_ROLE, role.getRoleCode()) && StringUtils.isNotEmpty(person.getProjectRole())) {
roleDesc = person.getProjectRole();
}
}
}
budgetPerson.setRole(roleDesc);
}
}
    /**
     * Prepares and shows the Summary Totals page: populates roles, force-loads each
     * personnel detail's budgetPerson reference, computes totals, and records the
     * hierarchy indirect-cost element parameter on the form.
     */
    public ActionForward summaryTotals(ActionMapping mapping, ActionForm form, HttpServletRequest request, HttpServletResponse response) {
        BudgetForm budgetForm = (BudgetForm) form;
        BudgetDocument budgetDocument = budgetForm.getBudgetDocument();
        populatePersonnelRoles(budgetDocument);
        Budget budget = budgetDocument.getBudget();
        for(BudgetPeriod period : budget.getBudgetPeriods()) {
            for(BudgetLineItem lineItem : period.getBudgetLineItems()) {
                for(BudgetPersonnelDetails budgetPersonnelDetails : lineItem.getBudgetPersonnelDetailsList()) {
                    budgetPersonnelDetails.refreshReferenceObject("budgetPerson");
                }
            }
        }
        // Called for its side effects; the return value is rendered from the budget itself.
        budget.getBudgetTotals();
        budgetForm.setProposalHierarchyIndirectObjectCode(getParameterService().getParameterValueAsString(BudgetDocument.class, "proposalHierarchySubProjectIndirectCostElement"));
        return mapping.findForward(Constants.BUDGET_SUMMARY_TOTALS_PAGE);
    }
public ActionForward proposalHierarchy(ActionMapping mapping, ActionForm form, HttpServletRequest request, HttpServletResponse response) {
return mapping.findForward(Constants.PROPOSAL_HIERARCHY_PAGE);
}
public ActionForward hierarchy(ActionMapping mapping, ActionForm form, HttpServletRequest request, HttpServletResponse response) {
BudgetForm budgetForm = (BudgetForm)form;
DevelopmentProposal pd = (DevelopmentProposal) budgetForm.getBudgetDocument().getBudget().getBudgetParent();
budgetForm.setHierarchyProposalSummaries(getHierarchyHelper().getHierarchyProposalSummaries(pd.getProposalNumber()));
return mapping.findForward(Constants.HIERARCHY_PAGE);
}
public ActionForward budgetActions(ActionMapping mapping, ActionForm form, HttpServletRequest request, HttpServletResponse response) {
BudgetForm budgetForm = (BudgetForm) form;
BudgetDocument budgetDocument = budgetForm.getBudgetDocument();
Budget budget = budgetDocument.getBudget();
populateBudgetPrintForms(budget);
KcServiceLocator.getService(PropDevBudgetSubAwardService.class).prepareBudgetSubAwards(budget);
return mapping.findForward(Constants.BUDGET_ACTIONS_PAGE);
}
protected ProposalHierarcyActionHelper getHierarchyHelper() {
if (hierarchyHelper == null) {
hierarchyHelper = new ProposalHierarcyActionHelper();
}
return hierarchyHelper;
}
private void populateBudgetPrintForms(Budget budget) {
if(budget.getBudgetPrintForms().isEmpty()){
BudgetPrintService budgetPrintService = KcServiceLocator.getService(BudgetPrintService.class);
budgetPrintService.populateBudgetPrintForms(budget);
}
}
public ActionForward returnToProposal(ActionMapping mapping, ActionForm form, HttpServletRequest request, HttpServletResponse response) throws Exception {
final BudgetForm budgetForm = (BudgetForm) form;
ActionForward forward = null;
if (!StringUtils.equalsIgnoreCase((String)budgetForm.getEditingMode().get(AuthorizationConstants.EditMode.VIEW_ONLY), "TRUE")) {
forward = this.save(mapping, form, request, response);
}
setupDocumentExit();
if (forward == null || !forward.getPath().contains(KRADConstants.QUESTION_ACTION)) {
return this.getReturnToProposalForward(budgetForm);
}
return forward;
}
public ActionForward returnToAward(ActionMapping mapping, ActionForm form, HttpServletRequest request, HttpServletResponse response) throws Exception {
final BudgetForm budgetForm = (BudgetForm) form;
ActionForward forward = null;
if (!"true".equals(budgetForm.getEditingMode().get(AuthorizationConstants.EditMode.VIEW_ONLY))) {
forward = this.save(mapping, form, request, response);
}
setupDocumentExit();
if (forward == null || !forward.getPath().contains(KRADConstants.QUESTION_ACTION)) {
return this.getReturnToAwardForward(budgetForm);
}
return forward;
}
    /**
     * Builds the redirect back to the parent award document, targeting the newest
     * non-canceled award version at or after the current one, preserving the
     * show-all-budget-versions flag, and landing on the Actions page when audit
     * mode is active.
     */
    private ActionForward getReturnToAwardForward(BudgetForm budgetForm) throws Exception{
        assert budgetForm != null : "the form is null";
        final DocumentService docService = KcServiceLocator.getService(DocumentService.class);
        Award award = (Award) budgetForm.getBudgetDocument().getBudget().getBudgetParent();
        //find the newest, uncanceled award document to return to
        String docNumber = award.getAwardDocument().getDocumentNumber();
        List<VersionHistory> versions = KcServiceLocator.getService(VersionHistoryService.class).loadVersionHistory(Award.class, award.getAwardNumber());
        // NOTE(review): this keeps the LAST qualifying entry in iteration order —
        // presumably the history list is ordered oldest-to-newest; verify upstream.
        for (VersionHistory version : versions) {
            if (version.getSequenceOwnerSequenceNumber() > award.getSequenceNumber() &&
                version.getStatus() != VersionStatus.CANCELED) {
                docNumber = ((Award) version.getSequenceOwner()).getAwardDocument().getDocumentNumber();
            }
        }
        final AwardDocument awardDocument = (AwardDocument) docService.getByDocumentHeaderId(docNumber);
        String forwardUrl = buildForwardUrl(awardDocument.getDocumentHeader().getWorkflowDocument().getDocumentId());
        if(budgetForm.isAuditActivated()) {
            forwardUrl = StringUtils.replace(forwardUrl, "Award.do?", "Actions.do?");
        }
        //add showAllBudgetVersion to the url to persist that flag until they leave the document
        forwardUrl = StringUtils.replace(forwardUrl, ".do?", ".do?showAllBudgetVersions=" + budgetForm.isShowAllBudgetVersions() + "&");
        return new ActionForward(forwardUrl, true);
    }
/**
* Gets the correct return-to-proposal action forward.
*
* @param form the budget form
* @return the action forward
* @throws WorkflowException if there is a problem interacting with workflow
*/
private ActionForward getReturnToProposalForward(final BudgetForm form) throws WorkflowException {
assert form != null : "the form is null";
final DocumentService docService = KcServiceLocator.getService(DocumentService.class);
final String docNumber = form.getBudgetDocument().getBudget().getBudgetParent().getDocument().getDocumentNumber();
final ProposalDevelopmentDocument pdDoc = (ProposalDevelopmentDocument) docService.getByDocumentHeaderId(docNumber);
String forwardUrl = buildForwardUrl(pdDoc.getDocumentHeader().getWorkflowDocument().getDocumentId());
if(form.isAuditActivated()) {
forwardUrl = StringUtils.replace(forwardUrl, "Proposal.do?", "Actions.do?auditActivated=true&");
}
forwardUrl += "&methodToCallAttribute=methodToCall.reload";
return new ActionForward(forwardUrl, true);
}
public void reconcilePersonnelRoles(BudgetDocument budgetDocument) {
// Populate the person's proposal roles, if they exist
Budget budget = budgetDocument.getBudget();
BudgetParent budgetParent = budget.getBudgetParent();
List<BudgetPerson> budgetPersons = budget.getBudgetPersons();
for (BudgetPerson budgetPerson: budgetPersons) {
if (budgetPerson.getRolodexId() != null) {
PersonRolodex person = budgetParent.getProposalNonEmployee(budgetPerson.getRolodexId());
if (person != null) { budgetPerson.setRole(person.getInvestigatorRoleDescription()); }
} else if (budgetPerson.getPersonId() != null) {
PersonRolodex person = budgetParent.getProposalEmployee(budgetPerson.getPersonId());
if (person != null) { budgetPerson.setRole(person.getInvestigatorRoleDescription()); }
}
}
}
protected void reconcileBudgetStatus(BudgetForm budgetForm) {
BudgetDocument budgetDocument = budgetForm.getBudgetDocument();
Budget budget = budgetDocument.getBudget();
BudgetParent budgetParent = budgetDocument.getBudget().getBudgetParent().getDocument().getBudgetParent();
if (budgetParent instanceof DevelopmentProposal) {
DevelopmentProposal proposal = (DevelopmentProposal)budgetParent;
KcServiceLocator.getService(ProposalBudgetStatusService.class).loadBudgetStatus(proposal);
}
if (budget.getFinalVersionFlag() != null && Boolean.TRUE.equals(budget.getFinalVersionFlag())) {
budget.setBudgetStatus(budgetParent.getBudgetStatus());
} else {
String budgetStatusIncompleteCode = this.getParameterService().getParameterValueAsString(
BudgetDocument.class, Constants.BUDGET_STATUS_INCOMPLETE_CODE);
budget.setBudgetStatus(budgetStatusIncompleteCode);
}
}
/**
*
* Handy method to stream the byte array to response object
* @param attachmentDataSource
* @param response
* @throws Exception
*/
public void streamToResponse(AttachmentDataSource attachmentDataSource,HttpServletResponse response) throws Exception{
byte[] xbts = attachmentDataSource.getData();
ByteArrayOutputStream baos = null;
if(xbts!=null)
try{
baos = new ByteArrayOutputStream(xbts.length);
baos.write(xbts);
WebUtils.saveMimeOutputStreamAsFile(response, attachmentDataSource.getType(), baos, attachmentDataSource.getName());
}finally{
try{
if(baos!=null){
baos.close();
baos = null;
}
}catch(IOException ioEx){
LOG.warn(ioEx.getMessage(), ioEx);
}
}
}
private void copyLineItemToPersonnelDetails(BudgetDocument budgetDocument) {
for (BudgetPeriod budgetPeriod : budgetDocument.getBudget().getBudgetPeriods()) {
if (budgetPeriod.getBudgetLineItems() != null && !budgetPeriod.getBudgetLineItems().isEmpty()) {
for (BudgetLineItem budgetLineItem : budgetPeriod.getBudgetLineItems()) {
if (budgetLineItem.getBudgetPersonnelDetailsList() != null && !budgetLineItem.getBudgetPersonnelDetailsList().isEmpty()) {
for (BudgetPersonnelDetails budgetPersonnelDetails : budgetLineItem.getBudgetPersonnelDetailsList()) {
budgetPersonnelDetails.setBudgetId(budgetLineItem.getBudgetId());
budgetPersonnelDetails.setBudgetPeriod(budgetLineItem.getBudgetPeriod());
budgetPersonnelDetails.setLineItemNumber(budgetLineItem.getLineItemNumber());
budgetPersonnelDetails.setCostElement(budgetLineItem.getCostElement());
budgetPersonnelDetails.setCostElementBO(budgetLineItem.getCostElementBO());
}
}
}
}
}
}
    /**
     * Returns the budget-specific pessimistic lock service so budget documents
     * are locked with budget-aware rules rather than the default document
     * locking behavior.
     */
    @Override
    protected PessimisticLockService getPessimisticLockService() {
        return KcServiceLocator.getService(BudgetLockService.class);
    }
    /**
     * Handles the "reject" action for a budget document as a two-step
     * confirmation dialog: first asks the user to confirm, then requires a
     * non-empty reason before rejecting the document via the rejection service
     * and returning it to the sender.
     *
     * @param mapping the Struts action mapping
     * @param form the budget form
     * @param request the current HTTP request (carries the question state)
     * @param response the current HTTP response
     * @return the forward for the next step of the dialog, or back to sender
     * @throws Exception if the rejection or forward resolution fails
     */
    public ActionForward reject(ActionMapping mapping, ActionForm form, HttpServletRequest request, HttpServletResponse response) throws Exception {
        KualiDocumentFormBase kualiDocumentFormBase = (KualiDocumentFormBase) form;
        Object question = request.getParameter(KRADConstants.QUESTION_INST_ATTRIBUTE_NAME);
        Object buttonClicked = request.getParameter(KRADConstants.QUESTION_CLICKED_BUTTON);
        String reason = request.getParameter(KRADConstants.QUESTION_REASON_ATTRIBUTE_NAME);
        String methodToCall = ((KualiForm) form).getMethodToCall();
        final String questionText = "Are you sure you want to reject this document?";
        ActionForward forward;
        if (question == null) {
            // first pass: show the confirmation question with a reason input
            forward = this.performQuestionWithInput(mapping, form, request, response, DOCUMENT_REJECT_QUESTION,
                    questionText , KRADConstants.CONFIRMATION_QUESTION, methodToCall, "");
        } else if ((DOCUMENT_REJECT_QUESTION.equals(question)) && ConfirmationQuestion.NO.equals(buttonClicked)) {
            // user answered "No": stay on the current page, nothing changes
            forward = mapping.findForward(Constants.MAPPING_BASIC);
        } else {
            if (StringUtils.isEmpty(reason)) {
                // a reason is mandatory: re-ask the question with an error message
                String context = "";
                String errorKey = KeyConstants.ERROR_BUDGET_REJECT_NO_REASON;
                String errorPropertyName = DOCUMENT_REJECT_QUESTION;
                String errorParameter = "";
                reason = reason == null ? "" : reason;
                forward = this.performQuestionWithInputAgainBecauseOfErrors(mapping, form, request, response, DOCUMENT_REJECT_QUESTION,
                        questionText, KRADConstants.CONFIRMATION_QUESTION, methodToCall, context, reason, errorKey, errorPropertyName,
                        errorParameter);
            } else {
                //reject the document using the service.
                BudgetDocument document = ((BudgetForm)form).getBudgetDocument();
                document.documentHasBeenRejected(reason);
                KcServiceLocator.getService(KcDocumentRejectionService.class).reject(document.getDocumentNumber(), reason,
                        GlobalVariables.getUserSession().getPrincipalId());
                //tell the document it is being rejected and returned to the initial node.
                forward = super.returnToSender(request, mapping, kualiDocumentFormBase);
            }
        }
        return forward;
    }
    /**
     * Looks up the budget-common service implementation appropriate for the
     * given budget parent via the factory.
     *
     * @param budgetParent the parent of the budget
     * @return the matching {@link BudgetCommonService} instance
     */
    protected BudgetCommonService<BudgetParent> getBudgetCommonService(BudgetParent budgetParent) {
        return BudgetCommonServiceFactory.createInstance(budgetParent);
    }
    /**
     * Recalculates a single budget period using the parent-specific budget
     * common service.
     *
     * @param budgetForm the current budget form (not used here; kept for subclass hooks)
     * @param budget the budget that owns the period
     * @param budgetPeriod the period to recalculate
     */
    protected void recalculateBudgetPeriod(BudgetForm budgetForm, Budget budget, BudgetPeriod budgetPeriod) {
        getBudgetCommonService(budget.getBudgetParent()).recalculateBudgetPeriod(budget, budgetPeriod);
    }
    /**
     * Calculates a single budget period using the budget calculation service.
     *
     * @param budget the budget that owns the period
     * @param budgetPeriod the period to calculate
     */
    protected void calculateBudgetPeriod(Budget budget, BudgetPeriod budgetPeriod) {
        getCalculationService().calculateBudgetPeriod(budget, budgetPeriod);
    }
    /**
     * Locates the {@link BudgetCalculationService}.
     *
     * @return the {@link BudgetCalculationService} singleton instance
     */
    protected BudgetCalculationService getCalculationService() {
        return KcServiceLocator.getService(BudgetCalculationService.class);
    }
}
|
apache-2.0
|
McLeodMoores/starling
|
projects/financial/src/main/java/com/opengamma/financial/analytics/model/pnl/FXOptionBlackPnLDefaults.java
|
3692
|
/**
* Copyright (C) 2012 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.pnl;
import java.util.Collections;
import java.util.Set;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.financial.analytics.OpenGammaFunctionExclusions;
import com.opengamma.financial.property.DefaultPropertyFunction;
import com.opengamma.financial.security.FinancialSecurity;
import com.opengamma.financial.security.option.FXBarrierOptionSecurity;
import com.opengamma.financial.security.option.FXDigitalOptionSecurity;
import com.opengamma.financial.security.option.FXOptionSecurity;
import com.opengamma.financial.security.option.NonDeliverableFXDigitalOptionSecurity;
import com.opengamma.financial.security.option.NonDeliverableFXOptionSecurity;
import com.opengamma.util.ArgumentChecker;
/**
 * Default property function that injects P&amp;L series defaults (sampling
 * period, schedule calculator and sampling function) for positions in FX
 * options priced using the Black model.
 */
public class FXOptionBlackPnLDefaults extends DefaultPropertyFunction {
  /** The default sampling period name. */
  private final String _samplingPeriod;
  /** The default schedule calculator name. */
  private final String _scheduleCalculator;
  /** The default sampling function name. */
  private final String _samplingFunction;

  /**
   * @param samplingPeriod the default sampling period, not null
   * @param scheduleCalculator the default schedule calculator name, not null
   * @param samplingFunction the default sampling function name, not null
   */
  public FXOptionBlackPnLDefaults(final String samplingPeriod, final String scheduleCalculator, final String samplingFunction) {
    super(ComputationTargetType.POSITION, true);
    ArgumentChecker.notNull(samplingPeriod, "sampling period");
    ArgumentChecker.notNull(scheduleCalculator, "schedule calculator");
    ArgumentChecker.notNull(samplingFunction, "sampling function");
    _samplingPeriod = samplingPeriod;
    _scheduleCalculator = scheduleCalculator;
    _samplingFunction = samplingFunction;
  }

  @Override
  public boolean canApplyTo(final FunctionCompilationContext context, final ComputationTarget target) {
    if (!(target.getPosition().getSecurity() instanceof FinancialSecurity)) {
      return false;
    }
    final FinancialSecurity security = (FinancialSecurity) target.getPosition().getSecurity();
    // applies to every supported FX option flavour, deliverable or not
    return security instanceof FXOptionSecurity
        || security instanceof FXBarrierOptionSecurity
        || security instanceof FXDigitalOptionSecurity
        || security instanceof NonDeliverableFXOptionSecurity
        || security instanceof NonDeliverableFXDigitalOptionSecurity;
  }

  @Override
  protected void getDefaults(final PropertyDefaults defaults) {
    defaults.addValuePropertyName(ValueRequirementNames.PNL_SERIES, ValuePropertyNames.SAMPLING_PERIOD);
    defaults.addValuePropertyName(ValueRequirementNames.PNL_SERIES, ValuePropertyNames.SCHEDULE_CALCULATOR);
    defaults.addValuePropertyName(ValueRequirementNames.PNL_SERIES, ValuePropertyNames.SAMPLING_FUNCTION);
  }

  @Override
  protected Set<String> getDefaultValue(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue,
      final String propertyName) {
    if (ValuePropertyNames.SAMPLING_PERIOD.equals(propertyName)) {
      return Collections.singleton(_samplingPeriod);
    }
    if (ValuePropertyNames.SCHEDULE_CALCULATOR.equals(propertyName)) {
      return Collections.singleton(_scheduleCalculator);
    }
    if (ValuePropertyNames.SAMPLING_FUNCTION.equals(propertyName)) {
      return Collections.singleton(_samplingFunction);
    }
    // unknown property: supply no default
    return null;
  }

  @Override
  public String getMutualExclusionGroup() {
    return OpenGammaFunctionExclusions.PNL_SERIES;
  }
}
|
apache-2.0
|
madvirus/kotlin-web-site
|
pages/docs/reference_ko/visibility-modifiers.md
|
4004
|
---
type: doc
layout: reference_ko
category: "Classes and Objects"
title: "가시성 수식어"
---
# 가시성 수식어
클래스, 오브젝트, 인터페이스, 생성자, 함수, 프로퍼티 및 프로퍼티의 setter는 _가시성 수식어_를 가질 수 있다.
(getter는 항상 프로퍼티와 동일한 가시성을 갖는다.)
코틀린에는 `private`, `protected`, `internal`, `public`의 가시성 수식어가 있다.
수식어를 지정하지 않을 경우 기본 가시성은 `public`이다.
타입과 선언 범위에 따른 가시성에 대해서는 뒤에서 설명한다.
## 패키지
함수, 프로퍼티와 클래스, 오브젝트와 인터페이스는 "최상위(top-level)"에 선언할 수 있다.
예를 들어 패키지에 직접 선언할 수 있다.
``` kotlin
// file name: example.kt
package foo
fun baz() {}
class Bar {}
```
* 가시성 수식어를 명시하지 않으면 기본으로 `public`을 사용한다. 이는 모든 곳에서 접근 가능함을 뜻한다.
* `private`으로 선언하면, 그 선언을 포함한 파일 안에서만 접근할 수 있다.
* `internal`로 선언하면, 같은 [모듈](#modules)에서 접근 가능하다.
* `protected`는 최상위 선언에 사용할 수 없다.
주의: 다른 패키지에서 최상위 선언에 접근하려면 그것을 [임포트](packages.html#imports)해야 한다.
예제:
``` kotlin
// 파일 이름: example.kt
package foo
private fun foo() {} // example.kt 안에서 접근 가능
public var bar: Int = 5 // 모든 곳에서 접근 가능
private set // setter는 example.kt에서만 접근 가능
internal val baz = 6 // 같은 모듈에서 접근 가능
```
## 클래스와 인터페이스
클래스에 선언한 멤버에 대해서는 다음과 같다:
* `private`은 오직 클래스 안에서만(그리고 클래스의 모든 멤버에서) 접근 가능함을 의미한다.
* `protected` --- `private` + 하위클래스에서 접근 가능함과 동일하다.
* `internal` --- 선언한 클래스를 볼 수 있는 *모듈 안의* 모든 클라이언트가 `internal` 멤버를 볼 수 있다.
* `public` --- 선언한 클래스를 볼 수 있는 클라이언트가 `public` 멤버를 볼 수 있다.
*주의* 자바와 달리 코틀린에서 외부 클래스는 내부 클래스의 private 멤버를 볼 수 없다.
`protected` 멤버를 오버라이딩할 때 가시성을 명시적으로 지정하지 않으면,
오버라이딩한 멤버 또한 `protected` 가시성을 갖는다.
예제:
``` kotlin
open class Outer {
private val a = 1
protected open val b = 2
internal val c = 3
val d = 4 // 기본으로 public
protected class Nested {
public val e: Int = 5
}
}
class Subclass : Outer() {
// a는 접근 불가
// b, c, d는 접근 가능
// Nested와 e는 접근 가능
override val b = 5 // 'b'는 protected
}
class Unrelated(o: Outer) {
// o.a, o.b는 접근 불가
// o.c 와 o.d는 접근 가능(같은 모듈)
// Outer.Nested는 접근 불가며, Nested::e 역시 접근 불가
}
```
{:#constructors}
### 생성자
``` kotlin
class C private constructor(a: Int) { ... }
```
위 코드의 생성자는 private이다. 기본적으로 모든 생성자는 `public`이며
실질적으로 클래스를 접근할 수 있는 모든 곳에서 생성자에 접근할 수 있다.
(예를 들어 `internal` 클래스의 생성자는 오직 같은 모듈에서만 보인다.)
### 로컬 선언
로컬 변수, 로컬 함수, 로컬 클래스에는 가시성 수식어를 지정할 수 없다.
## 모듈
{:#modules}
`internal` 가시성 수식어는 같은 모듈에서 멤버에 접근할 수 있음을 의미한다.
더 구체적으로 모듈은 함께 컴파일되는 코틀린 파일 집합이다.
* IntelliJ IDEA 모듈
* 메이븐 프로젝트
* 그레이들 소스 집합
* kotlinc 앤트 태스크를 한 번 호출할 때 컴파일되는 파일 집합
|
apache-2.0
|
StratusLab/storage
|
pdisk-server/war/src/main/webapp/media/js/stratuslab.js
|
387
|
/**
 * StratusLab page module: wires up UI behaviour once the DOM is ready.
 */
$.STRATUS = (function () {
    // Attach event handlers; invoked on DOM ready.
    var _init = function () {
        $('#logout').click(_logout);  // fixed: missing statement semicolon
    };

    var _logout = function (event) {
        event.preventDefault();
        // Workaround to logout user: re-request the page with a bogus
        // username, which invalidates the cached HTTP Basic-auth credentials.
        // As HTTP is state-less there is no cross-browser clean way to do
        $.get(location.href.replace('://', '://x-pdisk-logout@'));
    };

    return {
        // fixed: trailing comma removed -- it is a syntax error in old IE
        init: _init
    };
}());
$(document).ready($.STRATUS.init);
|
apache-2.0
|
mkhuda/AndOfflineMechanism
|
src/com/mkhuda/offlinecache/MainActivity.java
|
4007
|
package com.mkhuda.offlinecache;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import com.android.volley.Response;
import com.android.volley.VolleyError;
import com.android.volley.Request.Method;
import com.android.volley.toolbox.NetworkImageView;
import com.android.volley.toolbox.StringRequest;
import com.mkhuda.offlinecache.models.CacheModels;
import android.app.Activity;
import android.content.Intent;
import android.os.Bundle;
import android.os.Handler;
import android.util.Log;
import android.view.Menu;
import android.view.MenuItem;
import android.view.View;
import android.widget.Button;
import android.widget.TextView;
import android.widget.Toast;
import java.util.UUID;
/**
 * Demo activity for the offline-cache mechanism.  One button fetches a random
 * movie JSON payload from a remote endpoint with Volley and stores it in the
 * local cache; the other reads the cached value back without network access.
 */
public class MainActivity extends Activity {
    // Log tag for debug output.
    private static final String TAG = MainActivity.class.getSimpleName();
    // t1 shows the network-load status, t2 shows the cached movie name.
    TextView t1, t2;
    // b0 triggers the network load, b reads from the local cache.
    Button b0, b;
    // Wrapper used to persist and read cached responses.
    CacheModels cm;
    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_main);
        cm = new CacheModels(getApplicationContext());
        b0 = (Button) findViewById(R.id.button0);
        b0.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                // Fetch fresh data from the server and cache it.
                loadfromserver();
            }
        });
        b = (Button) findViewById(R.id.button);
        b.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                // Read the cached JSON back and display the movie name offline.
                t2 = (TextView) findViewById(R.id.text2);
                try {
                    // NOTE(review): the key "John1" is presumably the key
                    // CacheModels writes under -- confirm against CacheModels.
                    String a = (String) InternalStorage.readObject(getApplicationContext(), "John1");
                    JSONObject jObj = new JSONObject(a);
                    t2.setText(jObj.getString("movie_name"));
                } catch (ClassNotFoundException | IOException e) {
                    // Cache miss or unreadable cache file; nothing to display.
                    e.printStackTrace();
                } catch (JSONException e) {
                    // Cached payload is not the expected JSON shape.
                    e.printStackTrace();
                }
                try {
                    Toast.makeText(getApplicationContext(), cm.getCache1(), Toast.LENGTH_LONG).show();
                } catch (ClassNotFoundException | IOException e) {
                    // Cache read failed; skip the toast.
                    e.printStackTrace();
                }
            }
        });
    }
    /**
     * Requests a random movie from the demo endpoint and, on success, stores
     * the raw JSON response in the cache and updates the status label.
     * On failure (typically no connectivity) prompts the user with a toast.
     */
    private void loadfromserver() {
        t1 = (TextView) findViewById(R.id.text1);
        StringRequest movieReq = new StringRequest(Method.POST,
                "http://labs.mkhuda.com/bisanonton/movie-random.php", new Response.Listener<String>() {
                    @Override
                    public void onResponse(String response) {
                        Log.d(TAG, response.toString());
                        // Persist the raw response so it can be shown offline later.
                        cm.setCache1(response.toString());
                        t1.setText("Data Loaded");
                        try {
                            JSONObject jObj = new JSONObject(response);
                            Toast.makeText(getApplicationContext(), "String saved is: "+jObj.getString("movie_name"), Toast.LENGTH_LONG).show();
                        } catch (JSONException e) {
                            // Response was not valid JSON; the cache still holds the raw text.
                            e.printStackTrace();
                        }
                    }
                }, new Response.ErrorListener() {
                    @Override
                    public void onErrorResponse(VolleyError error) {
                        // VolleyLog.d(TAG, "Error: " + error.getMessage());
                        // Typically a connectivity failure; prompt the user.
                        Toast.makeText(getApplicationContext(),
                                "Please Connect To Internet!", Toast.LENGTH_SHORT).show();
                    }
                });
        AppController.getInstance().addToRequestQueue(movieReq);
    }
    @Override
    public boolean onCreateOptionsMenu(Menu menu) {
        // Inflate the menu; this adds items to the action bar if it is present.
        getMenuInflater().inflate(R.menu.main, menu);
        return true;
    }
    @Override
    public boolean onOptionsItemSelected(MenuItem item) {
        // Handle action bar item clicks here. The action bar will
        // automatically handle clicks on the Home/Up button, so long
        // as you specify a parent activity in AndroidManifest.xml.
        int id = item.getItemId();
        if (id == R.id.action_settings) {
            return true;
        }
        return super.onOptionsItemSelected(item);
    }
}
|
apache-2.0
|
googleads/google-ads-java
|
google-ads-stubs-v9/src/main/java/com/google/ads/googleads/v9/services/MutateConversionValueRuleSetResult.java
|
32654
|
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: google/ads/googleads/v9/services/conversion_value_rule_set_service.proto
package com.google.ads.googleads.v9.services;
/**
* <pre>
* The result for the conversion value rule set mutate.
* </pre>
*
* Protobuf type {@code google.ads.googleads.v9.services.MutateConversionValueRuleSetResult}
*/
public final class MutateConversionValueRuleSetResult extends
com.google.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:google.ads.googleads.v9.services.MutateConversionValueRuleSetResult)
MutateConversionValueRuleSetResultOrBuilder {
private static final long serialVersionUID = 0L;
// Use MutateConversionValueRuleSetResult.newBuilder() to construct.
private MutateConversionValueRuleSetResult(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private MutateConversionValueRuleSetResult() {
resourceName_ = "";
}
@java.lang.Override
@SuppressWarnings({"unused"})
protected java.lang.Object newInstance(
UnusedPrivateParameter unused) {
return new MutateConversionValueRuleSetResult();
}
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private MutateConversionValueRuleSetResult(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 10: {
java.lang.String s = input.readStringRequireUtf8();
resourceName_ = s;
break;
}
case 18: {
com.google.ads.googleads.v9.resources.ConversionValueRuleSet.Builder subBuilder = null;
if (conversionValueRuleSet_ != null) {
subBuilder = conversionValueRuleSet_.toBuilder();
}
conversionValueRuleSet_ = input.readMessage(com.google.ads.googleads.v9.resources.ConversionValueRuleSet.parser(), extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(conversionValueRuleSet_);
conversionValueRuleSet_ = subBuilder.buildPartial();
}
break;
}
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return com.google.ads.googleads.v9.services.ConversionValueRuleSetServiceProto.internal_static_google_ads_googleads_v9_services_MutateConversionValueRuleSetResult_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return com.google.ads.googleads.v9.services.ConversionValueRuleSetServiceProto.internal_static_google_ads_googleads_v9_services_MutateConversionValueRuleSetResult_fieldAccessorTable
.ensureFieldAccessorsInitialized(
com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult.class, com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult.Builder.class);
}
public static final int RESOURCE_NAME_FIELD_NUMBER = 1;
private volatile java.lang.Object resourceName_;
/**
* <pre>
* Returned for successful operations.
* </pre>
*
* <code>string resource_name = 1;</code>
* @return The resourceName.
*/
@java.lang.Override
public java.lang.String getResourceName() {
java.lang.Object ref = resourceName_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
resourceName_ = s;
return s;
}
}
/**
* <pre>
* Returned for successful operations.
* </pre>
*
* <code>string resource_name = 1;</code>
* @return The bytes for resourceName.
*/
@java.lang.Override
public com.google.protobuf.ByteString
getResourceNameBytes() {
java.lang.Object ref = resourceName_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
resourceName_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
public static final int CONVERSION_VALUE_RULE_SET_FIELD_NUMBER = 2;
private com.google.ads.googleads.v9.resources.ConversionValueRuleSet conversionValueRuleSet_;
/**
* <pre>
* The mutated conversion value rule set with only mutable fields after
* mutate. The field will only be returned when response_content_type is set
* to "MUTABLE_RESOURCE".
* </pre>
*
* <code>.google.ads.googleads.v9.resources.ConversionValueRuleSet conversion_value_rule_set = 2;</code>
* @return Whether the conversionValueRuleSet field is set.
*/
@java.lang.Override
public boolean hasConversionValueRuleSet() {
return conversionValueRuleSet_ != null;
}
/**
* <pre>
* The mutated conversion value rule set with only mutable fields after
* mutate. The field will only be returned when response_content_type is set
* to "MUTABLE_RESOURCE".
* </pre>
*
* <code>.google.ads.googleads.v9.resources.ConversionValueRuleSet conversion_value_rule_set = 2;</code>
* @return The conversionValueRuleSet.
*/
@java.lang.Override
public com.google.ads.googleads.v9.resources.ConversionValueRuleSet getConversionValueRuleSet() {
return conversionValueRuleSet_ == null ? com.google.ads.googleads.v9.resources.ConversionValueRuleSet.getDefaultInstance() : conversionValueRuleSet_;
}
/**
* <pre>
* The mutated conversion value rule set with only mutable fields after
* mutate. The field will only be returned when response_content_type is set
* to "MUTABLE_RESOURCE".
* </pre>
*
* <code>.google.ads.googleads.v9.resources.ConversionValueRuleSet conversion_value_rule_set = 2;</code>
*/
@java.lang.Override
public com.google.ads.googleads.v9.resources.ConversionValueRuleSetOrBuilder getConversionValueRuleSetOrBuilder() {
return getConversionValueRuleSet();
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(resourceName_)) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 1, resourceName_);
}
if (conversionValueRuleSet_ != null) {
output.writeMessage(2, getConversionValueRuleSet());
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(resourceName_)) {
size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, resourceName_);
}
if (conversionValueRuleSet_ != null) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(2, getConversionValueRuleSet());
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult)) {
return super.equals(obj);
}
com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult other = (com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult) obj;
if (!getResourceName()
.equals(other.getResourceName())) return false;
if (hasConversionValueRuleSet() != other.hasConversionValueRuleSet()) return false;
if (hasConversionValueRuleSet()) {
if (!getConversionValueRuleSet()
.equals(other.getConversionValueRuleSet())) return false;
}
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
hash = (37 * hash) + RESOURCE_NAME_FIELD_NUMBER;
hash = (53 * hash) + getResourceName().hashCode();
if (hasConversionValueRuleSet()) {
hash = (37 * hash) + CONVERSION_VALUE_RULE_SET_FIELD_NUMBER;
hash = (53 * hash) + getConversionValueRuleSet().hashCode();
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult parseFrom(
java.nio.ByteBuffer data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult parseFrom(
java.nio.ByteBuffer data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult parseFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* <pre>
* The result for the conversion value rule set mutate.
* </pre>
*
* Protobuf type {@code google.ads.googleads.v9.services.MutateConversionValueRuleSetResult}
*/
  /**
   * Builder for {@code google.ads.googleads.v9.services.MutateConversionValueRuleSetResult}.
   *
   * <p>Generated by the protocol buffer compiler — do not edit by hand; regenerate
   * from the .proto definition instead. Holds the two fields of the message
   * (string {@code resource_name} and message {@code conversion_value_rule_set})
   * in mutable form until {@link #build()} is called.
   */
  public static final class Builder extends
      com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
      // @@protoc_insertion_point(builder_implements:google.ads.googleads.v9.services.MutateConversionValueRuleSetResult)
      com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResultOrBuilder {
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return com.google.ads.googleads.v9.services.ConversionValueRuleSetServiceProto.internal_static_google_ads_googleads_v9_services_MutateConversionValueRuleSetResult_descriptor;
    }
    @java.lang.Override
    protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return com.google.ads.googleads.v9.services.ConversionValueRuleSetServiceProto.internal_static_google_ads_googleads_v9_services_MutateConversionValueRuleSetResult_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult.class, com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult.Builder.class);
    }
    // Construct using com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult.newBuilder()
    private Builder() {
      maybeForceBuilderInitialization();
    }
    private Builder(
        com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
      super(parent);
      maybeForceBuilderInitialization();
    }
    // No repeated/nested builders require eager setup for this message, so
    // this is a no-op unless the runtime forces field-builder creation.
    private void maybeForceBuilderInitialization() {
      if (com.google.protobuf.GeneratedMessageV3
              .alwaysUseFieldBuilders) {
      }
    }
    // Resets both fields to their proto3 defaults (empty string / unset message).
    @java.lang.Override
    public Builder clear() {
      super.clear();
      resourceName_ = "";
      if (conversionValueRuleSetBuilder_ == null) {
        conversionValueRuleSet_ = null;
      } else {
        conversionValueRuleSet_ = null;
        conversionValueRuleSetBuilder_ = null;
      }
      return this;
    }
    @java.lang.Override
    public com.google.protobuf.Descriptors.Descriptor
        getDescriptorForType() {
      return com.google.ads.googleads.v9.services.ConversionValueRuleSetServiceProto.internal_static_google_ads_googleads_v9_services_MutateConversionValueRuleSetResult_descriptor;
    }
    @java.lang.Override
    public com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult getDefaultInstanceForType() {
      return com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult.getDefaultInstance();
    }
    @java.lang.Override
    public com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult build() {
      com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult result = buildPartial();
      if (!result.isInitialized()) {
        throw newUninitializedMessageException(result);
      }
      return result;
    }
    // Copies the builder's current state into a new immutable message without
    // checking required fields (proto3: isInitialized() is always true anyway).
    @java.lang.Override
    public com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult buildPartial() {
      com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult result = new com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult(this);
      result.resourceName_ = resourceName_;
      if (conversionValueRuleSetBuilder_ == null) {
        result.conversionValueRuleSet_ = conversionValueRuleSet_;
      } else {
        result.conversionValueRuleSet_ = conversionValueRuleSetBuilder_.build();
      }
      onBuilt();
      return result;
    }
    @java.lang.Override
    public Builder clone() {
      return super.clone();
    }
    @java.lang.Override
    public Builder setField(
        com.google.protobuf.Descriptors.FieldDescriptor field,
        java.lang.Object value) {
      return super.setField(field, value);
    }
    @java.lang.Override
    public Builder clearField(
        com.google.protobuf.Descriptors.FieldDescriptor field) {
      return super.clearField(field);
    }
    @java.lang.Override
    public Builder clearOneof(
        com.google.protobuf.Descriptors.OneofDescriptor oneof) {
      return super.clearOneof(oneof);
    }
    @java.lang.Override
    public Builder setRepeatedField(
        com.google.protobuf.Descriptors.FieldDescriptor field,
        int index, java.lang.Object value) {
      return super.setRepeatedField(field, index, value);
    }
    @java.lang.Override
    public Builder addRepeatedField(
        com.google.protobuf.Descriptors.FieldDescriptor field,
        java.lang.Object value) {
      return super.addRepeatedField(field, value);
    }
    // Dispatches to the strongly-typed merge when possible, otherwise falls
    // back to the reflective descriptor-based merge in the superclass.
    @java.lang.Override
    public Builder mergeFrom(com.google.protobuf.Message other) {
      if (other instanceof com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult) {
        return mergeFrom((com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult)other);
      } else {
        super.mergeFrom(other);
        return this;
      }
    }
    // Field-by-field merge following proto semantics: non-empty scalar fields
    // of `other` overwrite, set sub-messages are recursively merged.
    public Builder mergeFrom(com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult other) {
      if (other == com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult.getDefaultInstance()) return this;
      if (!other.getResourceName().isEmpty()) {
        resourceName_ = other.resourceName_;
        onChanged();
      }
      if (other.hasConversionValueRuleSet()) {
        mergeConversionValueRuleSet(other.getConversionValueRuleSet());
      }
      this.mergeUnknownFields(other.unknownFields);
      onChanged();
      return this;
    }
    @java.lang.Override
    public final boolean isInitialized() {
      return true;
    }
    // Parses a serialized message from the stream and merges it into this
    // builder; on parse failure, any partially-read data is merged first so
    // the builder reflects everything that was successfully consumed.
    @java.lang.Override
    public Builder mergeFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult parsedMessage = null;
      try {
        parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        parsedMessage = (com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult) e.getUnfinishedMessage();
        throw e.unwrapIOException();
      } finally {
        if (parsedMessage != null) {
          mergeFrom(parsedMessage);
        }
      }
      return this;
    }
    // Stored either as a String or a ByteString; lazily converted on access.
    private java.lang.Object resourceName_ = "";
    /**
     * <pre>
     * Returned for successful operations.
     * </pre>
     *
     * <code>string resource_name = 1;</code>
     * @return The resourceName.
     */
    public java.lang.String getResourceName() {
      java.lang.Object ref = resourceName_;
      if (!(ref instanceof java.lang.String)) {
        // Lazily decode the wire-format ByteString and cache the String form.
        com.google.protobuf.ByteString bs =
            (com.google.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        resourceName_ = s;
        return s;
      } else {
        return (java.lang.String) ref;
      }
    }
    /**
     * <pre>
     * Returned for successful operations.
     * </pre>
     *
     * <code>string resource_name = 1;</code>
     * @return The bytes for resourceName.
     */
    public com.google.protobuf.ByteString
        getResourceNameBytes() {
      java.lang.Object ref = resourceName_;
      if (ref instanceof String) {
        // Lazily encode the cached String to UTF-8 bytes and memoize it.
        com.google.protobuf.ByteString b =
            com.google.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        resourceName_ = b;
        return b;
      } else {
        return (com.google.protobuf.ByteString) ref;
      }
    }
    /**
     * <pre>
     * Returned for successful operations.
     * </pre>
     *
     * <code>string resource_name = 1;</code>
     * @param value The resourceName to set.
     * @return This builder for chaining.
     */
    public Builder setResourceName(
        java.lang.String value) {
      if (value == null) {
    throw new NullPointerException();
  }
  
      resourceName_ = value;
      onChanged();
      return this;
    }
    /**
     * <pre>
     * Returned for successful operations.
     * </pre>
     *
     * <code>string resource_name = 1;</code>
     * @return This builder for chaining.
     */
    public Builder clearResourceName() {
      resourceName_ = getDefaultInstance().getResourceName();
      onChanged();
      return this;
    }
    /**
     * <pre>
     * Returned for successful operations.
     * </pre>
     *
     * <code>string resource_name = 1;</code>
     * @param value The bytes for resourceName to set.
     * @return This builder for chaining.
     */
    public Builder setResourceNameBytes(
        com.google.protobuf.ByteString value) {
      if (value == null) {
    throw new NullPointerException();
  }
  checkByteStringIsUtf8(value);
      
      resourceName_ = value;
      onChanged();
      return this;
    }
    // Either the plain message (`conversionValueRuleSet_`) or, once a nested
    // builder has been requested, the SingleFieldBuilderV3 is authoritative.
    private com.google.ads.googleads.v9.resources.ConversionValueRuleSet conversionValueRuleSet_;
    private com.google.protobuf.SingleFieldBuilderV3<
        com.google.ads.googleads.v9.resources.ConversionValueRuleSet, com.google.ads.googleads.v9.resources.ConversionValueRuleSet.Builder, com.google.ads.googleads.v9.resources.ConversionValueRuleSetOrBuilder> conversionValueRuleSetBuilder_;
    /**
     * <pre>
     * The mutated conversion value rule set with only mutable fields after
     * mutate. The field will only be returned when response_content_type is set
     * to "MUTABLE_RESOURCE".
     * </pre>
     *
     * <code>.google.ads.googleads.v9.resources.ConversionValueRuleSet conversion_value_rule_set = 2;</code>
     * @return Whether the conversionValueRuleSet field is set.
     */
    public boolean hasConversionValueRuleSet() {
      return conversionValueRuleSetBuilder_ != null || conversionValueRuleSet_ != null;
    }
    /**
     * <pre>
     * The mutated conversion value rule set with only mutable fields after
     * mutate. The field will only be returned when response_content_type is set
     * to "MUTABLE_RESOURCE".
     * </pre>
     *
     * <code>.google.ads.googleads.v9.resources.ConversionValueRuleSet conversion_value_rule_set = 2;</code>
     * @return The conversionValueRuleSet.
     */
    public com.google.ads.googleads.v9.resources.ConversionValueRuleSet getConversionValueRuleSet() {
      if (conversionValueRuleSetBuilder_ == null) {
        return conversionValueRuleSet_ == null ? com.google.ads.googleads.v9.resources.ConversionValueRuleSet.getDefaultInstance() : conversionValueRuleSet_;
      } else {
        return conversionValueRuleSetBuilder_.getMessage();
      }
    }
    /**
     * <pre>
     * The mutated conversion value rule set with only mutable fields after
     * mutate. The field will only be returned when response_content_type is set
     * to "MUTABLE_RESOURCE".
     * </pre>
     *
     * <code>.google.ads.googleads.v9.resources.ConversionValueRuleSet conversion_value_rule_set = 2;</code>
     */
    public Builder setConversionValueRuleSet(com.google.ads.googleads.v9.resources.ConversionValueRuleSet value) {
      if (conversionValueRuleSetBuilder_ == null) {
        if (value == null) {
          throw new NullPointerException();
        }
        conversionValueRuleSet_ = value;
        onChanged();
      } else {
        conversionValueRuleSetBuilder_.setMessage(value);
      }
      return this;
    }
    /**
     * <pre>
     * The mutated conversion value rule set with only mutable fields after
     * mutate. The field will only be returned when response_content_type is set
     * to "MUTABLE_RESOURCE".
     * </pre>
     *
     * <code>.google.ads.googleads.v9.resources.ConversionValueRuleSet conversion_value_rule_set = 2;</code>
     */
    public Builder setConversionValueRuleSet(
        com.google.ads.googleads.v9.resources.ConversionValueRuleSet.Builder builderForValue) {
      if (conversionValueRuleSetBuilder_ == null) {
        conversionValueRuleSet_ = builderForValue.build();
        onChanged();
      } else {
        conversionValueRuleSetBuilder_.setMessage(builderForValue.build());
      }
      return this;
    }
    /**
     * <pre>
     * The mutated conversion value rule set with only mutable fields after
     * mutate. The field will only be returned when response_content_type is set
     * to "MUTABLE_RESOURCE".
     * </pre>
     *
     * <code>.google.ads.googleads.v9.resources.ConversionValueRuleSet conversion_value_rule_set = 2;</code>
     */
    public Builder mergeConversionValueRuleSet(com.google.ads.googleads.v9.resources.ConversionValueRuleSet value) {
      if (conversionValueRuleSetBuilder_ == null) {
        if (conversionValueRuleSet_ != null) {
          conversionValueRuleSet_ =
            com.google.ads.googleads.v9.resources.ConversionValueRuleSet.newBuilder(conversionValueRuleSet_).mergeFrom(value).buildPartial();
        } else {
          conversionValueRuleSet_ = value;
        }
        onChanged();
      } else {
        conversionValueRuleSetBuilder_.mergeFrom(value);
      }
      return this;
    }
    /**
     * <pre>
     * The mutated conversion value rule set with only mutable fields after
     * mutate. The field will only be returned when response_content_type is set
     * to "MUTABLE_RESOURCE".
     * </pre>
     *
     * <code>.google.ads.googleads.v9.resources.ConversionValueRuleSet conversion_value_rule_set = 2;</code>
     */
    public Builder clearConversionValueRuleSet() {
      if (conversionValueRuleSetBuilder_ == null) {
        conversionValueRuleSet_ = null;
        onChanged();
      } else {
        conversionValueRuleSet_ = null;
        conversionValueRuleSetBuilder_ = null;
      }
      return this;
    }
    /**
     * <pre>
     * The mutated conversion value rule set with only mutable fields after
     * mutate. The field will only be returned when response_content_type is set
     * to "MUTABLE_RESOURCE".
     * </pre>
     *
     * <code>.google.ads.googleads.v9.resources.ConversionValueRuleSet conversion_value_rule_set = 2;</code>
     */
    public com.google.ads.googleads.v9.resources.ConversionValueRuleSet.Builder getConversionValueRuleSetBuilder() {
      
      onChanged();
      return getConversionValueRuleSetFieldBuilder().getBuilder();
    }
    /**
     * <pre>
     * The mutated conversion value rule set with only mutable fields after
     * mutate. The field will only be returned when response_content_type is set
     * to "MUTABLE_RESOURCE".
     * </pre>
     *
     * <code>.google.ads.googleads.v9.resources.ConversionValueRuleSet conversion_value_rule_set = 2;</code>
     */
    public com.google.ads.googleads.v9.resources.ConversionValueRuleSetOrBuilder getConversionValueRuleSetOrBuilder() {
      if (conversionValueRuleSetBuilder_ != null) {
        return conversionValueRuleSetBuilder_.getMessageOrBuilder();
      } else {
        return conversionValueRuleSet_ == null ?
            com.google.ads.googleads.v9.resources.ConversionValueRuleSet.getDefaultInstance() : conversionValueRuleSet_;
      }
    }
    /**
     * <pre>
     * The mutated conversion value rule set with only mutable fields after
     * mutate. The field will only be returned when response_content_type is set
     * to "MUTABLE_RESOURCE".
     * </pre>
     *
     * <code>.google.ads.googleads.v9.resources.ConversionValueRuleSet conversion_value_rule_set = 2;</code>
     */
    private com.google.protobuf.SingleFieldBuilderV3<
        com.google.ads.googleads.v9.resources.ConversionValueRuleSet, com.google.ads.googleads.v9.resources.ConversionValueRuleSet.Builder, com.google.ads.googleads.v9.resources.ConversionValueRuleSetOrBuilder> 
        getConversionValueRuleSetFieldBuilder() {
      // Lazily create the nested field builder; from then on it owns the
      // sub-message state, so the plain field is nulled out.
      if (conversionValueRuleSetBuilder_ == null) {
        conversionValueRuleSetBuilder_ = new com.google.protobuf.SingleFieldBuilderV3<
            com.google.ads.googleads.v9.resources.ConversionValueRuleSet, com.google.ads.googleads.v9.resources.ConversionValueRuleSet.Builder, com.google.ads.googleads.v9.resources.ConversionValueRuleSetOrBuilder>(
                getConversionValueRuleSet(),
                getParentForChildren(),
                isClean());
        conversionValueRuleSet_ = null;
      }
      return conversionValueRuleSetBuilder_;
    }
    @java.lang.Override
    public final Builder setUnknownFields(
        final com.google.protobuf.UnknownFieldSet unknownFields) {
      return super.setUnknownFields(unknownFields);
    }
    @java.lang.Override
    public final Builder mergeUnknownFields(
        final com.google.protobuf.UnknownFieldSet unknownFields) {
      return super.mergeUnknownFields(unknownFields);
    }
    // @@protoc_insertion_point(builder_scope:google.ads.googleads.v9.services.MutateConversionValueRuleSetResult)
  }
// @@protoc_insertion_point(class_scope:google.ads.googleads.v9.services.MutateConversionValueRuleSetResult)
  // Singleton default instance (all fields unset), created eagerly at class
  // load time and shared by every getDefaultInstance() caller.
  private static final com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult DEFAULT_INSTANCE;
  static {
    DEFAULT_INSTANCE = new com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult();
  }
  public static com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult getDefaultInstance() {
    return DEFAULT_INSTANCE;
  }
  // Stateless wire-format parser; delegates to the parsing constructor.
  private static final com.google.protobuf.Parser<MutateConversionValueRuleSetResult>
      PARSER = new com.google.protobuf.AbstractParser<MutateConversionValueRuleSetResult>() {
    @java.lang.Override
    public MutateConversionValueRuleSetResult parsePartialFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return new MutateConversionValueRuleSetResult(input, extensionRegistry);
    }
  };
  public static com.google.protobuf.Parser<MutateConversionValueRuleSetResult> parser() {
    return PARSER;
  }
  @java.lang.Override
  public com.google.protobuf.Parser<MutateConversionValueRuleSetResult> getParserForType() {
    return PARSER;
  }
  @java.lang.Override
  public com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult getDefaultInstanceForType() {
    return DEFAULT_INSTANCE;
  }
}
|
apache-2.0
|
daxnet/apworks-core
|
src/Apworks/Messaging/MessageHandlerExecutionContext.cs
|
1024
|
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading;
using System.Threading.Tasks;
namespace Apworks.Messaging
{
public abstract class MessageHandlerExecutionContext : IMessageHandlerExecutionContext
{
public void RegisterHandler<TMessage, THandler>()
where TMessage : IMessage
where THandler : IMessageHandler<TMessage>
=> RegisterHandler(typeof(TMessage), typeof(THandler));
public bool HandlerRegistered<TMessage, THandler>()
where TMessage : IMessage
where THandler : IMessageHandler<TMessage>
=> HandlerRegistered(typeof(TMessage), typeof(THandler));
public abstract void RegisterHandler(Type messageType, Type handlerType);
public abstract bool HandlerRegistered(Type messageType, Type handlerType);
public abstract Task HandleMessageAsync(IMessage message, CancellationToken cancellationToken = default(CancellationToken));
}
}
|
apache-2.0
|
funcy/functions_go
|
client/routes/delete_apps_app_routes_route_responses.go
|
4056
|
// Code generated by go-swagger; DO NOT EDIT.
package routes
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"fmt"
"io"
"github.com/go-openapi/runtime"
strfmt "github.com/go-openapi/strfmt"
"github.com/funcy/functions_go/models"
)
// DeleteAppsAppRoutesRouteReader is a Reader for the DeleteAppsAppRoutesRoute structure.
// It decodes HTTP responses for DELETE /apps/{app}/routes/{route} into typed
// results, using the registered string formats for payload validation.
type DeleteAppsAppRoutesRouteReader struct {
	formats strfmt.Registry
}
// ReadResponse reads a server response into the received o.
// 200 yields a success result; 404 and any other status yield the typed
// response as an error value (unless an unlisted 2xx, which is treated as
// success via the default branch).
func (o *DeleteAppsAppRoutesRouteReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
	switch response.Code() {
	case 200:
		// Success: route deleted, no payload.
		result := NewDeleteAppsAppRoutesRouteOK()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return result, nil
	case 404:
		// Route does not exist; returned as an error value.
		result := NewDeleteAppsAppRoutesRouteNotFound()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result
	default:
		// Any other status: unexpected; 2xx is still treated as success.
		result := NewDeleteAppsAppRoutesRouteDefault(response.Code())
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		if response.Code()/100 == 2 {
			return result, nil
		}
		return nil, result
	}
}
// NewDeleteAppsAppRoutesRouteOK creates a DeleteAppsAppRoutesRouteOK with default headers values
func NewDeleteAppsAppRoutesRouteOK() *DeleteAppsAppRoutesRouteOK {
	return &DeleteAppsAppRoutesRouteOK{}
}

/*DeleteAppsAppRoutesRouteOK handles this case with default header values.

Route successfully deleted.
*/
type DeleteAppsAppRoutesRouteOK struct {
}

func (o *DeleteAppsAppRoutesRouteOK) Error() string {
	return fmt.Sprintf("[DELETE /apps/{app}/routes/{route}][%d] deleteAppsAppRoutesRouteOK ", 200)
}

// readResponse is a no-op: the 200 response carries no payload.
func (o *DeleteAppsAppRoutesRouteOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// NewDeleteAppsAppRoutesRouteNotFound creates a DeleteAppsAppRoutesRouteNotFound with default headers values
func NewDeleteAppsAppRoutesRouteNotFound() *DeleteAppsAppRoutesRouteNotFound {
	return &DeleteAppsAppRoutesRouteNotFound{}
}

/*DeleteAppsAppRoutesRouteNotFound handles this case with default header values.

Route does not exist.
*/
type DeleteAppsAppRoutesRouteNotFound struct {
	Payload *models.Error
}

func (o *DeleteAppsAppRoutesRouteNotFound) Error() string {
	return fmt.Sprintf("[DELETE /apps/{app}/routes/{route}][%d] deleteAppsAppRoutesRouteNotFound  %+v", 404, o.Payload)
}

// readResponse decodes the error payload; io.EOF (empty body) is tolerated.
func (o *DeleteAppsAppRoutesRouteNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {

	o.Payload = new(models.Error)

	// response payload
	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
		return err
	}

	return nil
}
// NewDeleteAppsAppRoutesRouteDefault creates a DeleteAppsAppRoutesRouteDefault with default headers values
func NewDeleteAppsAppRoutesRouteDefault(code int) *DeleteAppsAppRoutesRouteDefault {
	return &DeleteAppsAppRoutesRouteDefault{
		_statusCode: code,
	}
}

/*DeleteAppsAppRoutesRouteDefault handles this case with default header values.

Unexpected error
*/
type DeleteAppsAppRoutesRouteDefault struct {
	_statusCode int

	Payload *models.Error
}

// Code gets the status code for the delete apps app routes route default response
func (o *DeleteAppsAppRoutesRouteDefault) Code() int {
	return o._statusCode
}

func (o *DeleteAppsAppRoutesRouteDefault) Error() string {
	return fmt.Sprintf("[DELETE /apps/{app}/routes/{route}][%d] DeleteAppsAppRoutesRoute default  %+v", o._statusCode, o.Payload)
}

// readResponse decodes the error payload; io.EOF (empty body) is tolerated.
func (o *DeleteAppsAppRoutesRouteDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {

	o.Payload = new(models.Error)

	// response payload
	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
		return err
	}

	return nil
}
|
apache-2.0
|
Intel-Corporation/tensorflow
|
tensorflow/lite/kernels/fully_connected.cc
|
50508
|
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/kernels/internal/optimized/integer_ops/fully_connected.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/optimized/sparse_ops/fully_connected.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"
#include "tensorflow/lite/kernels/internal/reference/fully_connected.h"
#include "tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/reference/sparse_ops/fully_connected.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/tensor_utils.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace fully_connected {
namespace {
// Returns true if `sparsity` uses the only weight layout the sparse kernels
// support: dimension 0 dense, dimension 1 compressed sparse row (CSR).
bool SupportedSparsityFormat(const TfLiteSparsity& sparsity) {
  // Return the predicate directly instead of the redundant
  // `if (...) return true; return false;` form.
  return sparsity.dim_metadata[0].format == kTfLiteDimDense &&
         sparsity.dim_metadata[1].format == kTfLiteDimSparseCSR;
}
// Expected number of dimension-metadata entries for the two supported sparse
// weight layouts (random sparse vs. block sparse).
// NOTE(review): neither constant is referenced in the visible portion of this
// file — confirm they are used further down before removing.
static const int kDimMetadataSizeRandomSparse = 2;
static const int kDimMetadataSizeBlockSparse = 3;
// Configures `ledger` as a persistent uint8 arena tensor sized to hold the
// sparse filter's bookkeeping data: one per-row block count plus one entry
// per non-zero block index (see PopulateLedgerData).
TfLiteStatus CreateLedgerTensor(const TfLiteSparsity* sparsity,
                                TfLiteContext* context, TfLiteTensor* ledger) {
  TF_LITE_ENSURE(context, sparsity != nullptr);
  ledger->type = kTfLiteUInt8;
  ledger->allocation_type = kTfLiteArenaRwPersistent;
  const auto& csr_dim = sparsity->dim_metadata[1];
  TfLiteIntArray* ledger_shape = TfLiteIntArrayCreate(1);
  ledger_shape->data[0] =
      csr_dim.array_indices->size + csr_dim.array_segments->size - 1;
  return context->ResizeTensor(context, ledger, ledger_shape);
}
// Flattens the CSR metadata of `sparsity` into `ledger_data`: for each row,
// first the number of non-zero blocks (one byte), then that row's block
// column indices (one byte each). Fails with kTfLiteError if any count or
// index does not fit in uint8.
TfLiteStatus PopulateLedgerData(const TfLiteSparsity* sparsity,
                                TfLiteContext* context, uint8_t* ledger_data) {
  TF_LITE_ENSURE(context, sparsity != nullptr);
  const auto* segments = sparsity->dim_metadata[1].array_segments;
  const auto* indices = sparsity->dim_metadata[1].array_indices;
  int out = 0;
  const int num_rows = segments->size - 1;
  for (int row = 0; row < num_rows; ++row) {
    const int begin = segments->data[row];
    const int end = segments->data[row + 1];
    const int nnz_blocks = end - begin;
    if (nnz_blocks > UINT8_MAX) {
      return kTfLiteError;
    }
    // Per-row block count, then the indices of that row's non-zero blocks.
    ledger_data[out++] = static_cast<uint8_t>(nnz_blocks);
    for (int k = begin; k < end; ++k) {
      if (indices->data[k] > UINT8_MAX) {
        return kTfLiteError;
      }
      ledger_data[out++] = static_cast<uint8_t>(indices->data[k]);
    }
  }
  return kTfLiteOk;
}
} // namespace
// This file has four implementations of FullyConnected
// Selects which FullyConnected implementation a registration dispatches to.
enum KernelType {
  kReference,         // Portable reference implementation.
  kGenericOptimized,  // Optimized (neon-free / generic) implementation.
  kLegacyPie,  // Legacy path used by the PIE team and related clients.
};
// Per-node state computed in Prepare() and consumed in Eval().
struct OpData {
  // The scaling factor from input to output (aka the 'real multiplier') can
  // be represented as a fixed point multiplier plus a left shift.
  int32_t output_multiplier;
  int output_shift;
  // The range of the fused activation layer. For example for kNone and
  // uint8_t these would be 0 and 255.
  int32_t output_activation_min;
  int32_t output_activation_max;
  // The index of the temporary tensor where the quantized inputs are cached.
  int scratch_tensor_index;
  // Hybrid path: set in Prepare() so row sums are (re)computed on first Eval.
  bool compute_row_sums = false;
  // Only used for sparse hybrid fully connected kernels.
  bool ledger_initialized;
};
// Fixed tensor positions in the node's input/output lists.
constexpr int kInputTensor = 0;
constexpr int kWeightsTensor = 1;
constexpr int kBiasTensor = 2;  // Optional: present only when inputs->size == 3.
constexpr int kOutputTensor = 0;
// Second output used only by the shuffled-weights format as scratch space.
constexpr int kShuffledInputWorkspaceTensor = 1;
// Validates that input/filter/bias/output dtypes form one of the supported
// combinations (float, full quantized, hybrid, or shuffled-quantized).
// Returns early with an error status via the TF_LITE_ENSURE_* macros on any
// mismatch; `bias` may be null (it is optional for this op).
inline TfLiteStatus CheckTypes(TfLiteContext* context,
                               const TfLiteTensor* input,
                               const TfLiteTensor* filter,
                               const TfLiteTensor* bias, TfLiteTensor* output,
                               TfLiteFullyConnectedParams* params) {
  const bool is_quantized =
      ((filter->type == kTfLiteUInt8) || (filter->type == kTfLiteInt8));
  const bool is_hybrid = is_quantized && (input->type == kTfLiteFloat32);
  const bool is_shuffled =
      is_quantized && (params->weights_format ==
                       kTfLiteFullyConnectedWeightsFormatShuffled4x16Int8);

  // optional bias tensor.
  const bool is_optional_bias_float = !bias || (bias->type == kTfLiteFloat32);
  const bool is_optional_bias_int =
      !bias || (bias->type == kTfLiteInt32) || (bias->type == kTfLiteInt64);

  if (is_quantized) {
    if (is_shuffled) {
      // Shuffled 4x16 int8 weights: uint8 in, int16 out, integer bias.
      TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteUInt8);
      TF_LITE_ENSURE_TYPES_EQ(context, filter->type, kTfLiteUInt8);
      TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteInt16);
      TF_LITE_ENSURE_EQ(context, is_optional_bias_int, true);
    } else if (is_hybrid) {
      // Hybrid: float activations with quantized weights and float bias.
      TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32);
      TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteFloat32);
      TF_LITE_ENSURE_EQ(context, is_optional_bias_float, true);
    } else {
      // Fully quantized: uint8/int8/int16 activations, integer bias.
      TF_LITE_ENSURE(context, input->type == kTfLiteUInt8 ||
                                  input->type == kTfLiteInt8 ||
                                  input->type == kTfLiteInt16);
      TF_LITE_ENSURE(context, output->type == kTfLiteUInt8 ||
                                  output->type == kTfLiteInt8 ||
                                  output->type == kTfLiteInt16);
      TF_LITE_ENSURE_EQ(context, is_optional_bias_int, true);
    }
  } else {
    // Only float32 is supported currently
    TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32);
    TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteFloat32);
    TF_LITE_ENSURE_TYPES_EQ(context, filter->type, kTfLiteFloat32);
    TF_LITE_ENSURE_EQ(context, is_optional_bias_float, true);
  }

  return kTfLiteOk;
}
// Allocates the per-node OpData that carries state from Prepare() to Eval().
// This is a builtin op, so the flexbuffer contents in `buffer` are ignored.
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
  auto* data = new OpData();
  // Reserve six tensor slots up front for the hybrid path's temporaries
  // (quantized input, scaling factors, accumulator, offsets, row sums, and
  // the optional sparse ledger); Prepare() wires them up when needed.
  context->AddTensors(context, /*tensors_to_add=*/6,
                      &data->scratch_tensor_index);
  return data;
}
// Releases the OpData allocated in Init().
void Free(TfLiteContext* context, void* buffer) {
  // static_cast is the correct (and sufficient) cast to recover a concrete
  // pointer from void*; reinterpret_cast is unnecessary here.
  delete static_cast<OpData*>(buffer);
}
// Shared Prepare() logic for all kernel variants: validates tensor
// counts/types/shapes, precomputes the quantized output multiplier and
// activation range, allocates the temporary tensors needed by the hybrid
// (float input, quantized weight) path, and resizes the output tensor.
// Statement order matters: temporaries must be registered in node->temporaries
// at the exact indices Eval() expects (0..4, plus 5 for the sparse ledger).
TfLiteStatus PrepareImpl(TfLiteContext* context, TfLiteNode* node) {
  auto* params =
      reinterpret_cast<TfLiteFullyConnectedParams*>(node->builtin_data);
  OpData* data = reinterpret_cast<OpData*>(node->user_data);
  // Check we have all the inputs and outputs we need.
  TF_LITE_ENSURE(context, node->inputs->size == 2 || node->inputs->size == 3);
  // Shuffled formats need a workspace to store the shuffled input activations.
  const int expected_outputs_count =
      params->weights_format == kTfLiteFullyConnectedWeightsFormatDefault ? 1
                                                                          : 2;
  TF_LITE_ENSURE_EQ(context, node->outputs->size, expected_outputs_count);
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
  const TfLiteTensor* filter;
  TF_LITE_ENSURE_OK(context,
                    GetInputSafe(context, node, kWeightsTensor, &filter));
  const TfLiteTensor* bias =
      (node->inputs->size == 3)
          ? GetOptionalInputTensor(context, node, kBiasTensor)
          : nullptr;
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context,
                    GetOutputSafe(context, node, kOutputTensor, &output));

  // Check proper datatype match among all Input Tensors
  TF_LITE_ENSURE_STATUS(
      CheckTypes(context, input, filter, bias, output, params));

  // Check all the parameters of tensor match within themselves and match the
  // input configuration.
  int input_size = 1;
  for (int i = 0; i < input->dims->size; i++) {
    input_size *= input->dims->data[i];
  }

  TF_LITE_ENSURE_EQ(context, NumDimensions(filter), 2);
  // Guard against division by zero below.
  TF_LITE_ENSURE(context, filter->dims->data[1] != 0);
  // The input is (implicitly) flattened to [batch_size, filter_cols].
  const int batch_size = input_size / filter->dims->data[1];
  const int num_units = filter->dims->data[0];

  if (bias) {
    TF_LITE_ENSURE_EQ(context, NumElements(bias), SizeOfDimension(filter, 0));
  }

  // Note that quantized inference requires that all tensors have their
  // parameters set. This is usually done during quantized training.
  if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8 ||
      input->type == kTfLiteInt16) {
    double real_multiplier = 0.0;
    TF_LITE_ENSURE_STATUS(GetQuantizedConvolutionMultipler(
        context, input, filter, bias, output, &real_multiplier));
    int exponent;
    QuantizeMultiplier(real_multiplier, &data->output_multiplier, &exponent);
    data->output_shift = exponent;
    TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized(
        context, params->activation, output, &data->output_activation_min,
        &data->output_activation_max));
  }

  // 16-bit quantization requires symmetric (zero-point 0) input and output.
  if (input->type == kTfLiteInt16 && output->type == kTfLiteInt16) {
    TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0);
    TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0);
  }

  // If we have to perform on-the-fly quantization (with quantized weights and
  // float inputs) first we need to quantize the inputs. Allocate a temporary
  // buffer to store the intermediate quantized values.
  // Additionally, we allocate a temporary buffer to store the accumulated
  // quantized values prior to multiplication by the scaling factor.
  const bool is_hybrid =
      (input->type == kTfLiteFloat32 &&
       (filter->type == kTfLiteUInt8 || filter->type == kTfLiteInt8));
  const bool is_sparse = filter->sparsity != nullptr;
  if (is_hybrid) {
    TfLiteIntArray* node->temporaries is replaced wholesale below.
    TfLiteIntArrayFree(node->temporaries);
    data->compute_row_sums = true;
    if (is_sparse) {
      node->temporaries = TfLiteIntArrayCreate(6);
    } else {
      node->temporaries = TfLiteIntArrayCreate(5);
    }
    node->temporaries->data[0] = data->scratch_tensor_index;

    // Temporary 0: input quantized to the filter's dtype.
    TfLiteTensor* input_quantized;
    TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, /*index=*/0,
                                                &input_quantized));
    input_quantized->type = filter->type;
    input_quantized->allocation_type = kTfLiteArenaRw;

    TfLiteIntArray* input_quantized_size = TfLiteIntArrayCopy(input->dims);
    TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_quantized,
                                                     input_quantized_size));

    // Temporary 1: per-batch scaling factors.
    node->temporaries->data[1] = data->scratch_tensor_index + 1;
    TfLiteTensor* scaling_factors;
    TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, /*index=*/1,
                                                &scaling_factors));
    scaling_factors->type = kTfLiteFloat32;
    scaling_factors->allocation_type = kTfLiteArenaRw;
    int scaling_dims[1] = {batch_size};
    if (!TfLiteIntArrayEqualsArray(scaling_factors->dims, 1, scaling_dims)) {
      TfLiteIntArray* scaling_factors_size = TfLiteIntArrayCreate(1);
      scaling_factors_size->data[0] = batch_size;
      TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scaling_factors,
                                                       scaling_factors_size));
    }

    // Temporary 2: int32 accumulator scratch [num_units, batch_size].
    node->temporaries->data[2] = data->scratch_tensor_index + 2;
    TfLiteTensor* accum_scratch;
    TF_LITE_ENSURE_OK(
        context, GetTemporarySafe(context, node, /*index=*/2, &accum_scratch));
    accum_scratch->type = kTfLiteInt32;
    accum_scratch->allocation_type = kTfLiteArenaRw;
    int accum_scratch_dims[2] = {num_units, batch_size};
    if (!TfLiteIntArrayEqualsArray(accum_scratch->dims, 2,
                                   accum_scratch_dims)) {
      TfLiteIntArray* accum_size = TfLiteIntArrayCreate(2);
      accum_size->data[0] = num_units;
      accum_size->data[1] = batch_size;
      TF_LITE_ENSURE_OK(
          context, context->ResizeTensor(context, accum_scratch, accum_size));
    }

    // Temporary 3: per-batch input zero-point offsets.
    node->temporaries->data[3] = data->scratch_tensor_index + 3;
    TfLiteTensor* input_offsets;
    TF_LITE_ENSURE_OK(
        context, GetTemporarySafe(context, node, /*index=*/3, &input_offsets));
    input_offsets->type = kTfLiteInt32;
    input_offsets->allocation_type = kTfLiteArenaRw;
    if (!TfLiteIntArrayEqualsArray(input_offsets->dims, 1, scaling_dims)) {
      TfLiteIntArray* input_offsets_size = TfLiteIntArrayCreate(1);
      input_offsets_size->data[0] = batch_size;
      TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_offsets,
                                                       input_offsets_size));
    }
    // Temporary 4: cached per-row filter sums (persistent across Evals).
    node->temporaries->data[4] = data->scratch_tensor_index + 4;
    TfLiteTensor* row_sums;
    TF_LITE_ENSURE_OK(context,
                      GetTemporarySafe(context, node, /*index=*/4, &row_sums));
    row_sums->type = kTfLiteInt32;
    row_sums->allocation_type = kTfLiteArenaRwPersistent;
    int row_sums_dims[1] = {num_units};
    if (!TfLiteIntArrayEqualsArray(row_sums->dims, 1, row_sums_dims)) {
      TfLiteIntArray* row_sums_size = TfLiteIntArrayCreate(1);
      row_sums_size->data[0] = row_sums_dims[0];
      TF_LITE_ENSURE_OK(
          context, context->ResizeTensor(context, row_sums, row_sums_size));
    }

    if (is_sparse) {
      // Temporary 5: uint8 ledger describing the sparse filter layout;
      // populated lazily on first Eval (see ledger_initialized).
      data->ledger_initialized = false;
      node->temporaries->data[5] = data->scratch_tensor_index + 5;
      TfLiteTensor* filter_ledger =
          &context->tensors[node->temporaries->data[5]];
      auto status =
          CreateLedgerTensor(filter->sparsity, context, filter_ledger);
      if (status != kTfLiteOk) return status;
    }
  }

  // Resize output.
  TfLiteIntArray* output_size_array = nullptr;
  if (params->keep_num_dims) {
    // When number of dimensions are kept the filter operates along the last
    // dimensions. In other words, for an input tensor with shape
    // [batch_size, ..., n_inputs] and a filter of shape [n_inputs, n_units]
    // this Op produces an output of shape [batch_size, ..., n_units].
    TF_LITE_ENSURE_EQ(context, input->dims->data[input->dims->size - 1],
                      SizeOfDimension(filter, 1));
    output_size_array = TfLiteIntArrayCopy(input->dims);
    output_size_array->data[output_size_array->size - 1] = num_units;
  } else {
    // Otherwise, the output is (potentially flattened to) a 2-D matrix.
    output_size_array = TfLiteIntArrayCreate(2);
    output_size_array->data[0] = batch_size;
    output_size_array->data[1] = num_units;
  }
  TF_LITE_ENSURE_OK(context,
                    context->ResizeTensor(context, output, output_size_array));
  return kTfLiteOk;
}
// Prepare: validates the fused activation for the selected kernel flavor,
// then delegates all tensor/temporary setup to PrepareImpl.
template <KernelType kernel_type>
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  auto* params =
      reinterpret_cast<TfLiteFullyConnectedParams*>(node->builtin_data);
  const TfLiteTensor* weights;
  TF_LITE_ENSURE_OK(context,
                    GetInputSafe(context, node, kWeightsTensor, &weights));
  const TfLiteTensor* in;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &in));
  // Hybrid = quantized weights combined with a float input tensor.
  const bool quantized_weights =
      weights->type == kTfLiteUInt8 || weights->type == kTfLiteInt8;
  const bool hybrid_path = quantized_weights && in->type == kTfLiteFloat32;
  // The legacy Pie kernel and the hybrid path support every kind of fused
  // activation; all other paths only support clipping activations.
  if (kernel_type != kLegacyPie && !hybrid_path) {
    const auto act = params->activation;
    TF_LITE_ENSURE(context, act == kTfLiteActNone || act == kTfLiteActRelu ||
                                act == kTfLiteActReluN1To1 ||
                                act == kTfLiteActRelu6);
  }
  return PrepareImpl(context, node);
}
// Legacy "Pie" float kernel: output = activation(filter * input + bias).
// The input is treated as [batches, accum_depth] where accum_depth is the
// filter's inner dimension.
TfLiteStatus EvalPie(TfLiteContext* context, TfLiteNode* node,
                     TfLiteFullyConnectedParams* params, OpData* data,
                     const TfLiteTensor* input, const TfLiteTensor* filter,
                     const TfLiteTensor* bias, TfLiteTensor* output) {
  // Flattened element count across all input dimensions.
  int flat_input_size = 1;
  for (int d = 0; d < input->dims->size; ++d) {
    flat_input_size *= input->dims->data[d];
  }

  const int accum_depth = filter->dims->data[1];
  const int batches = flat_input_size / filter->dims->data[1];
  const int output_depth = filter->dims->data[0];

  float* output_ptr = GetTensorData<float>(output);

  // Seed the output with the bias when present, otherwise with zeros.
  if (bias != nullptr) {
    tensor_utils::VectorBatchVectorAssign(GetTensorData<float>(bias),
                                          output_depth, batches, output_ptr);
  } else {
    std::fill_n(output_ptr, batches * output_depth, 0.0f);
  }

  // output += filter * input (one matrix-vector product per batch).
  tensor_utils::MatrixBatchVectorMultiplyAccumulate(
      GetTensorData<float>(filter), output_depth, accum_depth,
      GetTensorData<float>(input), batches, output_ptr);

  // Apply the fused activation in place.
  tensor_utils::ApplyActivationToVector(output_ptr, batches * output_depth,
                                        params->activation, output_ptr);

  return kTfLiteOk;
}
// Hybrid evaluation for dense (non-sparse) weights: the float input is
// quantized per batch to int8, multiplied against the int8 filter with
// per-batch scaling factors, and the result is produced back in float.
// The scratch tensors (input_quantized, scaling_factors, accum_scratch,
// row_sums, input_offsets) were allocated in PrepareImpl.
TfLiteStatus EvalHybridDense(
    TfLiteContext* context, TfLiteNode* node,
    TfLiteFullyConnectedParams* params, OpData* data, const TfLiteTensor* input,
    const TfLiteTensor* filter, const TfLiteTensor* bias,
    TfLiteTensor* input_quantized, TfLiteTensor* scaling_factors,
    TfLiteTensor* accum_scratch, TfLiteTensor* row_sums,
    TfLiteTensor* input_offsets, TfLiteTensor* output) {
  // Flattened element count across all input dimensions.
  int total_input_size = 1;
  for (int i = 0; i < input->dims->size; i++) {
    total_input_size *= input->dims->data[i];
  }

  const int input_size = filter->dims->data[1];   // accumulation depth
  const int batch_size = total_input_size / filter->dims->data[1];
  const int num_units = filter->dims->data[0];    // output depth

  // Output = bias if bias tensor exists.
  if (bias) {
    tensor_utils::VectorBatchVectorAssign(GetTensorData<float>(bias), num_units,
                                          batch_size,
                                          GetTensorData<float>(output));
  } else {
    std::fill_n(GetTensorData<float>(output), batch_size * num_units, 0.0f);
  }

  // Save matrix multiplication computation for all zero input: the product
  // contributes nothing, so only the activation needs to run.
  if (tensor_utils::IsZeroVector(GetTensorData<float>(input),
                                 total_input_size)) {
    tensor_utils::ApplyActivationToVector(
        GetTensorData<float>(output), batch_size * num_units,
        params->activation, GetTensorData<float>(output));
    return kTfLiteOk;
  }

  // Quantize input from float to uint8 + quantization params (scaling factor).
  // input_offsets / row_sums are only used for asymmetric quantization.
  float* scaling_factors_ptr = GetTensorData<float>(scaling_factors);
  int32_t* input_offset_ptr = nullptr;
  int32_t* row_sums_ptr = nullptr;
  if (params->asymmetric_quantize_inputs) {
    input_offset_ptr = GetTensorData<int32_t>(input_offsets);
    row_sums_ptr = GetTensorData<int32_t>(row_sums);
  }
  int8_t* quant_data = GetTensorData<int8_t>(input_quantized);
  const int8_t* filter_data = GetTensorData<int8_t>(filter);
  const float* input_ptr = GetTensorData<float>(input);
  tensor_utils::BatchQuantizeFloats(
      input_ptr, batch_size, input_size, quant_data, scaling_factors_ptr,
      input_offset_ptr, params->asymmetric_quantize_inputs);
  for (int b = 0; b < batch_size; ++b) {
    // Incorporate scaling of the filter.
    scaling_factors_ptr[b] *= filter->params.scale;
  }

  // Compute output += weight * quantized_input
  int32_t* scratch = GetTensorData<int32_t>(accum_scratch);
  tensor_utils::MatrixBatchVectorMultiplyAccumulate(
      filter_data, num_units, input_size, quant_data, scaling_factors_ptr,
      batch_size, GetTensorData<float>(output), /*per_channel_scale=*/nullptr,
      input_offset_ptr, scratch, row_sums_ptr, &data->compute_row_sums,
      CpuBackendContext::GetFromContext(context));

  // Apply activation function to floats.
  tensor_utils::ApplyActivationToVector(
      GetTensorData<float>(output), batch_size * num_units, params->activation,
      GetTensorData<float>(output));
  return kTfLiteOk;
}
// Evaluates one batch slice [thread_start, thread_end) of the sparse hybrid
// fully-connected kernel. Called from SparseHybridFullyConnectedTask, one
// invocation per worker thread; each thread writes a disjoint output slice,
// so no synchronization is needed.
void EvalSparseHybridImpl(TfLiteContext* context, TfLiteNode* node,
                          TfLiteFullyConnectedParams* params, OpData* data,
                          const TfLiteTensor* input, const TfLiteTensor* filter,
                          const TfLiteTensor* bias, int thread_start,
                          int thread_end, TfLiteTensor* input_quantized,
                          TfLiteTensor* scaling_factors,
                          TfLiteTensor* accum_scratch, TfLiteTensor* row_sums,
                          TfLiteTensor* input_offsets, TfLiteTensor* output) {
  ruy::profiler::ScopeLabel label("FullyConnected");
  ruy::profiler::ScopeLabel inner_label("Sparse Hybrid Kernel");
  const auto& input_shape = GetTensorShape(input);
  const auto& output_shape = GetTensorShape(output);
  const auto& filter_shape = GetTensorShape(filter);
  const int input_dims_count = input_shape.DimensionsCount();
  const int output_dims_count = output_shape.DimensionsCount();
  const int filter_dims_count = filter_shape.DimensionsCount();
  // This thread's share of the batch dimension.
  const int batch_size = thread_end - thread_start;
  const int input_depth = MatchingDim(filter_shape, filter_dims_count - 1,
                                      input_shape, input_dims_count - 1);
  const int output_depth = MatchingDim(filter_shape, filter_dims_count - 2,
                                       output_shape, output_dims_count - 1);
  // Per-thread views: offset the global buffers to this thread's batch range.
  const int per_thread_input_size = batch_size * input_depth;

  const float* per_thread_input =
      GetTensorData<float>(input) + thread_start * input_depth;
  float* per_thread_output =
      GetTensorData<float>(output) + thread_start * output_depth;

  // Output = bias if bias tensor exists.
  if (bias) {
    tensor_utils::VectorBatchVectorAssign(GetTensorData<float>(bias),
                                          output_depth, batch_size,
                                          per_thread_output);
  } else {
    std::fill_n(per_thread_output, batch_size * output_depth, 0.0f);
  }

  // Save matrix multiplication computation for all zero input.
  if (tensor_utils::IsZeroVector(per_thread_input, per_thread_input_size)) {
    tensor_utils::ApplyActivationToVector(
        per_thread_output, batch_size * output_depth, params->activation,
        per_thread_output);
    return;
  }

  // Quantize input from float to uint8 + quantization params (scaling factor).
  float* scaling_factors_ptr =
      GetTensorData<float>(scaling_factors) + thread_start;
  int32_t* input_offset_ptr = nullptr;
  int32_t* row_sums_ptr = nullptr;
  if (params->asymmetric_quantize_inputs) {
    input_offset_ptr = GetTensorData<int32_t>(input_offsets) + thread_start;
    // row_sums is shared across threads (indexed by output row, not batch).
    row_sums_ptr = GetTensorData<int32_t>(row_sums);
  }
  int8_t* quant_data =
      GetTensorData<int8_t>(input_quantized) + thread_start * input_depth;
  tensor_utils::BatchQuantizeFloats(per_thread_input, batch_size, input_depth,
                                    quant_data, scaling_factors_ptr,
                                    input_offset_ptr,
                                    params->asymmetric_quantize_inputs);
  for (int b = 0; b < batch_size; ++b) {
    // Incorporate scaling of the filter.
    scaling_factors_ptr[b] *= filter->params.scale;
  }

  if (params->asymmetric_quantize_inputs) {
    // Subtract the zero-point correction term scale*zp*sum(filter_row) from
    // every output element before accumulating the sparse matmul.
    float* per_thread_output_ptr = per_thread_output;
    for (int b = 0; b < batch_size; ++b) {
      const float scaled_zp = scaling_factors_ptr[b] * input_offset_ptr[b];
      for (int row = 0; row < output_depth; ++row) {
        *per_thread_output_ptr++ -= scaled_zp * row_sums_ptr[row];
      }
    }
  }

  // Compute output += weight * quantized_input
  // The ledger (temporary index 5) encodes the block-sparse structure; it was
  // populated in EvalHybrid before the tasks were launched.
  TfLiteTensor* filter_ledger = &context->tensors[node->temporaries->data[5]];
  tensor_utils::SparseMatrixBatchVectorMultiplyAccumulate(
      GetTensorData<int8_t>(filter), GetTensorData<uint8_t>(filter_ledger),
      output_depth, input_depth, quant_data, scaling_factors_ptr, batch_size,
      per_thread_output);

  // Apply activation function to floats.
  tensor_utils::ApplyActivationToVector(per_thread_output,
                                        batch_size * output_depth,
                                        params->activation, per_thread_output);
}
// cpu_backend_threadpool::Task that runs EvalSparseHybridImpl over the batch
// slice [thread_start, thread_end). All pointers are borrowed and must stay
// valid until the threadpool finishes executing the tasks.
struct SparseHybridFullyConnectedTask : cpu_backend_threadpool::Task {
  SparseHybridFullyConnectedTask(
      TfLiteContext* context, TfLiteNode* node,
      TfLiteFullyConnectedParams* params, OpData* data,
      const TfLiteTensor* input, const TfLiteTensor* filter,
      const TfLiteTensor* bias, const int thread_start, const int thread_end,
      TfLiteTensor* input_quantized, TfLiteTensor* scaling_factors,
      TfLiteTensor* accum_scratch, TfLiteTensor* row_sums,
      TfLiteTensor* input_offsets, TfLiteTensor* output)
      : context(context),
        node(node),
        params(params),
        data(data),
        input(input),
        filter(filter),
        bias(bias),
        thread_start(thread_start),
        thread_end(thread_end),
        input_quantized(input_quantized),
        scaling_factors(scaling_factors),
        accum_scratch(accum_scratch),
        row_sums(row_sums),
        input_offsets(input_offsets),
        output(output) {}

  // Invoked on a worker thread by cpu_backend_threadpool::Execute.
  void Run() override {
    EvalSparseHybridImpl(context, node, params, data, input, filter, bias,
                         thread_start, thread_end, input_quantized,
                         scaling_factors, accum_scratch, row_sums,
                         input_offsets, output);
  }

 private:
  TfLiteContext* context;
  TfLiteNode* node;
  TfLiteFullyConnectedParams* params;
  OpData* data;
  const TfLiteTensor* input;
  const TfLiteTensor* filter;
  const TfLiteTensor* bias;
  const int thread_start;  // first batch index (inclusive) for this task
  const int thread_end;    // last batch index (exclusive) for this task
  TfLiteTensor* input_quantized;
  TfLiteTensor* scaling_factors;
  TfLiteTensor* accum_scratch;
  TfLiteTensor* row_sums;
  TfLiteTensor* input_offsets;
  TfLiteTensor* output;
};
// Hybrid (float input, quantized weights) entry point. Dense weights go
// through the single-threaded EvalHybridDense; sparse weights are evaluated
// in parallel, sliced along the batch dimension.
TfLiteStatus EvalHybrid(TfLiteContext* context, TfLiteNode* node,
                        TfLiteFullyConnectedParams* params, OpData* data,
                        const TfLiteTensor* input, const TfLiteTensor* filter,
                        const TfLiteTensor* bias, TfLiteTensor* input_quantized,
                        TfLiteTensor* scaling_factors,
                        TfLiteTensor* accum_scratch, TfLiteTensor* row_sums,
                        TfLiteTensor* input_offsets, TfLiteTensor* output) {
  const auto& output_shape = GetTensorShape(output);
  CpuBackendContext* cpu_backend_context =
      CpuBackendContext::GetFromContext(context);
  const bool is_dense = filter->sparsity == nullptr;
  if (is_dense) {
    return EvalHybridDense(context, node, params, data, input, filter, bias,
                           input_quantized, scaling_factors, accum_scratch,
                           row_sums, input_offsets, output);
  }

  // Sparse path: lazily build the ledger (temporary index 5) that encodes the
  // filter's sparse structure; it only needs to be populated once.
  TfLiteTensor* filter_ledger = &context->tensors[node->temporaries->data[5]];
  if (!data->ledger_initialized) {
    PopulateLedgerData(filter->sparsity, context,
                       GetTensorData<uint8_t>(filter_ledger));
    data->ledger_initialized = true;
  }

  // The multi-threaded kernel slices the workload along the batch dimension. If
  // there's not enough batches of data, the number of threads used is equal to
  // the batch size.
  // TODO(b/173442777): If needed, we can improve this later with slicing along
  // the row dimension of the weight.
  const int max_threads = cpu_backend_context->max_num_threads();
  const int batches =
      FlatSizeSkipDim(output_shape, output_shape.DimensionsCount() - 1);
  const int thread_count = std::max(1, std::min(batches, max_threads));
  if (params->asymmetric_quantize_inputs && data->compute_row_sums) {
    // Precompute row sums.
    // Walks the ledger: each row starts with its non-zero block count,
    // followed by one entry per block; every block holds kBlockSize weights.
    static const int kBlockSize = 16;
    const uint8_t* ledger_ptr = GetTensorData<uint8_t>(filter_ledger);
    const int8_t* row_ptr = GetTensorData<int8_t>(filter);
    const int output_depth = filter->dims->data[0];
    int32_t* row_sums_ptr = GetTensorData<int32_t>(row_sums);
    for (int row = 0; row < output_depth; ++row) {
      int32_t row_sum = 0;
      int num_nonzero_blocks = *ledger_ptr++;
      for (int i = 0; i < num_nonzero_blocks; ++i, ++ledger_ptr) {
        for (int c = 0; c < kBlockSize; c++) {
          row_sum += (*row_ptr++);
        }
      }
      row_sums_ptr[row] = row_sum;
    }
    data->compute_row_sums = false;
  }
  std::vector<SparseHybridFullyConnectedTask> tasks;
  tasks.reserve(thread_count);
  int thread_start = 0;
  for (int i = 0; i < thread_count; ++i) {
    // This makes sure the workload is relatively balanced when batches is not
    // a multiple of thread_count. The first mod(batches, thread_count) tasks
    // need to process one more batch than the rest.
    int thread_end = thread_start + batches / thread_count;
    if (i < batches % thread_count) thread_end++;

    tasks.emplace_back(context, node, params, data, input, filter, bias,
                       thread_start, thread_end, input_quantized,
                       scaling_factors, accum_scratch, row_sums, input_offsets,
                       output);
    thread_start = thread_end;
  }
  cpu_backend_threadpool::Execute(tasks.size(), tasks.data(),
                                  cpu_backend_context);
  return kTfLiteOk;
}
namespace {
// Runs the fully quantized int8 fully-connected kernel, choosing the
// reference or optimized implementation at compile time. The requantization
// parameters (multiplier/shift/activation bounds) were computed in Prepare
// and stored in OpData.
template <KernelType kernel_type>
void FullyConnectedInt8(const OpData* data, const TfLiteTensor* input,
                        const TfLiteTensor* filter, const TfLiteTensor* bias,
                        TfLiteTensor* output,
                        CpuBackendContext* cpu_backend_context) {
  FullyConnectedParams op_params;
  // Offsets are negated zero points, per the quantized kernel convention.
  op_params.input_offset = -input->params.zero_point;
  op_params.weights_offset = -filter->params.zero_point;
  op_params.output_offset = output->params.zero_point;
  op_params.output_multiplier = data->output_multiplier;
  op_params.output_shift = data->output_shift;
  op_params.quantized_activation_min = data->output_activation_min;
  op_params.quantized_activation_max = data->output_activation_max;
  // Constant tensors allow the GEMM backend to cache packed matrices.
  op_params.lhs_cacheable = IsConstantTensor(filter);
  op_params.rhs_cacheable = IsConstantTensor(input);
  if (kernel_type == kReference) {
    reference_integer_ops::FullyConnected(
        op_params, GetTensorShape(input), GetTensorData<int8_t>(input),
        GetTensorShape(filter), GetTensorData<int8_t>(filter),
        GetTensorShape(bias), GetTensorData<int32_t>(bias),
        GetTensorShape(output), GetTensorData<int8_t>(output));
  } else {
    optimized_integer_ops::FullyConnected(
        op_params, GetTensorShape(input), GetTensorData<int8_t>(input),
        GetTensorShape(filter), GetTensorData<int8_t>(filter),
        GetTensorShape(bias), GetTensorData<int32_t>(bias),
        GetTensorShape(output), GetTensorData<int8_t>(output),
        cpu_backend_context);
  }
}
}  // namespace
namespace {
// Runs the 16x8 quantized kernel (int16 activations, int8 weights, int64
// bias). Only a reference implementation is available on this path.
template <KernelType kernel_type>
void FullyConnectedInt16(const OpData* data, const TfLiteTensor* input,
                         const TfLiteTensor* filter, const TfLiteTensor* bias,
                         TfLiteTensor* output) {
  FullyConnectedParams op_params;
  // Weights offset is the negated zero point; int16 activations are assumed
  // symmetric here, so no input/output offsets are set.
  op_params.weights_offset = -filter->params.zero_point;
  op_params.output_multiplier = data->output_multiplier;
  op_params.output_shift = data->output_shift;
  op_params.quantized_activation_min = data->output_activation_min;
  op_params.quantized_activation_max = data->output_activation_max;
  reference_integer_ops::FullyConnected(
      op_params, GetTensorShape(input), GetTensorData<int16_t>(input),
      GetTensorShape(filter), GetTensorData<int8_t>(filter),
      GetTensorShape(bias), GetTensorData<int64_t>(bias),
      GetTensorShape(output), GetTensorData<int16_t>(output));
}
}  // namespace
// Quantized evaluation entry point. A float input combined with quantized
// weights is routed to the hybrid path; otherwise dispatch on the output
// type (uint8 / int8 / int16) to the matching quantized kernel.
template <KernelType kernel_type>
TfLiteStatus EvalQuantized(TfLiteContext* context, TfLiteNode* node,
                           TfLiteFullyConnectedParams* params, OpData* data,
                           const TfLiteTensor* input,
                           const TfLiteTensor* filter, const TfLiteTensor* bias,
                           TfLiteTensor* output) {
  // Offsets are negated zero points, per the quantized kernel convention.
  int32_t input_offset = -input->params.zero_point;
  int32_t filter_offset = -filter->params.zero_point;
  int32_t output_offset = output->params.zero_point;
  // Only the Pie path supports quantized models and float inputs/outputs.
  if (input->type == kTfLiteFloat32) {
    // Hybrid path: fetch the scratch temporaries allocated in PrepareImpl
    // (indices 0-4) and delegate to EvalHybrid.
    TfLiteTensor* input_quantized;
    TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, /*index=*/0,
                                                &input_quantized));
    TfLiteTensor* scaling_factors;
    TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, /*index=*/1,
                                                &scaling_factors));
    TfLiteTensor* accum_scratch;
    TF_LITE_ENSURE_OK(
        context, GetTemporarySafe(context, node, /*index=*/2, &accum_scratch));
    TfLiteTensor* input_offsets;
    TF_LITE_ENSURE_OK(
        context, GetTemporarySafe(context, node, /*index=*/3, &input_offsets));
    TfLiteTensor* row_sums;
    TF_LITE_ENSURE_OK(context,
                      GetTemporarySafe(context, node, /*index=*/4, &row_sums));
    return EvalHybrid(context, node, params, data, input, filter, bias,
                      input_quantized, scaling_factors, accum_scratch, row_sums,
                      input_offsets, output);
  } else {
    FullyConnectedParams op_params;
    op_params.input_offset = input_offset;
    op_params.weights_offset = filter_offset;
    op_params.output_offset = output_offset;
    op_params.output_multiplier = data->output_multiplier;
    op_params.output_shift = data->output_shift;
    op_params.quantized_activation_min = data->output_activation_min;
    op_params.quantized_activation_max = data->output_activation_max;
    // Constant tensors allow the GEMM backend to cache packed matrices.
    op_params.lhs_cacheable = IsConstantTensor(filter);
    op_params.rhs_cacheable = IsConstantTensor(input);
    switch (output->type) {
      case kTfLiteUInt8:
        if (kernel_type == kReference) {
          reference_ops::FullyConnected(
              op_params, GetTensorShape(input), GetTensorData<uint8_t>(input),
              GetTensorShape(filter), GetTensorData<uint8_t>(filter),
              GetTensorShape(bias), GetTensorData<int32_t>(bias),
              GetTensorShape(output), GetTensorData<uint8_t>(output));
        } else {
          optimized_ops::FullyConnected(
              op_params, GetTensorShape(input), GetTensorData<uint8_t>(input),
              GetTensorShape(filter), GetTensorData<uint8_t>(filter),
              GetTensorShape(bias), GetTensorData<int32_t>(bias),
              GetTensorShape(output), GetTensorData<uint8_t>(output),
              CpuBackendContext::GetFromContext(context));
        }
        break;
      case kTfLiteInt8:
        FullyConnectedInt8<kernel_type>(
            data, input, filter, bias, output,
            CpuBackendContext::GetFromContext(context));
        break;
      case kTfLiteInt16:
        if (input->type == kTfLiteInt16) {
          // To avoid 32bit accum overflow, it enables RUY only
          // when zero_point is 0.
          bool has_non_zero_point = input->params.zero_point ||
                                    filter->params.zero_point ||
                                    output->params.zero_point;
          if (kernel_type == kReference || has_non_zero_point ||
              (bias && bias->type == kTfLiteInt64)) {
            FullyConnectedInt16<kernel_type>(data, input, filter, bias, output);
          } else {
            // Fast 16x8 path: symmetric quantization with int32 bias.
            optimized_integer_ops::FullyConnected(
                op_params, GetTensorShape(input), GetTensorData<int16_t>(input),
                GetTensorShape(filter), GetTensorData<int8_t>(filter),
                GetTensorShape(bias), GetTensorData<int32_t>(bias),
                GetTensorShape(output), GetTensorData<int16_t>(output),
                CpuBackendContext::GetFromContext(context));
          }
        } else if (kernel_type == kReference) {
          // uint8 input producing int16 output.
          reference_ops::FullyConnected(
              op_params, GetTensorShape(input), GetTensorData<uint8_t>(input),
              GetTensorShape(filter), GetTensorData<uint8_t>(filter),
              GetTensorShape(bias), GetTensorData<int32_t>(bias),
              GetTensorShape(output), GetTensorData<int16_t>(output));
        } else {
          optimized_ops::FullyConnected(
              op_params, GetTensorShape(input), GetTensorData<uint8_t>(input),
              GetTensorShape(filter), GetTensorData<uint8_t>(filter),
              GetTensorShape(bias), GetTensorData<int32_t>(bias),
              GetTensorShape(output), GetTensorData<int16_t>(output),
              CpuBackendContext::GetFromContext(context));
        }
        break;
      default:
        context->ReportError(context,
                             "Quantized FullyConnected expects output data "
                             "type uint8, int8 or int16");
        return kTfLiteError;
    }
  }

  return kTfLiteOk;
}
// Evaluates fully-connected with the Shuffled4x16Int8 weights format: the
// filter was pre-shuffled offline into 4x16 blocks, and the uint8 input is
// staged in `shuffled_input_workspace` before producing the int16 output.
//
// Note: the previously-defined TF_LITE_SHUFFLED_FULLY_CONNECTED macro was
// dead code — it was #define'd and #undef'd without ever being expanded
// (both call sites are written out inline below) — so it has been removed.
template <KernelType kernel_type>
TfLiteStatus EvalShuffledQuantized(TfLiteContext* context, TfLiteNode* node,
                                   TfLiteFullyConnectedParams* params,
                                   OpData* data, const TfLiteTensor* input,
                                   const TfLiteTensor* filter,
                                   const TfLiteTensor* bias,
                                   TfLiteTensor* output,
                                   TfLiteTensor* shuffled_input_workspace) {
  // TODO(b/110697972) decide more consistently if / how / where we want
  // to perform this kind of runtime data type checks.
  if (shuffled_input_workspace->type != kTfLiteUInt8) {
    context->ReportError(context, "Unexpected data type");
    return kTfLiteError;
  }

  // Requantization parameters computed in Prepare.
  FullyConnectedParams op_params;
  op_params.output_multiplier = data->output_multiplier;
  op_params.output_shift = data->output_shift;
  op_params.quantized_activation_min = data->output_activation_min;
  op_params.quantized_activation_max = data->output_activation_max;
  // Constant tensors allow the GEMM backend to cache packed matrices.
  op_params.lhs_cacheable = IsConstantTensor(filter);
  op_params.rhs_cacheable = IsConstantTensor(input);
  if (kernel_type == kReference) {
    reference_ops::ShuffledFullyConnected(
        op_params, GetTensorShape(input), GetTensorData<uint8_t>(input),
        GetTensorShape(filter), GetTensorData<uint8_t>(filter),
        GetTensorShape(bias), GetTensorData<int32_t>(bias),
        GetTensorShape(output), GetTensorData<int16_t>(output),
        GetTensorData<uint8_t>(shuffled_input_workspace));
  } else {
    optimized_ops::ShuffledFullyConnected(
        op_params, GetTensorShape(input), GetTensorData<uint8_t>(input),
        GetTensorShape(filter), GetTensorData<uint8_t>(filter),
        GetTensorShape(bias), GetTensorData<int32_t>(bias),
        GetTensorShape(output), GetTensorData<int16_t>(output),
        GetTensorData<uint8_t>(shuffled_input_workspace),
        CpuBackendContext::GetFromContext(context));
  }

  return kTfLiteOk;
}
// Verifies that sparsity values are valid given input/weight/output.
// Returns false when the sparse metadata would cause out-of-bounds reads or
// writes during the sparse matmul; callers treat that as a kernel error.
bool VerifySparsity(const RuntimeShape& weights_shape,
                    const RuntimeShape& input_shape,
                    const RuntimeShape& output_shape,
                    const TfLiteSparsity* sparsity) {
  const int weights_dims_count = weights_shape.DimensionsCount();
  const int output_dims_count = output_shape.DimensionsCount();
  // Dense size of the weights' first (output) dimension, taken from the
  // sparsity metadata.
  const int w0_size = sparsity->dim_metadata[0].dense_size;
  const int accum_depth = weights_shape.Dims(weights_dims_count - 1);
  const int output_elements = output_shape.FlatSize();
  const int input_elements = input_shape.FlatSize();
  const int batches = FlatSizeSkipDim(output_shape, output_dims_count - 1);
  const int output_depth = MatchingDim(weights_shape, weights_dims_count - 2,
                                       output_shape, output_dims_count - 1);
  // Largest offsets the kernel may touch, derived from the last batch.
  const int max_batch_index = batches - 1;
  const int max_output = max_batch_index * output_depth + w0_size;
  const int max_batch_depth = accum_depth * max_batch_index;

  // Verify output size is enough.
  if (output_elements < max_output) return false;

  // Verify index from sparse in input is valid.
  // dim_metadata[1].array_indices holds the column indices of non-zero
  // entries; each must stay within the input row for every batch.
  for (int i = 0; i < sparsity->dim_metadata[1].array_indices->size; ++i) {
    if (input_elements <=
        max_batch_depth + sparsity->dim_metadata[1].array_indices->data[i])
      return false;
  }
  return true;
}
// Float evaluation entry point. Dispatches between the reference kernel, the
// legacy Pie kernel, and the optimized kernel; the optimized path further
// distinguishes dense, random-sparse and 1x4-block-sparse weights.
template <KernelType kernel_type>
TfLiteStatus EvalFloat(TfLiteContext* context, TfLiteNode* node,
                       TfLiteFullyConnectedParams* params, OpData* data,
                       const TfLiteTensor* input, const TfLiteTensor* filter,
                       const TfLiteTensor* bias, TfLiteTensor* output) {
  float output_activation_min, output_activation_max;
  CalculateActivationRange(params->activation, &output_activation_min,
                           &output_activation_max);
  if (kernel_type == kReference) {
    FullyConnectedParams op_params;
    op_params.float_activation_min = output_activation_min;
    op_params.float_activation_max = output_activation_max;
    if (filter->sparsity != nullptr) {
      const auto& sparsity = *filter->sparsity;
      reference_ops::FullyConnectedSparseWeight(
          sparsity, op_params, GetTensorShape(input),
          GetTensorData<float>(input), GetTensorShape(filter),
          GetTensorData<float>(filter), GetTensorShape(bias),
          GetTensorData<float>(bias), GetTensorShape(output),
          GetTensorData<float>(output));
    } else {
      reference_ops::FullyConnected(
          op_params, GetTensorShape(input), GetTensorData<float>(input),
          GetTensorShape(filter), GetTensorData<float>(filter),
          GetTensorShape(bias), GetTensorData<float>(bias),
          GetTensorShape(output), GetTensorData<float>(output));
    }
  } else if (kernel_type == kLegacyPie) {
    return EvalPie(context, node, params, data, input, filter, bias, output);
  } else {
    // Optimized kernels.
    FullyConnectedParams op_params;
    op_params.float_activation_min = output_activation_min;
    op_params.float_activation_max = output_activation_max;
    if (filter->sparsity != nullptr) {
      const auto& sparsity = *filter->sparsity;
      if (!SupportedSparsityFormat(sparsity)) {
        TF_LITE_KERNEL_LOG(context,
                           "Unsupported sparse fully-connected weight format.");
        return kTfLiteError;
      }
      const auto& input_shape = GetTensorShape(input);
      const auto& filter_shape = GetTensorShape(filter);
      const auto& output_shape = GetTensorShape(output);
      const auto& bias_shape = GetTensorShape(bias);
      // Reject metadata that would index out of bounds (see VerifySparsity).
      if (!VerifySparsity(filter_shape, input_shape, output_shape, &sparsity)) {
        TF_LITE_KERNEL_LOG(context, "Invalid sparse fully-connected format.");
        return kTfLiteError;
      }

      if (sparsity.dim_metadata_size == kDimMetadataSizeRandomSparse) {
        // Random sparse.
        optimized_ops::FullyConnectedSparseWeight(
            sparsity, op_params,                         // Disable formatting
            input_shape, GetTensorData<float>(input),    // Disable formatting
            filter_shape, GetTensorData<float>(filter),  // Disable formatting
            bias_shape, GetTensorData<float>(bias),      // Disable formatting
            output_shape, GetTensorData<float>(output));
      } else if (sparsity.dim_metadata_size == kDimMetadataSizeBlockSparse &&
                 sparsity.dim_metadata[2].dense_size == 4) {
        // Block sparse with block size of 1x4.
        optimized_ops::FullyConnectedSparseWeight1x4(
            sparsity, op_params,                         // Disable formatting
            input_shape, GetTensorData<float>(input),    // Disable formatting
            filter_shape, GetTensorData<float>(filter),  // Disable formatting
            bias_shape, GetTensorData<float>(bias),      // Disable formatting
            output_shape, GetTensorData<float>(output),
            CpuBackendContext::GetFromContext(context));
      } else {
        TF_LITE_KERNEL_LOG(context,
                           "Unsupported sparse fully-connected weight format.");
        return kTfLiteError;
      }
    } else {
      // Dense optimized path.
      op_params.lhs_cacheable = IsConstantTensor(filter);
      op_params.rhs_cacheable = IsConstantTensor(input);
      optimized_ops::FullyConnected(
          op_params, GetTensorShape(input), GetTensorData<float>(input),
          GetTensorShape(filter), GetTensorData<float>(filter),
          GetTensorShape(bias), GetTensorData<float>(bias),
          GetTensorShape(output), GetTensorData<float>(output),
          CpuBackendContext::GetFromContext(context));
    }
  }
  return kTfLiteOk;
}
// Top-level Eval: fetches the node's tensors and dispatches on the filter
// (weights) type to the float, shuffled-quantized or quantized paths.
template <KernelType kernel_type>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  auto* params =
      reinterpret_cast<TfLiteFullyConnectedParams*>(node->builtin_data);
  OpData* data = reinterpret_cast<OpData*>(node->user_data);

  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
  const TfLiteTensor* filter;
  TF_LITE_ENSURE_OK(context,
                    GetInputSafe(context, node, kWeightsTensor, &filter));
  // Bias is optional: present only when the node carries three inputs.
  const TfLiteTensor* bias =
      (node->inputs->size == 3)
          ? GetOptionalInputTensor(context, node, kBiasTensor)
          : nullptr;
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context,
                    GetOutputSafe(context, node, kOutputTensor, &output));
  // Do nothing if expected output is empty.
  if (NumElements(output) == 0) {
    return kTfLiteOk;
  }

  switch (filter->type) {
    case kTfLiteFloat32:
      return EvalFloat<kernel_type>(context, node, params, data, input, filter,
                                    bias, output);
    case kTfLiteUInt8:
      if (params->weights_format ==
          kTfLiteFullyConnectedWeightsFormatShuffled4x16Int8) {
        // Shuffled weights additionally require the workspace output tensor.
        TfLiteTensor* shuffled_input_workspace;
        TF_LITE_ENSURE_OK(
            context, GetOutputSafe(context, node, kShuffledInputWorkspaceTensor,
                                   &shuffled_input_workspace));
        return EvalShuffledQuantized<kernel_type>(context, node, params, data,
                                                  input, filter, bias, output,
                                                  shuffled_input_workspace);
      } else if (params->weights_format ==
                 kTfLiteFullyConnectedWeightsFormatDefault) {
        return EvalQuantized<kernel_type>(context, node, params, data, input,
                                          filter, bias, output);
      } else {
        context->ReportError(context,
                             "Unhandled fully-connected weights format");
        return kTfLiteError;
      }
    case kTfLiteInt8:
      // int8 weights only support the default (unshuffled) format.
      if (params->weights_format == kTfLiteFullyConnectedWeightsFormatDefault) {
        return EvalQuantized<kernel_type>(context, node, params, data, input,
                                          filter, bias, output);
      } else {
        context->ReportError(context,
                             "Unhandled fully-connected weights format");
        return kTfLiteError;
      }
    default:
      context->ReportError(context,
                           "Filter data type %s currently not supported.",
                           TfLiteTypeGetName(filter->type));
      return kTfLiteError;
  }
  return kTfLiteOk;
}
} // namespace fully_connected
// Registration for the portable reference kernel.
TfLiteRegistration* Register_FULLY_CONNECTED_REF() {
  static TfLiteRegistration r = {
      fully_connected::Init, fully_connected::Free,
      fully_connected::Prepare<fully_connected::kReference>,
      fully_connected::Eval<fully_connected::kReference>};
  return &r;
}
// Registration for the generic optimized kernel (the default on CPU).
TfLiteRegistration* Register_FULLY_CONNECTED_GENERIC_OPT() {
  static TfLiteRegistration r = {
      fully_connected::Init, fully_connected::Free,
      fully_connected::Prepare<fully_connected::kGenericOptimized>,
      fully_connected::Eval<fully_connected::kGenericOptimized>};
  return &r;
}
// Legacy path for PIE clients.
// Kept for backward compatibility; new code should use the generic kernels.
TfLiteRegistration* Register_FULLY_CONNECTED_PIE() {
  static TfLiteRegistration r = {
      fully_connected::Init, fully_connected::Free,
      fully_connected::Prepare<fully_connected::kLegacyPie>,
      fully_connected::Eval<fully_connected::kLegacyPie>};
  return &r;
}
// Default registration: the generic optimized kernel.
TfLiteRegistration* Register_FULLY_CONNECTED() {
  return Register_FULLY_CONNECTED_GENERIC_OPT();
}
} // namespace builtin
} // namespace ops
} // namespace tflite
|
apache-2.0
|
oddacious/oddacious.github.io
|
_posts/2016-08-07-by-process-of-elimination.md
|
3095
|
---
layout: post
title: "In which there are other ways for Trump to exit"
subtitle: "The Upshot avoids discussing awkward scenarios"
article-type: "short"
categories: ["curmudgeon"]
description: "Even if your opinion is that this is highly unlikely (or a distasteful thing to bet on), that doesn't mean that others
feel the same way."
tags: ["Upshot", "Trump"]
date: 2016-08-07 12:00:00
author: "RDJ"
header-img: "img/ends.jpg"
header-img-title: "."
header-img-link: "https://www.flickr.com/photos/carmendcluj/13677246453/"
header-img-author: "carmen_d_cluj"
header-img-author-link: "https://www.flickr.com/photos/carmendcluj/"
header-img-license: "CC BY-NC"
header-img-license-link: "https://creativecommons.org/licenses/by-nc/2.0/"
---
[Source Text]: http://www.nytimes.com/2016/08/04/upshot/prediction-markets-suggest-a-chance-that-donald-trump-will-drop-out.html
[Betfair]: https://www.betfair.com/exchange/politics/marketactivity?id=1.116006120&selectionId=1171580
[Trump odds]: https://www.betfair.com/exchange/politics/marketactivity?id=1.107373419&selectionId=5242353
In [Could Donald Trump Drop Out? Some Bettors Seem to Think So][Source Text], Justin Wolfers extrapolates the
probability that Donald Trump will quit the US presidential election, based on betting markets. The basic facts he uses are
that Betfair has [25.8% odds][Betfair] of the Republican Party winning the 2016 election, but a 24.1% chance of Trump [doing
so][Trump odds].
Wolfers asks "what are the odds he quits?"
However, Donald Trump quitting is not the only way that another Republican wins the presidency.
Particularly, any of these are possibilities:
- Trump could be forced out by the party
- Trump could die of natural causes (he is 70, after all)
- Trump could be assassinated
The election is three months away, and a lot can happen.
I don't think any of these are likely things, nor do I want Trump to die (and I'm certainly not encouraging anyone to
attempt to make that happen). But we're talking about splitting up a small probability to begin with.
Remember, these aren't necessarily correct probabilities. Instead they represent beliefs (which should correlate with
true probabilities). As long as some small percentage of people think there is a decent chance of Trump being
assassinated, then a non-negligible part of that 1.7% will be due to assassination beliefs, not quitting beliefs.
If the Republican chances of victory vary by the circumstances in which Trump is not their candidate, then all of the
math further down in the Upshot article needs further adjustment. Especially if the alternative Republican is more likely
to win if Trump is assassinated than if Trump quits, then a bigger chunk of that 1.7% belongs to that case than simply
the relative likelihood of assassination vis-à-vis the other circumstances.
Even if your opinion is that this is highly unlikely (or a distasteful thing to bet on), that doesn't mean that others
feel the same way. It should have been acknowledged in the article.
How likely do YOU think it is that Trump quits?
|
apache-2.0
|
WiAnnBot/annbot
|
app/src/main/java/com/wistronits/wh/annbot/business/biz/modules/NurseModule.java
|
547
|
package com.wistronits.wh.annbot.business.biz.modules;
import com.wistronits.wh.annbot.business.mvp.contract.MainContract;
import com.wistronits.wh.annbot.business.mvp.contract.NurseListContract;
import dagger.Module;
import dagger.Provides;
/**
* Created by WH1705002 on 2017/9/1.
*/
@Module
public class NurseModule {

    // The view is supplied once at construction and never changes, so it is
    // declared final to make the module immutable.
    private final NurseListContract.View mView;

    /**
     * Creates a Dagger module bound to the given nurse-list view.
     *
     * @param view the {@link NurseListContract.View} instance to expose to the
     *             object graph; assumed non-null by callers — TODO confirm
     */
    public NurseModule(NurseListContract.View view) {
        mView = view;
    }

    /**
     * Provides the view held by this module to the dependency graph.
     *
     * @return the {@link NurseListContract.View} passed at construction time
     */
    @Provides
    NurseListContract.View provideNurseContract() {
        return mView;
    }
}
|
apache-2.0
|
googleads/google-ads-perl
|
lib/Google/Ads/GoogleAds/V10/Services/OfflineUserDataJobService/RunOfflineUserDataJobRequest.pm
|
1118
|
# Copyright 2020, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
package Google::Ads::GoogleAds::V10::Services::OfflineUserDataJobService::RunOfflineUserDataJobRequest;

use strict;
use warnings;
use base qw(Google::Ads::GoogleAds::BaseEntity);

use Google::Ads::GoogleAds::Utils::GoogleAdsHelper;

# Constructs a RunOfflineUserDataJobRequest from a hash-ref of arguments.
# Only resourceName and validateOnly are recognized; any field the caller
# left unset is stripped so the serialized JSON payload stays minimal.
sub new {
  my ($class, $args) = @_;

  my $self = {
    resourceName => $args->{resourceName},
    validateOnly => $args->{validateOnly},
  };

  # Drop unassigned fields for a more concise JSON payload.
  remove_unassigned_fields($self, $args);

  return bless $self, $class;
}

1;
|
apache-2.0
|
resthub/resthub.github.io
|
apidocs/spring/1.1/org/resthub/tapestry5/validation/services/package-summary.html
|
5602
|
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="fr">
<head>
<!-- Generated by javadoc (version 1.7.0_01) on Sat Mar 24 00:58:28 CET 2012 -->
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<title>org.resthub.tapestry5.validation.services (RESThub 1.1.3 API)</title>
<meta name="date" content="2012-03-24">
<link rel="stylesheet" type="text/css" href="../../../../../stylesheet.css" title="Style">
</head>
<body>
<script type="text/javascript"><!--
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="org.resthub.tapestry5.validation.services (RESThub 1.1.3 API)";
}
//-->
</script>
<noscript>
<div>JavaScript is disabled on your browser.</div>
</noscript>
<!-- ========= START OF TOP NAVBAR ======= -->
<div class="topNav"><a name="navbar_top">
<!-- -->
</a><a href="#skip-navbar_top" title="Skip navigation links"></a><a name="navbar_top_firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../overview-summary.html">Overview</a></li>
<li class="navBarCell1Rev">Package</li>
<li>Class</li>
<li><a href="package-use.html">Use</a></li>
<li><a href="package-tree.html">Tree</a></li>
<li><a href="../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li><a href="../../../../../org/resthub/tapestry5/services/package-summary.html">Prev Package</a></li>
<li><a href="../../../../../org/resthub/test/dbunit/package-summary.html">Next Package</a></li>
</ul>
<ul class="navList">
<li><a href="../../../../../index.html?org/resthub/tapestry5/validation/services/package-summary.html" target="_top">Frames</a></li>
<li><a href="package-summary.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_top">
<li><a href="../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_top");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip-navbar_top">
<!-- -->
</a></div>
<!-- ========= END OF TOP NAVBAR ========= -->
<div class="header">
<h1 title="Package" class="title">Package org.resthub.tapestry5.validation.services</h1>
<div class="docSummary">
<div class="block">Provides resthub services and configuration for Tapestry5 bean validation integration</div>
</div>
<p>See: <a href="#package_description">Description</a></p>
</div>
<div class="contentContainer">
<ul class="blockList">
<li class="blockList">
<table class="packageSummary" border="0" cellpadding="3" cellspacing="0" summary="Class Summary table, listing classes, and an explanation">
<caption><span>Class Summary</span><span class="tabEnd"> </span></caption>
<tr>
<th class="colFirst" scope="col">Class</th>
<th class="colLast" scope="col">Description</th>
</tr>
<tbody>
<tr class="altColor">
<td class="colFirst"><a href="../../../../../org/resthub/tapestry5/validation/services/ResthubValidationModule.html" title="class in org.resthub.tapestry5.validation.services">ResthubValidationModule</a></td>
<td class="colLast">
<div class="block">Provide configuration for Tapestry5 bean integration module.</div>
</td>
</tr>
</tbody>
</table>
</li>
</ul>
<a name="package_description">
<!-- -->
</a>
<h2 title="Package org.resthub.tapestry5.validation.services Description">Package org.resthub.tapestry5.validation.services Description</h2>
<div class="block">Provides resthub services and configuration for Tapestry5 bean validation integration</div>
</div>
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<div class="bottomNav"><a name="navbar_bottom">
<!-- -->
</a><a href="#skip-navbar_bottom" title="Skip navigation links"></a><a name="navbar_bottom_firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../overview-summary.html">Overview</a></li>
<li class="navBarCell1Rev">Package</li>
<li>Class</li>
<li><a href="package-use.html">Use</a></li>
<li><a href="package-tree.html">Tree</a></li>
<li><a href="../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li><a href="../../../../../org/resthub/tapestry5/services/package-summary.html">Prev Package</a></li>
<li><a href="../../../../../org/resthub/test/dbunit/package-summary.html">Next Package</a></li>
</ul>
<ul class="navList">
<li><a href="../../../../../index.html?org/resthub/tapestry5/validation/services/package-summary.html" target="_top">Frames</a></li>
<li><a href="package-summary.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_bottom">
<li><a href="../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_bottom");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip-navbar_bottom">
<!-- -->
</a></div>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
<p class="legalCopy"><small>Copyright © 2009-2012. All Rights Reserved.</small></p>
</body>
</html>
|
apache-2.0
|
msgpack/msgpack-cli
|
samples/Samples/Sample02_HandlingDynamicObject.cs
|
4711
|
#region -- License Terms --
//
// MessagePack for CLI
//
// Copyright (C) 2010-2012 FUJIWARA, Yusuke
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#endregion -- License Terms --
using System;
using System.Diagnostics;
using System.IO;
using MsgPack;
using MsgPack.Serialization;
using NUnit.Framework; // For running checking
namespace Samples
{
/// <summary>
/// A sample code for explore MessagePackObject.
/// </summary>
[TestFixture]
public class HandlingDynamicObjectSample
{
    /// <summary>
    ///	Demonstrates round-tripping an object through MessagePack and
    ///	inspecting the intermediate <c>MessagePackObject</c> representation.
    /// </summary>
    [Test]
    public void SerializeThenDeserialize()
    {
        // Build a throw-away entry purely for demonstration.
        var photo =
            new PhotoEntry // See Sample01_BasicUsage.cs
            {
                Id = 123,
                Title = "My photo",
                Date = DateTime.Now,
                Image = new byte[] { 1, 2, 3, 4 },
                Comment = "This is test object to be serialize/deserialize using MsgPack."
            };
        photo.Tags.Add( "Sample" );
        photo.Tags.Add( "Excellent" );

        var buffer = new MemoryStream();

        // Serialize complex objects as maps rather than arrays. See Sample03 for details.
        var context = new SerializationContext();
        context.SerializationMethod = SerializationMethod.Map;
        // To apply the map method to every serialization using the default context:
        // SerializationContext.Default.SerializationMethod = SerializationMethod.Map;

        // 1. Obtain a serializer instance for PhotoEntry.
        var serializer = MessagePackSerializer.Get<PhotoEntry>( context );

        // 2. Write the object into the stream.
        serializer.Pack( buffer, photo );

        // Rewind so the same stream can be read back for deserialization.
        buffer.Position = 0;

        // 3. Unpack into a MessagePackObject to examine the raw representation.
        var raw = Unpacking.UnpackObject( buffer );
        // 3-b. The MPO tree can also be walked via an Unpacker:
        // var unpacker = Unpacker.Create( buffer );
        // 3-c. Or fetched straight from the serializer API:
        // var raw = MessagePackSerializer.UnpackMessagePackObject( buffer );

        // Inspect what kind of value came back.
        Debug.WriteLine( "Is array? {0}", raw.IsArray ); // IsList is alias
        Debug.WriteLine( "Is map? {0}", raw.IsMap ); // IsDictionary is alias
        Debug.WriteLine( "Type: {0}", raw.UnderlyingType );

        // Read individual serialized fields.
        // Note: had the object been serialized as an array, indexes would be used instead.
        var fields = raw.AsDictionary();
        Debug.WriteLine( "Id : {0}({1})", fields[ "Id" ], fields[ "Id" ].UnderlyingType );
        // String is encoded as utf-8 by default.
        Debug.WriteLine( "Title : {0}({1})", fields[ "Title" ], fields[ "Title" ].UnderlyingType );
        // Non-primitive is serialized as complex type or encoded primitive type.
        // DateTimeOffset is encoded as array[2]{ticks,offset}
        Debug.WriteLine( "Date : {0}({1})", fields[ "Date" ], fields[ "Date" ].UnderlyingType );
        // byte[] is byte[], as you know.
        Debug.WriteLine( "Image : {0}({1})", fields[ "Image" ], fields[ "Image" ].UnderlyingType );

        // 4. The serializer can also produce a MessagePackObject directly.
        var mpo = serializer.ToMessagePackObject( photo );
        var mpoFields = mpo.AsDictionary();
        Debug.WriteLine( "---- ToMessagePackObject ----" );
        Debug.WriteLine( "Id : {0}({1})", mpoFields[ "Id" ], mpoFields[ "Id" ].UnderlyingType );
        Debug.WriteLine( "Title : {0}({1})", mpoFields[ "Title" ], mpoFields[ "Title" ].UnderlyingType );
        Debug.WriteLine( "Date : {0}({1})", mpoFields[ "Date" ], mpoFields[ "Date" ].UnderlyingType );
        Debug.WriteLine( "Image : {0}({1})", mpoFields[ "Image" ], mpoFields[ "Image" ].UnderlyingType );

        // 5. ...and rehydrate the original object type from a MessagePackObject.
        var restored = serializer.FromMessagePackObject( mpo );
        Debug.WriteLine( "---- FromMessagePackObject ----" );
        Debug.WriteLine( "Id : {0}", restored.Id );
        Debug.WriteLine( "Title : {0}", restored.Title );
        Debug.WriteLine( "Date : {0}", restored.Date );
        Debug.WriteLine( "Image : {0}", Convert.ToBase64String( restored.Image ) );
    }
}
}
|
apache-2.0
|
wildfly-swarm/wildfly-swarm-javadocs
|
2018.3.3/apidocs/org/wildfly/swarm/config/jgroups/class-use/StackConsumer.html
|
11213
|
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="en">
<head>
<!-- Generated by javadoc (1.8.0_151) on Thu Mar 08 14:17:43 MST 2018 -->
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<title>Uses of Interface org.wildfly.swarm.config.jgroups.StackConsumer (BOM: * : All 2018.3.3 API)</title>
<meta name="date" content="2018-03-08">
<link rel="stylesheet" type="text/css" href="../../../../../../stylesheet.css" title="Style">
<script type="text/javascript" src="../../../../../../script.js"></script>
</head>
<body>
<script type="text/javascript"><!--
try {
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="Uses of Interface org.wildfly.swarm.config.jgroups.StackConsumer (BOM: * : All 2018.3.3 API)";
}
}
catch(err) {
}
//-->
</script>
<noscript>
<div>JavaScript is disabled on your browser.</div>
</noscript>
<!-- ========= START OF TOP NAVBAR ======= -->
<div class="topNav"><a name="navbar.top">
<!-- -->
</a>
<div class="skipNav"><a href="#skip.navbar.top" title="Skip navigation links">Skip navigation links</a></div>
<a name="navbar.top.firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../../overview-summary.html">Overview</a></li>
<li><a href="../package-summary.html">Package</a></li>
<li><a href="../../../../../../org/wildfly/swarm/config/jgroups/StackConsumer.html" title="interface in org.wildfly.swarm.config.jgroups">Class</a></li>
<li class="navBarCell1Rev">Use</li>
<li><a href="../../../../../../overview-tree.html">Tree</a></li>
<li><a href="../../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../../help-doc.html">Help</a></li>
</ul>
<div class="aboutLanguage">WildFly Swarm API, 2018.3.3</div>
</div>
<div class="subNav">
<ul class="navList">
<li>Prev</li>
<li>Next</li>
</ul>
<ul class="navList">
<li><a href="../../../../../../index.html?org/wildfly/swarm/config/jgroups/class-use/StackConsumer.html" target="_top">Frames</a></li>
<li><a href="StackConsumer.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_top">
<li><a href="../../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_top");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip.navbar.top">
<!-- -->
</a></div>
<!-- ========= END OF TOP NAVBAR ========= -->
<div class="header">
<h2 title="Uses of Interface org.wildfly.swarm.config.jgroups.StackConsumer" class="title">Uses of Interface<br>org.wildfly.swarm.config.jgroups.StackConsumer</h2>
</div>
<div class="classUseContainer">
<ul class="blockList">
<li class="blockList">
<table class="useSummary" border="0" cellpadding="3" cellspacing="0" summary="Use table, listing packages, and an explanation">
<caption><span>Packages that use <a href="../../../../../../org/wildfly/swarm/config/jgroups/StackConsumer.html" title="interface in org.wildfly.swarm.config.jgroups">StackConsumer</a></span><span class="tabEnd"> </span></caption>
<tr>
<th class="colFirst" scope="col">Package</th>
<th class="colLast" scope="col">Description</th>
</tr>
<tbody>
<tr class="altColor">
<td class="colFirst"><a href="#org.wildfly.swarm.config">org.wildfly.swarm.config</a></td>
<td class="colLast"> </td>
</tr>
<tr class="rowColor">
<td class="colFirst"><a href="#org.wildfly.swarm.config.jgroups">org.wildfly.swarm.config.jgroups</a></td>
<td class="colLast"> </td>
</tr>
</tbody>
</table>
</li>
<li class="blockList">
<ul class="blockList">
<li class="blockList"><a name="org.wildfly.swarm.config">
<!-- -->
</a>
<h3>Uses of <a href="../../../../../../org/wildfly/swarm/config/jgroups/StackConsumer.html" title="interface in org.wildfly.swarm.config.jgroups">StackConsumer</a> in <a href="../../../../../../org/wildfly/swarm/config/package-summary.html">org.wildfly.swarm.config</a></h3>
<table class="useSummary" border="0" cellpadding="3" cellspacing="0" summary="Use table, listing methods, and an explanation">
<caption><span>Methods in <a href="../../../../../../org/wildfly/swarm/config/package-summary.html">org.wildfly.swarm.config</a> with parameters of type <a href="../../../../../../org/wildfly/swarm/config/jgroups/StackConsumer.html" title="interface in org.wildfly.swarm.config.jgroups">StackConsumer</a></span><span class="tabEnd"> </span></caption>
<tr>
<th class="colFirst" scope="col">Modifier and Type</th>
<th class="colLast" scope="col">Method and Description</th>
</tr>
<tbody>
<tr class="altColor">
<td class="colFirst"><code><a href="../../../../../../org/wildfly/swarm/config/JGroups.html" title="type parameter in JGroups">T</a></code></td>
<td class="colLast"><span class="typeNameLabel">JGroups.</span><code><span class="memberNameLink"><a href="../../../../../../org/wildfly/swarm/config/JGroups.html#stack-java.lang.String-org.wildfly.swarm.config.jgroups.StackConsumer-">stack</a></span>(<a href="http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a> childKey,
<a href="../../../../../../org/wildfly/swarm/config/jgroups/StackConsumer.html" title="interface in org.wildfly.swarm.config.jgroups">StackConsumer</a> consumer)</code>
<div class="block">Create and configure a Stack object to the list of subresources</div>
</td>
</tr>
</tbody>
</table>
</li>
<li class="blockList"><a name="org.wildfly.swarm.config.jgroups">
<!-- -->
</a>
<h3>Uses of <a href="../../../../../../org/wildfly/swarm/config/jgroups/StackConsumer.html" title="interface in org.wildfly.swarm.config.jgroups">StackConsumer</a> in <a href="../../../../../../org/wildfly/swarm/config/jgroups/package-summary.html">org.wildfly.swarm.config.jgroups</a></h3>
<table class="useSummary" border="0" cellpadding="3" cellspacing="0" summary="Use table, listing methods, and an explanation">
<caption><span>Methods in <a href="../../../../../../org/wildfly/swarm/config/jgroups/package-summary.html">org.wildfly.swarm.config.jgroups</a> that return <a href="../../../../../../org/wildfly/swarm/config/jgroups/StackConsumer.html" title="interface in org.wildfly.swarm.config.jgroups">StackConsumer</a></span><span class="tabEnd"> </span></caption>
<tr>
<th class="colFirst" scope="col">Modifier and Type</th>
<th class="colLast" scope="col">Method and Description</th>
</tr>
<tbody>
<tr class="altColor">
<td class="colFirst"><code>default <a href="../../../../../../org/wildfly/swarm/config/jgroups/StackConsumer.html" title="interface in org.wildfly.swarm.config.jgroups">StackConsumer</a><<a href="../../../../../../org/wildfly/swarm/config/jgroups/StackConsumer.html" title="type parameter in StackConsumer">T</a>></code></td>
<td class="colLast"><span class="typeNameLabel">StackConsumer.</span><code><span class="memberNameLink"><a href="../../../../../../org/wildfly/swarm/config/jgroups/StackConsumer.html#andThen-org.wildfly.swarm.config.jgroups.StackConsumer-">andThen</a></span>(<a href="../../../../../../org/wildfly/swarm/config/jgroups/StackConsumer.html" title="interface in org.wildfly.swarm.config.jgroups">StackConsumer</a><<a href="../../../../../../org/wildfly/swarm/config/jgroups/StackConsumer.html" title="type parameter in StackConsumer">T</a>> after)</code> </td>
</tr>
</tbody>
</table>
<table class="useSummary" border="0" cellpadding="3" cellspacing="0" summary="Use table, listing methods, and an explanation">
<caption><span>Methods in <a href="../../../../../../org/wildfly/swarm/config/jgroups/package-summary.html">org.wildfly.swarm.config.jgroups</a> with parameters of type <a href="../../../../../../org/wildfly/swarm/config/jgroups/StackConsumer.html" title="interface in org.wildfly.swarm.config.jgroups">StackConsumer</a></span><span class="tabEnd"> </span></caption>
<tr>
<th class="colFirst" scope="col">Modifier and Type</th>
<th class="colLast" scope="col">Method and Description</th>
</tr>
<tbody>
<tr class="altColor">
<td class="colFirst"><code>default <a href="../../../../../../org/wildfly/swarm/config/jgroups/StackConsumer.html" title="interface in org.wildfly.swarm.config.jgroups">StackConsumer</a><<a href="../../../../../../org/wildfly/swarm/config/jgroups/StackConsumer.html" title="type parameter in StackConsumer">T</a>></code></td>
<td class="colLast"><span class="typeNameLabel">StackConsumer.</span><code><span class="memberNameLink"><a href="../../../../../../org/wildfly/swarm/config/jgroups/StackConsumer.html#andThen-org.wildfly.swarm.config.jgroups.StackConsumer-">andThen</a></span>(<a href="../../../../../../org/wildfly/swarm/config/jgroups/StackConsumer.html" title="interface in org.wildfly.swarm.config.jgroups">StackConsumer</a><<a href="../../../../../../org/wildfly/swarm/config/jgroups/StackConsumer.html" title="type parameter in StackConsumer">T</a>> after)</code> </td>
</tr>
</tbody>
</table>
</li>
</ul>
</li>
</ul>
</div>
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<div class="bottomNav"><a name="navbar.bottom">
<!-- -->
</a>
<div class="skipNav"><a href="#skip.navbar.bottom" title="Skip navigation links">Skip navigation links</a></div>
<a name="navbar.bottom.firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../../overview-summary.html">Overview</a></li>
<li><a href="../package-summary.html">Package</a></li>
<li><a href="../../../../../../org/wildfly/swarm/config/jgroups/StackConsumer.html" title="interface in org.wildfly.swarm.config.jgroups">Class</a></li>
<li class="navBarCell1Rev">Use</li>
<li><a href="../../../../../../overview-tree.html">Tree</a></li>
<li><a href="../../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../../help-doc.html">Help</a></li>
</ul>
<div class="aboutLanguage">WildFly Swarm API, 2018.3.3</div>
</div>
<div class="subNav">
<ul class="navList">
<li>Prev</li>
<li>Next</li>
</ul>
<ul class="navList">
<li><a href="../../../../../../index.html?org/wildfly/swarm/config/jgroups/class-use/StackConsumer.html" target="_top">Frames</a></li>
<li><a href="StackConsumer.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_bottom">
<li><a href="../../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_bottom");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip.navbar.bottom">
<!-- -->
</a></div>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
<p class="legalCopy"><small>Copyright © 2018 <a href="http://www.jboss.org">JBoss by Red Hat</a>. All rights reserved.</small></p>
</body>
</html>
|
apache-2.0
|
charles-difazio/beer-cellar
|
README.md
|
111
|
beer-cellar
===========
Python scripts for using beer rating data to determine when cellared beers will peak.
|
apache-2.0
|
nafae/developer
|
modules/adwords_axis/src/main/java/com/google/api/ads/adwords/axis/v201409/cm/ReportDefinitionReportType.java
|
12750
|
/**
* ReportDefinitionReportType.java
*
* This file was auto-generated from WSDL
* by the Apache Axis 1.4 Mar 02, 2009 (07:08:06 PST) WSDL2Java emitter.
*/
package com.google.api.ads.adwords.axis.v201409.cm;
public class ReportDefinitionReportType implements java.io.Serializable {
// Wire-format string backing this enumeration value.
private java.lang.String _value_;
// Registry mapping wire strings to instances; populated by the constructor
// and consulted by fromValue(String). Raw HashMap — generated Axis code.
private static java.util.HashMap _table_ = new java.util.HashMap();
// Constructor
// Stores the wire string and registers this instance in the shared lookup
// table so fromValue(String) can resolve it later.
// NOTE(review): _table_ is a plain HashMap mutated from the constructor;
// presumably safe only because all instances are created during static
// initialization of this class — confirm before constructing elsewhere.
protected ReportDefinitionReportType(java.lang.String value) {
_value_ = value;
_table_.put(_value_,this);
}
public static final java.lang.String _KEYWORDS_PERFORMANCE_REPORT = "KEYWORDS_PERFORMANCE_REPORT";
public static final java.lang.String _AD_PERFORMANCE_REPORT = "AD_PERFORMANCE_REPORT";
public static final java.lang.String _URL_PERFORMANCE_REPORT = "URL_PERFORMANCE_REPORT";
public static final java.lang.String _ADGROUP_PERFORMANCE_REPORT = "ADGROUP_PERFORMANCE_REPORT";
public static final java.lang.String _CAMPAIGN_PERFORMANCE_REPORT = "CAMPAIGN_PERFORMANCE_REPORT";
public static final java.lang.String _ACCOUNT_PERFORMANCE_REPORT = "ACCOUNT_PERFORMANCE_REPORT";
public static final java.lang.String _GEO_PERFORMANCE_REPORT = "GEO_PERFORMANCE_REPORT";
public static final java.lang.String _SEARCH_QUERY_PERFORMANCE_REPORT = "SEARCH_QUERY_PERFORMANCE_REPORT";
public static final java.lang.String _AUTOMATIC_PLACEMENTS_PERFORMANCE_REPORT = "AUTOMATIC_PLACEMENTS_PERFORMANCE_REPORT";
public static final java.lang.String _CAMPAIGN_NEGATIVE_KEYWORDS_PERFORMANCE_REPORT = "CAMPAIGN_NEGATIVE_KEYWORDS_PERFORMANCE_REPORT";
public static final java.lang.String _CAMPAIGN_NEGATIVE_PLACEMENTS_PERFORMANCE_REPORT = "CAMPAIGN_NEGATIVE_PLACEMENTS_PERFORMANCE_REPORT";
public static final java.lang.String _AD_EXTENSIONS_PERFORMANCE_REPORT = "AD_EXTENSIONS_PERFORMANCE_REPORT";
public static final java.lang.String _DESTINATION_URL_REPORT = "DESTINATION_URL_REPORT";
public static final java.lang.String _SHARED_SET_REPORT = "SHARED_SET_REPORT";
public static final java.lang.String _CAMPAIGN_SHARED_SET_REPORT = "CAMPAIGN_SHARED_SET_REPORT";
public static final java.lang.String _SHARED_SET_CRITERIA_REPORT = "SHARED_SET_CRITERIA_REPORT";
public static final java.lang.String _CREATIVE_CONVERSION_REPORT = "CREATIVE_CONVERSION_REPORT";
public static final java.lang.String _CALL_METRICS_CALL_DETAILS_REPORT = "CALL_METRICS_CALL_DETAILS_REPORT";
public static final java.lang.String _KEYWORDLESS_QUERY_REPORT = "KEYWORDLESS_QUERY_REPORT";
public static final java.lang.String _KEYWORDLESS_CATEGORY_REPORT = "KEYWORDLESS_CATEGORY_REPORT";
public static final java.lang.String _CRITERIA_PERFORMANCE_REPORT = "CRITERIA_PERFORMANCE_REPORT";
public static final java.lang.String _CLICK_PERFORMANCE_REPORT = "CLICK_PERFORMANCE_REPORT";
public static final java.lang.String _BUDGET_PERFORMANCE_REPORT = "BUDGET_PERFORMANCE_REPORT";
public static final java.lang.String _BID_GOAL_PERFORMANCE_REPORT = "BID_GOAL_PERFORMANCE_REPORT";
public static final java.lang.String _DISPLAY_KEYWORD_PERFORMANCE_REPORT = "DISPLAY_KEYWORD_PERFORMANCE_REPORT";
public static final java.lang.String _PLACEHOLDER_FEED_ITEM_REPORT = "PLACEHOLDER_FEED_ITEM_REPORT";
public static final java.lang.String _PLACEMENT_PERFORMANCE_REPORT = "PLACEMENT_PERFORMANCE_REPORT";
public static final java.lang.String _CAMPAIGN_NEGATIVE_LOCATIONS_REPORT = "CAMPAIGN_NEGATIVE_LOCATIONS_REPORT";
public static final java.lang.String _GENDER_PERFORMANCE_REPORT = "GENDER_PERFORMANCE_REPORT";
public static final java.lang.String _AGE_RANGE_PERFORMANCE_REPORT = "AGE_RANGE_PERFORMANCE_REPORT";
public static final java.lang.String _CAMPAIGN_LOCATION_TARGET_REPORT = "CAMPAIGN_LOCATION_TARGET_REPORT";
public static final java.lang.String _CAMPAIGN_AD_SCHEDULE_TARGET_REPORT = "CAMPAIGN_AD_SCHEDULE_TARGET_REPORT";
public static final java.lang.String _CAMPAIGN_PLATFORM_TARGET_REPORT = "CAMPAIGN_PLATFORM_TARGET_REPORT";
public static final java.lang.String _PAID_ORGANIC_QUERY_REPORT = "PAID_ORGANIC_QUERY_REPORT";
public static final java.lang.String _AUDIENCE_PERFORMANCE_REPORT = "AUDIENCE_PERFORMANCE_REPORT";
public static final java.lang.String _DISPLAY_TOPICS_PERFORMANCE_REPORT = "DISPLAY_TOPICS_PERFORMANCE_REPORT";
public static final java.lang.String _SHOPPING_PERFORMANCE_REPORT = "SHOPPING_PERFORMANCE_REPORT";
public static final java.lang.String _PRODUCT_PARTITION_REPORT = "PRODUCT_PARTITION_REPORT";
public static final java.lang.String _PLACEHOLDER_REPORT = "PLACEHOLDER_REPORT";
public static final java.lang.String _AD_CUSTOMIZERS_FEED_ITEM_REPORT = "AD_CUSTOMIZERS_FEED_ITEM_REPORT";
public static final java.lang.String _UNKNOWN = "UNKNOWN";
public static final ReportDefinitionReportType KEYWORDS_PERFORMANCE_REPORT = new ReportDefinitionReportType(_KEYWORDS_PERFORMANCE_REPORT);
public static final ReportDefinitionReportType AD_PERFORMANCE_REPORT = new ReportDefinitionReportType(_AD_PERFORMANCE_REPORT);
public static final ReportDefinitionReportType URL_PERFORMANCE_REPORT = new ReportDefinitionReportType(_URL_PERFORMANCE_REPORT);
public static final ReportDefinitionReportType ADGROUP_PERFORMANCE_REPORT = new ReportDefinitionReportType(_ADGROUP_PERFORMANCE_REPORT);
public static final ReportDefinitionReportType CAMPAIGN_PERFORMANCE_REPORT = new ReportDefinitionReportType(_CAMPAIGN_PERFORMANCE_REPORT);
public static final ReportDefinitionReportType ACCOUNT_PERFORMANCE_REPORT = new ReportDefinitionReportType(_ACCOUNT_PERFORMANCE_REPORT);
public static final ReportDefinitionReportType GEO_PERFORMANCE_REPORT = new ReportDefinitionReportType(_GEO_PERFORMANCE_REPORT);
public static final ReportDefinitionReportType SEARCH_QUERY_PERFORMANCE_REPORT = new ReportDefinitionReportType(_SEARCH_QUERY_PERFORMANCE_REPORT);
public static final ReportDefinitionReportType AUTOMATIC_PLACEMENTS_PERFORMANCE_REPORT = new ReportDefinitionReportType(_AUTOMATIC_PLACEMENTS_PERFORMANCE_REPORT);
public static final ReportDefinitionReportType CAMPAIGN_NEGATIVE_KEYWORDS_PERFORMANCE_REPORT = new ReportDefinitionReportType(_CAMPAIGN_NEGATIVE_KEYWORDS_PERFORMANCE_REPORT);
public static final ReportDefinitionReportType CAMPAIGN_NEGATIVE_PLACEMENTS_PERFORMANCE_REPORT = new ReportDefinitionReportType(_CAMPAIGN_NEGATIVE_PLACEMENTS_PERFORMANCE_REPORT);
public static final ReportDefinitionReportType AD_EXTENSIONS_PERFORMANCE_REPORT = new ReportDefinitionReportType(_AD_EXTENSIONS_PERFORMANCE_REPORT);
public static final ReportDefinitionReportType DESTINATION_URL_REPORT = new ReportDefinitionReportType(_DESTINATION_URL_REPORT);
public static final ReportDefinitionReportType SHARED_SET_REPORT = new ReportDefinitionReportType(_SHARED_SET_REPORT);
public static final ReportDefinitionReportType CAMPAIGN_SHARED_SET_REPORT = new ReportDefinitionReportType(_CAMPAIGN_SHARED_SET_REPORT);
public static final ReportDefinitionReportType SHARED_SET_CRITERIA_REPORT = new ReportDefinitionReportType(_SHARED_SET_CRITERIA_REPORT);
public static final ReportDefinitionReportType CREATIVE_CONVERSION_REPORT = new ReportDefinitionReportType(_CREATIVE_CONVERSION_REPORT);
public static final ReportDefinitionReportType CALL_METRICS_CALL_DETAILS_REPORT = new ReportDefinitionReportType(_CALL_METRICS_CALL_DETAILS_REPORT);
public static final ReportDefinitionReportType KEYWORDLESS_QUERY_REPORT = new ReportDefinitionReportType(_KEYWORDLESS_QUERY_REPORT);
public static final ReportDefinitionReportType KEYWORDLESS_CATEGORY_REPORT = new ReportDefinitionReportType(_KEYWORDLESS_CATEGORY_REPORT);
public static final ReportDefinitionReportType CRITERIA_PERFORMANCE_REPORT = new ReportDefinitionReportType(_CRITERIA_PERFORMANCE_REPORT);
public static final ReportDefinitionReportType CLICK_PERFORMANCE_REPORT = new ReportDefinitionReportType(_CLICK_PERFORMANCE_REPORT);
public static final ReportDefinitionReportType BUDGET_PERFORMANCE_REPORT = new ReportDefinitionReportType(_BUDGET_PERFORMANCE_REPORT);
public static final ReportDefinitionReportType BID_GOAL_PERFORMANCE_REPORT = new ReportDefinitionReportType(_BID_GOAL_PERFORMANCE_REPORT);
public static final ReportDefinitionReportType DISPLAY_KEYWORD_PERFORMANCE_REPORT = new ReportDefinitionReportType(_DISPLAY_KEYWORD_PERFORMANCE_REPORT);
public static final ReportDefinitionReportType PLACEHOLDER_FEED_ITEM_REPORT = new ReportDefinitionReportType(_PLACEHOLDER_FEED_ITEM_REPORT);
public static final ReportDefinitionReportType PLACEMENT_PERFORMANCE_REPORT = new ReportDefinitionReportType(_PLACEMENT_PERFORMANCE_REPORT);
public static final ReportDefinitionReportType CAMPAIGN_NEGATIVE_LOCATIONS_REPORT = new ReportDefinitionReportType(_CAMPAIGN_NEGATIVE_LOCATIONS_REPORT);
public static final ReportDefinitionReportType GENDER_PERFORMANCE_REPORT = new ReportDefinitionReportType(_GENDER_PERFORMANCE_REPORT);
public static final ReportDefinitionReportType AGE_RANGE_PERFORMANCE_REPORT = new ReportDefinitionReportType(_AGE_RANGE_PERFORMANCE_REPORT);
public static final ReportDefinitionReportType CAMPAIGN_LOCATION_TARGET_REPORT = new ReportDefinitionReportType(_CAMPAIGN_LOCATION_TARGET_REPORT);
public static final ReportDefinitionReportType CAMPAIGN_AD_SCHEDULE_TARGET_REPORT = new ReportDefinitionReportType(_CAMPAIGN_AD_SCHEDULE_TARGET_REPORT);
public static final ReportDefinitionReportType CAMPAIGN_PLATFORM_TARGET_REPORT = new ReportDefinitionReportType(_CAMPAIGN_PLATFORM_TARGET_REPORT);
public static final ReportDefinitionReportType PAID_ORGANIC_QUERY_REPORT = new ReportDefinitionReportType(_PAID_ORGANIC_QUERY_REPORT);
public static final ReportDefinitionReportType AUDIENCE_PERFORMANCE_REPORT = new ReportDefinitionReportType(_AUDIENCE_PERFORMANCE_REPORT);
public static final ReportDefinitionReportType DISPLAY_TOPICS_PERFORMANCE_REPORT = new ReportDefinitionReportType(_DISPLAY_TOPICS_PERFORMANCE_REPORT);
public static final ReportDefinitionReportType SHOPPING_PERFORMANCE_REPORT = new ReportDefinitionReportType(_SHOPPING_PERFORMANCE_REPORT);
public static final ReportDefinitionReportType PRODUCT_PARTITION_REPORT = new ReportDefinitionReportType(_PRODUCT_PARTITION_REPORT);
public static final ReportDefinitionReportType PLACEHOLDER_REPORT = new ReportDefinitionReportType(_PLACEHOLDER_REPORT);
public static final ReportDefinitionReportType AD_CUSTOMIZERS_FEED_ITEM_REPORT = new ReportDefinitionReportType(_AD_CUSTOMIZERS_FEED_ITEM_REPORT);
public static final ReportDefinitionReportType UNKNOWN = new ReportDefinitionReportType(_UNKNOWN);
public java.lang.String getValue() { return _value_;}
/**
 * Returns the canonical enum instance for the given wire value.
 *
 * @param value the string value as transmitted over the AdWords API
 * @return the interned singleton for {@code value}
 * @throws java.lang.IllegalArgumentException if {@code value} is not a known report type
 */
public static ReportDefinitionReportType fromValue(java.lang.String value)
      throws java.lang.IllegalArgumentException {
    // Look up the singleton instance interned under this wire value.
    ReportDefinitionReportType enumeration = (ReportDefinitionReportType)
        _table_.get(value);
    if (enumeration == null) {
        // Include the offending value so callers can diagnose bad report types
        // (the original threw with no message at all).
        throw new java.lang.IllegalArgumentException(
            "Unknown ReportDefinition.ReportType value: " + value);
    }
    return enumeration;
}
/**
 * Alias for {@link #fromValue(java.lang.String)}; part of the standard
 * Axis-generated enum surface.
 *
 * @throws java.lang.IllegalArgumentException if {@code value} is not a known report type
 */
public static ReportDefinitionReportType fromString(java.lang.String value)
      throws java.lang.IllegalArgumentException {
    return fromValue(value);
}
// Identity equality is correct here because instances are canonical
// singletons interned in _table_; readResolve below preserves that
// invariant across Java deserialization by resolving back to the singleton.
public boolean equals(java.lang.Object obj) {return (obj == this);}
public int hashCode() { return toString().hashCode();}
// The wire value doubles as the display string.
public java.lang.String toString() { return _value_;}
// Resolve deserialized copies back to the canonical singleton instance.
public java.lang.Object readResolve() throws java.io.ObjectStreamException { return fromValue(_value_);}
/**
 * Factory used by the Axis runtime to serialize this enum to XML.
 * The {@code mechType} parameter is unused; Axis requires this exact signature.
 */
public static org.apache.axis.encoding.Serializer getSerializer(
       java.lang.String mechType,
       java.lang.Class _javaType,
       javax.xml.namespace.QName _xmlType) {
    return
      new org.apache.axis.encoding.ser.EnumSerializer(
        _javaType, _xmlType);
}
/**
 * Factory used by the Axis runtime to deserialize this enum from XML.
 * The {@code mechType} parameter is unused; Axis requires this exact signature.
 */
public static org.apache.axis.encoding.Deserializer getDeserializer(
       java.lang.String mechType,
       java.lang.Class _javaType,
       javax.xml.namespace.QName _xmlType) {
    return
      new org.apache.axis.encoding.ser.EnumDeserializer(
        _javaType, _xmlType);
}
// Type metadata: Axis type-mapping descriptor linking this Java class to
// its WSDL schema type in the v201409 AdWords namespace.
private static org.apache.axis.description.TypeDesc typeDesc =
    new org.apache.axis.description.TypeDesc(ReportDefinitionReportType.class);
static {
    typeDesc.setXmlType(new javax.xml.namespace.QName("https://adwords.google.com/api/adwords/cm/v201409", "ReportDefinition.ReportType"));
}
/**
 * Return type metadata object
 */
public static org.apache.axis.description.TypeDesc getTypeDesc() {
    return typeDesc;
}
}
|
apache-2.0
|
keepsl/keepsmis
|
crm/src/main/java/com/keeps/crm/service/impl/BuyRecordServiceImpl.java
|
3106
|
package com.keeps.crm.service.impl;
import java.text.DecimalFormat;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import com.keeps.core.service.AbstractService;
import com.keeps.crm.dao.BuyRecordDao;
import com.keeps.crm.service.BuyRecordService;
import com.keeps.model.TBuyRecord;
import com.keeps.tools.exception.CapecException;
import com.keeps.tools.utils.Assert;
import com.keeps.tools.utils.DateUtils;
import com.keeps.tools.utils.StringUtils;
import com.keeps.tools.utils.page.Page;
import com.keeps.tools.utils.threadlocal.UserSchoolThread;
/**
* <p>Title: BuyRecordServiceImpl.java</p>
* <p>Description: 客户购买记录Service实现类 </p>
* <p>Copyright: Copyright (c) KEEPS</p>
* @author keeps
* @version v 1.00
* @date 创建日期:2017年6月23日
* 修改日期:
* 修改人:
* 复审人:
*/
@Service
public class BuyRecordServiceImpl extends AbstractService implements BuyRecordService {

    @Autowired
    private BuyRecordDao buyRecordDao;

    /** Pages through purchase records; visibility scoping is delegated to the DAO. */
    @Override
    public Page queryList(TBuyRecord buyRecord) {
        return buyRecordDao.queryList(buyRecord, UserSchoolThread.get().isSuperAdmin());
    }

    /** Pages through purchase-stream records, restricted to the current employee for non-admins. */
    @Override
    public Page queryStreamList(TBuyRecord buyRecord) {
        if (!UserSchoolThread.get().isSuperAdmin()) {
            buyRecord.setEmpid(UserSchoolThread.get().getUserid());
        }
        return buyRecordDao.queryStreamList(buyRecord, UserSchoolThread.get().isSuperAdmin());
    }

    /** Pages through purchase statistics, restricted to the current employee for non-admins. */
    // NOTE(review): no @Override here — confirm whether queryStatisticsList is
    // declared on BuyRecordService and add the annotation if so.
    public Page queryStatisticsList(TBuyRecord buyRecord) {
        if (!UserSchoolThread.get().isSuperAdmin()) {
            buyRecord.setEmpid(UserSchoolThread.get().getUserid());
        }
        return buyRecordDao.queryStatisticsList(buyRecord, UserSchoolThread.get().isSuperAdmin());
    }

    /**
     * Validates and persists a purchase record.
     *
     * The product field arrives as a single "name¥price" string; it is split
     * into the product name and unit price, and the total price is computed
     * as price * quantity rounded to two decimal places.
     *
     * @param buyRecord the record to save; clientid, productname (with price
     *                  segment) and buynum are required
     * @return always {@code null} (kept for interface compatibility)
     * @throws CapecException if the price segment is not a valid number
     */
    @Override
    public String saveOrUpdate(TBuyRecord buyRecord) {
        Assert.isTrue(buyRecord.getClientid() != null, "客户id不能为空!");
        if (StringUtils.notText(buyRecord.getUpdatetimestr())) {
            buyRecord.setUpdatetimestr(DateUtils.formatNow());
        }
        Assert.isTrue(StringUtils.hasText(buyRecord.getProductname()), "产品名称不允许为空!");
        // Split "name¥price" into its two parts.
        String[] nameprices = buyRecord.getProductname().split("¥");
        // Guard against a missing price segment before indexing nameprices[1]
        // (the original code would throw ArrayIndexOutOfBoundsException).
        Assert.isTrue(nameprices.length >= 2, "产品价格不允许为空!");
        buyRecord.setProductname(nameprices[0]);
        Assert.isTrue(buyRecord.getBuynum() != null, "数量不能为空");
        Assert.isTrue(buyRecord.getBuynum() > 0, "数量不能小于0");
        // NOTE(review): prices are stored as float — consider BigDecimal for
        // currency; changing that requires a model change beyond this class.
        DecimalFormat df = new DecimalFormat("#.##");
        try {
            buyRecord.setPrice(Float.parseFloat(nameprices[1]));
            // Total = unit price * quantity, rounded to two decimal places.
            buyRecord.setTotalprice(Float.parseFloat(df.format(buyRecord.getPrice() * buyRecord.getBuynum())));
        } catch (NumberFormatException e) {
            // Surface a meaningful message instead of the original empty one.
            throw new CapecException("产品价格格式不正确!");
        }
        Assert.isTrue(StringUtils.hasText(buyRecord.getUpdatetimestr()), "购买时间不允许为空!");
        buyRecord.setEmpid(UserSchoolThread.get().getUserid());
        buyRecord.setUpdatetime(DateUtils.parse("yyyy-MM-dd HH:mm", buyRecord.getUpdatetimestr()));
        super.save(buyRecord);
        return null;
    }
}
|
apache-2.0
|
pavlobaron/log4j2redis
|
src/test/org/pbit/log4j2redis/Log4j2RedisTest.java
|
1410
|
/**
* This file is part of log4j2redis
*
* Copyright (c) 2012 by Pavlo Baron (pb at pbit dot org)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @author Pavlo Baron <pb at pbit dot org>
* @author Landro Silva
* @copyright 2012 Pavlo Baron */
package test.org.pbit.log4j2redis;
import org.apache.log4j.Logger;
public class Log4j2RedisTest {

    /** Worker thread that emits 10,000 warn-level messages on its own logger. */
    public static class LogThread extends Thread {

        Logger log = Logger.getLogger("LogThread");

        @Override
        public void run() {
            for (long i = 0; i < 10000; i++) {
                log.warn("whatever " + i);
            }
        }
    }

    static Logger log = Logger.getLogger("LogMainThread");

    public static void main(String[] args) {
        // Spawn nine concurrent workers hammering the warn level...
        for (int worker = 1; worker <= 9; worker++) {
            new Log4j2RedisTest.LogThread().start();
        }
        // ...while the main thread emits error-level messages in parallel.
        for (long n = 0; n < 10000; n++) {
            log.error("that's me " + n);
        }
    }
}
|
apache-2.0
|
SAP/openui5
|
src/sap.ui.core/src/sap/ui/test/matchers/Matcher.js
|
1818
|
/*!
* ${copyright}
*/
sap.ui.define([
	"sap/ui/test/_OpaLogger",
	"sap/ui/base/ManagedObject"
], function (_OpaLogger, ManagedObject) {
	"use strict";
	/**
	 * @class Matchers for Opa5 - needs to implement an isMatching function that returns a boolean and will get a control instance as parameter
	 * @abstract
	 * @extends sap.ui.base.ManagedObject
	 * @public
	 * @name sap.ui.test.matchers.Matcher
	 * @author SAP SE
	 * @since 1.23
	 */
	var Matcher = ManagedObject.extend("sap.ui.test.matchers.Matcher", {
		metadata : {
			publicMethods : [ "isMatching" ]
		},
		constructor: function () {
			// Per-instance logger named after the concrete subclass, so log
			// output identifies which matcher produced it.
			this._oLogger = _OpaLogger.getLogger(this.getMetadata().getName());
			return ManagedObject.prototype.constructor.apply(this, arguments);
		},
		/**
		 * Checks if the matcher is matching - will get an instance of sap.ui.core.Control as parameter.
		 *
		 * Should be overwritten by subclasses
		 *
		 * @param {sap.ui.core.Control} oControl the control that is checked by the matcher
		 * @return {boolean} true if the Control is matching the condition of the matcher
		 * @protected
		 * @name sap.ui.test.matchers.Matcher#isMatching
		 * @function
		 */
		isMatching : function (oControl) {
			// Base implementation matches every control; subclasses narrow this.
			return true;
		},
		/**
		 * @return {object} window of the application under test, or the current window if OPA5 is not loaded
		 * Note: declared matchers are instanciated in the app context (by MatcherFactory)
		 * while users instanciate matchers in the test context (in a waitFor)
		 * @private
		 * @function
		 */
		_getApplicationWindow: function () {
			if (sap.ui.test && sap.ui.test.Opa5) {
				// matcher context === test context, because Opa5 is loaded
				return sap.ui.test.Opa5.getWindow();
			} else {
				// matcher context === app context
				return window;
			}
		}
	});
	return Matcher;
});
|
apache-2.0
|
jerryxing98/jerryxing98.github.io
|
_posts/2017-05-21-SpringBoot-Start2.markdown
|
5532
|
---
layout: keynote
title: "SpringBoot-最佳实践-1.2Spring父子容器"
subtitle: "Slides: Spring父子容器及作用"
header-img: "img/post-bg-js-version.jpg"
navcolor: "invert"
date: 2017-05-21
author: "JerryMinds"
tags:
- Java
- SpringBoot
- microservice
- 云端
---
> 为啥Spring和SpringMVC包要分开扫描包?
---
### 背景
经常在项目看到这样的代码,为什么一个Bean 要注入两次?
``` xml
<!-- spring 配置文件-->
<context:component-scan base-package="com.xxx.xxx.account.front">
<context:exclude-filter type="annotation"
expression="org.springframework.stereotype.Controller" />
</context:component-scan>
<!-- spring mvc -->
<context:component-scan base-package="com.xxx.xxx.account.front.web" use-default-filters="false">
<context:include-filter type="annotation"
expression="org.springframework.stereotype.Controller" />
</context:component-scan>
```
测试Bean
``` java
@Service
public class TestService implements InitializingBean {
@Autowired
private PersonalAddressAjaxController personalAddressAjaxController;
@Override
public void afterPropertiesSet() throws Exception {
System.out.println("--------------------------------");
}
}
```
---
### 相关测试
* I.测试1 - Spring加载全部bean,MVC加载Controller
``` xml
<!-- spring 配置文件-->
<context:component-scan base-package="com.xxx.xxx.account.front">
</context:component-scan>
<!-- spring mvc -->
<context:component-scan base-package="com.xxx.xxx.account.front.web" use-default-filters="false">
<context:include-filter type="annotation"
expression="org.springframework.stereotype.Controller" />
</context:component-scan>
```
测试结果:TestService通过,界面显示正常。
原因:父容器加载了全部bean,所以Service 能访问到Controller。MVC容器默认查找当前容器,能查到有转发的Controller规则所以界面正常跳转。
* II.测试2 - Spring加载全部Bean,MVC容器啥也不加载
``` xml
<!-- spring 配置文件-->
<context:component-scan base-package="com.xxx.xxx.account.front">
</context:component-scan>
<!-- spring mvc -->
```
测试结果:TestService通过,界面显示404。
原因:父容器加载了全部bean,所以Service 能访问到Controller。MVC容器默认查找当前容器的Controller,找不到所以界面出现404。
* III.测试3 - Spring加载所有除了Controller的bean,MVC只加载Controller
``` xml
<!-- spring 配置文件-->
<context:component-scan base-package="com.xxx.xxx.account.front">
<context:exclude-filter type="annotation"
expression="org.springframework.stereotype.Controller" />
</context:component-scan>
<!-- spring mvc -->
<context:component-scan base-package="com.xxx.xxx.account.front.web" use-default-filters="false">
<context:include-filter type="annotation"
expression="org.springframework.stereotype.Controller" />
</context:component-scan>
```
测试结果:TestService初始化失败,如果注释掉该bean,界面正常。
原因:父容器不能访问子容器的bean。
* IV.测试4 - Spring不加载bean,MVC加载所有的bean
``` xml
<!-- spring 配置文件-->
无
<!-- spring mvc -->
<context:component-scan base-package="com.xxx.xxx.account.front.web" use-default-filters="true">
</context:component-scan>
```
测试结果:TestService通过,界面正常。
原因:因为所有的bean都在子容器中,也能查到当前容器中的Controller,所以没啥问题。
### 原理
Spring 是父容器, Spring MVC是子容器, 子容器可以访问父容器的bean,父容器不能访问子容器的bean。
### 相关应用
通过HierarchicalBeanFactory接口,Spring的IoC容器可以建立父子层级关联的容器体系,子容器可以访问父容器中的Bean,但父容器不能访问子容器的Bean。在容器内,Bean的id必须是唯一的,但子容器可以拥有一个和父容器id相同的Bean。父子容器层级体系增强了Spring容器架构的扩展性和灵活性,因为第三方可以通过编程的方式,为一个已经存在的容器添加一个或多个特殊用途的子容器,以提供一些额外的功能。
Spring使用父子容器实现了很多功能,比如在Spring MVC中,展现层Bean位于一个子容器中,而业务层和持久层的Bean位于父容器中。这样,展现层Bean就可以引用业务层和持久层的Bean,而业务层和持久层的Bean则看不到展现层的Bean。
|
apache-2.0
|
flant/dapp
|
pkg/config/git_manager.go
|
84
|
package config
// GitManager holds the git artifact sources configured for a build,
// grouped by where the repository lives (presumably GitLocal = on-disk
// repositories and GitRemote = fetched URLs — confirm against their
// definitions elsewhere in this package).
type GitManager struct {
Local []*GitLocal // locally available repositories
Remote []*GitRemote // remotely hosted repositories
}
|
apache-2.0
|
jmgc/swift
|
stdlib/public/Concurrency/Actor.cpp
|
47851
|
///===--- Actor.cpp - Standard actor implementation ------------------------===///
///
/// This source file is part of the Swift.org open source project
///
/// Copyright (c) 2014 - 2020 Apple Inc. and the Swift project authors
/// Licensed under Apache License v2.0 with Runtime Library Exception
///
/// See https:///swift.org/LICENSE.txt for license information
/// See https:///swift.org/CONTRIBUTORS.txt for the list of Swift project authors
///
///===----------------------------------------------------------------------===///
///
/// The default actor implementation for Swift actors, plus related
/// routines such as generic executor enqueuing and switching.
///
///===----------------------------------------------------------------------===///
#include "swift/Runtime/Concurrency.h"
#include "swift/Runtime/Atomic.h"
#include "swift/Runtime/Mutex.h"
#include "swift/Runtime/ThreadLocal.h"
#include "swift/ABI/Actor.h"
#include "llvm/ADT/PointerIntPair.h"
using namespace swift;
/// Should we yield the thread?
///
/// Always false for now: there is no system-scheduler integration yet,
/// so actor-processing loops never voluntarily give up their thread.
static bool shouldYieldThread() {
  // FIXME: system scheduler integration
  return false;
}
/*****************************************************************************/
/*********************** DEFAULT ACTOR IMPLEMENTATION ************************/
/*****************************************************************************/
namespace {
class DefaultActorImpl;
/// A job to process a default actor. Allocated inline in the actor.
class ProcessInlineJob : public Job {
public:
  // Priority is the only state; the owning actor is recovered from the
  // job's own address (see DefaultActorImpl::fromInlineJob).
  ProcessInlineJob(JobPriority priority)
    : Job({JobKind::DefaultActorInline, priority}, &process) {}

  SWIFT_CC(swiftasync)
  static void process(Job *job, ExecutorRef executor);

  // LLVM-style RTTI support.
  static bool classof(const Job *job) {
    return job->Flags.getKind() == JobKind::DefaultActorInline;
  }
};
/// A job to process a default actor that's allocated separately from
/// the actor but doesn't need the override mechanics.
class ProcessOutOfLineJob : public Job {
  DefaultActorImpl *Actor;  // the actor this job will process
public:
  ProcessOutOfLineJob(DefaultActorImpl *actor, JobPriority priority)
    : Job({JobKind::DefaultActorSeparate, priority}, &process),
      Actor(actor) {}

  SWIFT_CC(swiftasync)
  static void process(Job *job, ExecutorRef executor);

  // LLVM-style RTTI support.
  static bool classof(const Job *job) {
    return job->Flags.getKind() == JobKind::DefaultActorSeparate;
  }
};
/// A job to process a default actor with a new priority; allocated
/// separately from the actor.
class ProcessOverrideJob;
/// Information about the currently-running processing job.
struct RunningJobInfo {
  enum KindType : uint8_t {
    Inline,    // the actor's inline ProcessInlineJob
    Override,  // a ProcessOverrideJob
    Other      // any other kind of processing job
  };
  KindType Kind;
  JobPriority Priority;
  /// Non-null only for override jobs (see forOverride); used to coordinate
  /// with the actor via the methods declared below.
  ProcessOverrideJob *OverrideJob;

  bool wasInlineJob() const {
    return Kind == Inline;
  }

  // Factory helpers; non-override kinds carry no override job.
  static RunningJobInfo forOther(JobPriority priority) {
    return {Other, priority, nullptr};
  }
  static RunningJobInfo forInline(JobPriority priority) {
    return {Inline, priority, nullptr};
  }
  static RunningJobInfo forOverride(ProcessOverrideJob *job);

  // Defined after ProcessOverrideJob (below), whose protocol they drive.
  void setAbandoned();
  void setRunning();
  bool waitForActivation();
};
class JobRef {
enum : uintptr_t {
NeedsPreprocessing = 0x1,
IsOverride = 0x2,
JobMask = ~uintptr_t(NeedsPreprocessing | IsOverride)
};
/// A Job* that may have one of the two bits above mangled into it.
uintptr_t Value;
JobRef(Job *job, unsigned flags)
: Value(reinterpret_cast<uintptr_t>(job) | flags) {}
public:
constexpr JobRef() : Value(0) {}
/// Return a reference to a job that's been properly preprocessed.
static JobRef getPreprocessed(Job *job) {
/// We allow null pointers here.
return { job, 0 };
}
/// Return a reference to a job that hasn't been preprocesssed yet.
static JobRef getUnpreprocessed(Job *job) {
assert(job && "passing a null job");
return { job, NeedsPreprocessing };
}
/// Return a reference to an override job, which needs special
/// treatment during preprocessing.
static JobRef getOverride(ProcessOverrideJob *job);
/// Is this a null reference?
operator bool() const { return Value != 0; }
/// Does this job need to be pre-processed before we can treat
/// the job queue as a proper queue?
bool needsPreprocessing() const {
return Value & NeedsPreprocessing;
}
/// Is this an unpreprocessed override job?
bool isOverride() const {
return Value & IsOverride;
}
/// Given that this is an override job, return it.
ProcessOverrideJob *getAsOverride() const {
assert(isOverride());
return reinterpret_cast<ProcessOverrideJob*>(Value & JobMask);
}
ProcessOverrideJob *getAsPreprocessedOverride() const;
Job *getAsJob() const {
assert(!isOverride());
return reinterpret_cast<Job*>(Value & JobMask);
}
Job *getAsPreprocessedJob() const {
assert(!isOverride() && !needsPreprocessing());
return reinterpret_cast<Job*>(Value);
}
bool operator==(JobRef other) const {
return Value == other.Value;
}
bool operator!=(JobRef other) const {
return Value != other.Value;
}
};
/// The default actor implementation.
///
/// Ownership of the actor is subtle. Jobs are assumed to keep the actor
/// alive as long as they're executing on it; this allows us to avoid
/// retaining and releasing whenever threads are scheduled to run a job.
/// While jobs are enqueued on the actor, there is a conceptual shared
/// ownership of the currently-enqueued jobs which is passed around
/// between threads and processing jobs and managed using extra retains
/// and releases of the actor. The basic invariant is as follows:
///
/// - Let R be 1 if there are jobs enqueued on the actor or if a job
/// is currently running on the actor; otherwise let R be 0.
/// - Let N be the number of active processing jobs for the actor.
/// - N >= R
/// - There are N - R extra retains of the actor.
///
/// We can think of this as there being one "owning" processing job
/// and K "extra" jobs. If there is a processing job that is actively
/// running the actor, it is always the owning job; otherwise, any of
/// the N jobs may win the race to become the owning job.
///
/// We then have the following ownership rules:
///
/// - When we enqueue the first job on an actor, then R becomes 1, and
/// we must create a processing job so that N >= R. We do not need to
/// retain the actor.
/// - When we create an extra job to process an actor (e.g. because of
/// priority overrides), N increases but R remains the same. We must
/// retain the actor.
/// - When we start running an actor, our job definitively becomes the
/// owning job, but neither N nor R changes. We do not need to retain
/// the actor.
/// - When we go to start running an actor and for whatever reason we
/// don't actually do so, we are eliminating an extra processing job,
/// and so N decreases but R remains the same. We must release the
/// actor.
/// - When we are running an actor and give it up, and there are no
/// remaining jobs on it, then R becomes 0 and N decreases by 1.
/// We do not need to release the actor.
/// - When we are running an actor and give it up, and there are jobs
/// remaining on it, then R remains 1 but N is decreasing by 1.
/// We must either release the actor or create a new processing job
/// for it to maintain the balance.
class DefaultActorImpl : public HeapObject {
enum class Status {
/// The actor is not currently scheduled. Completely redundant
/// with the job list being empty.
Idle,
/// There is currently a job scheduled to process the actor at the
/// stored max priority.
Scheduled,
/// There is currently a thread processing the actor at the stored
/// max priority.
Running
};
struct Flags : public FlagSet<size_t> {
enum : size_t {
Status_offset = 0,
Status_width = 2,
HasActiveInlineJob = 2,
MaxPriority = 8,
MaxPriority_width = JobFlags::Priority_width,
// FIXME: add a reference to the running thread ID so that we
// can boost priorities.
};
/// What is the current high-level status of this actor?
FLAGSET_DEFINE_FIELD_ACCESSORS(Status_offset, Status_width, Status,
getStatus, setStatus)
/// Is there currently an active processing job allocated inline
/// in the actor?
FLAGSET_DEFINE_FLAG_ACCESSORS(HasActiveInlineJob,
hasActiveInlineJob, setHasActiveInlineJob)
/// What is the maximum priority of jobs that are currently running
/// or enqueued on this actor?
///
/// Note that the above isn't quite correct: we don't actually
/// lower this after we finish processing higher-priority tasks.
/// (Doing so introduces some subtleties around kicking off
/// lower-priority processing jobs.)
FLAGSET_DEFINE_FIELD_ACCESSORS(MaxPriority, MaxPriority_width,
JobPriority,
getMaxPriority, setMaxPriority)
};
/// This is designed to fit into two words, which can generally be
/// done lock-free on all our supported platforms.
struct alignas(2 * sizeof(void*)) State {
JobRef FirstJob;
struct Flags Flags;
};
swift::atomic<State> CurrentState;
friend class ProcessInlineJob;
union {
ProcessInlineJob JobStorage;
};
public:
/// Properly construct an actor, except for the heap header.
void initialize() {
new (&CurrentState) std::atomic<State>(State{JobRef(), Flags()});
}
/// Properly destruct an actor, except for the heap header.
void destroy() {
assert(CurrentState.load(std::memory_order_relaxed).Flags.getStatus()
== Status::Idle && "actor not idle during destruction?");
}
/// Add a job to this actor.
void enqueue(Job *job);
/// Take over running this actor in the current thread, if possible.
bool tryAssumeThread(RunningJobInfo runner);
/// Give up running this actor in the current thread.
void giveUpThread(RunningJobInfo runner);
/// Claim the next job off the actor or give it up.
Job *claimNextJobOrGiveUp(bool actorIsOwned, RunningJobInfo runner);
private:
/// Schedule an inline processing job. This can generally only be
/// done if we know nobody else is trying to do it at the same time,
/// e.g. if this thread just sucessfully transitioned the actor from
/// Idle to Scheduled.
void scheduleNonOverrideProcessJob(JobPriority priority,
bool hasActiveInlineJob);
static DefaultActorImpl *fromInlineJob(Job *job) {
assert(isa<ProcessInlineJob>(job));
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Winvalid-offsetof"
return reinterpret_cast<DefaultActorImpl*>(
reinterpret_cast<char*>(job) - offsetof(DefaultActorImpl, JobStorage));
#pragma clang diagnostic pop
}
class OverrideJobCache {
ProcessOverrideJob *Job = nullptr;
bool IsNeeded = false;
#ifndef NDEBUG
bool WasCommitted = false;
#endif
public:
OverrideJobCache() = default;
OverrideJobCache(const OverrideJobCache &) = delete;
OverrideJobCache &operator=(const OverrideJobCache &) = delete;
~OverrideJobCache() {
assert(WasCommitted && "didn't commit override job!");
}
void addToState(DefaultActorImpl *actor, State &newState);
void setNotNeeded() { IsNeeded = false; }
void commit();
};
};
} /// end anonymous namespace
static_assert(sizeof(DefaultActorImpl) <= sizeof(DefaultActor) &&
alignof(DefaultActorImpl) <= alignof(DefaultActor),
"DefaultActorImpl doesn't fit in DefaultActor");
/// Reinterpret the public DefaultActor handle as the runtime's private
/// implementation type (size/alignment compatibility is static_asserted above).
static DefaultActorImpl *asImpl(DefaultActor *actor) {
  return reinterpret_cast<DefaultActorImpl*>(actor);
}
/// Inverse of asImpl: recover the public handle from the implementation.
static DefaultActor *asAbstract(DefaultActorImpl *actor) {
  return reinterpret_cast<DefaultActor*>(actor);
}
/*****************************************************************************/
/************************** DEFAULT ACTOR TRACKING ***************************/
/*****************************************************************************/
namespace {
enum Mode {
/// Shadow any existing frame, leaving it untouched.
ShadowExistingFrame,
/// Update any existing frame if possible.
UpdateExistingFrame
};
/// A little class for tracking whether there's a frame processing
/// default actors in the current thread.
///
/// The goal of this class is to encapsulate uses of the central variable.
/// We want to potentially use a more efficient access pattern than
/// ordinary thread-locals when that's available.
class DefaultActorProcessingFrame {
using ValueType = llvm::PointerIntPair<DefaultActorImpl*, 1, bool>;
/// The active default actor on the current thread, if any.
/// This may still need to be tracked separately from the active
/// executor, if/when we start tracking that in thread-local storage.
static SWIFT_RUNTIME_DECLARE_THREAD_LOCAL(ValueType, ThreadLocalValue);
ValueType SavedValue;
bool IsNeeded;
public:
/// Flag that this thread is processing the given actor (or null,
/// for generic processing) and set up a processing frame if we
/// don't already have one.
DefaultActorProcessingFrame(DefaultActorImpl *actor, Mode mode) {
// If we should shadow an existing frame, save any value that
// it might have set.
if (mode == ShadowExistingFrame) {
SavedValue = ThreadLocalValue.get();
IsNeeded = true;
// If we should update an existing frame, just replace any value
// that it might have set.
} else {
IsNeeded = !ThreadLocalValue.get().getInt();
SavedValue = ValueType();
}
ThreadLocalValue.set(ValueType(actor, true));
}
DefaultActorProcessingFrame(const DefaultActorProcessingFrame &) = delete;
DefaultActorProcessingFrame &operator=(
const DefaultActorProcessingFrame &) = delete;
/// Return the currently active actor.
DefaultActorImpl *getActiveActor() {
return ThreadLocalValue.get().getPointer();
}
/// Exit the frame. This isn't a destructor intentionally, because
/// we need to be able to tail-call out of frames that might have
/// optimistically made one of these.
void exit() {
ThreadLocalValue.set(SavedValue);
}
/// Return whether this frame was needed; if it was not, then it's
/// okay to abandon it without calling exit(). This is only meaningful
/// when constructed in the UpdateExistingFrame mode.
bool isNeeded() {
return IsNeeded;
}
};
/// Define the thread-local.
SWIFT_RUNTIME_DECLARE_THREAD_LOCAL(
DefaultActorProcessingFrame::ValueType,
DefaultActorProcessingFrame::ThreadLocalValue);
} /// end anonymous namespace
/*****************************************************************************/
/*********************** DEFAULT ACTOR IMPLEMENTATION ************************/
/*****************************************************************************/
/// Given that a job is enqueued normally on a default actor, get/set
/// the next job in the actor's queue.
///
/// Note that this must not be used on the override jobs that can appear
/// in the queue; those jobs are not actually in the actor's queue (they're
/// on the global execution queues). So the actor's actual queue flows
/// through the NextJob field on those objects rather than through
/// the SchedulerPrivate fields.
/// Read the queue link stored in the job's scheduler-private storage.
static JobRef getNextJobInQueue(Job *job) {
  return *reinterpret_cast<JobRef*>(job->SchedulerPrivate);
}
/// Write the queue link into the job's scheduler-private storage.
static void setNextJobInQueue(Job *job, JobRef next) {
  *reinterpret_cast<JobRef*>(job->SchedulerPrivate) = next;
}
/// Schedule a processing job that doesn't have to be an override job.
///
/// We can either do this with inline storage or heap-allocated.
/// To ues inline storage, we need to verify that the hasActiveInlineJob
/// flag is not set in the state and then successfully set it. The
/// argument reports that this has happened correctly.
///
/// We should only schedule a non-override processing job at all if
/// we're transferring ownership of the jobs in it; see the ownership
/// comment on DefaultActorImpl.
void DefaultActorImpl::scheduleNonOverrideProcessJob(JobPriority priority,
                                                     bool hasActiveInlineJob) {
  Job *job;
  if (hasActiveInlineJob) {
    // The inline slot is already occupied by an active job, so fall back
    // to a heap-allocated processing job.
    job = new ProcessOutOfLineJob(this, priority);
  } else {
    // Inline slot is free: construct the processing job in the actor's own
    // storage, avoiding a heap allocation.
    job = new (&JobStorage) ProcessInlineJob(priority);
  }
  swift_task_enqueueGlobal(job);
}
namespace {
/// A job to process a specific default actor at a higher priority than
/// it was previously running at.
///
/// When an override job is successfully registered with an actor
/// (not enqueued there), the thread processing the actor and the
/// thread processing the override job coordinate by each calling
/// one of a set of methods on the object.
class ProcessOverrideJob : public Job {
DefaultActorImpl *Actor;
ConditionVariable::Mutex Lock;
ConditionVariable Queue;
/// Has the actor made a decision about this job yet?
bool IsResolvedByActor = false;
/// Has the job made a decision about itself yet?
bool IsResolvedByJob = false;
/// Has this job been abandoned?
bool IsAbandoned = false;
public:
/// SchedulerPrivate in an override job is used for actually scheduling
/// the job, so the actor queue goes through this instead.
///
/// We also use this temporarily for the list of override jobs on
/// the actor that we need to wake up.
JobRef NextJob;
public:
ProcessOverrideJob(DefaultActorImpl *actor, JobPriority priority,
JobRef nextJob)
: Job({JobKind::DefaultActorOverride, priority}, &process),
Actor(actor), NextJob(nextJob) {}
DefaultActorImpl *getActor() const { return Actor; }
/// Called by the job to notify the actor that the job has chosen
/// to abandon its work. This is irrevocable: the job is not going
/// to have a thread behind it.
///
/// This may delete the job or cause it to be deleted on another thread.
void setAbandoned() {
bool shouldDelete = false;
Lock.withLock([&] {
assert(!IsResolvedByJob && "job already resolved itself");
IsResolvedByJob = true;
IsAbandoned = true;
shouldDelete = IsResolvedByJob && IsResolvedByActor;
});
if (shouldDelete) delete this;
}
/// Called by the job to notify the actor that the job has successfully
/// taken over the actor and is now running it.
///
/// This may delete the job object or cause it to be deleted on
/// another thread.
void setRunning() {
bool shouldDelete = false;
Lock.withLock([&] {
assert(!IsResolvedByJob && "job already resolved itself");
IsResolvedByJob = true;
shouldDelete = IsResolvedByJob && IsResolvedByActor;
});
if (shouldDelete) delete this;
}
/// Called by the job to wait for the actor to resolve what the job
/// should do.
bool waitForActivation() {
bool isActivated = false;
Lock.withLockOrWait(Queue, [&] {
assert(!IsResolvedByJob && "job already resolved itself");
if (IsResolvedByActor) {
isActivated = !IsAbandoned;
IsResolvedByJob = true;
return true;
}
return false;
});
delete this;
return isActivated;
}
/// Called by the actor to notify this job that the actor thinks it
/// should try to take over the actor. It's okay if that doesn't
/// succeed (as long as that's because some other job is going to
/// take over).
///
/// This may delete the job or cause it to be deleted on another
/// thread.
bool wakeAndActivate() {
bool shouldDelete = false;
bool mayHaveBeenActivated = false;
Lock.withLockThenNotifyAll(Queue, [&] {
assert(!IsResolvedByActor && "actor already resolved this sjob");
IsResolvedByActor = true;
mayHaveBeenActivated = IsResolvedByJob && !IsAbandoned;
shouldDelete = IsResolvedByJob && IsResolvedByActor;
});
if (shouldDelete) delete this;
return mayHaveBeenActivated;
}
/// Called by the actor to notify this job that the actor does not
/// think it should try to take over the actor.  It's okay if the
/// job successfully takes over the actor anyway.
///
/// This may delete the job or cause it to be deleted on another
/// thread.
void wakeAndAbandon() {
  bool lastReference = false;
  Lock.withLockThenNotifyAll(Queue, [&] {
    assert(!IsResolvedByActor && "actor already resolved this job");
    IsResolvedByActor = true;
    IsAbandoned = true;
    // The object may be destroyed only once both sides have resolved
    // it; we just resolved the actor side.
    lastReference = IsResolvedByJob;
  });
  if (lastReference)
    delete this;
}
/// The job entry point: take over (or try to take over) the actor on
/// the current thread.  Defined out-of-line below.
SWIFT_CC(swiftasync)
static void process(Job *job, ExecutorRef _executor);

/// LLVM-style RTTI support: this class matches jobs created with the
/// DefaultActorOverride job kind.
static bool classof(const Job *job) {
  return job->Flags.getKind() == JobKind::DefaultActorOverride;
}
};
} /// end anonymous namespace
/// Build a JobRef to a freshly-registered override job; override jobs
/// always enter the queue flagged as needing preprocessing.
JobRef JobRef::getOverride(ProcessOverrideJob *job) {
  return JobRef(job, NeedsPreprocessing | IsOverride);
}

/// Interpret this already-preprocessed reference as an override job,
/// or return null if it refers to a different kind of job.
ProcessOverrideJob *JobRef::getAsPreprocessedOverride() const {
  return cast_or_null<ProcessOverrideJob>(getAsPreprocessedJob());
}

/// Describe a processing thread that was started by an override job;
/// keeps a reference to the job so it can later be resolved.
RunningJobInfo RunningJobInfo::forOverride(ProcessOverrideJob *job) {
  return {Override, job->getPriority(), job};
}
/// Flag that the current processing job has been abandoned
/// and will not be running the actor.
void RunningJobInfo::setAbandoned() {
  // Nothing to resolve unless an override job started this runner.
  if (!OverrideJob)
    return;
  OverrideJob->setAbandoned();
  OverrideJob = nullptr;
}
/// Flag that the current processing job is now running the actor.
void RunningJobInfo::setRunning() {
  // Nothing to resolve unless an override job started this runner.
  if (!OverrideJob)
    return;
  OverrideJob->setRunning();
  OverrideJob = nullptr;
}
/// Try to wait for the current processing job to be activated,
/// if that's possible.  It's okay to call this multiple times
/// (or to call setAbandoned/setRunning after it) as long as
/// it's all on a single value.
bool RunningJobInfo::waitForActivation() {
  // Only override-job runners support waiting for activation.
  if (Kind != Override)
    return false;

  // A null override job means a previous call already waited for
  // activation successfully.
  if (!OverrideJob)
    return true;

  // waitForActivation() consumes the job object, so drop our
  // reference to it regardless of the answer.
  auto *job = OverrideJob;
  OverrideJob = nullptr;
  return job->waitForActivation();
}
/// Wake all the overrides in the given list, activating the first
/// that exactly matches the target priority, if any.
static void wakeOverrides(ProcessOverrideJob *nextOverride,
                          Optional<JobPriority> targetPriority) {
  bool activatedOne = false;
  for (auto *cur = nextOverride; cur; ) {
    // Grab the link before waking the job: waking can destroy the
    // job immediately (possibly on another thread), after which
    // cur->NextJob must not be touched.
    auto *next = cur->NextJob.getAsPreprocessedOverride();

    // Activate at most one job, and only one whose priority exactly
    // matches the target; abandon everything else.
    bool shouldActivate = !activatedOne &&
                          targetPriority &&
                          cur->getPriority() == *targetPriority;
    if (shouldActivate)
      activatedOne = cur->wakeAndActivate();
    else
      cur->wakeAndAbandon();

    cur = next;
  }
}
/// Flag that an override job is needed and create it.
///
/// Registers the override job (creating it on first use, reusing it on
/// subsequent CAS retries) at the head of newState's queue, carrying
/// the queue's new max priority.
void DefaultActorImpl::OverrideJobCache::addToState(DefaultActorImpl *actor,
                                                    State &newState) {
  IsNeeded = true;
  auto newPriority = newState.Flags.getMaxPriority();
  auto nextJob = newState.FirstJob;
  if (Job) {
    // Reuse the job created on an earlier attempt, refreshing its
    // priority and queue link for the new candidate state.
    Job->Flags.setPriority(newPriority);
    Job->NextJob = nextJob;
  } else {
    // Override jobs are always "extra" from the perspective of our
    // ownership rules and so require a retain of the actor.  We must
    // do this before changing the actor state because other jobs may
    // race to release the actor as soon as we change the actor state.
    swift_retain(actor);
    Job = new ProcessOverrideJob(actor, newPriority, nextJob);
  }
  newState.FirstJob = JobRef::getOverride(Job);
}
/// Schedule the override job if we created one and still need it.
/// If we created one but didn't end up needing it (which can happen
/// with a race to override), destroy it.
void DefaultActorImpl::OverrideJobCache::commit() {
#ifndef NDEBUG
  // Fixed typo in the assertion message ("timee" -> "times").
  assert(!WasCommitted && "committing override job multiple times");
  WasCommitted = true;
#endif

  if (Job) {
    if (IsNeeded) {
      swift_task_enqueueGlobal(Job);
    } else {
      // Balance the retain taken in addToState() before destroying
      // the unneeded job.
      swift_release(Job->getActor());
      delete Job;
    }
  }
}
/// Preprocess the prefix of the actor's queue that hasn't already
/// been preprocessed:
///
/// - Split the jobs into registered overrides and actual jobs.
/// - Append the actual jobs to any already-preprocessed job list.
///
/// The returned job should become the new root of the job queue
/// (or may be immediately dequeued, in which its successor should).
/// All of the jobs in this list are guaranteed to be non-override jobs.
///
/// \param first the queue head currently published in the actor state
/// \param previousFirst the head observed by an earlier call during
///   this same processing attempt, or a null JobRef on the first call;
///   scanning stops when it is reached
/// \param previousFirstNewJob the preprocessed list built by that
///   earlier call (must be null iff previousFirst is null)
/// \param overridesToWake in/out list of override jobs found during
///   the scan (linked through NextJob), which the caller must wake
static Job *preprocessQueue(JobRef first,
                            JobRef previousFirst,
                            Job *previousFirstNewJob,
                            ProcessOverrideJob *&overridesToWake) {
  assert(previousFirst || previousFirstNewJob == nullptr);

  // If the head is already preprocessed, the whole queue is.
  if (!first.needsPreprocessing())
    return first.getAsPreprocessedJob();

  Job *firstNewJob = nullptr;

  while (first != previousFirst) {
    // If we find something that doesn't need preprocessing, it must've
    // been left by a previous queue-processing, which means that
    // this must be our first attempt to preprocess in this processing.
    // Just treat the queue from this point as a well-formed whole
    // to which we need to add any new items we might've just found.
    if (!first.needsPreprocessing()) {
      assert(!previousFirst && !previousFirstNewJob);
      previousFirstNewJob = first.getAsPreprocessedJob();
      break;
    }

    // If the job is an override, add it to the list of override jobs
    // that we need to wake up.  Note that the list of override jobs
    // flows through NextJob; we must not use getNextJobInQueue because
    // that touches queue-private state, and the override job is
    // not enqueued on the actor, merely registered with it.
    if (first.isOverride()) {
      auto overrideJob = first.getAsOverride();
      first = overrideJob->NextJob;
      overrideJob->NextJob = JobRef::getPreprocessed(overridesToWake);
      overridesToWake = overrideJob;
      continue;
    }

    // If the job isn't an override, add it to the front of the list of
    // jobs we're building up.  Note that this reverses the order of
    // jobs; since enqueue() always adds jobs to the front, reversing
    // the order effectively makes the actor queue FIFO, which is what
    // we want.
    // FIXME: but we should also sort by priority
    auto job = first.getAsJob();
    first = getNextJobInQueue(job);
    setNextJobInQueue(job, JobRef::getPreprocessed(firstNewJob));
    firstNewJob = job;
  }

  // If there are jobs already in the queue, put the new jobs at the end.
  if (!firstNewJob) {
    firstNewJob = previousFirstNewJob;
  } else if (previousFirstNewJob) {
    // Walk to the tail of the existing preprocessed list and splice
    // the newly-reversed jobs after it, so older jobs still run first.
    auto cur = previousFirstNewJob;
    while (true) {
      auto next = getNextJobInQueue(cur).getAsPreprocessedJob();
      if (!next) {
        setNextJobInQueue(cur, JobRef::getPreprocessed(firstNewJob));
        break;
      }
      cur = next;
    }
    firstNewJob = previousFirstNewJob;
  }

  return firstNewJob;
}
/// Give up running this actor on the current thread: flush and
/// preprocess the queue, transition the actor to Scheduled or Idle,
/// wake any registered override jobs, and ensure some job still owns
/// and will process the actor if work remains.
void DefaultActorImpl::giveUpThread(RunningJobInfo runner) {
  auto oldState = CurrentState.load(std::memory_order_acquire);
  assert(oldState.Flags.getStatus() == Status::Running);

  ProcessOverrideJob *overridesToWake = nullptr;
  auto firstNewJob = preprocessQueue(oldState.FirstJob, JobRef(), nullptr,
                                     overridesToWake);

  // Standard CAS retry loop: build a candidate state, try to publish
  // it, and re-preprocess anything newly enqueued on failure.
  while (true) {
    State newState = oldState;
    newState.FirstJob = JobRef::getPreprocessed(firstNewJob);
    if (firstNewJob) {
      newState.Flags.setStatus(Status::Scheduled);
    } else {
      newState.Flags.setStatus(Status::Idle);
    }

    // If the runner was an inline job, it's no longer active.
    if (runner.wasInlineJob()) {
      newState.Flags.setHasActiveInlineJob(false);
    }

    bool hasMoreJobs = (bool) newState.FirstJob;
    bool hasOverrideAtNewPriority =
      (runner.Priority < oldState.Flags.getMaxPriority());
    bool hasActiveInlineJob = newState.Flags.hasActiveInlineJob();
    bool needsNewProcessJob = hasMoreJobs && !hasOverrideAtNewPriority;

    // If we want to create a new inline job below, be sure to claim that
    // in the new state.
    if (needsNewProcessJob && !hasActiveInlineJob) {
      newState.Flags.setHasActiveInlineJob(true);
    }

    auto firstPreprocessed = oldState.FirstJob;
    if (!CurrentState.compare_exchange_weak(oldState, newState,
                    /*success*/ std::memory_order_release,
                    /*failure*/ std::memory_order_acquire)) {
      // Preprocess any new queue items.
      firstNewJob = preprocessQueue(oldState.FirstJob,
                                    firstPreprocessed,
                                    firstNewJob,
                                    overridesToWake);

      // Try again.
      continue;
    }

    // The priority of the remaining work.
    auto newPriority = newState.Flags.getMaxPriority();

    // Wake any overrides.
    wakeOverrides(overridesToWake, newPriority);

    // This is the actor's owning job; per the ownership rules (see
    // the comment on DefaultActorImpl), if there are remaining
    // jobs, we need to balance out our ownership one way or another.
    // We also, of course, need to ensure that there's a thread that's
    // actually going to process the actor.
    if (hasMoreJobs) {
      // If we know that there's an override job at the new priority,
      // we can let it become the owning job.  We just need to release.
      if (hasOverrideAtNewPriority) {
        swift_release(this);

      // Otherwise, enqueue a job that will try to take over running
      // with the new priority.  This also ensures that there's a job
      // at that priority which will actually take over the actor.
      } else {
        scheduleNonOverrideProcessJob(newPriority, hasActiveInlineJob);
      }
    }

    return;
  }
}
/// Claim the next job on the actor or give it up forever.
///
/// The running thread doesn't need to already own the actor to do this.
/// It does need to be participating correctly in the ownership
/// scheme as a "processing job"; see the comment on DefaultActorImpl.
///
/// \param actorIsOwned whether this thread already owns the actor
///   (i.e. whether a previous claim on this thread succeeded)
/// \param runner describes the processing job driving this thread
/// \return the claimed job, or null if the actor was given up
Job *DefaultActorImpl::claimNextJobOrGiveUp(bool actorIsOwned,
                                            RunningJobInfo runner) {
  auto oldState = CurrentState.load(std::memory_order_acquire);

  // The status had better be Running unless we're trying to acquire
  // our first job.
  assert(oldState.Flags.getStatus() == Status::Running ||
         !actorIsOwned);

  // If we don't yet own the actor, we need to try to claim the actor
  // first; we cannot safely access the queue memory yet because other
  // threads may concurrently be trying to do this.
  if (!actorIsOwned) {
    while (true) {
      // A helper function when the only change we need to try is to
      // update for an inline runner.
      auto tryUpdateForInlineRunner = [&]{
        if (!runner.wasInlineJob()) return true;
        auto newState = oldState;
        newState.Flags.setHasActiveInlineJob(false);
        return CurrentState.compare_exchange_weak(oldState, newState,
                        /*success*/ std::memory_order_relaxed,
                        /*failure*/ std::memory_order_acquire);
      };

      // If the actor is out of work, or its priority doesn't match our
      // priority, don't try to take over the actor.
      if (!oldState.FirstJob ||
          oldState.Flags.getMaxPriority() != runner.Priority) {
        // The only change we need here is inline-runner bookkeeping.
        if (!tryUpdateForInlineRunner())
          continue;

        // We're eliminating a processing thread; balance ownership.
        swift_release(this);
        runner.setAbandoned();
        return nullptr;
      }

      // If the actor is currently running, we'd need to wait for
      // it to stop.  We can do this if we're an override job;
      // otherwise we need to exit.
      if (oldState.Flags.getStatus() == Status::Running) {
        if (!runner.waitForActivation()) {
          // The only change we need here is inline-runner bookkeeping.
          if (!tryUpdateForInlineRunner())
            continue;

          swift_release(this);
          return nullptr;
        }

        // Fall through into the compare-exchange below, but anticipate
        // that the actor is now Scheduled instead of Running.
        oldState.Flags.setStatus(Status::Scheduled);
      }

      // Try to set the state as Running.
      assert(oldState.Flags.getStatus() == Status::Scheduled);
      auto newState = oldState;
      newState.Flags.setStatus(Status::Running);

      // Also do our inline-runner bookkeeping.
      if (runner.wasInlineJob())
        newState.Flags.setHasActiveInlineJob(false);

      if (!CurrentState.compare_exchange_weak(oldState, newState,
                      /*success*/ std::memory_order_relaxed,
                      /*failure*/ std::memory_order_acquire))
        continue;

      // If that succeeded, we can proceed to the main body.
      oldState = newState;
      runner.setRunning();
      break;
    }
  }

  assert(oldState.Flags.getStatus() == Status::Running);

  // We should have taken care of the inline-job bookkeeping now.
  assert(!oldState.Flags.hasActiveInlineJob() || !runner.wasInlineJob());

  // Okay, now it's safe to look at queue state.
  // Preprocess any queue items at the front of the queue.
  ProcessOverrideJob *overridesToWake = nullptr;
  auto newFirstJob = preprocessQueue(oldState.FirstJob, JobRef(),
                                     nullptr, overridesToWake);

  // NOTE(review): this declaration is dead — it is shadowed by the
  // identically-named local declared inside the loop below, and every
  // path through the loop either continues or returns.  Kept verbatim
  // in this documentation-only pass; consider removing it.
  Optional<JobPriority> remainingJobPriority;
  while (true) {
    State newState = oldState;

    // If the priority we're currently running with is adequate for
    // all the remaining jobs, try to dequeue something.
    // FIXME: should this be an exact match in priority instead of
    // potentially running jobs with too high a priority?
    Job *jobToRun;
    if (oldState.Flags.getMaxPriority() <= runner.Priority &&
        newFirstJob) {
      jobToRun = newFirstJob;
      newState.FirstJob = getNextJobInQueue(newFirstJob);
      newState.Flags.setStatus(Status::Running);

    // Otherwise, we should give up the thread.
    } else {
      jobToRun = nullptr;
      newState.FirstJob = JobRef::getPreprocessed(newFirstJob);
      newState.Flags.setStatus(newFirstJob ? Status::Scheduled
                                           : Status::Idle);
    }

    // Try to update the queue.  The changes we've made to the queue
    // structure need to be made visible even if we aren't dequeuing
    // anything.
    auto firstPreprocessed = oldState.FirstJob;
    if (!CurrentState.compare_exchange_weak(oldState, newState,
                    /*success*/ std::memory_order_release,
                    /*failure*/ std::memory_order_acquire)) {
      // Preprocess any new queue items, which will have been formed
      // into a linked list leading to the last head we observed.
      // (The fact that that job may not be the head anymore doesn't
      // matter; we're looking for an exact match with that.)
      newFirstJob = preprocessQueue(oldState.FirstJob,
                                    firstPreprocessed,
                                    newFirstJob,
                                    overridesToWake);

      // Loop to retry updating the state.
      continue;
    }

    // We successfully updated the state.

    // If we're giving up the thread with jobs remaining, we need
    // to release the actor, and we should wake overrides with the
    // right priority.
    Optional<JobPriority> remainingJobPriority;
    if (!jobToRun && newFirstJob) {
      remainingJobPriority = newState.Flags.getMaxPriority();
    }

    // Wake the overrides.
    wakeOverrides(overridesToWake, remainingJobPriority);

    // Per the ownership rules (see the comment on DefaultActorImpl),
    // release the actor if we're giving up the thread with jobs
    // remaining.  We intentionally do this after wakeOverrides to
    // try to get the overrides running a little faster.
    if (remainingJobPriority)
      swift_release(this);

    return jobToRun;
  }
}
/// The primary function for processing an actor on a thread.  Start
/// processing the given default actor as the active default actor on
/// the current thread, and keep processing whatever actor we're
/// running when code returns back to us until we're not processing
/// any actors anymore.
static void processDefaultActor(DefaultActorImpl *currentActor,
                                RunningJobInfo runner) {
  // Register that we're processing a default actor in this frame.
  DefaultActorProcessingFrame frame(currentActor, ShadowExistingFrame);

  // Whether this thread has successfully taken ownership of an actor
  // yet; the very first claim goes through the slower "assume" path
  // inside claimNextJobOrGiveUp.
  bool threadIsRunningActor = false;
  while (true) {
    assert(currentActor);

    // Immediately check if we've been asked to yield the thread.
    if (shouldYieldThread())
      break;

    // Claim another job from the current actor.
    auto job = currentActor->claimNextJobOrGiveUp(threadIsRunningActor,
                                                  runner);

    // If we failed to claim a job, we have nothing to do.
    if (!job) {
      // We also gave up the actor as part of failing to claim it.
      // Make sure we don't try to give up the actor again.
      currentActor = nullptr;
      break;
    }

    // Run the job.
    job->run(ExecutorRef::forDefaultActor(asAbstract(currentActor)));

    // The current actor may have changed after the job.
    // If it's become nil, we have nothing to do.
    currentActor = frame.getActiveActor();
    if (!currentActor)
      break;

    // Otherwise, we know that we're running the actor on this thread.
    threadIsRunningActor = true;
  }

  // Leave the processing frame before giving up the actor so the
  // give-up happens outside any active-actor bookkeeping.
  frame.exit();

  // If we still have an active actor, we should give it up.
  if (currentActor)
    currentActor->giveUpThread(runner);
}
void ProcessInlineJob::process(Job *job, ExecutorRef _executor) {
  DefaultActorImpl *actor = DefaultActorImpl::fromInlineJob(job);

  // Read the priority before handing control to the processing loop:
  // processing may invalidate the job object.
  auto runner = RunningJobInfo::forInline(job->getPriority());

  // FIXME: force tail call
  return processDefaultActor(actor, runner);
}
void ProcessOutOfLineJob::process(Job *job, ExecutorRef _executor) {
  auto *self = cast<ProcessOutOfLineJob>(job);
  auto *actor = self->Actor;

  // Capture the priority before destroying the job object.
  auto runner = RunningJobInfo::forOther(self->getPriority());
  delete self;

  // FIXME: force tail call
  return processDefaultActor(actor, runner);
}
void ProcessOverrideJob::process(Job *job, ExecutorRef _executor) {
  auto *self = cast<ProcessOverrideJob>(job);

  // Pull what we need out of the job up front; the runner info keeps
  // a reference to the job so it can be resolved later.
  auto *actor = self->Actor;
  auto runner = RunningJobInfo::forOverride(self);

  // FIXME: force tail call
  return processDefaultActor(actor, runner);
}
/// Add a job to this actor's queue.  If the actor was idle, schedule
/// it for processing; if the new job raises the actor's max priority,
/// register an override job instead.
void DefaultActorImpl::enqueue(Job *job) {
  auto oldState = CurrentState.load(std::memory_order_relaxed);

  // Caches a ProcessOverrideJob across CAS retries so we allocate at
  // most one; commit() at the end schedules or destroys it.
  OverrideJobCache overrideJob;

  while (true) {
    auto newState = oldState;

    // Put the job at the front of the job list (which will get
    // reversed during preprocessing).
    setNextJobInQueue(job, oldState.FirstJob);
    newState.FirstJob = JobRef::getUnpreprocessed(job);

    auto oldStatus = oldState.Flags.getStatus();
    bool wasIdle = oldStatus == Status::Idle;

    // Update the priority: the priority of the job we're adding
    // if the actor was idle, or the max if not.  Only the running
    // thread can decrease the actor's priority once it's non-idle.
    // (But note that the job we enqueue can still observe a
    // lowered priority.)
    auto oldPriority = oldState.Flags.getMaxPriority();
    auto newPriority =
      wasIdle ? job->getPriority()
              : std::max(oldPriority, job->getPriority());
    newState.Flags.setMaxPriority(newPriority);

    // If we need an override job, create it (if necessary) and
    // register it with the queue.
    bool needsOverride = !wasIdle && newPriority != oldPriority;
    if (needsOverride) {
      overrideJob.addToState(this, newState);
    } else {
      overrideJob.setNotNeeded();
    }

    // If we don't need an override job, then we might be able to
    // create an inline job; flag that.
    bool hasActiveInlineJob = newState.Flags.hasActiveInlineJob();
    if (wasIdle && !hasActiveInlineJob)
      newState.Flags.setHasActiveInlineJob(true);

    // Make sure the status is at least Scheduled.  We'll actually
    // schedule the job below, if we succeed at this.
    if (wasIdle) {
      newState.Flags.setStatus(Status::Scheduled);
    }

    // Try the compare-exchange, and try again if it fails.
    if (!CurrentState.compare_exchange_weak(oldState, newState,
                    /*success*/ std::memory_order_release,
                    /*failure*/ std::memory_order_relaxed))
      continue;

    // Okay, we successfully updated the status.  Schedule a job to
    // process the actor if necessary.

    // Commit the override job if we created one.
    overrideJob.commit();

    // If the actor is currently idle, schedule it using the
    // invasive job.
    if (wasIdle) {
      assert(!needsOverride);
      scheduleNonOverrideProcessJob(newPriority, hasActiveInlineJob);
    }

    return;
  }
}
/// Try to take over an idle actor directly on the current thread,
/// transitioning it to Running at the runner's priority.
///
/// \return true if the actor was idle and is now owned by this thread.
bool DefaultActorImpl::tryAssumeThread(RunningJobInfo runner) {
  // We have to load-acquire in order to properly order accesses to
  // the actor's state for the new task.
  auto oldState = CurrentState.load(std::memory_order_acquire);

  // If the actor is currently idle, try to mark it as running.
  while (oldState.Flags.getStatus() == Status::Idle) {
    // An idle actor must have an empty queue.
    assert(!oldState.FirstJob);
    auto newState = oldState;
    newState.Flags.setStatus(Status::Running);
    newState.Flags.setMaxPriority(runner.Priority);
    // On failure the CAS reloads oldState and the loop condition
    // re-checks that the actor is still idle.
    if (CurrentState.compare_exchange_weak(oldState, newState,
                    /*success*/ std::memory_order_relaxed,
                    /*failure*/ std::memory_order_acquire))
      return true;
  }

  return false;
}
/// ABI entry point: initialize a default actor's runtime state.
void swift::swift_defaultActor_initialize(DefaultActor *_actor) {
  asImpl(_actor)->initialize();
}

/// ABI entry point: tear down a default actor's runtime state.
void swift::swift_defaultActor_destroy(DefaultActor *_actor) {
  asImpl(_actor)->destroy();
}

/// ABI entry point: enqueue a job directly on a default actor.
void swift::swift_defaultActor_enqueue(Job *job, DefaultActor *_actor) {
  asImpl(_actor)->enqueue(job);
}
/*****************************************************************************/
/****************************** ACTOR SWITCHING ******************************/
/*****************************************************************************/
/// Can the current executor give up its thread?
static bool canGiveUpThreadForSwitch(ExecutorRef currentExecutor) {
  // We can certainly "give up" a generic executor to try to run a
  // task for an actor, and we know how to make a default actor give
  // up its thread.  Anything else cannot be given up.
  return currentExecutor.isGeneric() || currentExecutor.isDefaultActor();
}
/// Tell the current executor to give up its thread, given that it
/// returned true from canGiveUpThreadForSwitch().
///
/// Note that we don't update DefaultActorProcessingFrame here; we'll
/// do that in runOnAssumedThread.
static void giveUpThreadForSwitch(ExecutorRef currentExecutor,
                                  RunningJobInfo runner) {
  // A generic executor has no thread of its own to give up.
  if (!currentExecutor.isGeneric())
    asImpl(currentExecutor.getDefaultActor())->giveUpThread(runner);
}
/// Try to assume control of the current thread for the given executor
/// in order to run the given job.
///
/// This doesn't actually run the job yet.
///
/// Note that we don't update DefaultActorProcessingFrame here; we'll
/// do that in runOnAssumedThread.
static bool tryAssumeThreadForSwitch(ExecutorRef newExecutor,
                                     RunningJobInfo runner) {
  // A generic executor requires no assumption at all.
  if (newExecutor.isGeneric())
    return true;

  // A default actor can be asked directly whether it will take over
  // this thread.
  if (newExecutor.isDefaultActor())
    return asImpl(newExecutor.getDefaultActor())->tryAssumeThread(runner);

  // Any other executor cannot be assumed.
  return false;
}
/// Given that we've assumed control of an executor on this thread,
/// run the given task on it.
SWIFT_CC(swiftasync)
static void runOnAssumedThread(AsyncTask *task, ExecutorRef newExecutor,
                               RunningJobInfo runner) {
  assert(newExecutor.isGeneric() || newExecutor.isDefaultActor());
  // A generic executor carries no actor; only default-actor executors
  // have one to track in the processing frame.
  DefaultActorImpl *actor = newExecutor.isGeneric()
                              ? nullptr
                              : asImpl(newExecutor.getDefaultActor());

  // Set that this actor is now the active default actor on this thread,
  // and set up an actor-processing frame if there wasn't one already.
  DefaultActorProcessingFrame frame(actor, UpdateExistingFrame);

  // If one already existed, we should just tail-call the task; we don't
  // want these frames to potentially accumulate linearly.
  if (!frame.isNeeded()) {
    // FIXME: force tail call
    return task->run(newExecutor);
  }

  // Otherwise, run the new task.
  task->run(newExecutor);

  // Leave the processing frame, and give up the current actor if
  // we have one.
  //
  // In principle, we could execute more tasks here, but that's probably
  // not a reasonable thing to do in an assumed context rather than a
  // dedicated actor-processing job.
  actor = frame.getActiveActor();
  frame.exit();
  if (actor)
    actor->giveUpThread(runner);
}
/// ABI entry point: continue running the given task, first switching
/// to the new executor if the current one cannot run it directly.
void swift::swift_task_switch(AsyncTask *task, ExecutorRef currentExecutor,
                              ExecutorRef newExecutor) {
  assert(task && "no task provided");

  // If the current executor is compatible with running the new executor,
  // just continue running.
  if (!currentExecutor.mustSwitchToRun(newExecutor)) {
    // FIXME: force tail call
    return task->run(currentExecutor);
  }

  // Okay, we semantically need to switch.
  auto runner = RunningJobInfo::forOther(task->getPriority());

  // If the current executor can give up its thread, and the new executor
  // can take over a thread, try to do so; but don't do this if we've
  // been asked to yield the thread.
  if (canGiveUpThreadForSwitch(currentExecutor) &&
      !shouldYieldThread() &&
      tryAssumeThreadForSwitch(newExecutor, runner)) {
    giveUpThreadForSwitch(currentExecutor, runner);
    // FIXME: force tail call
    return runOnAssumedThread(task, newExecutor, runner);
  }

  // Otherwise, just asynchronously enqueue the task on the given
  // executor.
  swift_task_enqueue(task, newExecutor);
}
/*****************************************************************************/
/************************* GENERIC ACTOR INTERFACES **************************/
/*****************************************************************************/
/// ABI entry point: enqueue a job on an arbitrary executor.
void swift::swift_task_enqueue(Job *job, ExecutorRef executor) {
  assert(job && "no job provided");

  // No specific executor: hand the job to the global pool.
  if (executor.isGeneric()) {
    return swift_task_enqueueGlobal(job);
  }

  if (executor.isDefaultActor()) {
    return asImpl(executor.getDefaultActor())->enqueue(job);
  }

  // Just assume it's actually a default actor that we haven't tagged
  // properly.
  // FIXME: call the general method.
  auto *actor = reinterpret_cast<DefaultActor *>(executor.getRawValue());
  return asImpl(actor)->enqueue(job);
}
|
apache-2.0
|
jcto/DBWeb
|
utils/clone.js
|
255
|
var CaoTest = CaoTest || {};

// Shallow-clone: copy an object's own enumerable properties into a new
// plain object.  Nested objects/arrays are shared, not copied.
//
// Fixes: `function {` was a SyntaxError (missing parameter list),
// `objClone` leaked as an implicit global, and the original relied on
// an undefined `each` helper.
CaoTest.clone = function (obj) {
    var objClone = {};
    for (var key in obj) {
        if (obj.hasOwnProperty(key)) objClone[key] = obj[key];
    }
    return objClone;
};
|
apache-2.0
|
notadd/framework
|
src/Translation/Events/LocaleUpdated.php
|
523
|
<?php
/**
* This file is part of Notadd.
*
* @author TwilRoad <[email protected]>
* @copyright (c) 2017, notadd.com
* @datetime 2017-03-01 15:29
*/
namespace Notadd\Foundation\Translation\Events;
/**
* Class LocaleUpdated.
*/
/**
 * Class LocaleUpdated.
 *
 * Event dispatched when the application's active locale has been
 * changed; carries the newly-selected locale identifier.
 */
class LocaleUpdated
{
    /**
     * The new locale.
     *
     * @var string
     */
    public $locale;

    /**
     * Create a new event instance.
     *
     * @param string $locale the locale identifier that is now active
     */
    public function __construct($locale)
    {
        $this->locale = $locale;
    }
}
|
apache-2.0
|
lukaszbudnik/hackaton-portal
|
app/views/news/newsSingle.scala.html
|
868
|
@(news: Option[model.News], showHackathonSubject: Boolean = true)(implicit request: Request[AnyContent], lang: Lang)
@import helper._
@* Renders a single news item: author avatar, optional hackathon
   subject, HTML body, author/publish-date metadata and searchable
   labels.  Falls back to a "not found" heading when the option is
   empty. *@
@news.map { n =>
<div class="page-header">
<h2>
@if(n.author.avatarUrl) {
<img src="@n.author.avatarUrl" alt="@n.author.name" height="40px" width="40px" />
}
@if(showHackathonSubject) {
@n.hackathon.map { h => @h.subject: }
}
@n.title
</h2>
<p>@Html(n.text)</p>
<p>@helpers.CmsMessages("news.author.label"): @n.author.name @helpers.CmsMessages("news.publishedDate.label"): @tags.dateFormat(n.publishedDate)</p>
<p>@helpers.CmsMessages("news.labels.label"):
@for(l <- n.labels) {
<a href="@routes.News.search(l.value)" style="text-decoration: none"><span class="label">@l.value</span></a>
}
</p>
</div>
}.getOrElse {
<h2>@helpers.CmsMessages("news.notFound")</h2>
}
|
apache-2.0
|
TheRingbearer/HAWKS
|
ode/bpel-runtime/src/main/java/org/apache/ode/bpel/engine/fc/excp/FragmentCompositionException.java
|
418
|
package org.apache.ode.bpel.engine.fc.excp;
/**
 * Exception thrown when composing process fragments fails.
 *
 * @author Alex Hummel
 */
public class FragmentCompositionException extends Exception {

    private static final long serialVersionUID = 9052461952290680611L;

    /** Creates an exception without a detail message or cause. */
    public FragmentCompositionException() {
        super();
    }

    /**
     * Creates an exception with a detail message.
     *
     * @param message description of the composition failure
     */
    public FragmentCompositionException(String message) {
        super(message);
    }

    /**
     * Creates an exception wrapping an underlying cause.
     *
     * @param e the underlying failure
     */
    public FragmentCompositionException(Exception e) {
        super(e);
    }

    /**
     * Creates an exception with both a detail message and a cause, so
     * callers can add context without discarding the original failure.
     *
     * @param message description of the composition failure
     * @param cause   the underlying failure
     */
    public FragmentCompositionException(String message, Throwable cause) {
        super(message, cause);
    }
}
|
apache-2.0
|
kubernetes/enhancements
|
keps/sig-windows/1001-windows-cri-containerd/README.md
|
26106
|
# Supporting CRI-ContainerD on Windows
## Table of Contents
<!-- TOC -->
- [Release Signoff Checklist](#release-signoff-checklist)
- [Summary](#summary)
- [Motivation](#motivation)
- [Goals](#goals)
- [Non-Goals](#non-goals)
- [Proposal](#proposal)
- [User Stories](#user-stories)
- [Improving Kubernetes integration for Windows Server containers](#improving-kubernetes-integration-for-windows-server-containers)
- [Improved isolation and compatibility between Windows pods using Hyper-V](#improved-isolation-and-compatibility-between-windows-pods-using-hyper-v)
- [Improve Control over Memory & CPU Resources with Hyper-V](#improve-control-over-memory--cpu-resources-with-hyper-v)
- [Improved Storage Control with Hyper-V](#improved-storage-control-with-hyper-v)
- [Enable runtime resizing of container resources](#enable-runtime-resizing-of-container-resources)
- [Implementation Details/Notes/Constraints](#implementation-detailsnotesconstraints)
- [Proposal: Use Runtimeclass Scheduler to simplify deployments based on OS version requirements](#proposal-use-runtimeclass-scheduler-to-simplify-deployments-based-on-os-version-requirements)
- [Proposal: Standardize hypervisor annotations](#proposal-standardize-hypervisor-annotations)
- [Dependencies](#dependencies)
- [Windows Server 2019](#windows-server-2019)
- [CRI-ContainerD](#cri-containerd)
- [CNI: Flannel](#cni-flannel)
- [CNI: Kubenet](#cni-kubenet)
- [CNI: GCE](#cni-gce)
- [Storage: in-tree AzureFile, AzureDisk, Google PD](#storage-in-tree-azurefile-azuredisk-google-pd)
- [Storage: FlexVolume for iSCSI & SMB](#storage-flexvolume-for-iscsi--smb)
- [Risks and Mitigations](#risks-and-mitigations)
- [CRI-ContainerD availability](#cri-containerd-availability)
- [Design Details](#design-details)
- [Test Plan](#test-plan)
- [Graduation Criteria](#graduation-criteria)
- [Alpha release](#alpha-release)
- [Alpha -> Beta Graduation](#alpha---beta-graduation)
- [Beta -> GA Graduation](#beta---ga-graduation)
- [Upgrade / Downgrade Strategy](#upgrade--downgrade-strategy)
- [Version Skew Strategy](#version-skew-strategy)
- [Production Readiness Review Questionnaire](#production-readiness-review-questionnaire)
- [Feature enablement and rollback](#feature-enablement-and-rollback)
- [Rollout, Upgrade and Rollback Planning](#rollout-upgrade-and-rollback-planning)
- [Monitoring requirements](#monitoring-requirements)
- [Dependencies](#dependencies-1)
- [Scalability](#scalability)
- [Troubleshooting](#troubleshooting)
- [Implementation History](#implementation-history)
- [Alternatives](#alternatives)
- [CRI-O](#cri-o)
- [Infrastructure Needed](#infrastructure-needed)
<!-- /TOC -->
## Release Signoff Checklist
**ACTION REQUIRED:** In order to merge code into a release, there must be an issue in [kubernetes/enhancements] referencing this KEP and targeting a release milestone **before [Enhancement Freeze](https://github.com/kubernetes/sig-release/tree/master/releases)
of the targeted release**.
For enhancements that make changes to code or processes/procedures in core Kubernetes i.e., [kubernetes/kubernetes], we require the following Release Signoff checklist to be completed.
Check these off as they are completed for the Release Team to track. These checklist items _must_ be updated for the enhancement to be released.
Items marked with (R) are required *prior to targeting to a milestone / release*.
- [x] (R) Enhancement issue in release milestone, which links to KEP dir in [kubernetes/enhancements] (not the initial KEP PR)
- [x] (R) KEP approvers have set the KEP status to `implementable`
- [x] (R) Design details are appropriately documented
- [x] Test plan is in place, giving consideration to SIG Architecture and SIG Testing input
- [x] (R) Graduation criteria is in place
- [ ] (R) Production readiness review completed
- [ ] Production readiness review approved
- [x] "Implementation History" section is up-to-date for milestone
- [ ] User-facing documentation has been created in [kubernetes/website], for publication to [kubernetes.io]
- [ ] Supporting documentation e.g., additional design documents, links to mailing list discussions/SIG meetings, relevant PRs/issues, release notes
**Note:** Any PRs to move a KEP to `implementable` or significant changes once it is marked `implementable` should be approved by each of the KEP approvers. If any of those approvers is no longer appropriate then changes to that list should be approved by the remaining approvers and/or the owning SIG (or SIG-arch for cross cutting KEPs).
**Note:** This checklist is iterative and should be reviewed and updated every time this enhancement is being considered for a milestone.
[kubernetes.io]: https://kubernetes.io/
[kubernetes/enhancements]: https://github.com/kubernetes/enhancements/issues
[kubernetes/kubernetes]: https://github.com/kubernetes/kubernetes
[kubernetes/website]: https://github.com/kubernetes/website
## Summary
The ContainerD maintainers have been working on CRI support which is stable on Linux and Windows support has been added to ContainerD 1.3.
Supporting CRI-ContainerD on Windows means users will be able to take advantage of the latest container platform improvements that shipped in Windows Server 2019 / 1809 and beyond.
## Motivation
Windows Server 2019 includes an updated host container service (HCS v2) that offers more control over how containers are managed. This can remove some limitations and improve some Kubernetes API compatibility. However, the current Docker EE 18.09 release has not been updated to work with the Windows HCSv2, only ContainerD has been migrated. Moving to CRI-ContainerD allows the Windows OS team and Kubernetes developers to focus on an interface designed to work with Kubernetes to improve compatibility and accelerate development.
Additionally, users could choose to run with only CRI-ContainerD instead of Docker EE if they wanted to reduce the install footprint or produce their own self-supported CRI-ContainerD builds.
### Goals
- Improve the matrix of Kubernetes features that can be supported on Windows
- Provide a path forward to implement Kubernetes-specific features that are not available in the Docker API today
- Align with `dockershim` deprecation timelines once they are defined
### Non-Goals
- Running Linux containers on Windows nodes. This would be addressed as a separate KEP since the use cases are different.
- Deprecating `dockershim`. This is out of scope for this KEP. The effort to migrate that code out of tree is in [KEP PR 866](https://github.com/kubernetes/enhancements/pull/866) and deprecation discussions will happen later.
## Proposal
### User Stories
#### Improving Kubernetes integration for Windows Server containers
Moving to the new Windows HCSv2 platform and ContainerD would allow Kubernetes to add support for:
- Mounting single files, not just folders, into containers
- Termination messages (depends on single file mounts)
- /etc/hosts (c:\windows\system32\drivers\etc\hosts) file mapping
#### Improved isolation and compatibility between Windows pods using Hyper-V
Hyper-V enables each pod to run within its own hypervisor partition, with a separate kernel. This means that we can build forward-compatibility for containers across Windows OS versions - for example a container built using Windows Server 1809, could be run on a node running Windows Server 1903. This pod would use the Windows Server 1809 kernel to preserve full compatibility, and other pods could run using either a shared kernel with the node, or their own isolated Windows Server 1903 kernels. Containers requiring 1809 and 1903 (or later) cannot be mixed in the same pod, they must be deployed in separate pods so the matching kernel may be used. Running Windows Server version 1903 containers on a Windows Server 2019/1809 host will not work.
In addition, some customers may desire hypervisor-based isolation as an additional line of defense against a container break-out attack.
Adding Hyper-V support would use [RuntimeClass](https://kubernetes.io/docs/concepts/containers/runtime-class/#runtime-class).
3 typical RuntimeClass names would be configured in CRI-ContainerD to support common deployments:
- runhcs-wcow-process [default] - process isolation is used, container & node OS version must match
- runhcs-wcow-hypervisor - Hyper-V isolation is used, Pod will be compatible with containers built with Windows Server 2019 / 1809. Physical memory overcommit is allowed with overages filled from pagefile.
- runhcs-wcow-hypervisor-1903 - Hyper-V isolation is used, Pod will be compatible with containers built with Windows Server 1903. Physical memory overcommit is allowed with overages filled from pagefile.
Using Hyper-V isolation does require some extra memory for the isolated kernel & system processes. This could be accounted for by implementing the [PodOverhead](https://kubernetes.io/docs/concepts/containers/runtime-class/#runtime-class) proposal for those runtime classes. We would include a recommended PodOverhead in the default CRDs, likely between 100-200M.
#### Improve Control over Memory & CPU Resources with Hyper-V
The Windows kernel itself cannot provide reserved memory for pods, containers or processes. They are always fulfilled using virtual allocations which could be paged out later. However, using a Hyper-V partition improves control over memory and CPU cores. Hyper-V can either allocate memory on-demand (while still enforcing a hard limit), or it can be reserved as a physical allocation up front. Physical allocations may be able to enable large page allocations within that range (to be confirmed) and improve cache coherency. CPU core counts may also be limited so a pod only has certain cores available, rather than shares spread across all cores, and applications can tune thread counts to the actually available cores.
Operators could deploy additional RuntimeClasses with more granular control for performance critical workloads:
- 2019-Hyper-V-Reserve: Hyper-V isolation is used, Pod will be compatible with containers built with Windows Server 2019 / 1809. Memory reserve == limit, and is guaranteed to not page out.
- 2019-Hyper-V-Reserve-<N>Core: Same as above, except all but <N> CPU cores are masked out.
- 1903-Hyper-V-Reserve: Hyper-V isolation is used, Pod will be compatible with containers built with Windows Server 1903. Memory reserve == limit, and is guaranteed to not page out.
- 1903-Hyper-V-Reserve-<N>Core: Same as above, except all but <N> CPU cores are masked out.
#### Improved Storage Control with Hyper-V
Hyper-V also brings the capability to attach storage to pods using block-based protocols (SCSI) instead of file-based protocols (host file mapping / NFS / SMB). These capabilities could be enabled in HCSv2 with CRI-ContainerD, so this could be an area of future work. Some examples could include:
Attaching a "physical disk" (such as a local SSD, iSCSI target, Azure Disk or Google Persistent Disk) directly to a pod. The kubelet would need to identify the disk beforehand, then attach it as the pod is created with CRI. It could then be formatted and used within the pod without being mounted or accessible on the host.
Creating [Persistent Local Volumes](https://kubernetes.io/docs/concepts/storage/volumes/#local) using a local virtual disk attached directly to a pod. This would create local, non-resilient storage that could be formatted from the pod without being mounted on the host. This could be used to build out more resource controls such as fixed disk sizes and QoS based on IOPs or throughput and take advantage of high speed local storage such as temporary SSDs offered by cloud providers.
#### Enable runtime resizing of container resources
With virtual-based allocations and Hyper-V, it should be possible to increase the limit for a running pod. This won’t give it a guaranteed allocation, but will allow it to grow without terminating and scheduling a new pod. This could be a path to vertical pod autoscaling. This still needs more investigation and is mentioned as a future possibility.
### Implementation Details/Notes/Constraints
The work needed will span multiple repos, SIG-Windows will be maintaining a [Windows CRI-Containerd Project Board] to track everything in one place.
#### Proposal: Use Runtimeclass Scheduler to simplify deployments based on OS version requirements
As of version 1.14, RuntimeClass is not considered by the Kubernetes scheduler. There’s no guarantee that a node can start a pod, and it could fail until it’s scheduled on an appropriate node. Additional node labels and nodeSelectors are required to avoid this problem. [RuntimeClass Scheduling](https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/runtime-class-scheduling.md) proposes being able to add nodeSelectors automatically when using a RuntimeClass, simplifying the deployment.
Windows forward compatibility will bring a new challenge as well because there are two ways a container could be run:
- Constrained to the OS version it was designed for, using process-based isolation
- Running on a newer OS version using Hyper-V.
This second case could be enabled with a RuntimeClass. If a separate RuntimeClass was used based on OS version, this means the scheduler could find a node with matching class.
#### Proposal: Standardize hypervisor annotations
There are large number of [Windows annotations](https://github.com/Microsoft/hcsshim/blob/master/internal/oci/uvm.go#L15) defined that can control how Hyper-V will configure its hypervisor partition for the pod. Today, these could be set in the runtimeclasses defined in the CRI-ContainerD configuration file on the node, but it would be easier to maintain them if key settings around resources (cpu+memory+storage) could be aligned across multiple hypervisors and exposed in CRI.
Doing this would make pod definitions more portable between different isolation types. It would also avoid the need for a "t-shirt size" list of RuntimeClass instances to choose from:
- 1809-Hyper-V-Reserve-2Core-PhysicalMemory
- 1903-Hyper-V-Reserve-1Core-VirtualMemory
- 1903-Hyper-V-Reserve-4Core-PhysicalMemory
- etc.
### Dependencies
##### Windows Server 2019
This work would be carried out and tested using the already-released Windows Server 2019. That will enable customers a migration path from Docker 18.09 to CRI-ContainerD if they want to get this new functionality. Windows Server 1903 and later will also be supported once they’re tested.
##### CRI-ContainerD
It was announced that the upcoming 1.3 release would include Windows support, but that release and timeline are still in planning as of early April 2019.
The code needed to run ContainerD is merged, and [experimental support in moby](https://github.com/moby/moby/pull/38541) has merged. CRI is in the process of being updated, and open issues are tracked on the [Windows CRI-Containerd Project Board]
The CRI plugin changes needed to enable Hyper-V isolation are still in a development branch [jterry75/cri](https://github.com/jterry75/cri/tree/windows_port/cmd/containerd) and don’t have an upstream PR open yet.
Code: mostly done
CI+CD: lacking
##### CNI: Flannel
Flannel isn’t expected to require any changes since the Windows-specific metaplugins ship outside of the main repo. However, there is still not a stable release supporting Windows so it needs to be built from source. Additionally, the Windows-specific metaplugins to support ContainerD are being developed in a new repo [Microsoft/windows-container-networking](https://github.com/Microsoft/windows-container-networking). It’s still TBD whether this code will be merged into [containernetworking/plugins](https://github.com/containernetworking/plugins/), or maintained in a separate repo.
- Sdnbridge - this works with host-gw mode, replaces win-bridge
- Sdnoverlay - this works with vxlan overlay mode, replaces win-overlay
Code: in progress
CI+CD: lacking
##### CNI: Kubenet
The same sdnbridge plugin should work with kubenet as well. If someone would like to use kubenet instead of flannel, that should be feasible.
##### CNI: GCE
GCE uses the win-bridge meta-plugin today for managing Windows network interfaces. This would also need to migrate to sdnbridge.
##### Storage: in-tree AzureFile, AzureDisk, Google PD
These are expected to work and the same tests will be run for both dockershim and CRI-ContainerD.
##### Storage: FlexVolume for iSCSI & SMB
These out-of-tree plugins are expected to work, and are not tested in prow jobs today. If they graduate to stable we’ll add them to testgrid.
### Risks and Mitigations
#### CRI-ContainerD availability
As mentioned earlier, builds are not yet available. We will publish the setup steps required to build & test in the kubernetes-sigs/windows-testing repo during the course of alpha so testing can commence.
## Design Details
### Test Plan
The existing test cases running on Testgrid that cover Windows Server 2019 with Docker will be reused with CRI-ContainerD. Testgrid will include results for both ContainerD and dockershim.
- TestGrid: SIG-Windows: [flannel-l2bridge-windows-master](https://testgrid.k8s.io/sig-windows#flannel-l2bridge-windows-master) - this uses dockershim
- TestGrid: SIG-Windows: [containerd-l2bridge-windows-master](https://testgrid.k8s.io/sig-windows#containerd-l2bridge-windows-master) - this uses ContainerD
Test cases that depend on ContainerD and won't pass with Dockershim will be marked with `[feature:windows-containerd]` until `dockershim` is deprecated.
### Graduation Criteria
#### Alpha release
> Released with 1.18
- Windows Server 2019 containers can run with process level isolation using containerd
- TestGrid has results for Kubernetes master branch. CRI-ContainerD and CNI built from source and may include non-upstream PRs.
#### Alpha -> Beta Graduation
> Proposed for 1.19 or later
- Feature parity with dockershim, including:
- Group Managed Service Account support
- Named pipe & Unix domain socket mounts
- Support RuntimeClass to enable Hyper-V isolation
- Publicly available builds (beta or better) of CRI-ContainerD, at least one CNI
- TestGrid results for above builds with Kubernetes master branch
#### Beta -> GA Graduation
> Proposed for 1.20 or later
- Stable release of CRI-ContainerD on Windows, at least one CNI
- Master & release branches on TestGrid
- Perf analysis of pod-lifecycle operations performed and guidance around resource reservations and/or limits is updated for Windows node configuration and pod scheduling
### Upgrade / Downgrade Strategy
Because no Kubernetes API changes are expected, there is no planned upgrade/downgrade testing at the cluster level.
Node upgrade/downgrade is currently out of scope of the Kubernetes project, but we'll aim to include CRI-ContainerD in other efforts such as `kubeadm` bootstrapping for nodes.
As discussed in SIG-Node, there's also no testing on switching CRI on an existing node. These are expected to be installed and configured as a prerequisite before joining a node to the cluster.
### Version Skew Strategy
There's no version skew considerations needed for the same reasons described in upgrade/downgrade strategy.
## Production Readiness Review Questionnaire
### Feature enablement and rollback
_This section must be completed when targeting alpha to a release._
- **How can this feature be enabled / disabled in a live cluster?**
- [ ] Feature gate (also fill in values in `kep.yaml`)
- Feature gate name:
- Components depending on the feature gate:
- [x] Other
- Describe the mechanism: Windows agent nodes are expected to have the CRI installed and configured before joining the node to a cluster.
- Will enabling / disabling the feature require downtime of the control
plane? No
- Will enabling / disabling the feature require downtime or reprovisioning
of a node? Yes
- **Does enabling the feature change any default behavior?**
No
- **Can the feature be disabled once it has been enabled (i.e. can we rollback
the enablement)?**
No
- **What happens if we reenable the feature if it was previously rolled back?**
This feature is not enabled/disabled like traditional features - nodes are configured with containerd prior to joining a cluster.
A single Windows node will be configured with either Docker EE or containerd but different nodes running different CRIs can be joined to the same cluster with no negative impact.
- **Are there any tests for feature enablement/disablement?**
No - As mentioned in [Upgrade / Downgrade Strategy](#upgrade--downgrade-strategy) there is no testing for switching CRI on an existing node.
### Rollout, Upgrade and Rollback Planning
_This section must be completed when targeting beta graduation to a release._
- **How can a rollout fail? Can it impact already running workloads?**
  Nodes with improperly configured containerd installations may never reach a schedulable state, and may exhibit pod sandbox creation failures or issues creating/starting containers.
  Ensuring proper containerd installation and configuration is out of scope for this document.
- **What specific metrics should inform a rollback?**
All existing node health metrics should be used to determine/monitor node health.
- **Were upgrade and rollback tested? Was upgrade->downgrade->upgrade path tested?**
N/A
- **Is the rollout accompanied by any deprecations and/or removals of features,
APIs, fields of API types, flags, etc.?**
No
### Monitoring requirements
_This section must be completed when targeting beta graduation to a release._
- **How can an operator determine if the feature is in use by workloads?**
  The `status.nodeInfo.containerRuntimeVersion` property for a node indicates which CRI is being used for a node.
- **What are the SLIs (Service Level Indicators) an operator can use to
determine the health of the service?**
- [ ] Metrics
- Metric name:
- [Optional] Aggregation method:
- Components exposing the metric:
- [x] Other (treat as last resort)
- Details: Checking the health of Windows node running containerd should be no different than checking the health of any other node in a cluster.
- **What are the reasonable SLOs (Service Level Objectives) for the above SLIs?**
No
- **Are there any missing metrics that would be useful to have to improve
  observability of this feature?**
No
### Dependencies
_This section must be completed when targeting beta graduation to a release._
- **Does this feature depend on any specific services running in the cluster?**
Windows CRI-containerd does not add any additional dependencies/requirements for joining nodes to a cluster.
### Scalability
- **Will enabling / using this feature result in any new API calls?**
No
- **Will enabling / using this feature result in introducing new API types?**
No
- **Will enabling / using this feature result in any new calls to cloud
provider?**
No
- **Will enabling / using this feature result in increasing size or count
of the existing API objects?**
No
- **Will enabling / using this feature result in increasing time taken by any
operations covered by [existing SLIs/SLOs][]?**
No - But perf testing should be done to validate pod-lifecycle operations are not regressed compared to when equivalent nodes are configured with Docker EE.
- **Will enabling / using this feature result in non-negligible increase of
resource usage (CPU, RAM, disk, IO, ...) in any components?**
There are no expected increases in resource usage when using containerd - Additional perf testing will be done as prior of GA graduation.
### Troubleshooting
Troubleshooting section serves the `Playbook` role as of now. We may consider
splitting it into a dedicated `Playbook` document (potentially with some monitoring
details). For now we leave it here though.
_This section must be completed when targeting beta graduation to a release._
- **How does this feature react if the API server and/or etcd is unavailable?**
- **What are other known failure modes?**
For each of them fill in the following information by copying the below template:
- [Failure mode brief description]
- Detection: How can it be detected via metrics? Stated another way:
      how can an operator troubleshoot without logging into a master or worker node?
- Mitigations: What can be done to stop the bleeding, especially for already
running user workloads?
- Diagnostics: What are the useful log messages and their required logging
levels that could help debugging the issue?
Not required until feature graduated to Beta.
- Testing: Are there any tests for failure mode? If not describe why.
- **What steps should be taken if SLOs are not being met to determine the problem?**
[supported limits]: https://git.k8s.io/community//sig-scalability/configs-and-limits/thresholds.md
[existing SLIs/SLOs]: https://git.k8s.io/community/sig-scalability/slos/slos.md#kubernetes-slisslos
## Implementation History
- 2019-04-24 - KEP started, based on the [earlier doc shared SIG-Windows and SIG-Node](https://docs.google.com/document/d/1NigFz1nxI9XOi6sGblp_1m-rG9Ne6ELUrNO0V_TJqhI/edit)
- 2019-09-20 - Updated with new milestones
- 2020-01-21 - Updated with new milestones
- 2020-05-12 - Minor KEP updates, PRR questionnaire added
## Alternatives
### CRI-O
[CRI-O](https://cri-o.io/) is another runtime that aims to closely support all the fields available in the CRI spec. Currently there aren't any maintainers porting it to Windows so it's not a viable alternative.
## Infrastructure Needed
No new infrastructure is currently needed from the Kubernetes community. The existing test jobs using prow & testgrid will be copied and modified to test CRI-ContainerD in addition to dockershim.
[Windows CRI-Containerd Project Board]: https://github.com/orgs/kubernetes/projects/34
|
apache-2.0
|
karussell/fastutil
|
src/it/unimi/dsi/fastutil/objects/Object2CharSortedMaps.java
|
13714
|
/* Generic definitions */
/* Assertions (useful to generate conditional code) */
/* Current type and class (and size, if applicable) */
/* Value methods */
/* Interfaces (keys) */
/* Interfaces (values) */
/* Abstract implementations (keys) */
/* Abstract implementations (values) */
/* Static containers (keys) */
/* Static containers (values) */
/* Implementations */
/* Synchronized wrappers */
/* Unmodifiable wrappers */
/* Other wrappers */
/* Methods (keys) */
/* Methods (values) */
/* Methods (keys/values) */
/* Methods that have special names depending on keys (but the special names depend on values) */
/* Equality */
/* Object/Reference-only definitions (keys) */
/* Object/Reference-only definitions (values) */
/* Primitive-type-only definitions (values) */
/*
* Copyright (C) 2002-2013 Sebastiano Vigna
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package it.unimi.dsi.fastutil.objects;
import it.unimi.dsi.fastutil.objects.ObjectSortedSet;
import it.unimi.dsi.fastutil.objects.ObjectSortedSets;
import java.util.Comparator;
import java.util.Map;
import java.util.SortedMap;
import java.util.NoSuchElementException;
/** A class providing static methods and objects that do useful things with type-specific sorted maps.
*
* @see java.util.Collections
*/
public class Object2CharSortedMaps {
// Private constructor: this is a static utility holder and must not be instantiated.
private Object2CharSortedMaps() {}
/** Returns a comparator for entries based on a given comparator on keys.
 *
 * @param comparator a comparator on keys.
 * @return the associated comparator on entries.
 */
public static <K> Comparator<? super Map.Entry<K, ?>> entryComparator( final Comparator <K> comparator ) {
return new Comparator<Map.Entry<K, ?>>() {
public int compare( Map.Entry<K, ?> x, Map.Entry<K, ?> y ) {
// Entries are ordered solely by their keys; values are ignored.
return comparator.compare( x.getKey(), y.getKey() );
}
};
}
/** An immutable class representing an empty type-specific sorted map.
 *
 * <P>This class may be useful to implement your own in case you subclass
 * a type-specific sorted map.
 */
public static class EmptySortedMap <K> extends Object2CharMaps.EmptyMap <K> implements Object2CharSortedMap <K>, java.io.Serializable, Cloneable {
private static final long serialVersionUID = -7046029254386353129L;
protected EmptySortedMap() {}
// An empty map imposes no ordering, so no comparator is reported.
public Comparator <? super K> comparator() { return null; }
@SuppressWarnings("unchecked")
public ObjectSortedSet<Object2CharMap.Entry <K> > object2CharEntrySet() { return ObjectSortedSets.EMPTY_SET; }
@SuppressWarnings("unchecked")
public ObjectSortedSet<Map.Entry<K, Character>> entrySet() { return ObjectSortedSets.EMPTY_SET; }
@SuppressWarnings("unchecked")
public ObjectSortedSet <K> keySet() { return ObjectSortedSets.EMPTY_SET; }
// Any sub-view of an empty map is itself empty; the shared immutable
// EMPTY_MAP instance is returned rather than allocating a new object.
@SuppressWarnings("unchecked")
public Object2CharSortedMap <K> subMap( final K from, final K to ) { return EMPTY_MAP; }
@SuppressWarnings("unchecked")
public Object2CharSortedMap <K> headMap( final K to ) { return EMPTY_MAP; }
@SuppressWarnings("unchecked")
public Object2CharSortedMap <K> tailMap( final K from ) { return EMPTY_MAP; }
// There is no first/last key in an empty map, per the SortedMap contract.
public K firstKey() { throw new NoSuchElementException(); }
public K lastKey() { throw new NoSuchElementException(); }
}
/** An empty type-specific sorted map (immutable). It is serializable and cloneable. */
@SuppressWarnings("rawtypes")
public static final EmptySortedMap EMPTY_MAP = new EmptySortedMap();
/** An immutable class representing a type-specific singleton sorted map.
 *
 * <P>This class may be useful to implement your own in case you subclass
 * a type-specific sorted map.
 */
public static class Singleton <K> extends Object2CharMaps.Singleton <K> implements Object2CharSortedMap <K>, java.io.Serializable, Cloneable {
private static final long serialVersionUID = -7046029254386353129L;
// Ordering used by this map; null means natural ordering of the keys.
protected final Comparator <? super K> comparator;
protected Singleton( final K key, final char value, Comparator <? super K> comparator ) {
super( key, value );
this.comparator = comparator;
}
// Convenience constructor: natural ordering (null comparator).
protected Singleton( final K key, final char value ) {
this( key, value, null );
}
// Compares two keys, falling back to Comparable natural ordering when no
// explicit comparator was supplied.
@SuppressWarnings("unchecked")
final int compare( final K k1, final K k2 ) {
return comparator == null ? ( ((Comparable<K>)(k1)).compareTo(k2) ) : comparator.compare( k1, k2 );
}
public Comparator <? super K> comparator() { return comparator; }
// Views are created lazily and cached in the fields inherited from Object2CharMaps.Singleton.
@SuppressWarnings("unchecked")
public ObjectSortedSet<Object2CharMap.Entry <K> > object2CharEntrySet() { if ( entries == null ) entries = ObjectSortedSets.singleton( (Object2CharMap.Entry <K>)new SingletonEntry(), (Comparator<? super Object2CharMap.Entry <K> >)entryComparator( comparator ) ); return (ObjectSortedSet<Object2CharMap.Entry <K> >)entries; }
@SuppressWarnings({ "rawtypes", "unchecked" })
public ObjectSortedSet<Map.Entry<K, Character>> entrySet() { return (ObjectSortedSet)object2CharEntrySet(); }
public ObjectSortedSet <K> keySet() { if ( keys == null ) keys = ObjectSortedSets.singleton( key, comparator ); return (ObjectSortedSet <K>)keys; }
// Range views: the single key is retained only when it falls inside the
// requested range (from inclusive, to exclusive, per SortedMap semantics);
// otherwise the shared EMPTY_MAP is returned.
@SuppressWarnings("unchecked")
public Object2CharSortedMap <K> subMap( final K from, final K to ) { if ( compare( from, key ) <= 0 && compare( key, to ) < 0 ) return this; return EMPTY_MAP; }
@SuppressWarnings("unchecked")
public Object2CharSortedMap <K> headMap( final K to ) { if ( compare( key, to ) < 0 ) return this; return EMPTY_MAP; }
@SuppressWarnings("unchecked")
public Object2CharSortedMap <K> tailMap( final K from ) { if ( compare( from, key ) <= 0 ) return this; return EMPTY_MAP; }
// With exactly one mapping, the first and last key coincide.
public K firstKey() { return key; }
public K lastKey() { return key; }
}
/** Returns a type-specific immutable sorted map containing only the specified pair. The returned sorted map is serializable and cloneable.
 *
 * <P>Note that albeit the returned map is immutable, its default return value may be changed.
 *
 * @param key the only key of the returned sorted map.
 * @param value the only value of the returned sorted map.
 * @return a type-specific immutable sorted map containing just the pair <code>&lt;key,value&gt;</code>.
 */
public static <K> Object2CharSortedMap <K> singleton( final K key, Character value ) {
 return new Singleton <K>( (key), ((value).charValue()) );
}
/** Returns a type-specific immutable sorted map containing only the specified pair. The returned sorted map is serializable and cloneable.
 *
 * <P>Note that albeit the returned map is immutable, its default return value may be changed.
 *
 * @param key the only key of the returned sorted map.
 * @param value the only value of the returned sorted map.
 * @param comparator the comparator to use in the returned sorted map.
 * @return a type-specific immutable sorted map containing just the pair <code>&lt;key,value&gt;</code>.
 */
public static <K> Object2CharSortedMap <K> singleton( final K key, Character value, Comparator <? super K> comparator ) {
 return new Singleton <K>( (key), ((value).charValue()), comparator );
}
/** Returns a type-specific immutable sorted map containing only the specified pair. The returned sorted map is serializable and cloneable.
 *
 * <P>Note that albeit the returned map is immutable, its default return value may be changed.
 *
 * @param key the only key of the returned sorted map.
 * @param value the only value of the returned sorted map.
 * @return a type-specific immutable sorted map containing just the pair <code>&lt;key,value&gt;</code>.
 */
public static <K> Object2CharSortedMap <K> singleton( final K key, final char value ) {
 return new Singleton <K>( key, value );
}
/** Returns a type-specific immutable sorted map containing only the specified pair. The returned sorted map is serializable and cloneable.
 *
 * <P>Note that albeit the returned map is immutable, its default return value may be changed.
 *
 * @param key the only key of the returned sorted map.
 * @param value the only value of the returned sorted map.
 * @param comparator the comparator to use in the returned sorted map.
 * @return a type-specific immutable sorted map containing just the pair <code>&lt;key,value&gt;</code>.
 */
public static <K> Object2CharSortedMap <K> singleton( final K key, final char value, Comparator <? super K> comparator ) {
 return new Singleton <K>( key, value, comparator );
}
/** A synchronized wrapper class for sorted maps. */
public static class SynchronizedSortedMap <K> extends Object2CharMaps.SynchronizedMap <K> implements Object2CharSortedMap <K>, java.io.Serializable {
 private static final long serialVersionUID = -7046029254386353129L;
 /** The wrapped sorted map, kept in addition to the superclass's plain-map reference so the sorted-specific methods can be delegated. */
 protected final Object2CharSortedMap <K> sortedMap;
 protected SynchronizedSortedMap( final Object2CharSortedMap <K> m, final Object sync ) {
  super( m, sync );
  sortedMap = m;
 }
 protected SynchronizedSortedMap( final Object2CharSortedMap <K> m ) {
  super( m );
  sortedMap = m;
 }
 public Comparator <? super K> comparator() { synchronized( sync ) { return sortedMap.comparator(); } }
 // NOTE(review): the lazy creation of the entry/key views below is not guarded by
 // `sync`; a race can create a view twice (benign, but worth confirming it matches
 // the intended contract). The returned views are themselves synchronized on `sync`.
 public ObjectSortedSet<Object2CharMap.Entry <K> > object2CharEntrySet() { if ( entries == null ) entries = ObjectSortedSets.synchronize( sortedMap.object2CharEntrySet(), sync ); return (ObjectSortedSet<Object2CharMap.Entry <K> >)entries; }
 @SuppressWarnings({ "rawtypes", "unchecked" })
 public ObjectSortedSet<Map.Entry<K, Character>> entrySet() { return (ObjectSortedSet)object2CharEntrySet(); }
 public ObjectSortedSet <K> keySet() { if ( keys == null ) keys = ObjectSortedSets.synchronize( sortedMap.keySet(), sync ); return (ObjectSortedSet <K>)keys; }
 // Range views share this wrapper's `sync` object, so locking remains global across views.
 public Object2CharSortedMap <K> subMap( final K from, final K to ) { return new SynchronizedSortedMap <K>( sortedMap.subMap( from, to ), sync ); }
 public Object2CharSortedMap <K> headMap( final K to ) { return new SynchronizedSortedMap <K>( sortedMap.headMap( to ), sync ); }
 public Object2CharSortedMap <K> tailMap( final K from ) { return new SynchronizedSortedMap <K>( sortedMap.tailMap( from ), sync ); }
 public K firstKey() { synchronized( sync ) { return sortedMap.firstKey(); } }
 public K lastKey() { synchronized( sync ) { return sortedMap.lastKey(); } }
}
/** Returns a synchronized type-specific sorted map backed by the given type-specific sorted map.
 *
 * @param m the sorted map to be wrapped in a synchronized sorted map.
 * @return a synchronized view of the specified sorted map.
 * @see java.util.Collections#synchronizedSortedMap(SortedMap)
 */
public static <K> Object2CharSortedMap <K> synchronize( final Object2CharSortedMap <K> m ) { return new SynchronizedSortedMap <K>( m ); }
/** Returns a synchronized type-specific sorted map backed by the given type-specific sorted map, using an assigned object to synchronize.
 *
 * @param m the sorted map to be wrapped in a synchronized sorted map.
 * @param sync an object that will be used to synchronize the access to the sorted map.
 * @return a synchronized view of the specified sorted map.
 * @see java.util.Collections#synchronizedSortedMap(SortedMap)
 */
public static <K> Object2CharSortedMap <K> synchronize( final Object2CharSortedMap <K> m, final Object sync ) { return new SynchronizedSortedMap <K>( m, sync ); }
/** An unmodifiable wrapper class for sorted maps. */
public static class UnmodifiableSortedMap <K> extends Object2CharMaps.UnmodifiableMap <K> implements Object2CharSortedMap <K>, java.io.Serializable {
 private static final long serialVersionUID = -7046029254386353129L;
 /** The wrapped sorted map, kept in addition to the superclass's plain-map reference so the sorted-specific methods can be delegated. */
 protected final Object2CharSortedMap <K> sortedMap;
 protected UnmodifiableSortedMap( final Object2CharSortedMap <K> m ) {
  super( m );
  sortedMap = m;
 }
 public Comparator <? super K> comparator() { return sortedMap.comparator(); }
 // Views are created lazily, cached in the superclass fields, and wrapped unmodifiable.
 public ObjectSortedSet<Object2CharMap.Entry <K> > object2CharEntrySet() { if ( entries == null ) entries = ObjectSortedSets.unmodifiable( sortedMap.object2CharEntrySet() ); return (ObjectSortedSet<Object2CharMap.Entry <K> >)entries; }
 @SuppressWarnings({ "rawtypes", "unchecked" })
 public ObjectSortedSet<Map.Entry<K, Character>> entrySet() { return (ObjectSortedSet)object2CharEntrySet(); }
 public ObjectSortedSet <K> keySet() { if ( keys == null ) keys = ObjectSortedSets.unmodifiable( sortedMap.keySet() ); return (ObjectSortedSet <K>)keys; }
 // Range views are wrapped unmodifiable as well, so mutability cannot leak through them.
 public Object2CharSortedMap <K> subMap( final K from, final K to ) { return new UnmodifiableSortedMap <K>( sortedMap.subMap( from, to ) ); }
 public Object2CharSortedMap <K> headMap( final K to ) { return new UnmodifiableSortedMap <K>( sortedMap.headMap( to ) ); }
 public Object2CharSortedMap <K> tailMap( final K from ) { return new UnmodifiableSortedMap <K>( sortedMap.tailMap( from ) ); }
 public K firstKey() { return sortedMap.firstKey(); }
 public K lastKey() { return sortedMap.lastKey(); }
}
/** Returns an unmodifiable type-specific sorted map backed by the given type-specific sorted map.
 *
 * @param m the sorted map to be wrapped in an unmodifiable sorted map.
 * @return an unmodifiable view of the specified sorted map.
 * @see java.util.Collections#unmodifiableSortedMap(SortedMap)
 */
public static <K> Object2CharSortedMap <K> unmodifiable( final Object2CharSortedMap <K> m ) { return new UnmodifiableSortedMap <K>( m ); }
}
|
apache-2.0
|
dgraph-io/experiments
|
intersects/gnuplot/convert.sh
|
146
|
# Extract "<name>.<variant> <col8> <col10>" triples from the benchmark log,
# skipping blank lines. awk reads the file directly (no need to pipe from cat).
awk '{if (length($0) > 0) { print $2"."$4,$8,$10}}' ./tues-afternoon.bench > /tmp/bench.data
# Split the combined data into one <name>.dat file per benchmark (field 1).
awk '{print > $1".dat"}' /tmp/bench.data
|
apache-2.0
|
storozhukBM/javaslang-circuitbreaker
|
src/jmh/java/io/github/robwin/circuitbreaker/RingBitSetBenachmark.java
|
1513
|
/*
*
* Copyright 2016 Robert Winkler
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package io.github.robwin.circuitbreaker;
import io.github.robwin.circuitbreaker.internal.RingBitSet;
import org.openjdk.jmh.annotations.*;
import java.util.concurrent.TimeUnit;
@State(Scope.Benchmark)
@OutputTimeUnit(TimeUnit.MILLISECONDS)
@BenchmarkMode(Mode.All)
public class RingBitSetBenachmark {
    // NOTE(review): the class name carries a typo ("Benachmark" -> "Benchmark");
    // renaming would require renaming the source file too, so it is left as-is.

    // Benchmark-scoped state: a single ring bit set of capacity 1000 shared by all threads.
    private RingBitSet ringBitSet;
    private static final int ITERATION_COUNT = 2;
    private static final int WARMUP_COUNT = 2;
    private static final int THREAD_COUNT = 10;
    public static final int FORK_COUNT = 1;
    @Setup
    public void setUp() {
        ringBitSet = new RingBitSet(1000);
    }
    /** Measures concurrent writes: each invocation records one {@code true} and one {@code false} bit. */
    @Benchmark
    @Fork(value = FORK_COUNT)
    @Threads(value = THREAD_COUNT)
    @Warmup(iterations = WARMUP_COUNT)
    @Measurement(iterations = ITERATION_COUNT)
    public void setBits(){
        ringBitSet.setNextBit(true);
        ringBitSet.setNextBit(false);
    }
}
|
apache-2.0
|
UKHomeOffice/drt-scalajs-spa-exploration
|
shared/src/main/scala/drt/shared/api/WalkTime.scala
|
2292
|
package drt.shared.api
import drt.shared.Terminals.Terminal
import drt.shared.TimeUtil._
import drt.shared.{MinuteAsNoun, MinuteAsAdjective}
import upickle.default.{macroRW, _}
import scala.collection.immutable.Map
/** Walk time in milliseconds from a specific gate or stand to a terminal. */
case class WalkTime(gateOrStand: String, terminal: Terminal, walkTimeMillis: Long) {
  // Same walk time rounded to whole minutes (via TimeUtil.millisToMinutes).
  val inMinutes: Int = millisToMinutes(walkTimeMillis)
}
/** Walk times for a single terminal, keyed by gate name and by stand name respectively. */
case class TerminalWalkTimes(gateWalktimes: Map[String, WalkTime], standWalkTimes: Map[String, WalkTime])
object TerminalWalkTimes {
  // upickle JSON (de)serialisation for TerminalWalkTimes.
  implicit val rw: ReadWriter[TerminalWalkTimes] = macroRW
}
/** All known walk times, grouped per terminal. */
case class WalkTimes(byTerminal: Map[Terminal, TerminalWalkTimes]) {
  /** Human-readable walk time for an arrival.
    *
    * Prefers the gate's walk time, then the stand's, and otherwise falls back
    * to the terminal's default walk time (first parameter list), in millis.
    */
  def walkTimeStringForArrival(defaultWalkTime: Long)
                              (gate: Option[String], stand: Option[String], terminal: Terminal): String = {
    val defaultString = s"${MinuteAsNoun(millisToMinutes(defaultWalkTime)).display} (default walk time for terminal)"
    val maybeWalkTime: Option[String] = (gate, stand, byTerminal.get(terminal)) match {
      case (Some(g), _, Some(t)) if t.gateWalktimes.contains(g) =>
        // Use the bound `t` rather than re-looking-up byTerminal(terminal).
        t.gateWalktimes.get(g).map(wt => MinuteAsAdjective(wt.inMinutes).display + " walk time")
      case (_, Some(s), Some(t)) if t.standWalkTimes.contains(s) =>
        t.standWalkTimes.get(s).map(wt => MinuteAsAdjective(wt.inMinutes).display + " walk time")
      case _ => None
    }
    maybeWalkTime.getOrElse(defaultString)
  }

  /** True when no walk times are known for any terminal. */
  def isEmpty: Boolean = byTerminal.isEmpty
}
object WalkTimes {
  // upickle JSON (de)serialisation for WalkTimes.
  implicit val rw: ReadWriter[WalkTimes] = macroRW

  /** Builds a [[WalkTimes]] from separate gate and stand walk-time lists. */
  def apply(gateWalkTimes: Seq[WalkTime], standWalkTimes: Seq[WalkTime]): WalkTimes = {
    val gatesByTerminal = byTerminal(gateWalkTimes)
    val standsByTerminal = byTerminal(standWalkTimes)
    // Union of terminals seen in either list, so a terminal with only stands (or only gates) is kept.
    val keys = gatesByTerminal.keys ++ standsByTerminal.keys
    val twt: Map[Terminal, TerminalWalkTimes] = keys.map(key =>
      key -> TerminalWalkTimes(gatesByTerminal.getOrElse(key, Map()), standsByTerminal.getOrElse(key, Map()))
    ).toMap
    WalkTimes(twt)
  }

  /** Groups walk times by terminal, then by gate/stand name, keeping the first entry per name.
    *
    * Uses strict `map` instead of `mapValues`: `mapValues` returns a lazy,
    * non-serializable view that re-evaluates on every access.
    */
  def byTerminal(gateWalkTimes: Seq[WalkTime]): Map[Terminal, Map[String, WalkTime]] = gateWalkTimes
    .groupBy(_.terminal)
    .map { case (terminal, forTerminal) =>
      terminal -> forTerminal
        .groupBy(_.gateOrStand)
        .map { case (gateOrStand, ws) => gateOrStand -> ws.head }
    }
}
object WalkTime {
  // upickle JSON (de)serialisation for WalkTime.
  implicit val rw: ReadWriter[WalkTime] = macroRW
}
|
apache-2.0
|
vishwesh3/zulip-mobile
|
src/utils/internalLinks.js
|
4379
|
/* @flow strict-local */
import { addBreadcrumb } from '@sentry/react-native';
import type { Narrow, Stream, User } from '../types';
import { topicNarrow, streamNarrow, groupNarrow, specialNarrow } from './narrow';
import { isUrlOnRealm } from './url';
const getPathsFromUrl = (url: string = '', realm: string) => {
const paths = url
.split(realm)
.pop()
.split('#narrow/')
.pop()
.split('/');
if (paths.length > 0 && paths[paths.length - 1] === '') {
// url ends with /
paths.splice(-1, 1);
}
return paths;
};
/** PRIVATE -- exported only for tests.
 *
 * True when `url` is on this realm and its fragment is a `#narrow` link. */
export const isInternalLink = (url: string, realm: string): boolean =>
  isUrlOnRealm(url, realm) ? /^(\/#narrow|#narrow)/i.test(url.split(realm).pop()) : false;
/** PRIVATE -- exported only for tests.
 *
 * True for internal links that mention `near`, i.e. point at a specific message. */
export const isMessageLink = (url: string, realm: string): boolean =>
  isInternalLink(url, realm) && url.includes('near');
type LinkType = 'external' | 'home' | 'pm' | 'topic' | 'stream' | 'special';
export const getLinkType = (url: string, realm: string): LinkType => {
if (!isInternalLink(url, realm)) {
return 'external';
}
const paths = getPathsFromUrl(url, realm);
if (
(paths.length === 2 && paths[0] === 'pm-with')
|| (paths.length === 4 && paths[0] === 'pm-with' && paths[2] === 'near')
) {
return 'pm';
}
if (
(paths.length === 4 || paths.length === 6)
&& paths[0] === 'stream'
&& (paths[2] === 'subject' || paths[2] === 'topic')
) {
return 'topic';
}
if (paths.length === 2 && paths[0] === 'stream') {
return 'stream';
}
if (paths.length === 2 && paths[0] === 'is' && /^(private|starred|mentioned)/i.test(paths[1])) {
return 'special';
}
return 'home';
};
/** Decode a dot-encoded string. */
// The Zulip webapp uses this encoding in narrow-links:
// https://github.com/zulip/zulip/blob/1577662a6/static/js/hash_util.js#L18-L25
export const decodeHashComponent = (string: string): string => {
  try {
    // In this encoding '.' stands in for '%', so restoring '%' yields ordinary
    // percent-encoding, which decodeURIComponent can handle.
    return decodeURIComponent(string.replace(/\./g, '%'));
  } catch (err) {
    // `decodeURIComponent` throws strikingly uninformative errors,
    // so record the offending input in Sentry before rethrowing.
    addBreadcrumb({
      level: 'info',
      type: 'decoding',
      message: 'decodeHashComponent error',
      data: { input: string },
    });
    throw err;
  }
};
/** Parse the operand of a `stream` operator, returning a stream name. */
const parseStreamOperand = (operand, streamsById): string => {
// "New" (2018) format: ${stream_id}-${stream_name} .
const match = /^(\d+)-/.exec(operand);
if (match) {
const stream = streamsById.get(parseInt(match[0], 10));
if (stream) {
return stream.name;
}
}
// Old format: just stream name. This case is relevant indefinitely,
// so that links in old conversations continue to work.
return decodeHashComponent(operand);
};
/** Parse the operand of a `topic` or `subject` operator. */
const parseTopicOperand = operand => decodeHashComponent(operand);

/** Parse the operand of a `pm-with` operator.
 *
 * Returns the recipients' emails, or null if any recipient id is unknown. */
const parsePmOperand = (operand, usersById) => {
  const ids = operand.split('-')[0].split(',');
  const emails = [];
  for (const id of ids) {
    const user = usersById.get(parseInt(id, 10));
    if (user === undefined) {
      return null;
    }
    emails.push(user.email);
  }
  return emails;
};
/**
 * Convert an internal realm link into the narrow it points at.
 *
 * Returns null when the link is a PM link with an unknown recipient id,
 * or when its type maps to no specific narrow ('home' / 'external').
 */
export const getNarrowFromLink = (
  url: string,
  realm: string,
  usersById: Map<number, User>,
  streamsById: Map<number, Stream>,
): Narrow | null => {
  const type = getLinkType(url, realm);
  const paths = getPathsFromUrl(url, realm);
  switch (type) {
    case 'pm': {
      // paths[1] is the comma-separated recipient ids, e.g. "1,2-group".
      const recipientEmails = parsePmOperand(paths[1], usersById);
      if (recipientEmails === null) {
        return null;
      }
      return groupNarrow(recipientEmails);
    }
    case 'topic':
      return topicNarrow(parseStreamOperand(paths[1], streamsById), parseTopicOperand(paths[3]));
    case 'stream':
      return streamNarrow(parseStreamOperand(paths[1], streamsById));
    case 'special':
      return specialNarrow(paths[1]);
    default:
      return null;
  }
};
export const getMessageIdFromLink = (url: string, realm: string): number => {
const paths = getPathsFromUrl(url, realm);
return isMessageLink(url, realm) ? parseInt(paths[paths.lastIndexOf('near') + 1], 10) : 0;
};
|
apache-2.0
|
jose0luis41/Risk
|
doc/interfaz/package-use.html
|
5635
|
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="es">
<head>
<!-- Generated by javadoc (version 1.7.0_45) on Mon Sep 29 00:49:51 COT 2014 -->
<title>Uses of Package interfaz</title>
<meta name="date" content="2014-09-29">
<link rel="stylesheet" type="text/css" href="../stylesheet.css" title="Style">
</head>
<body>
<script type="text/javascript"><!--
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="Uses of Package interfaz";
}
//-->
</script>
<noscript>
<div>JavaScript is disabled on your browser.</div>
</noscript>
<!-- ========= START OF TOP NAVBAR ======= -->
<div class="topNav"><a name="navbar_top">
<!-- -->
</a><a href="#skip-navbar_top" title="Skip navigation links"></a><a name="navbar_top_firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../overview-summary.html">Overview</a></li>
<li><a href="package-summary.html">Package</a></li>
<li>Class</li>
<li class="navBarCell1Rev">Use</li>
<li><a href="package-tree.html">Tree</a></li>
<li><a href="../deprecated-list.html">Deprecated</a></li>
<li><a href="../index-files/index-1.html">Index</a></li>
<li><a href="../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li>Prev</li>
<li>Next</li>
</ul>
<ul class="navList">
<li><a href="../index.html?interfaz/package-use.html" target="_top">Frames</a></li>
<li><a href="package-use.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_top">
<li><a href="../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_top");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip-navbar_top">
<!-- -->
</a></div>
<!-- ========= END OF TOP NAVBAR ========= -->
<div class="header">
<h1 title="Uses of Package interfaz" class="title">Uses of Package<br>interfaz</h1>
</div>
<div class="contentContainer">
<ul class="blockList">
<li class="blockList">
<table border="0" cellpadding="3" cellspacing="0" summary="Use table, listing packages, and an explanation">
<caption><span>Packages that use <a href="../interfaz/package-summary.html">interfaz</a></span><span class="tabEnd"> </span></caption>
<tr>
<th class="colFirst" scope="col">Package</th>
<th class="colLast" scope="col">Description</th>
</tr>
<tbody>
<tr class="altColor">
<td class="colFirst"><a href="#interfaz">interfaz</a></td>
<td class="colLast"> </td>
</tr>
</tbody>
</table>
</li>
<li class="blockList"><a name="interfaz">
<!-- -->
</a>
<table border="0" cellpadding="3" cellspacing="0" summary="Use table, listing classes, and an explanation">
<caption><span>Classes in <a href="../interfaz/package-summary.html">interfaz</a> used by <a href="../interfaz/package-summary.html">interfaz</a></span><span class="tabEnd"> </span></caption>
<tr>
<th class="colOne" scope="col">Class and Description</th>
</tr>
<tbody>
<tr class="altColor">
<td class="colOne"><a href="../interfaz/class-use/Fondo.html#interfaz">Fondo</a> </td>
</tr>
<tr class="rowColor">
<td class="colOne"><a href="../interfaz/class-use/Fondo2.html#interfaz">Fondo2</a> </td>
</tr>
<tr class="altColor">
<td class="colOne"><a href="../interfaz/class-use/Inicio.html#interfaz">Inicio</a> </td>
</tr>
<tr class="rowColor">
<td class="colOne"><a href="../interfaz/class-use/InterfazOpciones.html#interfaz">InterfazOpciones</a> </td>
</tr>
<tr class="altColor">
<td class="colOne"><a href="../interfaz/class-use/InterfazPrincipal.html#interfaz">InterfazPrincipal</a> </td>
</tr>
<tr class="rowColor">
<td class="colOne"><a href="../interfaz/class-use/Mapa.html#interfaz">Mapa</a> </td>
</tr>
<tr class="altColor">
<td class="colOne"><a href="../interfaz/class-use/PanelOpciones.html#interfaz">PanelOpciones</a> </td>
</tr>
</tbody>
</table>
</li>
</ul>
</div>
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<div class="bottomNav"><a name="navbar_bottom">
<!-- -->
</a><a href="#skip-navbar_bottom" title="Skip navigation links"></a><a name="navbar_bottom_firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../overview-summary.html">Overview</a></li>
<li><a href="package-summary.html">Package</a></li>
<li>Class</li>
<li class="navBarCell1Rev">Use</li>
<li><a href="package-tree.html">Tree</a></li>
<li><a href="../deprecated-list.html">Deprecated</a></li>
<li><a href="../index-files/index-1.html">Index</a></li>
<li><a href="../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li>Prev</li>
<li>Next</li>
</ul>
<ul class="navList">
<li><a href="../index.html?interfaz/package-use.html" target="_top">Frames</a></li>
<li><a href="package-use.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_bottom">
<li><a href="../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_bottom");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip-navbar_bottom">
<!-- -->
</a></div>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
</body>
</html>
|
apache-2.0
|
BenDol/gwt-jui-demo
|
src/main/java/nz/co/doltech/gwtjui/demo/client/application/home/HomePresenter.java
|
1700
|
/*
* Copyright 2015 Doltech Systems Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package nz.co.doltech.gwtjui.demo.client.application.home;
import com.google.inject.Inject;
import com.google.web.bindery.event.shared.EventBus;
import com.gwtplatform.mvp.client.HasUiHandlers;
import com.gwtplatform.mvp.client.Presenter;
import com.gwtplatform.mvp.client.View;
import com.gwtplatform.mvp.client.annotations.NameToken;
import com.gwtplatform.mvp.client.annotations.ProxyCodeSplit;
import com.gwtplatform.mvp.client.proxy.ProxyPlace;
import nz.co.doltech.gwtjui.demo.client.application.ApplicationPresenter;
import nz.co.doltech.gwtjui.demo.client.place.NameTokens;
public class HomePresenter extends Presenter<HomePresenter.MyView, HomePresenter.MyProxy> implements HomeUiHandlers {
interface MyView extends View, HasUiHandlers<HomeUiHandlers> {
}
@NameToken(NameTokens.home)
@ProxyCodeSplit
interface MyProxy extends ProxyPlace<HomePresenter> {
}
@Inject
HomePresenter(EventBus eventBus, MyView view, MyProxy proxy) {
super(eventBus, view, proxy, ApplicationPresenter.TYPE_SetMainContent);
getView().setUiHandlers(this);
}
}
|
apache-2.0
|
oclc-developer-house/thirdpartyapi
|
index.php
|
6390
|
<?php
/*
*---------------------------------------------------------------
* APPLICATION ENVIRONMENT
*---------------------------------------------------------------
*
* You can load different configurations depending on your
* current environment. Setting the environment also influences
* things like logging and error reporting.
*
* This can be set to anything, but default usage is:
*
* development
* testing
* production
*
* NOTE: If you change these, also change the error_reporting() code below
*
*/
define('ENVIRONMENT', 'development');
/*
*---------------------------------------------------------------
* ERROR REPORTING
*---------------------------------------------------------------
*
* Different environments will require different levels of error reporting.
* By default development will show errors but testing and live will hide them.
*/
if (defined('ENVIRONMENT'))
{
	// Show everything while developing; hide all errors in testing/production.
	if (ENVIRONMENT === 'development')
	{
		error_reporting(E_ALL);
	}
	elseif (ENVIRONMENT === 'testing' OR ENVIRONMENT === 'production')
	{
		error_reporting(0);
	}
	else
	{
		exit('The application environment is not set correctly.');
	}
}
/*
*---------------------------------------------------------------
* SYSTEM FOLDER NAME
*---------------------------------------------------------------
*
* This variable must contain the name of your "system" folder.
* Include the path if the folder is not in the same directory
* as this file.
*
*/
$system_path = 'vendor/rogeriopradoj/codeigniter/system';
/*
*---------------------------------------------------------------
* APPLICATION FOLDER NAME
*---------------------------------------------------------------
*
* If you want this front controller to use a different "application"
* folder then the default one you can set its name here. The folder
* can also be renamed or relocated anywhere on your server. If
* you do, use a full server path. For more info please see the user guide:
* http://codeigniter.com/user_guide/general/managing_apps.html
*
* NO TRAILING SLASH!
*
*/
$application_folder = 'application';
/*
* --------------------------------------------------------------------
* DEFAULT CONTROLLER
* --------------------------------------------------------------------
*
* Normally you will set your default controller in the routes.php file.
* You can, however, force a custom routing by hard-coding a
* specific controller class/function here. For most applications, you
* WILL NOT set your routing here, but it's an option for those
* special instances where you might want to override the standard
* routing in a specific front controller that shares a common CI installation.
*
* IMPORTANT: If you set the routing here, NO OTHER controller will be
* callable. In essence, this preference limits your application to ONE
* specific controller. Leave the function name blank if you need
* to call functions dynamically via the URI.
*
* Un-comment the $routing array below to use this feature
*
*/
// The directory name, relative to the "controllers" folder. Leave blank
// if your controller is not in a sub-folder within the "controllers" folder
// $routing['directory'] = '';
// The controller class file name. Example: Mycontroller
// $routing['controller'] = '';
// The controller function you wish to be called.
// $routing['function'] = '';
/*
* -------------------------------------------------------------------
* CUSTOM CONFIG VALUES
* -------------------------------------------------------------------
*
* The $assign_to_config array below will be passed dynamically to the
* config class when initialized. This allows you to set custom config
* items or override any default config values found in the config.php file.
* This can be handy as it permits you to share one application between
* multiple front controller files, with each file containing different
* config values.
*
* Un-comment the $assign_to_config array below to use this feature
*
*/
// $assign_to_config['name_of_config_item'] = 'value of config item';
// --------------------------------------------------------------------
// END OF USER CONFIGURABLE SETTINGS. DO NOT EDIT BELOW THIS LINE
// --------------------------------------------------------------------
/*
* ---------------------------------------------------------------
* Resolve the system path for increased reliability
* ---------------------------------------------------------------
*/
// Set the current directory correctly for CLI requests
// (from the command line the cwd may be anywhere; anchor it to this file).
if (defined('STDIN'))
{
	chdir(dirname(__FILE__));
}
// Resolve the configured system path to an absolute path when it exists.
if (realpath($system_path) !== FALSE)
{
	$system_path = realpath($system_path).'/';
}
// ensure there's a trailing slash
$system_path = rtrim($system_path, '/').'/';
// Is the system path correct? Fail fast with a readable message if not.
if ( ! is_dir($system_path))
{
	exit("Your system folder path does not appear to be set correctly. Please open the following file and correct this: ".pathinfo(__FILE__, PATHINFO_BASENAME));
}
/*
* -------------------------------------------------------------------
* Now that we know the path, set the main path constants
* -------------------------------------------------------------------
*/
// The name of THIS file
define('SELF', pathinfo(__FILE__, PATHINFO_BASENAME));
// The PHP file extension
// this global constant is deprecated.
define('EXT', '.php');
// Path to the system folder (normalised to forward slashes)
define('BASEPATH', str_replace("\\", "/", $system_path));
// Path to the front controller (this file)
define('FCPATH', str_replace(SELF, '', __FILE__));
// Name of the "system folder" (last path component of BASEPATH)
define('SYSDIR', trim(strrchr(trim(BASEPATH, '/'), '/'), '/'));
// The path to the "application" folder: accept either a path relative to this
// file, or a folder inside BASEPATH; otherwise fail with a readable message.
if (is_dir($application_folder))
{
	define('APPPATH', $application_folder.'/');
}
else
{
	if ( ! is_dir(BASEPATH.$application_folder.'/'))
	{
		exit("Your application folder path does not appear to be set correctly. Please open the following file and correct this: ".SELF);
	}
	define('APPPATH', BASEPATH.$application_folder.'/');
}
/*
* --------------------------------------------------------------------
* LOAD THE BOOTSTRAP FILE
* --------------------------------------------------------------------
*
* And away we go...
*
*/
require_once BASEPATH.'core/CodeIgniter.php';
/* End of file index.php */
/* Location: ./index.php */
|
apache-2.0
|
resthub/resthub.github.io
|
apidocs/spring/1.1/org/resthub/identity/dao/class-use/JpaUserDao.html
|
4308
|
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="fr">
<head>
<!-- Generated by javadoc (version 1.7.0_01) on Sat Mar 24 00:58:28 CET 2012 -->
<meta http-equiv="Content-Type" content="text/html" charset="UTF-8">
<title>Uses of Class org.resthub.identity.dao.JpaUserDao (RESThub 1.1.3 API)</title>
<meta name="date" content="2012-03-24">
<link rel="stylesheet" type="text/css" href="../../../../../stylesheet.css" title="Style">
</head>
<body>
<script type="text/javascript"><!--
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="Uses of Class org.resthub.identity.dao.JpaUserDao (RESThub 1.1.3 API)";
}
//-->
</script>
<noscript>
<div>JavaScript is disabled on your browser.</div>
</noscript>
<!-- ========= START OF TOP NAVBAR ======= -->
<div class="topNav"><a name="navbar_top">
<!-- -->
</a><a href="#skip-navbar_top" title="Skip navigation links"></a><a name="navbar_top_firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../overview-summary.html">Overview</a></li>
<li><a href="../package-summary.html">Package</a></li>
<li><a href="../../../../../org/resthub/identity/dao/JpaUserDao.html" title="class in org.resthub.identity.dao">Class</a></li>
<li class="navBarCell1Rev">Use</li>
<li><a href="../package-tree.html">Tree</a></li>
<li><a href="../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li>Prev</li>
<li>Next</li>
</ul>
<ul class="navList">
<li><a href="../../../../../index.html?org/resthub/identity/dao/class-use/JpaUserDao.html" target="_top">Frames</a></li>
<li><a href="JpaUserDao.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_top">
<li><a href="../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_top");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip-navbar_top">
<!-- -->
</a></div>
<!-- ========= END OF TOP NAVBAR ========= -->
<div class="header">
<h2 title="Uses of Class org.resthub.identity.dao.JpaUserDao" class="title">Uses of Class<br>org.resthub.identity.dao.JpaUserDao</h2>
</div>
<div class="classUseContainer">No usage of org.resthub.identity.dao.JpaUserDao</div>
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<div class="bottomNav"><a name="navbar_bottom">
<!-- -->
</a><a href="#skip-navbar_bottom" title="Skip navigation links"></a><a name="navbar_bottom_firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../overview-summary.html">Overview</a></li>
<li><a href="../package-summary.html">Package</a></li>
<li><a href="../../../../../org/resthub/identity/dao/JpaUserDao.html" title="class in org.resthub.identity.dao">Class</a></li>
<li class="navBarCell1Rev">Use</li>
<li><a href="../package-tree.html">Tree</a></li>
<li><a href="../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li>Prev</li>
<li>Next</li>
</ul>
<ul class="navList">
<li><a href="../../../../../index.html?org/resthub/identity/dao/class-use/JpaUserDao.html" target="_top">Frames</a></li>
<li><a href="JpaUserDao.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_bottom">
<li><a href="../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_bottom");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip-navbar_bottom">
<!-- -->
</a></div>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
<p class="legalCopy"><small>Copyright © 2009-2012. All Rights Reserved.</small></p>
</body>
</html>
|
apache-2.0
|
thiagonego/cofagra_bi
|
src/main/java/br/com/cofagra/bi/renders/ExtPrimeInputMaskRenderer.java
|
4004
|
package br.com.cofagra.bi.renders;
import java.io.IOException;
import javax.faces.component.UIComponent;
import javax.faces.context.FacesContext;
import javax.faces.context.ResponseWriter;
import lombok.extern.java.Log;
import org.primefaces.component.inputmask.InputMask;
import org.primefaces.component.inputmask.InputMaskRenderer;
import org.primefaces.util.ComponentUtils;
import org.primefaces.util.HTML;
/**
 * Render responsável por sobrescrever o componente
* <p:inputMask/>
*
* @author thiagosampaio
*/
@Log
public class ExtPrimeInputMaskRenderer extends InputMaskRenderer {

    /**
     * Pulls the submitted value for this mask field out of the request parameter map.
     * Disabled and read-only components never accept client input, so their request
     * values are ignored entirely.
     */
    @Override
    public void decode(FacesContext context, UIComponent component) {
        final InputMask mask = (InputMask) component;
        if (mask.isDisabled() || mask.isReadonly()) {
            return;
        }

        decodeBehaviors(context, mask);

        final String clientId = mask.getClientId(context);
        final String submitted = (String) context.getExternalContext().getRequestParameterMap().get(clientId);
        if (submitted != null) {
            mask.setSubmittedValue(submitted);
        }
    }

    /**
     * Renders the component: first the HTML markup, then the widget init script.
     */
    @Override
    public void encodeEnd(FacesContext context, UIComponent component) throws IOException {
        final InputMask mask = (InputMask) component;
        encodeMarkup(context, mask);
        encodeScript(context, mask);
    }

    /**
     * Writes the PrimeFaces widget initialisation script for the mask field.
     */
    protected void encodeScript(FacesContext context, InputMask inputMask) throws IOException {
        final ResponseWriter writer = context.getResponseWriter();
        final String clientId = inputMask.getClientId(context);
        final String mask = inputMask.getMask();

        startScript(writer, clientId);
        writer.write("PrimeFaces.cw('InputMask','" + inputMask.resolveWidgetVar() + "',{");
        writer.write("id:'" + clientId + "'");
        if (mask != null) {
            writer.write(",mask:'" + mask + "'");
            // The placeholder option is only meaningful when a mask is present.
            if (inputMask.getPlaceHolder() != null) {
                writer.write(",placeholder:'" + inputMask.getPlaceHolder() + "'");
            }
        }
        encodeClientBehaviors(context, inputMask);
        writer.write("});");
        endScript(writer);
    }

    /**
     * Writes the plain HTML {@code <input type="text">} element for the mask field.
     */
    protected void encodeMarkup(FacesContext context, InputMask inputMask) throws IOException {
        final ResponseWriter writer = context.getResponseWriter();
        final String clientId = inputMask.getClientId(context);

        String styleClass = InputMask.STYLE_CLASS;
        if (!inputMask.isValid()) {
            styleClass = styleClass + " ui-state-error";
        }
        // Intentionally NOT appending 'ui-state-disabled' when the component is
        // disabled: that class used to break the Twitter Bootstrap styling.
        if (inputMask.getStyleClass() != null) {
            styleClass = styleClass + " " + inputMask.getStyleClass();
        }

        writer.startElement("input", null);
        writer.writeAttribute("id", clientId, null);
        writer.writeAttribute("name", clientId, null);
        writer.writeAttribute("type", "text", null);

        final String valueToRender = ComponentUtils.getValueToRender(context, inputMask);
        if (valueToRender != null) {
            writer.writeAttribute("value", valueToRender, null);
        }

        renderPassThruAttributes(context, inputMask, HTML.INPUT_TEXT_ATTRS);

        if (inputMask.isDisabled()) {
            writer.writeAttribute("disabled", "disabled", "disabled");
        }
        if (inputMask.isReadonly()) {
            writer.writeAttribute("readonly", "readonly", "readonly");
        }
        if (inputMask.getStyle() != null) {
            writer.writeAttribute("style", inputMask.getStyle(), "style");
        }
        writer.writeAttribute("class", styleClass, "styleClass");

        writer.endElement("input");
    }
}
|
apache-2.0
|
volcacius/Giftlist
|
app/src/main/java/it/polimi/dima/giftlist/presentation/presenter/WishlistPresenter.java
|
8395
|
package it.polimi.dima.giftlist.presentation.presenter;
import com.pushtorefresh.storio.sqlite.StorIOSQLite;
import com.pushtorefresh.storio.sqlite.operations.put.PutResults;
import com.pushtorefresh.storio.sqlite.queries.DeleteQuery;
import com.pushtorefresh.storio.sqlite.queries.Query;
import com.pushtorefresh.storio.sqlite.queries.RawQuery;
import org.greenrobot.eventbus.EventBus;
import org.greenrobot.eventbus.Subscribe;
import java.io.File;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import javax.inject.Inject;
import hugo.weaving.DebugLog;
import it.polimi.dima.giftlist.data.db.table.EbayProductTable;
import it.polimi.dima.giftlist.data.db.table.EtsyProductTable;
import it.polimi.dima.giftlist.data.db.table.WishlistTable;
import it.polimi.dima.giftlist.data.model.EbayProduct;
import it.polimi.dima.giftlist.data.model.EtsyProduct;
import it.polimi.dima.giftlist.data.model.Product;
import it.polimi.dima.giftlist.data.model.Wishlist;
import it.polimi.dima.giftlist.domain.interactor.GetDbProductListUseCase;
import it.polimi.dima.giftlist.presentation.event.ProductRemovedEvent;
import it.polimi.dima.giftlist.presentation.event.WishlistAddedEvent;
import it.polimi.dima.giftlist.presentation.view.WishlistView;
import it.polimi.dima.giftlist.presentation.view.activity.BaseActivity;
import rx.Observer;
import rx.SingleSubscriber;
import rx.android.schedulers.AndroidSchedulers;
import timber.log.Timber;
/**
* Created by Alessandro on 18/03/16.
*/
public class WishlistPresenter extends BaseRxLcePresenter<WishlistView, List<Product>, GetDbProductListUseCase> {

    @Inject
    public WishlistPresenter(GetDbProductListUseCase getDbProductListUseCase, StorIOSQLite db) {
        super(getDbProductListUseCase, db);
    }

    /**
     * (Re)subscribes the use case and shows the loading indicator when a view
     * is attached. An already-active subscription is cancelled first so only
     * one subscription is alive at a time.
     */
    @Override
    public void subscribe(boolean pullToRefresh) {
        if (!useCase.isUnsubscribed()) {
            unsubscribe();
        }
        useCase.execute(new BaseSubscriber(pullToRefresh));
        if (isViewAttached()) {
            getView().showLoading(pullToRefresh);
        }
    }

    @Override
    protected void onCompleted() {
        // DB subscriptions do not complete
    }

    @Override
    protected void onError(Throwable e, boolean pullToRefresh) {
        if (isViewAttached()) {
            getView().showError(e, pullToRefresh);
        }
        unsubscribe();
    }

    /**
     * Sorts the emitted products and pushes them to the view.
     *
     * Fix: {@code getView().setData(...)} used to be called BEFORE the
     * {@code isViewAttached()} check, which could NPE when the view was
     * detached while the DB subscription was still emitting. Both view calls
     * are now guarded.
     */
    @Override
    @DebugLog
    protected void onNext(List<Product> data) {
        List<Product> orderedList = new LinkedList<>(data);
        Collections.sort(orderedList);
        if (isViewAttached()) {
            getView().setData(orderedList);
            getView().showContent();
        }
    }

    /**
     * Loads the wishlist's name and occasion and initialises the collapsing
     * toolbar with them. The view call is guarded because the async DB read
     * may complete after the view has been detached.
     *
     * @param wishlistId id of the wishlist to load
     */
    public void setActionBarDetails(long wishlistId) {
        db.get()
                .object(Wishlist.class)
                .withQuery(Query.builder()
                        .table(WishlistTable.TABLE)
                        .where("id = ?")
                        .whereArgs(wishlistId)
                        .build())
                .prepare()
                .asRxSingle()
                .observeOn(AndroidSchedulers.mainThread())
                .subscribe(new SingleSubscriber<Wishlist>() {
                    @Override
                    public void onSuccess(Wishlist value) {
                        if (isViewAttached()) {
                            getView().initCollapsingToolbar(value.getName(), value.getOccasion());
                        }
                    }

                    @Override
                    public void onError(Throwable error) {
                        Timber.d("Can't load wishlist details");
                    }
                });
    }

    /**
     * Persists the display order of every product in the list.
     *
     * The update runs as a raw query WITHOUT notifying affected tables, so the
     * DB observer does not emit an unnecessary onNext for a pure reorder.
     *
     * @param productList products whose current display order should be stored
     */
    public void updateProductListOrder(List<Product> productList) {
        for (Product p : productList) {
            final String successLog =
                    String.format("Product %d is set at order %d in DB", p.getId(), p.getDisplayOrder());
            final String errorLog =
                    String.format("Error in setting product %d at order %d in DB", p.getId(), p.getDisplayOrder());
            // Each concrete product type has its own table, hence its own query.
            if (p instanceof EbayProduct) {
                executeRawQuery(EbayProductTable.getDisplayOrderUpdateQuery(p.getId(), p.getDisplayOrder()),
                        successLog, errorLog);
            } else if (p instanceof EtsyProduct) {
                executeRawQuery(EtsyProductTable.getDisplayOrderUpdateQuery(p.getId(), p.getDisplayOrder()),
                        successLog, errorLog);
            }
        }
    }

    /**
     * Deletes a product and its cached image file.
     *
     * @param product the product to remove; its concrete type selects the table
     */
    public void removeProduct(Product product) {
        deleteImages(product.getImageUri());
        final long productId = product.getId();
        final String successLog = String.format("Success in deleting the product %d", productId);
        final String errorLog = String.format("Error in deleting the product %d", productId);
        if (product instanceof EtsyProduct) {
            executeRawQuery(EtsyProductTable.getCustomDeleteQuery(productId), successLog, errorLog);
        } else if (product instanceof EbayProduct) {
            executeRawQuery(EbayProductTable.getCustomDeleteQuery(productId), successLog, errorLog);
        }
    }

    // Runs a raw SQL statement on the io scheduler and logs the outcome on the
    // main thread. All StorIO observables are already subscribed on
    // Schedulers.io(); only observeOn() has to be set here. Extracted to remove
    // the four near-identical anonymous subscribers the class used to contain.
    private void executeRawQuery(String query, final String successLog, final String errorLog) {
        db.executeSQL()
                .withQuery(RawQuery.builder()
                        .query(query)
                        .build())
                .prepare()
                .asRxSingle()
                .observeOn(AndroidSchedulers.mainThread())
                .subscribe(new SingleSubscriber<Object>() {
                    @Override
                    public void onSuccess(Object value) {
                        Timber.d(successLog);
                    }

                    @Override
                    public void onError(Throwable error) {
                        Timber.d(errorLog);
                    }
                });
    }

    // Best-effort removal of the product's cached image. A null/empty uri is
    // ignored instead of crashing in new File(uri).
    private void deleteImages(String uri) {
        if (uri == null || uri.isEmpty()) {
            return;
        }
        File fdelete = new File(uri);
        if (fdelete.exists()) {
            if (fdelete.delete()) {
                Timber.d("file Deleted: %s", uri);
            } else {
                Timber.d("file not Deleted %s", uri);
            }
        }
    }
}
|
apache-2.0
|
apexstudios/phabricator
|
src/applications/packager/controller/PhabricatorPackagerCreateController.php
|
1924
|
<?php
final class PhabricatorPackagerCreateController
  extends PhabricatorPackagerController {

  public function processRequest() {
    $request = $this->getRequest();
    $user = $request->getUser();

    $e_url = null;
    $errors = array();

    if ($request->isFormPost()) {
      $url = $request->getStr('url');
      if (empty($url)) {
        // Reject empty submissions and highlight the URL field.
        $errors[] = pht("Package URL must not be empty!");
        $e_url = pht("Required");
      } else {
        // Register the package and jump straight to its detail page.
        $package = new PhabricatorFilePackage();
        $package->setAuthorPHID($user->getPHID());
        $package->setPackageUrl($url);
        $package->setDownloads(0);
        $package->save();

        return id(new AphrontRedirectResponse())
          ->setURI($this->getApplicationURI('view/'.$package->getID()));
      }
    }

    // NOTE(review): $error_view is built but never attached to the dialog, so
    // validation errors are currently not displayed — confirm intent.
    $error_view = null;
    if ($errors) {
      $error_view = new AphrontErrorView();
      $error_view->setTitle(pht('Form Errors'));
      $error_view->setErrors($errors);
    }

    $instructions = phutil_tag(
      'p',
      array(
        'class' => 'aphront-form-instructions',
      ),
      pht('Just paste a clean URL to the file in Amazon S3 here, '.
        'and everything will be fine.'));

    $form = id(new AphrontFormLayoutView())
      ->appendChild($instructions)
      ->appendChild(
        id(new AphrontFormTextControl())
          ->setLabel(pht("Package Url"))
          ->setName("url")
          ->setError($e_url)
          ->setValue("")
          ->setCaption("The clean url to the file on S3"));

    $dialog = new AphrontDialogView();
    $dialog->setUser($user);
    $dialog->setTitle(pht("Register Package"));
    $dialog->appendChild($form);
    $dialog->addCancelButton($this->getApplicationURI());
    $dialog->addSubmitButton(pht("Register this package"));

    $response = new AphrontDialogResponse();
    return $response->setDialog($dialog);
  }
}
|
apache-2.0
|
H5C3JS/hxsd
|
1606/二阶/week3/day05/横向菜单.html
|
1471
|
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>菜单</title>
</head>
<style type="text/css">
*{ margin: 0; padding: 0; }
ul{ list-style: none; }
a{text-decoration: none;}
li a{line-height: 50px; height: 50px;display: block;font-size: 14px;color: #666;padding-left: 2em; }
</style>
<body>
<ul>
<li><a href="javascript:;">一级菜单</a>
<ul>
<li><a href="javascript:;">二级菜单</a></li>
<li><a href="javascript:;">二级菜单</a>
<ul>
<li><a href="javascript:;">三级菜单</a></li>
<li><a href="javascript:;">三级菜单</a></li>
<li><a href="javascript:;">三级菜单</a></li>
<li><a href="javascript:;">三级菜单</a></li>
</ul>
</li>
<li><a href="javascript:;">二级菜单</a></li>
<li><a href="javascript:;">二级菜单</a></li>
</ul>
</li>
<li><a href="javascript:;">一级菜单</a></li>
<li><a href="javascript:;">一级菜单</a></li>
<li><a href="javascript:;">一级菜单</a>
<ul>
<li><a href="javascript:;">二级菜单</a></li>
<li><a href="javascript:;">二级菜单</a>
<ul>
<li><a href="javascript:;">三级菜单</a></li>
<li><a href="javascript:;">三级菜单</a></li>
<li><a href="javascript:;">三级菜单</a></li>
<li><a href="javascript:;">三级菜单</a></li>
</ul>
</li>
<li><a href="javascript:;">二级菜单</a></li>
<li><a href="javascript:;">二级菜单</a></li>
</ul>
</li>
</ul>
</body>
</html>
|
apache-2.0
|
hazendaz/assertj-core
|
src/test/java/org/assertj/core/api/list/ListAssert_filteredOn_consumer_with_navigation_Test.java
|
3541
|
/*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* Copyright 2012-2021 the original author or authors.
*/
package org.assertj.core.api.list;
import static org.assertj.core.api.Assertions.assertThat;
import java.util.function.Consumer;
import org.assertj.core.data.TolkienCharacter;
import org.assertj.core.data.TolkienCharacterAssert;
import org.assertj.core.data.TolkienCharacterAssertFactory;
import org.junit.jupiter.api.Test;
class ListAssert_filteredOn_consumer_with_navigation_Test extends ListAssert_filteredOn_BaseTest {

  // Expected age of the hobbits whose name starts with "Fro" (presumably Frodo).
  private static final int FRODO_AGE = 33;

  // Assertion-based requirement: keeps only hobbits whose name starts with "Fro".
  private static final Consumer<? super TolkienCharacter> startsWithFro =
      hobbit -> assertThat(hobbit.getName()).startsWith("Fro");

  @Test
  void should_honor_AssertFactory_strongly_typed_navigation_assertions() {
    // GIVEN
    Iterable<TolkienCharacter> hobbits = hobbits();
    TolkienCharacterAssertFactory factory = new TolkienCharacterAssertFactory();
    // THEN - every navigation method keeps the strongly typed assertion
    assertThat(hobbits, factory).filteredOnAssertions(startsWithFro).first().hasAge(FRODO_AGE);
    assertThat(hobbits, factory).filteredOnAssertions(startsWithFro).last().hasAge(FRODO_AGE);
    assertThat(hobbits, factory).filteredOnAssertions(startsWithFro).element(0).hasAge(FRODO_AGE);
    assertThat(hobbits, factory).filteredOnAssertions(startsWithFro).elements(0).first().hasAge(FRODO_AGE);
  }

  @Test
  void should_honor_ClassBased_strongly_typed_navigation_assertions() {
    // GIVEN
    Iterable<TolkienCharacter> hobbits = hobbits();
    // THEN - every navigation method keeps the strongly typed assertion
    assertThat(hobbits, TolkienCharacterAssert.class).filteredOnAssertions(startsWithFro).first().hasAge(FRODO_AGE);
    assertThat(hobbits, TolkienCharacterAssert.class).filteredOnAssertions(startsWithFro).last().hasAge(FRODO_AGE);
    assertThat(hobbits, TolkienCharacterAssert.class).filteredOnAssertions(startsWithFro).element(0).hasAge(FRODO_AGE);
    assertThat(hobbits, TolkienCharacterAssert.class).filteredOnAssertions(startsWithFro).elements(0).first().hasAge(FRODO_AGE);
  }
}
|
apache-2.0
|
spaco/spo
|
src/gui/static/src/app/app.component.html
|
1901
|
<md-toolbar color="primary" id="top-menu">
<img src="/assets/logo-white.png" class="logo">
<!--<span><app-breadcrumb></app-breadcrumb></span>-->
<!-- This fills the remaining space of the current row -->
<span class="fill-remaining-space"></span>
<span *ngIf="loading()" class="syncing">
Syncing blocks {{ current && highest ? '(' + current + '/' + highest + ')' : '..' }}
</span>
<span *ngIf="!loading()">{{ walletService.sum() | async | sky }}</span>
<md-menu #settingsMenu="mdMenu">
<button md-menu-item [disabled]="true"> Version {{ version }} </button>
<button md-menu-item [routerLink]="['/settings/network']"> Networking </button>
<button md-menu-item [routerLink]="['/settings/blockchain']"> Blockchain </button>
<button md-menu-item [routerLink]="['/settings/outputs']"> Outputs </button>
<button md-menu-item [routerLink]="['/settings/pending-transactions']"> Pending Transactions </button>
<button md-menu-item [routerLink]="['/settings/backup']"> Back-up wallet </button>
</md-menu>
<button md-button [mdMenuTriggerFor]="settingsMenu">Settings</button>
</md-toolbar>
<md-toolbar class="main-menu">
<button md-button [routerLink]="['/wallets']" routerLinkActive="active">Wallets</button>
<button md-button [routerLink]="['/send']" routerLinkActive="active">Send</button>
<button md-button [routerLink]="['/history']" routerLinkActive="active">History</button>
<button md-button [routerLink]="['/buy']" routerLinkActive="active" *ngIf="otcEnabled">Donation</button>
<button md-button [routerLink]="['/explorer']" routerLinkActive="active">Explorer</button>
<span class="fill-remaining-space"></span>
</md-toolbar>
<md-progress-bar
*ngIf="loading()"
class="example-margin"
color="primary"
mode="determinate"
[value]="percentage"></md-progress-bar>
<div class="sky-container">
<router-outlet></router-outlet>
</div>
|
apache-2.0
|
manoelfranca/cilppp
|
foldsCreator/files/datasets/alzheimer_choline_0.3noisyOnTest/train10.f
|
8918
|
great(b1,o1).
great(jj1,u1).
great(y1,w1).
great(ee1,ll1).
great(jj1,i1).
great(o1,n1).
great(jj1,x1).
great(kk1,o1).
great(r1,o1).
great(h1,k1).
great(r1,ll1).
great(l1,g1).
great(ii1,a1).
great(y1,r1).
great(m1,n1).
great(h1,jj1).
great(k1,ll1).
great(k1,jj1).
great(dd1,s1).
great(hh1,l1).
great(ii1,o1).
great(ff1,m1).
great(c1,t1).
great(cc1,p1).
great(ff1,ee1).
great(jj1,bb1).
great(ii1,jj1).
great(ii1,s1).
great(h1,p1).
great(x1,aa1).
great(v1,ll1).
great(f1,ff1).
great(jj1,b1).
great(c1,q1).
great(z1,aa1).
great(g1,j1).
great(ii1,ff1).
great(kk1,t1).
great(d1,w1).
great(m1,o1).
great(h1,bb1).
great(j1,jj1).
great(f1,d1).
great(k1,q1).
great(d1,y1).
great(z1,q1).
great(a1,cc1).
great(z1,i1).
great(t1,n1).
great(i1,ee1).
great(kk1,m1).
great(j1,aa1).
great(e1,ee1).
great(e1,cc1).
great(j1,m1).
great(ii1,dd1).
great(kk1,n1).
great(cc1,aa1).
great(ee1,m1).
great(hh1,ee1).
great(x1,ll1).
great(k1,j1).
great(ii1,n1).
great(y1,kk1).
great(l1,q1).
great(q1,x1).
great(e1,i1).
great(cc1,o1).
great(ee1,o1).
great(ii1,ee1).
great(y1,b1).
great(z1,bb1).
great(d1,q1).
great(f1,jj1).
great(k1,ee1).
great(jj1,t1).
great(k1,y1).
great(l1,m1).
great(q1,b1).
great(y1,n1).
great(bb1,p1).
great(f1,r1).
great(h1,dd1).
great(hh1,cc1).
great(h1,u1).
great(hh1,t1).
great(bb1,o1).
great(d1,z1).
great(c1,m1).
great(e1,v1).
great(aa1,b1).
great(a1,r1).
great(w1,o1).
great(jj1,p1).
great(g1,o1).
great(hh1,a1).
great(bb1,w1).
great(h1,y1).
great(u1,cc1).
great(ff1,o1).
great(q1,n1).
great(k1,kk1).
great(aa1,n1).
great(v1,o1).
great(hh1,g1).
great(e1,j1).
great(h1,j1).
great(l1,w1).
great(hh1,aa1).
great(dd1,aa1).
great(c1,dd1).
great(a1,j1).
great(c1,l1).
great(hh1,i1).
great(a1,kk1).
great(g1,v1).
great(c1,p1).
great(k1,l1).
great(c1,kk1).
great(p1,t1).
great(cc1,kk1).
great(hh1,z1).
great(y1,ee1).
great(k1,aa1).
great(q1,w1).
great(jj1,o1).
great(e1,t1).
great(x1,w1).
great(i1,bb1).
great(j1,t1).
great(h1,l1).
great(aa1,ll1).
great(ee1,n1).
great(ff1,u1).
great(c1,d1).
great(f1,ll1).
great(q1,ee1).
great(h1,x1).
great(i1,kk1).
great(cc1,n1).
great(hh1,o1).
great(q1,p1).
great(a1,q1).
great(u1,s1).
great(kk1,s1).
great(e1,r1).
great(c1,n1).
great(p1,m1).
great(z1,dd1).
great(dd1,w1).
great(jj1,ff1).
great(l1,cc1).
great(e1,l1).
great(c1,f1).
great(dd1,m1).
great(d1,u1).
great(a1,x1).
great(s1,ll1).
great(ff1,cc1).
great(jj1,m1).
great(f1,hh1).
great(f1,cc1).
great(k1,cc1).
great(h1,g1).
great(d1,kk1).
great(r1,s1).
great(b1,p1).
great(k1,t1).
great(f1,x1).
great(e1,dd1).
great(i1,p1).
great(b1,ll1).
great(h1,q1).
great(e1,bb1).
great(f1,kk1).
great(hh1,r1).
great(jj1,n1).
great(dd1,ee1).
great(e1,d1).
great(q1,ff1).
great(g1,ll1).
great(e1,b1).
great(z1,jj1).
great(j1,n1).
great(k1,m1).
great(ff1,n1).
great(z1,cc1).
great(hh1,q1).
great(f1,q1).
great(q1,i1).
great(a1,jj1).
great(bb1,b1).
great(dd1,v1).
great(bb1,aa1).
great(ee1,bb1).
great(kk1,b1).
great(j1,dd1).
great(g1,cc1).
great(l1,n1).
great(jj1,y1).
great(k1,x1).
great(a1,o1).
great(jj1,s1).
great(g1,y1).
great(d1,i1).
great(j1,w1).
great(u1,b1).
great(v1,t1).
great(q1,cc1).
great(y1,ll1).
great(e1,s1).
great(j1,p1).
great(k1,p1).
great(h1,a1).
great(hh1,jj1).
great(v1,s1).
great(h1,o1).
great(z1,g1).
great(s1,w1).
great(p1,w1).
great(w1,m1).
great(h1,s1).
great(d1,ff1).
great(d1,t1).
great(g1,jj1).
great(z1,u1).
great(l1,jj1).
great(cc1,x1).
great(q1,dd1).
great(u1,w1).
great(ii1,b1).
great(c1,o1).
great(d1,k1).
great(j1,s1).
great(h1,z1).
great(ff1,b1).
great(a1,g1).
great(e1,y1).
great(r1,v1).
great(e1,u1).
great(c1,ff1).
great(z1,ll1).
great(h1,cc1).
great(z1,b1).
great(e1,aa1).
great(u1,bb1).
great(j1,y1).
great(cc1,s1).
great(a1,v1).
great(c1,r1).
great(d1,j1).
great(hh1,w1).
great(e1,n1).
great(jj1,ll1).
great(d1,r1).
great(c1,ll1).
great(l1,z1).
great(dd1,ll1).
great(i1,v1).
great(l1,ll1).
great(r1,kk1).
great(hh1,u1).
great(l1,aa1).
great(kk1,aa1).
great(u1,aa1).
great(d1,m1).
great(i1,n1).
great(g1,t1).
great(ii1,v1).
great(g1,p1).
great(l1,a1).
great(k1,i1).
great(e1,x1).
great(t1,m1).
great(d1,cc1).
great(r1,w1).
great(r1,n1).
great(e1,a1).
great(j1,o1).
great(a1,p1).
great(dd1,bb1).
great(kk1,x1).
great(l1,v1).
great(hh1,dd1).
great(hh1,ll1).
great(f1,h1).
great(z1,x1).
great(d1,x1).
great(f1,y1).
great(kk1,w1).
great(ii1,k1).
great(e1,k1).
great(f1,dd1).
great(aa1,t1).
great(h1,v1).
great(dd1,n1).
great(w1,n1).
great(h1,kk1).
great(j1,v1).
great(v1,p1).
great(l1,t1).
great(jj1,ee1).
great(e1,ll1).
great(f1,s1).
great(r1,dd1).
great(jj1,w1).
great(c1,j1).
great(b1,n1).
great(x1,b1).
great(r1,t1).
great(x1,s1).
great(j1,b1).
great(ii1,d1).
great(ii1,cc1).
great(l1,y1).
great(e1,m1).
great(b1,s1).
great(hh1,e1).
great(f1,m1).
great(bb1,m1).
great(q1,t1).
great(hh1,b1).
great(q1,r1).
great(kk1,p1).
great(k1,bb1).
great(cc1,t1).
great(ii1,h1).
great(q1,kk1).
great(ii1,hh1).
great(c1,x1).
great(l1,ee1).
great(h1,n1).
great(f1,t1).
great(dd1,x1).
great(u1,i1).
great(dd1,p1).
great(e1,q1).
great(cc1,b1).
great(k1,n1).
great(j1,ee1).
great(y1,bb1).
great(y1,u1).
great(f1,u1).
great(z1,w1).
great(ff1,kk1).
great(jj1,aa1).
great(e1,w1).
great(l1,i1).
great(e1,jj1).
great(ff1,t1).
great(f1,z1).
great(bb1,n1).
great(a1,w1).
great(ee1,b1).
great(ii1,ll1).
great(f1,bb1).
great(ii1,l1).
great(d1,n1).
great(b1,w1).
great(d1,ee1).
great(f1,l1).
great(h1,t1).
great(h1,ll1).
great(c1,z1).
great(x1,t1).
great(r1,m1).
great(d1,p1).
great(ii1,kk1).
great(q1,o1).
great(jj1,q1).
great(z1,t1).
great(r1,p1).
great(j1,q1).
great(h1,m1).
great(l1,j1).
great(l1,b1).
great(dd1,kk1).
great(k1,a1).
great(cc1,m1).
great(cc1,v1).
great(ff1,w1).
great(ff1,x1).
great(cc1,bb1).
great(ff1,dd1).
great(bb1,t1).
great(aa1,o1).
great(g1,kk1).
great(a1,bb1).
great(l1,s1).
great(s1,t1).
great(t1,o1).
great(k1,dd1).
great(bb1,x1).
great(g1,x1).
great(hh1,d1).
great(f1,ee1).
great(i1,b1).
great(a1,s1).
great(kk1,ll1).
great(ff1,s1).
great(d1,s1).
great(y1,m1).
great(c1,ii1).
great(x1,o1).
great(f1,v1).
great(c1,b1).
great(hh1,x1).
great(dd1,o1).
great(g1,s1).
great(l1,u1).
great(y1,cc1).
great(ff1,r1).
great(j1,cc1).
great(d1,jj1).
great(g1,aa1).
great(aa1,s1).
great(z1,y1).
great(ii1,r1).
great(f1,n1).
great(y1,x1).
great(i1,ll1).
great(q1,ll1).
great(f1,a1).
great(g1,w1).
great(ii1,q1).
great(z1,ff1).
great(v1,ee1).
great(f1,w1).
great(v1,aa1).
great(j1,bb1).
great(z1,v1).
great(j1,i1).
great(y1,o1).
great(i1,s1).
great(a1,u1).
great(d1,bb1).
great(c1,ee1).
great(r1,b1).
great(ee1,p1).
great(y1,p1).
great(y1,v1).
great(g1,r1).
great(l1,o1).
great(hh1,m1).
great(ff1,ll1).
great(ii1,u1).
great(b1,m1).
great(q1,aa1).
great(c1,i1).
great(x1,p1).
great(u1,ll1).
great(dd1,b1).
great(ee1,dd1).
great(m1,u1).
great(jj1,l1).
great(i1,c1).
great(s1,e1).
great(e1,f1).
great(z1,k1).
great(q1,h1).
great(v1,dd1).
great(ll1,c1).
great(i1,a1).
great(z1,l1).
great(ii1,c1).
great(t1,q1).
great(w1,r1).
great(z1,d1).
great(q1,a1).
great(y1,ii1).
great(w1,j1).
great(t1,j1).
great(u1,j1).
great(b1,q1).
great(w1,a1).
great(i1,f1).
great(ll1,q1).
great(p1,b1).
great(bb1,i1).
great(t1,w1).
great(n1,m1).
great(cc1,jj1).
great(n1,s1).
great(i1,e1).
great(z1,c1).
great(n1,g1).
great(cc1,l1).
great(u1,e1).
great(l1,f1).
great(v1,i1).
great(x1,y1).
great(t1,p1).
great(t1,u1).
great(dd1,ii1).
great(b1,kk1).
great(n1,v1).
great(s1,q1).
great(b1,ff1).
great(y1,z1).
great(v1,y1).
great(z1,e1).
great(u1,z1).
great(m1,w1).
great(n1,b1).
great(y1,jj1).
great(ll1,ee1).
great(g1,d1).
great(cc1,k1).
great(i1,y1).
great(l1,h1).
great(j1,z1).
great(u1,y1).
great(p1,i1).
great(kk1,ee1).
great(n1,r1).
great(p1,a1).
great(m1,v1).
great(aa1,j1).
great(p1,aa1).
great(jj1,k1).
great(b1,g1).
great(v1,q1).
great(q1,jj1).
great(s1,dd1).
great(t1,h1).
great(o1,u1).
great(t1,kk1).
great(l1,hh1).
great(g1,e1).
great(u1,hh1).
great(ee1,v1).
great(m1,t1).
great(m1,s1).
great(t1,x1).
great(o1,x1).
great(aa1,u1).
great(ff1,k1).
great(aa1,y1).
great(ee1,c1).
great(u1,a1).
great(l1,e1).
great(n1,ll1).
great(n1,d1).
great(x1,a1).
great(u1,k1).
great(kk1,u1).
great(x1,dd1).
great(x1,ff1).
great(n1,p1).
great(cc1,ii1).
great(g1,f1).
great(a1,hh1).
great(o1,q1).
great(w1,cc1).
great(bb1,ii1).
great(t1,i1).
great(dd1,h1).
great(d1,h1).
great(bb1,c1).
great(r1,c1).
great(t1,v1).
great(jj1,ii1).
great(ff1,h1).
great(s1,g1).
great(u1,h1).
great(ff1,z1).
great(dd1,ff1).
great(j1,d1).
great(v1,hh1).
great(bb1,l1).
great(i1,h1).
great(z1,a1).
great(w1,g1).
great(j1,hh1).
great(v1,ff1).
great(z1,f1).
great(w1,k1).
great(i1,j1).
great(ee1,hh1).
great(bb1,q1).
great(r1,jj1).
great(kk1,v1).
great(m1,h1).
great(i1,d1).
great(m1,r1).
great(n1,h1).
great(t1,a1).
great(o1,y1).
great(bb1,y1).
great(t1,b1).
great(ll1,aa1).
great(bb1,h1).
great(x1,ii1).
great(n1,cc1).
great(x1,j1).
great(y1,l1).
great(ff1,a1).
great(dd1,hh1).
great(jj1,e1).
great(b1,r1).
great(p1,x1).
great(v1,cc1).
great(kk1,i1).
great(j1,l1).
great(m1,ii1).
great(s1,b1).
great(b1,y1).
great(dd1,a1).
great(y1,hh1).
great(ee1,a1).
great(v1,h1).
great(t1,l1).
|
apache-2.0
|
ddalu5/logs-analyzer
|
logs_analyzer/lib.py
|
11644
|
import re
import calendar
from logs_analyzer.settings import *
from logs_analyzer.validators import *
from datetime import datetime
def get_service_settings(service_name):
    """
    Look up the default settings for a known service.

    :param service_name: service name (example: nginx, apache2...)
    :return: the settings registered for the service in SERVICES_SWITCHER
    :raises Exception: when the service is not registered
    """
    # Guard clause: fail loudly for unknown services.
    if service_name not in SERVICES_SWITCHER:
        raise Exception("Service \""+service_name+"\" doesn't exists!")
    return SERVICES_SWITCHER.get(service_name)
def get_date_filter(settings, minute=None, hour=None,
                    day=None, month=None,
                    year=None):
    """
    Get the date pattern that can be used to filter data from logs based on the params.

    Bug fix: the defaults used to be ``minute=datetime.now().minute`` etc.,
    which Python evaluates ONCE at import time, so every later call silently
    filtered on the module-load timestamp. ``None`` now means "current
    date/time component, resolved at call time", which is backward-compatible
    with the documented behavior.

    Components may also be the string ``'*'`` to widen the filter (e.g.
    ``minute='*'`` selects a whole hour) — accepted values are ultimately
    decided by the ``is_valid_*`` validators.

    :raises Exception: when a date component fails validation or the
        minute/hour combination is inconsistent (e.g. hour='*' with a minute)
    :param settings: dict with 'dateminutes_format', 'datehours_format' and
        'datedays_format' strftime patterns
    :param minute: int, '*' or None (current minute)
    :param hour: int, '*' or None (current hour)
    :param day: int or None (current day)
    :param month: int or None (current month)
    :param year: int or None (current year)
    :return: string usable as a log date filter
    """
    # Resolve "current time" defaults at call time, not import time.
    now = datetime.now()
    if minute is None:
        minute = now.minute
    if hour is None:
        hour = now.hour
    if day is None:
        day = now.day
    if month is None:
        month = now.month
    if year is None:
        year = now.year
    if not is_valid_year(year) or not is_valid_month(month) or not is_valid_day(day) \
            or not is_valid_hour(hour) or not is_valid_minute(minute):
        raise Exception("Date elements aren't valid")
    if minute != '*' and hour != '*':
        date_format = settings['dateminutes_format']
        date_filter = datetime(year, month, day, hour, minute).strftime(date_format)
    elif minute == '*' and hour != '*':
        date_format = settings['datehours_format']
        date_filter = datetime(year, month, day, hour).strftime(date_format)
    elif minute == '*' and hour == '*':
        date_format = settings['datedays_format']
        date_filter = datetime(year, month, day).strftime(date_format)
    else:
        # minute given but hour == '*' makes no sense as a filter.
        raise Exception("Date elements aren't valid")
    return date_filter
def filter_data(log_filter, data=None, filepath=None, is_casesensitive=True, is_regex=False, is_reverse=False):
    """
    Filter raw data or a file's content line by line and return the kept lines.

    ``filepath`` takes precedence when both ``data`` and ``filepath`` are set.

    NOTE(review): on an I/O error this prints the error and calls ``exit(2)``,
    terminating the whole process — questionable for library code, kept as-is
    for backward compatibility.

    :param log_filter: substring or regex used to select lines
    :param data: raw text to filter (used only when filepath is falsy)
    :param filepath: path of the file to filter
    :param is_casesensitive: case-sensitive matching when True
    :param is_regex: treat log_filter as a regex when True
    :param is_reverse: invert the selection when True
    :return: the selected lines joined into a single string
    :raises Exception: when both data and filepath are falsy
    """
    if filepath:
        kept = []
        try:
            with open(filepath, 'r') as handle:
                for row in handle:
                    if check_match(row, log_filter, is_regex, is_casesensitive, is_reverse):
                        kept.append(row)
        except (IOError, EnvironmentError) as e:
            print(e.strerror)
            exit(2)
        return "".join(kept)
    if data:
        kept = [row + "\n"
                for row in data.splitlines()
                if check_match(row, log_filter, is_regex, is_casesensitive, is_reverse)]
        return "".join(kept)
    raise Exception("Data and filepath values are NULL!")
def check_match(line, filter_pattern, is_regex, is_casesensitive, is_reverse):
    """
    Check whether a line should be selected by the filter.

    Bug fix: the previous ``return check_result and not is_reverse`` made the
    function return False for EVERY line when ``is_reverse`` was True (matching
    lines failed ``not is_reverse``; non-matching lines failed ``check_result``),
    so reverse selection could never select anything. Reverse selection now
    properly inverts the match result.

    :param line: the line to test
    :param filter_pattern: substring, or regex when is_regex is True
    :param is_regex: match with re.match (anchored at line start) when True
    :param is_casesensitive: compare case-insensitively when False
    :param is_reverse: select NON-matching lines when True
    :return: bool
    """
    if is_regex:
        flags = 0 if is_casesensitive else re.IGNORECASE
        matched = re.match(filter_pattern, line, flags) is not None
    else:
        if is_casesensitive:
            matched = filter_pattern in line
        else:
            matched = filter_pattern.lower() in line.lower()
    # XOR with the reverse flag: non-matches are selected when is_reverse=True.
    return matched != bool(is_reverse)
def get_web_requests(data, pattern, date_pattern=None, date_keys=None):
    """
    Parse raw web-server log data into a list of request dicts.

    Every regex match is expected to yield a 7-tuple:
    (ip, datetime, method, route, code, referrer, useragent).

    :param data: raw log text
    :param pattern: regex with 7 capture groups, one per field above
    :param date_pattern: optional regex used to normalise the raw date to ISO 8601
    :param date_keys: group-index mapping, required when date_pattern is given
    :return: list of dicts keyed IP/DATETIME/METHOD/ROUTE/CODE/REFERRER/USERAGENT
    :raises Exception: when date_pattern is given without date_keys
    """
    if date_pattern and not date_keys:
        raise Exception("date_keys is not defined")
    requests = []
    for fields in re.findall(pattern, data):
        # Normalise the timestamp only when a date pattern was supplied.
        when = __get_iso_datetime(fields[1], date_pattern, date_keys) if date_pattern else fields[1]
        requests.append({
            'IP': fields[0],
            'DATETIME': when,
            'METHOD': fields[2],
            'ROUTE': fields[3],
            'CODE': fields[4],
            'REFERRER': fields[5],
            'USERAGENT': fields[6],
        })
    return requests
def get_auth_requests(data, pattern, date_pattern=None, date_keys=None):
    """
    Analyze data (from the logs) and return a list of auth requests formatted
    as the model (pattern) defines.

    :param data: string: raw auth-log content
    :param pattern: string: regex with 3 groups (datetime, service, request info)
    :param date_pattern: regex|None: pattern used to normalize the datetime
    :param date_keys: dict|None: field-name -> group-index map for date_pattern
    :return: list of dicts
    """
    if date_pattern and not date_keys:
        # Mirror the validation done by get_web_requests: __get_iso_datetime
        # cannot work without the key mapping, so fail early and clearly.
        raise Exception("date_keys is not defined")
    requests = []
    for request_tuple in re.findall(pattern, data):
        if date_pattern:
            str_datetime = __get_iso_datetime(request_tuple[0], date_pattern, date_keys)
        else:
            str_datetime = request_tuple[0]
        # Use a dedicated name here; the original reassigned `data`, silently
        # clobbering the function's own parameter inside the loop.
        request_data = analyze_auth_request(request_tuple[2])
        request_data['DATETIME'] = str_datetime
        request_data['SERVICE'] = request_tuple[1]
        requests.append(request_data)
    return requests
def analyze_auth_request(request_info):
    """
    Analyze an auth request info string and extract its main data points
    (IP, invalid user, invalid password's user, preauth flag, closed flag).

    :param request_info: string
    :return: dict
    """
    lowered = request_info.lower()
    ips = re.findall(IPv4_REGEX, request_info)
    bad_users = re.findall(AUTH_USER_INVALID_USER, request_info)
    bad_pass_users = re.findall(AUTH_PASS_INVALID_USER, request_info)
    return {
        'IP': ips[0] if ips else None,
        'INVALID_USER': bad_users[0] if bad_users else None,
        'INVALID_PASS_USER': bad_pass_users[0] if bad_pass_users else None,
        'IS_PREAUTH': '[preauth]' in lowered,
        'IS_CLOSED': 'connection closed by ' in lowered,
    }
def __get_iso_datetime(str_date, pattern, keys):
"""
Change raw datetime from logs to ISO 8601 format.
:param str_date: string
:param pattern: regex (date_pattern from settings)
:param keys: dict (date_keys from settings)
:return: string
"""
months_dict = {v: k for k, v in enumerate(calendar.month_abbr)}
a_date = re.findall(pattern, str_date)[0]
d_datetime = datetime(int(a_date[keys['year']]) if 'year' in keys else __get_auth_year(),
months_dict[a_date[keys['month']]], int(a_date[keys['day']].strip()),
int(a_date[keys['hour']]), int(a_date[keys['minute']]), int(a_date[keys['second']]))
return d_datetime.isoformat(' ')
def __get_auth_year():
# TODO: Add support for analysis done in different terms
"""
Return the year when the requests happened so there will be no bug if the analyze is done in the new year eve,
the library was designed to be used for hourly analysis.
:return: int
"""
if datetime.now().month == 1 and datetime.now().day == 1 and datetime.now().hour == 0:
return datetime.now().year - 1
else:
return datetime.now().year
class LogsAnalyzer:
    """Filter a service's log data/file and parse it into request dicts."""

    def __init__(self, service, data=None, filepath=None):
        """
        Constructor: define the service (nginx, apache2...), optionally the
        data or the file path to analyze.

        :param service: string: service name (nginx, apache2...)
        :param data: string: data to be filtered, if not read from a file
        :param filepath: string: file to load data from when `data` is not set
            and the service's default log path should not be used
        """
        self.__filters = []
        self.__settings = get_service_settings(service)
        self.data = data
        if filepath:
            self.filepath = filepath
        else:
            self.filepath = self.__settings['dir_path'] + self.__settings['accesslog_filename']

    def add_filter(self, filter_pattern, is_casesensitive=True, is_regex=False, is_reverse=False):
        """
        Append a filter definition to the filters list.

        :param filter_pattern: string: substring, or regex when is_regex is True
        :param is_casesensitive: boolean
        :param is_regex: boolean
        :param is_reverse: boolean: invert the selection
        """
        self.__filters.append({
            'filter_pattern': filter_pattern,
            'is_casesensitive': is_casesensitive,
            'is_regex': is_regex,
            'is_reverse': is_reverse
        })

    def add_date_filter(self, minute=None, hour=None, day=None, month=None, year=None):
        """
        Add a datetime filter.  Components left as None default to the current
        time *at call time*.

        Bug fix: the previous version used `datetime.now()` directly in the
        default arguments, which froze the "current" time at class-definition
        (import) time, so long-running processes filtered on a stale date.

        :param minute: int|None
        :param hour: int|None
        :param day: int|None
        :param month: int|None
        :param year: int|None
        """
        now = datetime.now()
        date_filter = get_date_filter(self.__settings,
                                      now.minute if minute is None else minute,
                                      now.hour if hour is None else hour,
                                      now.day if day is None else day,
                                      now.month if month is None else month,
                                      now.year if year is None else year)
        self.add_filter(date_filter)

    def get_all_filters(self):
        """
        Return all defined filters.

        :return: list
        """
        return self.__filters

    def get_filter(self, index):
        """
        Get a filter definition by index.

        :param index: int
        :return: dict
        """
        return self.__filters[index]

    def remove_filter(self, index):
        """
        Remove one filter from the filters list by its index.

        Bug fix: the previous version called `self.__filters.remove(index)`,
        which searches for a *value* equal to `index` (always a ValueError
        here) instead of deleting by position.

        :param index: int
        """
        del self.__filters[index]

    def clear_all_filters(self):
        """Remove every defined filter."""
        self.__filters = []

    def check_all_matches(self, line, filter_patterns):
        """
        Check whether a line satisfies *all* filter patterns.

        :param line: string
        :param filter_patterns: list of dicts of check_match keyword arguments
        :return: bool|None: None when no filters are defined
        """
        to_return = None
        for pattern_data in filter_patterns:
            tmp_result = check_match(line=line, **pattern_data)
            to_return = tmp_result if to_return is None else (tmp_result and to_return)
        return to_return

    def filter_all(self):
        """
        Apply every defined filter and return the matching lines.

        :return: string
        """
        to_return = ""
        if self.data:
            for line in self.data.splitlines():
                if self.check_all_matches(line, self.__filters):
                    # splitlines() strips newlines, so re-append one.
                    to_return += line + "\n"
        else:
            with open(self.filepath, 'r') as file_object:
                for line in file_object:
                    if self.check_all_matches(line, self.__filters):
                        to_return += line
        return to_return

    def get_requests(self):
        """
        Filter the data and parse it into a list of request dicts using the
        service's request model.

        :return: list|None: None when the service type is unknown
        """
        data = self.filter_all()
        request_pattern = self.__settings['request_model']
        date_pattern = self.__settings['date_pattern']
        date_keys = self.__settings['date_keys']
        # NOTE(review): 'web0' looks like a possible typo for 'web' -- kept
        # as-is because the settings module is not visible here; confirm
        # against get_service_settings().
        if self.__settings['type'] == 'web0':
            return get_web_requests(data, request_pattern, date_pattern, date_keys)
        elif self.__settings['type'] == 'auth':
            return get_auth_requests(data, request_pattern, date_pattern, date_keys)
        else:
            return None
|
apache-2.0
|
mldbai/mldb
|
arch/gc_lock.h
|
11590
|
/* gc_lock.h -*- C++ -*-
Jeremy Barnes, 19 November 2011
Copyright (c) 2011 mldb.ai inc. All rights reserved.
This file is part of MLDB. Copyright 2015 mldb.ai inc. All rights reserved.
"Lock" that works by deferring the destruction of objects into a garbage collection
process which is only run when nothing could be using them.
*/
#pragma once
#include "mldb/base/exc_assert.h"
#include "mldb/arch/thread_specific.h"
#include <vector>
#include <memory>
#include <functional>
/** Deterministic memory reclamation is a fundamental problem in lock-free
algorithms and data structures.
When concurrently updating a lock-free data structure, it is not safe to
immediately reclaim the old value since a bazillon other threads might still
hold a reference to the old value. What we need is a safe memory reclamation
mechanism. That is why GcLock exists. GcLock works by deferring the
destruction of an object until it is safe to do; when the system decides
that nobody still holds a reference to it.
GcLock works by defining "critical sections" that a thread should hold to
safely read a shared object.
Note that this class contains many types of critical sections which are all
specialized for various situations:
- Shared CS: Regular read side critical sections.
- Exclusive CS: Acts as the write op in a read-write lock with the shared CS
- Speculative CS: Optimization of Shared CS whereby the CS may or may not be
unlocked when requested to save on repeated accesses to the CS.
Further details is available in the documentation of each respective
operand.
*/
namespace MLDB {
extern int32_t SpeculativeThreshold;
/*****************************************************************************/
/* GC LOCK BASE */
/*****************************************************************************/
struct GcLockBase {
    // Non-copyable: the lock owns per-thread epoch bookkeeping shared state.
    GcLockBase(const GcLockBase &) = delete;
    void operator = (const GcLockBase &) = delete;

public:
    /** Enum for type safe specification of whether or not we run deferrals on
        entry or exit to critical sections.  Those places that are latency
        sensitive should use RD_NO.
    */
    enum RunDefer {
        RD_NO = 0,   ///< Don't run deferred work on this call
        RD_YES = 1   ///< Potentially run deferred work on this call
    };

    /// A thread's bookkeeping info about each GC area
    struct ThreadGcInfoEntry {
        ThreadGcInfoEntry();
        ~ThreadGcInfoEntry();

        int inEpoch;  // 0, 1, -1 = not in
        // Per-thread lock state counters; exact semantics are defined by the
        // implementation in gc_lock.cc (presumably nesting depths -- confirm).
        int readLocked;
        int writeLocked;
        int specLocked;
        int specUnlocked;

        GcLockBase *owner;  // lock this entry is bound to (set by init())

        void init(const GcLockBase * const self);
        void lockShared(RunDefer runDefer);
        void unlockShared(RunDefer runDefer);
        bool isLockedShared();
        void lockExclusive();
        void unlockExclusive();
        void lockSpeculative(RunDefer runDefer);
        void unlockSpeculative(RunDefer runDefer);
        void forceUnlock(RunDefer runDefer);
        std::string print() const;
    };

    typedef ThreadSpecificInstanceInfo<ThreadGcInfoEntry> GcInfo;
    typedef typename GcInfo::PerThreadInfo ThreadGcInfo;

    struct Atomic;  // opaque: atomic epoch state (defined in gc_lock.cc)
    struct Data;    // opaque: shared lock data (defined in gc_lock.cc)

    void enterCS(ThreadGcInfoEntry * entry = 0, RunDefer runDefer = RD_YES);
    void exitCS(ThreadGcInfoEntry * entry = 0, RunDefer runDefer = RD_YES);
    void enterCSExclusive(ThreadGcInfoEntry * entry = 0);
    void exitCSExclusive(ThreadGcInfoEntry * entry = 0);

    int myEpoch(GcInfo::PerThreadInfo * threadInfo = 0) const;
    int currentEpoch() const;

    // Returns this thread's bookkeeping entry, binding it to this lock first.
    MLDB_ALWAYS_INLINE ThreadGcInfoEntry &
    getEntry(GcInfo::PerThreadInfo * info = 0) const
    {
        ThreadGcInfoEntry *entry = gcInfo.get(info);
        entry->init(this);
        return *entry;
        //return *gcInfo.get(info);
    }

    GcLockBase();

    virtual ~GcLockBase();

    /** Permanently deletes any resources associated with this lock. */
    virtual void unlink() = 0;

    void lockShared(GcInfo::PerThreadInfo * info = 0,
                    RunDefer runDefer = RD_YES);
    void unlockShared(GcInfo::PerThreadInfo * info = 0,
                      RunDefer runDefer = RD_YES);

    /** Speculative critical sections should be used for hot loops doing
        repeated but short reads on shared objects where it's acceptable to keep
        hold of the section in between read operations because you're likely to
        need it again soon.

        This is an optimization of lockShared since it can avoid repeated entry
        and exit of the CS when it's likely to be reused shortly after. It also
        has the effect of heavily reducing contention on the lock under heavy
        contention scenarios.

        Usage example:

            GcLock gc;
            for (condition) {
                gc.enterSpeculative();
                // In critical section
                gc.exitSpeculative();
                // After the call, gc might or might not be unlocked
            }
            gc.forceUnlock();

        Note the call to forceUnlock() after the loop which ensure that we've
        exited the critical section. Also note that the speculative functions
        are called directly in this example for illustrative purposes. In actual
        code, use the SpeculativeGuard class which provides full RAII
        guarantees.
    */
    void lockSpeculative(GcInfo::PerThreadInfo * info = 0,
                         RunDefer runDefer = RD_YES);
    void unlockSpeculative(GcInfo::PerThreadInfo * info = 0,
                           RunDefer runDefer = RD_YES);

    /** Ensures that after the call, the Gc is "unlocked".

        This should be used in conjunction with the speculative lock to notify
        the Gc Lock to exit any leftover speculative sections for the current
        thread. If multiple threads can hold a speculative region, this function
        has to be called in each thread respectively. Note that it will be
        called automatically when a thread is destroyed.
    */
    void forceUnlock(GcInfo::PerThreadInfo * info = 0,
                     RunDefer runDefer = RD_YES);

    int isLockedShared(GcInfo::PerThreadInfo * info = 0) const;
    int lockedInEpoch(GcInfo::PerThreadInfo * info = 0) const;
    void lockExclusive(GcInfo::PerThreadInfo * info = 0);
    void unlockExclusive(GcInfo::PerThreadInfo * info = 0);
    int isLockedExclusive(GcInfo::PerThreadInfo * info = 0) const;
    bool isLockedByAnyThread() const;

    /// Whether a guard really takes the lock, or is constructed deferred.
    enum DoLock {
        DONT_LOCK = 0,
        DO_LOCK = 1
    };

    /// RAII guard for the shared (read) critical section.  May be constructed
    /// with DONT_LOCK and locked later via lock().
    struct SharedGuard {
        SharedGuard(GcLockBase & lock,
                    RunDefer runDefer = RD_YES,
                    DoLock doLock = DO_LOCK)
            : lock_(lock),
              runDefer_(runDefer),
              doLock_(doLock)
        {
            if (doLock_)
                lock_.lockShared(0, runDefer_);
        }

        ~SharedGuard()
        {
            if (doLock_)
                lock_.unlockShared(0, runDefer_);
        }

        // Take the shared lock if this guard does not already hold it.
        void lock()
        {
            if (doLock_)
                return;
            lock_.lockShared(0, runDefer_);
            doLock_ = DO_LOCK;
        }

        // Release the shared lock early, before destruction.
        void unlock()
        {
            if (!doLock_)
                return;
            lock_.unlockShared(0, runDefer_);
            doLock_ = DONT_LOCK;
        }

        GcLockBase & lock_;
        const RunDefer runDefer_;  ///< Can this do deferred work?
        DoLock doLock_;            ///< Do we really lock?
    };

    /// RAII guard for the exclusive (write) critical section.
    struct ExclusiveGuard {
        ExclusiveGuard(GcLockBase & lock)
            : lock(lock)
        {
            lock.lockExclusive();
        }

        ~ExclusiveGuard()
        {
            lock.unlockExclusive();
        }

        GcLockBase & lock;
    };

    /// RAII guard for the speculative critical section (see lockSpeculative).
    struct SpeculativeGuard {
        SpeculativeGuard(GcLockBase &lock,
                         RunDefer runDefer = RD_YES)
            : lock(lock),
              runDefer_(runDefer)
        {
            lock.lockSpeculative(0, runDefer_);
        }

        ~SpeculativeGuard()
        {
            lock.unlockSpeculative(0, runDefer_);
        }

        GcLockBase & lock;
        const RunDefer runDefer_;
    };

    /** Wait until everything that's currently visible is no longer
        accessible.

        You can't call this if a guard is held, as it would deadlock waiting
        for itself to exit from the critical section.
    */
    void visibleBarrier();

    /** Wait until all defer functions that have been registered have been
        run.

        You can't call this if a guard is held, as it would deadlock waiting
        for itself to exit from the critical section.
    */
    void deferBarrier();

    // Schedule work to run once no thread can still observe the current epoch.
    void defer(std::function<void ()> work);

    typedef void (WorkFn1) (void *);
    typedef void (WorkFn2) (void *, void *);
    typedef void (WorkFn3) (void *, void *, void *);

    void defer(void (work) (void *), void * arg);
    void defer(void (work) (void *, void *), void * arg1, void * arg2);
    void defer(void (work) (void *, void *, void *), void * arg1, void * arg2, void * arg3);

    template<typename T>
    void defer(void (*work) (T *), T * arg)
    {
        defer((WorkFn1 *)work, (void *)arg);
    }

    template<typename T>
    static void doDelete(T * arg)
    {
        delete arg;
    }

    /// Defer `delete toDelete` until no reader can still reference it.
    template<typename T>
    void deferDelete(T * toDelete)
    {
        if (!toDelete) return;
        defer(doDelete<T>, toDelete);
    }

    template<typename... Args>
    void doDefer(void (fn) (Args...), Args...);

    /// Bind arbitrary arguments into a deferred callable.
    template<typename Fn, typename... Args>
    void deferBind(Fn fn, Args... args)
    {
        std::function<void ()> bound = std::bind<void>(fn, args...);
        this->defer(bound);
    }

    void dump();

protected:
    Data* data;  // shared lock state; storage owned by the derived class

    /// How many bytes does data require?
    static size_t dataBytesRequired();

    /// Placement construct a data instance
    static Data* uninitializedConstructData(void * memory);

private:
    struct Deferred;
    struct DeferredList;

    GcInfo gcInfo;

    Deferred * deferred;   ///< Deferred workloads (hidden structure)

    /** Update with the new value after first checking that the current
        value is the same as the old value.  Returns true if it
        succeeded; otherwise oldValue is updated with the new old
        value.

        As part of doing this, it will calculate the correct value for
        visibleEpoch() and, if it has changed, wake up anything waiting
        on that value, and will run any deferred handlers registered for
        that value.
    */
    bool updateAtomic(Atomic & oldValue, Atomic & newValue, RunDefer runDefer);

    /** Executes any available deferred work. */
    void runDefers();

    /** Check what deferred updates need to be run and do them.  Must be
        called with deferred locked.
    */
    std::vector<DeferredList *> checkDefers();
};
/*****************************************************************************/
/* GC LOCK */
/*****************************************************************************/
/** GcLock for use within a single process. */
struct GcLock : public GcLockBase
{
    GcLock();
    virtual ~GcLock();

    // Releases the in-process data; the lock must not be used afterwards.
    virtual void unlink();

private:
    std::unique_ptr<Data> localData;  // backing storage for GcLockBase::data
};
} // namespace MLDB
|
apache-2.0
|
TKiura/WeDGenS
|
frontend/source/docs/phpdoc/Controller/Helper/Helper_Api_Log_ApiRegisterDataLog.html
|
18841
|
<?xml version="1.0" encoding="iso-8859-1"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<!-- template designed by Marco Von Ballmoos -->
<title>Docs For Class Helper_Api_Log_ApiRegisterDataLog</title>
<link rel="stylesheet" href="../../media/stylesheet.css" />
<meta http-equiv='Content-Type' content='text/html; charset=utf-8'/>
<script src="../../media/lib/classTree.js"></script>
<script language="javascript" type="text/javascript">
var imgPlus = new Image();
var imgMinus = new Image();
imgPlus.src = "../../media/images/plus.png";
imgMinus.src = "../../media/images/minus.png";
// Expand collapsible documentation node `Node`: switch its toggle image to
// the "minus" icon and show the associated "span<Node>" element.
// NOTE(review): generated phpDocumentor template code; `navigator.family`
// is a non-standard browser sniff presumably set elsewhere by the template.
function showNode(Node){
    switch(navigator.family){
        case 'nn4':
            // Nav 4.x code fork...
            var oTable = document.layers["span" + Node];
            var oImg = document.layers["img" + Node];
            break;
        case 'ie4':
            // IE 4/5 code fork...
            var oTable = document.all["span" + Node];
            var oImg = document.all["img" + Node];
            break;
        case 'gecko':
            // Standards Compliant code fork...
            var oTable = document.getElementById("span" + Node);
            var oImg = document.getElementById("img" + Node);
            break;
    }
    oImg.src = imgMinus.src;
    oTable.style.display = "block";
}
// Collapse documentation node `Node`: switch its toggle image back to the
// "plus" icon and hide the associated "span<Node>" element.  Mirror of
// showNode(); same legacy browser-family forks.
function hideNode(Node){
    switch(navigator.family){
        case 'nn4':
            // Nav 4.x code fork...
            var oTable = document.layers["span" + Node];
            var oImg = document.layers["img" + Node];
            break;
        case 'ie4':
            // IE 4/5 code fork...
            var oTable = document.all["span" + Node];
            var oImg = document.all["img" + Node];
            break;
        case 'gecko':
            // Standards Compliant code fork...
            var oTable = document.getElementById("span" + Node);
            var oImg = document.getElementById("img" + Node);
            break;
    }
    oImg.src = imgPlus.src;
    oTable.style.display = "none";
}
// Report whether documentation node `Node` is currently expanded, i.e. its
// "span<Node>" element exists and is displayed as a block.
function nodeIsVisible(Node){
    switch(navigator.family){
        case 'nn4':
            // Nav 4.x code fork...
            var oTable = document.layers["span" + Node];
            break;
        case 'ie4':
            // IE 4/5 code fork...
            var oTable = document.all["span" + Node];
            break;
        case 'gecko':
            // Standards Compliant code fork...
            var oTable = document.getElementById("span" + Node);
            break;
    }
    return (oTable && oTable.style.display == "block");
}
// Flip documentation node `Node` between its expanded and collapsed states.
function toggleNodeVisibility(Node){
    if (nodeIsVisible(Node)){
        hideNode(Node);
    }else{
        showNode(Node);
    }
}
</script>
</head>
<body>
<div class="page-body">
<h2 class="class-name"><img src="../../media/images/Class_logo.png"
alt=" Class"
title=" Class"
style="vertical-align: middle"> Helper_Api_Log_ApiRegisterDataLog</h2>
<a name="sec-description"></a>
<div class="info-box">
<div class="info-box-title">Description</div>
<div class="nav-bar">
<span class="disabled">Description</span> |
<a href="#sec-vars">Vars</a>
| <a href="#sec-method-summary">Methods</a> (<a href="#sec-methods">details</a>)
<a href="#sec-const-summary">Constants</a> (<a href="#sec-consts">details</a>)
</div>
<div class="info-box-body">
<!-- ========== Info from phpDoc block ========= -->
<p class="short-description">Helper_Api_Log_ApiRegisterDataLog クラス</p>
<p class="description"><p>Apiモジュール register-dataコントローラ ログ出力ヘルパー定義クラス</p></p>
<ul class="tags">
<li><span class="field">author:</span> Mitsubishi Space Software Co.,Ltd.</li>
</ul>
<p class="notes">
Located in <a class="field" href="_application---modules---api---controllers---helpers---Log---ApiRegisterDataLog.php.html">/application/modules/api/controllers/helpers/Log/ApiRegisterDataLog.php</a> (line <span class="field">26</span>)
</p>
<pre>Zend_Controller_Action_Helper_Abstract
|
--<a href="../../Controller/ActionHelper/App_Controller_Action_Helper_Log.html">App_Controller_Action_Helper_Log</a>
|
--Helper_Api_Log_ApiRegisterDataLog</pre>
</div>
</div>
<a name="sec-const-summary"></a>
<div class="info-box">
<div class="info-box-title">Class Constant Summary</div>
<div class="nav-bar">
<a href="#sec-description">Description</a> |
<span class="disabled">Constants</span> (<a href="#sec-consts">details</a>)
<a href="#sec-vars">Vars</a>
|
|
<a href="#sec-method-summary">Methods</a> (<a href="#sec-methods">details</a>)
</div>
<div class="info-box-body">
<div class="const-summary">
<div class="const-title">
<img src="../../media/images/Constant.png" alt=" " />
<a href="#CODE_REGISTER_DATA_FAILURE" title="details" class="const-name">CODE_REGISTER_DATA_FAILURE</a> = <span class="var-type"> 20071</span>
</div>
<div class="const-title">
<img src="../../media/images/Constant.png" alt=" " />
<a href="#CODE_REGISTER_DATA_SUCCESS" title="details" class="const-name">CODE_REGISTER_DATA_SUCCESS</a> = <span class="var-type"> 20070</span>
</div>
<div class="const-title">
<img src="../../media/images/Constant.png" alt=" " />
<a href="#CODE_UPLOAD_FAILURE" title="details" class="const-name">CODE_UPLOAD_FAILURE</a> = <span class="var-type"> 20072</span>
</div>
</div>
</div>
</div>
<a name="sec-method-summary"></a>
<div class="info-box">
<div class="info-box-title">Method Summary</div>
<div class="nav-bar">
<a href="#sec-description">Description</a> |
<a href="#sec-const-summary">Constants</a> (<a href="#sec-consts">details</a>)
<a href="#sec-vars">Vars</a>
|
<span class="disabled">Methods</span> (<a href="#sec-methods">details</a>)
</div>
<div class="info-box-body">
<div class="method-summary">
<div class="method-definition">
<img src="../../media/images/Method.png" alt=" "/>
<span class="method-result">string</span>
<a href="#getName" title="details" class="method-name">getName</a>
()
</div>
<div class="method-definition">
<img src="../../media/images/Method.png" alt=" "/>
<span class="method-result">void</span>
<a href="#registerDataFailure" title="details" class="method-name">registerDataFailure</a>
(<span class="var-type">Exception</span> <span class="var-name">$exception</span>)
</div>
<div class="method-definition">
<img src="../../media/images/Method.png" alt=" "/>
<span class="method-result">void</span>
<a href="#registerDataSuccess" title="details" class="method-name">registerDataSuccess</a>
()
</div>
<div class="method-definition">
<img src="../../media/images/Method.png" alt=" "/>
<span class="method-result">void</span>
<a href="#uploadFailure" title="details" class="method-name">uploadFailure</a>
(<span class="var-type"><a href="../../FileTransfer/App_FileTransfer.html">App_FileTransfer</a></span> <span class="var-name">$fileTransfer</span>)
</div>
</div>
</div>
</div>
<a name="sec-vars"></a>
<div class="info-box">
<div class="info-box-title">Variables</div>
<div class="nav-bar">
<a href="#sec-description">Description</a> |
<a href="#sec-var-summary">Vars</a> (<span class="disabled">details</span>)
<a href="#sec-const-summary">Constants</a> (<a href="#sec-consts">details</a>)
|
<a href="#sec-method-summary">Methods</a> (<a href="#sec-methods">details</a>)
</div>
<div class="info-box-body">
<h4>Inherited Variables</h4>
<A NAME='inherited_vars'><!-- --></A>
<p>Inherited from <span class="classname"><a href="../../Controller/ActionHelper/App_Controller_Action_Helper_Log.html">App_Controller_Action_Helper_Log</a></span></p>
<blockquote>
<img src="../../media/images/Variable.png" />
<span class="var-title">
<span class="var-name"><a href="../../Controller/ActionHelper/App_Controller_Action_Helper_Log.html#var$_messages">App_Controller_Action_Helper_Log::$_messages</a></span><br>
</span>
</blockquote>
</div>
</div>
<a name="sec-methods"></a>
<div class="info-box">
<div class="info-box-title">Methods</div>
<div class="nav-bar">
<a href="#sec-description">Description</a> |
<a href="#sec-vars">Vars</a>
<a href="#sec-const-summary">Constants</a> (<a href="#sec-consts">details</a>)
<a href="#sec-method-summary">Methods</a> (<span class="disabled">details</span>)
</div>
<div class="info-box-body">
<A NAME='method_detail'></A>
<a name="methodgetName" id="getName"><!-- --></a>
<div class="oddrow">
<div class="method-header">
<img src="../../media/images/Method.png" />
<span class="method-title">getName</span> (line <span class="line-number">52</span>)
</div>
<!-- ========== Info from phpDoc block ========= -->
<p class="short-description">ヘルパー名の取得</p>
<ul class="tags">
<li><span class="field">return:</span> ヘルパー名</li>
<li><span class="field">access:</span> public</li>
</ul>
<div class="method-signature">
<span class="method-result">string</span>
<span class="method-name">
getName
</span>
()
</div>
<hr class="separator" />
<div class="notes">Redefinition of:</div>
<dl>
<dt><a href="../../Controller/ActionHelper/App_Controller_Action_Helper_Log.html#methodgetName">App_Controller_Action_Helper_Log::getName()</a></dt>
<dd>ヘルパー名の取得</dd>
</dl>
</div>
<a name="methodregisterDataFailure" id="registerDataFailure"><!-- --></a>
<div class="evenrow">
<div class="method-header">
<img src="../../media/images/Method.png" />
<span class="method-title">registerDataFailure</span> (line <span class="line-number">80</span>)
</div>
<!-- ========== Info from phpDoc block ========= -->
<p class="short-description">ログ出力:ユーザデータ登録失敗</p>
<ul class="tags">
<li><span class="field">access:</span> public</li>
</ul>
<div class="method-signature">
<span class="method-result">void</span>
<span class="method-name">
registerDataFailure
</span>
(<span class="var-type">Exception</span> <span class="var-name">$exception</span>)
</div>
<ul class="parameters">
<li>
<span class="var-type">Exception</span>
<span class="var-name">$exception</span><span class="var-description">: 例外</span> </li>
</ul>
</div>
<a name="methodregisterDataSuccess" id="registerDataSuccess"><!-- --></a>
<div class="oddrow">
<div class="method-header">
<img src="../../media/images/Method.png" />
<span class="method-title">registerDataSuccess</span> (line <span class="line-number">64</span>)
</div>
<!-- ========== Info from phpDoc block ========= -->
<p class="short-description">ログ出力:ユーザデータ登録成功</p>
<ul class="tags">
<li><span class="field">access:</span> public</li>
</ul>
<div class="method-signature">
<span class="method-result">void</span>
<span class="method-name">
registerDataSuccess
</span>
()
</div>
</div>
<a name="methoduploadFailure" id="uploadFailure"><!-- --></a>
<div class="evenrow">
<div class="method-header">
<img src="../../media/images/Method.png" />
<span class="method-title">uploadFailure</span> (line <span class="line-number">97</span>)
</div>
<!-- ========== Info from phpDoc block ========= -->
<p class="short-description">ログ出力:アップロード失敗</p>
<ul class="tags">
<li><span class="field">access:</span> public</li>
</ul>
<div class="method-signature">
<span class="method-result">void</span>
<span class="method-name">
uploadFailure
</span>
(<span class="var-type"><a href="../../FileTransfer/App_FileTransfer.html">App_FileTransfer</a></span> <span class="var-name">$fileTransfer</span>)
</div>
<ul class="parameters">
<li>
<span class="var-type"><a href="../../FileTransfer/App_FileTransfer.html">App_FileTransfer</a></span>
<span class="var-name">$fileTransfer</span><span class="var-description">: App_FileTransferインスタンス</span> </li>
</ul>
</div>
<h4>Inherited Methods</h4>
<a name='inherited_methods'><!-- --></a>
<!-- =========== Summary =========== -->
<p>Inherited From <span class="classname"><a href="../../Controller/ActionHelper/App_Controller_Action_Helper_Log.html">App_Controller_Action_Helper_Log</a></span></p>
<blockquote>
<img src="../../media/images/Method.png" alt=" "/>
<span class="method-name"><a href="../../Controller/ActionHelper/App_Controller_Action_Helper_Log.html#methodaccess">App_Controller_Action_Helper_Log::access()</a></span><br>
<img src="../../media/images/Method.png" alt=" "/>
<span class="method-name"><a href="../../Controller/ActionHelper/App_Controller_Action_Helper_Log.html#methodcritical">App_Controller_Action_Helper_Log::critical()</a></span><br>
<img src="../../media/images/Method.png" alt=" "/>
<span class="method-name"><a href="../../Controller/ActionHelper/App_Controller_Action_Helper_Log.html#methoddebug">App_Controller_Action_Helper_Log::debug()</a></span><br>
<img src="../../media/images/Method.png" alt=" "/>
<span class="method-name"><a href="../../Controller/ActionHelper/App_Controller_Action_Helper_Log.html#methoddirect">App_Controller_Action_Helper_Log::direct()</a></span><br>
<img src="../../media/images/Method.png" alt=" "/>
<span class="method-name"><a href="../../Controller/ActionHelper/App_Controller_Action_Helper_Log.html#methoddump">App_Controller_Action_Helper_Log::dump()</a></span><br>
<img src="../../media/images/Method.png" alt=" "/>
<span class="method-name"><a href="../../Controller/ActionHelper/App_Controller_Action_Helper_Log.html#methoderror">App_Controller_Action_Helper_Log::error()</a></span><br>
<img src="../../media/images/Method.png" alt=" "/>
<span class="method-name"><a href="../../Controller/ActionHelper/App_Controller_Action_Helper_Log.html#methodexceptionMessage">App_Controller_Action_Helper_Log::exceptionMessage()</a></span><br>
<img src="../../media/images/Method.png" alt=" "/>
<span class="method-name"><a href="../../Controller/ActionHelper/App_Controller_Action_Helper_Log.html#methodgetAccessLogger">App_Controller_Action_Helper_Log::getAccessLogger()</a></span><br>
<img src="../../media/images/Method.png" alt=" "/>
<span class="method-name"><a href="../../Controller/ActionHelper/App_Controller_Action_Helper_Log.html#methodgetErrorLogger">App_Controller_Action_Helper_Log::getErrorLogger()</a></span><br>
<img src="../../media/images/Method.png" alt=" "/>
<span class="method-name"><a href="../../Controller/ActionHelper/App_Controller_Action_Helper_Log.html#methodgetName">App_Controller_Action_Helper_Log::getName()</a></span><br>
<img src="../../media/images/Method.png" alt=" "/>
<span class="method-name"><a href="../../Controller/ActionHelper/App_Controller_Action_Helper_Log.html#methodnotice">App_Controller_Action_Helper_Log::notice()</a></span><br>
<img src="../../media/images/Method.png" alt=" "/>
<span class="method-name"><a href="../../Controller/ActionHelper/App_Controller_Action_Helper_Log.html#methodvalidateMessage">App_Controller_Action_Helper_Log::validateMessage()</a></span><br>
<img src="../../media/images/Method.png" alt=" "/>
<span class="method-name"><a href="../../Controller/ActionHelper/App_Controller_Action_Helper_Log.html#methodwarning">App_Controller_Action_Helper_Log::warning()</a></span><br>
</blockquote>
</div>
</div>
<a name="sec-consts"></a>
<div class="info-box">
<div class="info-box-title">Class Constants</div>
<div class="nav-bar">
<a href="#sec-description">Description</a> |
<a href="#sec-var-summary">Constants</a> (<span class="disabled">details</span>)
<a href="#sec-vars">Vars</a>
|
<a href="#sec-method-summary">Methods</a> (<a href="#sec-methods">details</a>)
</div>
<div class="info-box-body">
<a name="constCODE_REGISTER_DATA_FAILURE" id="CODE_REGISTER_DATA_FAILURE"><!-- --></A>
<div class="oddrow">
<div class="const-header">
<img src="../../media/images/Variable.png" />
<span class="const-title">
<span class="const-name">CODE_REGISTER_DATA_FAILURE</span>
= <span class="const-default"> 20071</span>
(line <span class="line-number">38</span>)
</span>
</div>
<!-- ========== Info from phpDoc block ========= -->
<p class="short-description">ログ番号:ユーザデータ登録失敗</p>
</div>
<a name="constCODE_REGISTER_DATA_SUCCESS" id="CODE_REGISTER_DATA_SUCCESS"><!-- --></A>
<div class="evenrow">
<div class="const-header">
<img src="../../media/images/Variable.png" />
<span class="const-title">
<span class="const-name">CODE_REGISTER_DATA_SUCCESS</span>
= <span class="const-default"> 20070</span>
(line <span class="line-number">33</span>)
</span>
</div>
<!-- ========== Info from phpDoc block ========= -->
<p class="short-description">ログ番号:ユーザデータ登録成功</p>
</div>
<a name="constCODE_UPLOAD_FAILURE" id="CODE_UPLOAD_FAILURE"><!-- --></A>
<div class="oddrow">
<div class="const-header">
<img src="../../media/images/Variable.png" />
<span class="const-title">
<span class="const-name">CODE_UPLOAD_FAILURE</span>
= <span class="const-default"> 20072</span>
(line <span class="line-number">43</span>)
</span>
</div>
<!-- ========== Info from phpDoc block ========= -->
<p class="short-description">ログ番号:アップロード失敗</p>
</div>
</div>
</div>
<p class="notes" id="credit">
Documentation generated on Thu, 27 Mar 2014 17:37:11 +0900 by <a href="http://www.phpdoc.org" target="_blank">phpDocumentor 1.4.4</a>
</p>
</div></body>
</html>
|
apache-2.0
|
vivisect/synapse
|
synapse/tests/test_lib_certdir.py
|
22334
|
import os
from contextlib import contextmanager
from OpenSSL import crypto, SSL
import synapse.common as s_common
from synapse.tests.common import *
import synapse.lib.certdir as s_certdir
class CertDirTest(SynTest):
@contextmanager
def getCertDir(self):
'''
Get a test CertDir object.
Yields:
s_certdir.CertDir: A certdir object based out of a temp directory.
'''
# create a temp folder and make it a cert dir
with self.getTestDir() as dirname:
s_scope.set('testdir', dirname)
cdir = s_certdir.CertDir(path=dirname)
yield cdir
def basic_assertions(self, cdir, cert, key, cacert=None):
'''
test basic certificate assumptions
Args:
cdir (s_certdir.CertDir): certdir object
cert (crypto.X509): Cert to test
key (crypto.PKey): Key for the certification
cacert (crypto.X509): Corresponding CA cert (optional)
'''
self.nn(cert)
self.nn(key)
# Make sure the certs were generated with the expected number of bits
self.eq(cert.get_pubkey().bits(), cdir.crypto_numbits)
self.eq(key.bits(), cdir.crypto_numbits)
# Make sure the certs were generated with the correct version number
self.eq(cert.get_version(), 2)
# ensure we can sign / verify data with our keypair
buf = b'The quick brown fox jumps over the lazy dog.'
sig = crypto.sign(key, buf, 'sha256')
sig2 = crypto.sign(key, buf + b'wut', 'sha256')
self.none(crypto.verify(cert, sig, buf, 'sha256'))
self.raises(crypto.Error, crypto.verify, cert, sig2, buf, 'sha256')
# ensure that a ssl context using both cert/key match
sslcontext = SSL.Context(SSL.TLSv1_2_METHOD)
sslcontext.use_certificate(cert)
sslcontext.use_privatekey(key)
self.none(sslcontext.check_privatekey())
if cacert:
# Make sure the cert was signed by the CA
self.eq(cert.get_issuer().der(), cacert.get_subject().der())
store = crypto.X509Store()
ctx = crypto.X509StoreContext(store, cert)
# OpenSSL should NOT be able to verify the certificate if its CA is not loaded
store.add_cert(cert)
self.raises(crypto.X509StoreContextError, ctx.verify_certificate) # unable to get local issuer certificate
# Generate a separate CA that did not sign the certificate
try:
cdir.genCaCert('otherca')
except DupFileName:
pass
# OpenSSL should NOT be able to verify the certificate if its CA is not loaded
store.add_cert(cdir.getCaCert('otherca'))
self.raises(crypto.X509StoreContextError, ctx.verify_certificate) # unable to get local issuer certificate
# OpenSSL should be able to verify the certificate, once its CA is loaded
store.add_cert(cacert)
self.none(ctx.verify_certificate()) # valid
def p12_assertions(self, cdir, cert, key, p12, cacert=None):
'''
test basic p12 certificate bundle assumptions
Args:
cdir (s_certdir.CertDir): certdir object
cert (crypto.X509): Cert to test
key (crypto.PKey): Key for the certification
p12 (crypto.PKCS12): PKCS12 object to test
cacert (crypto.X509): Corresponding CA cert (optional)
'''
self.nn(p12)
# Pull out the CA cert and keypair data
p12_cacert = None
if cacert:
p12_cacert = p12.get_ca_certificates()
self.nn(p12_cacert)
self.len(1, p12_cacert)
p12_cacert = p12_cacert[0]
self.eq(crypto.dump_certificate(crypto.FILETYPE_ASN1, cacert), crypto.dump_certificate(crypto.FILETYPE_ASN1, p12_cacert))
p12_cert = p12.get_certificate()
p12_key = p12.get_privatekey()
self.basic_assertions(cdir, p12_cert, p12_key, cacert=p12_cacert)
# Make sure that the CA cert and keypair files are the same as the CA cert and keypair contained in the p12 file
self.eq(crypto.dump_certificate(crypto.FILETYPE_ASN1, cert), crypto.dump_certificate(crypto.FILETYPE_ASN1, p12_cert))
self.eq(crypto.dump_privatekey(crypto.FILETYPE_ASN1, key), crypto.dump_privatekey(crypto.FILETYPE_ASN1, p12_key))
def user_assertions(self, cdir, cert, key, cacert=None):
'''
test basic certificate assumptions for a host certificate
Args:
cdir (s_certdir.CertDir): certdir object
cert (crypto.X509): Cert to test
key (crypto.PKey): Key for the certification
cacert (crypto.X509): Corresponding CA cert (optional)
'''
nextensions = cert.get_extension_count()
exts = {ext.get_short_name(): ext.get_data() for ext in [cert.get_extension(i) for i in range(nextensions)]}
nscertext = crypto.X509Extension(b'nsCertType', False, b'client')
keyuseext = crypto.X509Extension(b'keyUsage', False, b'digitalSignature')
extkeyuseext = crypto.X509Extension(b'extendedKeyUsage', False, b'clientAuth')
basicconext = crypto.X509Extension(b'basicConstraints', False, b'CA:FALSE')
self.eq(exts[b'nsCertType'], nscertext.get_data())
self.eq(exts[b'keyUsage'], keyuseext.get_data())
self.eq(exts[b'extendedKeyUsage'], extkeyuseext.get_data())
self.eq(exts[b'basicConstraints'], basicconext.get_data())
self.notin(b'subjectAltName', exts)
def host_assertions(self, cdir, cert, key, cacert=None):
'''
test basic certificate assumptions for a host certificate
Args:
cdir (s_certdir.CertDir): certdir object
cert (crypto.X509): Cert to test
key (crypto.PKey): Key for the certification
cacert (crypto.X509): Corresponding CA cert (optional)
'''
nextensions = cert.get_extension_count()
exts = {ext.get_short_name(): ext.get_data() for ext in [cert.get_extension(i) for i in range(nextensions)]}
nscertext = crypto.X509Extension(b'nsCertType', False, b'server')
keyuseext = crypto.X509Extension(b'keyUsage', False, b'digitalSignature,keyEncipherment')
extkeyuseext = crypto.X509Extension(b'extendedKeyUsage', False, b'serverAuth')
basicconext = crypto.X509Extension(b'basicConstraints', False, b'CA:FALSE')
self.eq(exts[b'nsCertType'], nscertext.get_data())
self.eq(exts[b'keyUsage'], keyuseext.get_data())
self.eq(exts[b'extendedKeyUsage'], extkeyuseext.get_data())
self.eq(exts[b'basicConstraints'], basicconext.get_data())
self.isin(b'subjectAltName', exts)
def test_certdir_cas(self):
with self.getCertDir() as cdir: # type: s_certdir.CertDir
caname = 'syntest'
inter_name = 'testsyn-intermed'
base = cdir._getPathJoin()
# Test that all the methods for loading the certificates return correct values for non-existant files
self.none(cdir.getCaCert(caname))
self.none(cdir.getCaKey(caname))
self.false(cdir.isCaCert(caname))
self.none(cdir.getCaCertPath(caname))
self.none(cdir.getCaKeyPath(caname))
# Generate a self-signed CA =======================================
cdir.genCaCert(caname)
# Test that all the methods for loading the certificates work
self.isinstance(cdir.getCaCert(caname), crypto.X509)
self.isinstance(cdir.getCaKey(caname), crypto.PKey)
self.true(cdir.isCaCert(caname))
self.eq(cdir.getCaCertPath(caname), base + '/cas/' + caname + '.crt')
self.eq(cdir.getCaKeyPath(caname), base + '/cas/' + caname + '.key')
# Run basic assertions on the CA keypair
cacert = cdir.getCaCert(caname)
cakey = cdir.getCaKey(caname)
self.basic_assertions(cdir, cacert, cakey)
# Generate intermediate CA ========================================
cdir.genCaCert(inter_name, signas=caname)
# Run basic assertions, make sure that it was signed by the root CA
inter_cacert = cdir.getCaCert(inter_name)
inter_cakey = cdir.getCaKey(inter_name)
self.basic_assertions(cdir, inter_cacert, inter_cakey, cacert=cacert)
def test_certdir_hosts(self):
with self.getCertDir() as cdir: # type: s_certdir.CertDir
caname = 'syntest'
hostname = 'visi.vertex.link'
hostname_unsigned = 'unsigned.vertex.link'
base = cdir._getPathJoin()
cdir.genCaCert(caname)
cacert = cdir.getCaCert(caname)
# Test that all the methods for loading the certificates return correct values for non-existant files
self.none(cdir.getHostCert(hostname_unsigned))
self.none(cdir.getHostKey(hostname_unsigned))
self.false(cdir.isHostCert(hostname_unsigned))
self.none(cdir.getHostCertPath(hostname_unsigned))
self.none(cdir.getHostKeyPath(hostname_unsigned))
self.none(cdir.getHostCaPath(hostname_unsigned))
# Generate a self-signed host keypair =============================
cdir.genHostCert(hostname_unsigned)
# Test that all the methods for loading the certificates work
self.isinstance(cdir.getHostCert(hostname_unsigned), crypto.X509)
self.isinstance(cdir.getHostKey(hostname_unsigned), crypto.PKey)
self.true(cdir.isHostCert(hostname_unsigned))
self.eq(cdir.getHostCertPath(hostname_unsigned), base + '/hosts/' + hostname_unsigned + '.crt')
self.eq(cdir.getHostKeyPath(hostname_unsigned), base + '/hosts/' + hostname_unsigned + '.key')
self.none(cdir.getHostCaPath(hostname_unsigned)) # the cert is self-signed, so there is no ca cert
# Run basic assertions on the host keypair
cert = cdir.getHostCert(hostname_unsigned)
key = cdir.getHostKey(hostname_unsigned)
self.basic_assertions(cdir, cert, key)
self.host_assertions(cdir, cert, key)
# Generate a signed host keypair ==================================
cdir.genHostCert(hostname, signas=caname)
# Test that all the methods for loading the certificates work
self.isinstance(cdir.getHostCert(hostname), crypto.X509)
self.isinstance(cdir.getHostKey(hostname), crypto.PKey)
self.true(cdir.isHostCert(hostname))
self.eq(cdir.getHostCertPath(hostname), base + '/hosts/' + hostname + '.crt')
self.eq(cdir.getHostKeyPath(hostname), base + '/hosts/' + hostname + '.key')
self.eq(cdir.getHostCaPath(hostname), base + '/cas/' + caname + '.crt') # the cert is signed, so there is a ca cert
# Run basic assertions on the host keypair
cert = cdir.getHostCert(hostname)
key = cdir.getHostKey(hostname)
self.basic_assertions(cdir, cert, key, cacert=cacert)
self.host_assertions(cdir, cert, key, cacert=cacert)
def test_certdir_users(self):
with self.getCertDir() as cdir: # type: s_certdir.CertDir
caname = 'syntest'
username = '[email protected]'
username_unsigned = '[email protected]'
base = cdir._getPathJoin()
cdir.genCaCert(caname)
cacert = cdir.getCaCert(caname)
# Test that all the methods for loading the certificates return correct values for non-existant files
self.none(cdir.getUserCert(username_unsigned))
self.none(cdir.getUserKey(username_unsigned))
self.none(cdir.getClientCert(username_unsigned))
self.false(cdir.isUserCert(username_unsigned))
self.false(cdir.isClientCert(username_unsigned))
self.none(cdir.getUserCertPath('nope'))
self.none(cdir.getUserKeyPath('nope'))
self.none(cdir.getUserCaPath('nope'))
self.none(cdir.getUserForHost('nope', 'host.vertex.link'))
# Generate a self-signed user keypair =============================
cdir.genUserCert(username_unsigned)
self.raises(NoSuchFile, cdir.genClientCert, username_unsigned)
# Test that all the methods for loading the certificates work
self.isinstance(cdir.getUserCert(username_unsigned), crypto.X509)
self.isinstance(cdir.getUserKey(username_unsigned), crypto.PKey)
self.none(cdir.getClientCert(username_unsigned))
self.true(cdir.isUserCert(username_unsigned))
self.false(cdir.isClientCert(username_unsigned))
self.eq(cdir.getUserCertPath(username_unsigned), base + '/users/' + username_unsigned + '.crt')
self.eq(cdir.getUserKeyPath(username_unsigned), base + '/users/' + username_unsigned + '.key')
self.none(cdir.getUserCaPath(username_unsigned)) # no CA
self.eq(cdir.getUserForHost('unsigned', 'host.vertex.link'), username_unsigned)
# Run basic assertions on the host keypair
cert = cdir.getUserCert(username_unsigned)
key = cdir.getUserKey(username_unsigned)
self.basic_assertions(cdir, cert, key)
self.user_assertions(cdir, cert, key)
# Generate a signed user keypair ==================================
cdir.genUserCert(username, signas=caname)
cdir.genClientCert(username)
# Test that all the methods for loading the certificates work
self.isinstance(cdir.getUserCert(username), crypto.X509)
self.isinstance(cdir.getUserKey(username), crypto.PKey)
self.isinstance(cdir.getClientCert(username), crypto.PKCS12)
self.true(cdir.isUserCert(username))
self.true(cdir.isClientCert(username))
self.eq(cdir.getUserCertPath(username), base + '/users/' + username + '.crt')
self.eq(cdir.getUserKeyPath(username), base + '/users/' + username + '.key')
self.eq(cdir.getUserCaPath(username), base + '/cas/' + caname + '.crt')
self.eq(cdir.getUserForHost('visi', 'host.vertex.link'), username)
# Run basic assertions on the host keypair
cert = cdir.getUserCert(username)
key = cdir.getUserKey(username)
p12 = cdir.getClientCert(username)
self.basic_assertions(cdir, cert, key, cacert=cacert)
self.user_assertions(cdir, cert, key, cacert=cacert)
self.p12_assertions(cdir, cert, key, p12, cacert=cacert)
# Test missing files for generating a client cert
os.remove(base + '/users/' + username + '.key')
self.raises(NoSuchFile, cdir.genClientCert, username) # user key
os.remove(base + '/cas/' + caname + '.crt')
self.raises(NoSuchFile, cdir.genClientCert, username) # ca crt
os.remove(base + '/users/' + username + '.crt')
self.raises(NoSuchFile, cdir.genClientCert, username) # user crt
def test_certdir_hosts_sans(self):
with self.getCertDir() as cdir: # type: s_certdir.CertDir
caname = 'syntest'
cdir.genCaCert(caname)
# Host cert with multiple SANs ====================================
hostname = 'visi.vertex.link'
sans = 'DNS:vertex.link,DNS:visi.vertex.link,DNS:vertex.link'
cdir.genHostCert(hostname, signas=caname, sans=sans)
cacert = cdir.getCaCert(caname)
cert = cdir.getHostCert(hostname)
key = cdir.getHostKey(hostname)
self.eq(cert.get_extension_count(), 5)
self.eq(cert.get_extension(4).get_short_name(), b'subjectAltName')
self.eq(cert.get_extension(4).get_data(), b'0\x1f\x82\x0bvertex.link\x82\x10visi.vertex.link') # ASN.1 encoded subjectAltName data
# Host cert with no specified SANs ================================
hostname = 'visi2.vertex.link'
cdir.genHostCert(hostname, signas=caname)
cacert = cdir.getCaCert(caname)
cert = cdir.getHostCert(hostname)
key = cdir.getHostKey(hostname)
self.eq(cert.get_extension_count(), 5)
self.eq(cert.get_extension(4).get_short_name(), b'subjectAltName')
self.eq(cert.get_extension(4).get_data(), b'0\x13\x82\x11visi2.vertex.link') # ASN.1 encoded subjectAltName data
# Self-signed Host cert with no specified SANs ====================
hostname = 'visi3.vertex.link'
cdir.genHostCert(hostname)
cacert = cdir.getCaCert(caname)
cert = cdir.getHostCert(hostname)
key = cdir.getHostKey(hostname)
self.eq(cert.get_extension_count(), 5)
self.eq(cert.get_extension(4).get_short_name(), b'subjectAltName')
self.eq(cert.get_extension(4).get_data(), b'0\x13\x82\x11visi3.vertex.link') # ASN.1 encoded subjectAltName data
def test_certdir_hosts_csr(self):
with self.getCertDir() as cdir: # type: s_certdir.CertDir
caname = 'syntest'
hostname = 'visi.vertex.link'
# Generate CA cert and host CSR
cdir.genCaCert(caname)
cdir.genHostCsr(hostname)
path = cdir._getPathJoin('hosts', hostname + '.csr')
xcsr = cdir._loadCsrPath(path)
# Sign the CSR as the CA
pkey, pcert = cdir.signHostCsr(xcsr, caname)
self.isinstance(pkey, crypto.PKey)
self.isinstance(pcert, crypto.X509)
# Validate the keypair
cacert = cdir.getCaCert(caname)
cert = cdir.getHostCert(hostname)
key = cdir.getHostKey(hostname)
self.basic_assertions(cdir, cert, key, cacert=cacert)
def test_certdir_users_csr(self):
with self.getCertDir() as cdir: # type: s_certdir.CertDir
caname = 'syntest'
username = '[email protected]'
# Generate CA cert and user CSR
cdir.genCaCert(caname)
cdir.genUserCsr(username)
path = cdir._getPathJoin('users', username + '.csr')
xcsr = cdir._loadCsrPath(path)
# Sign the CSR as the CA
pkey, pcert = cdir.signUserCsr(xcsr, caname)
self.isinstance(pkey, crypto.PKey)
self.isinstance(pcert, crypto.X509)
# Validate the keypair
cacert = cdir.getCaCert(caname)
cert = cdir.getUserCert(username)
key = cdir.getUserKey(username)
self.basic_assertions(cdir, cert, key, cacert=cacert)
def test_certdir_importfile(self):
with self.getCertDir() as cdir: # type: s_certdir.CertDir
with self.getTestDir() as testpath:
# File doesn't exist
fpath = s_common.genpath(testpath, 'not_real.crt')
self.raises(NoSuchFile, cdir.importFile, fpath, 'cas')
# File has unsupported extension
fpath = s_common.genpath(testpath, 'coolpic.bmp')
with s_common.genfile(fpath) as fd:
self.raises(BadFileExt, cdir.importFile, fpath, 'cas')
tests = (
('cas', 'coolca.crt'),
('cas', 'coolca.key'),
('hosts', 'coolhost.crt'),
('hosts', 'coolhost.key'),
('users', 'cooluser.crt'),
('users', 'cooluser.key'),
('users', 'cooluser.p12'),
)
data = b'arbitrary data'
for ftype, fname in tests:
srcpath = s_common.genpath(testpath, fname)
dstpath = s_common.genpath(cdir.path, ftype, fname)
with s_common.genfile(srcpath) as fd:
fd.write(b'arbitrary data')
fd.seek(0)
# Make sure the file is not there
self.raises(NoSuchFile, s_common.reqfile, dstpath)
# Import it and make sure it exists
self.none(cdir.importFile(srcpath, ftype))
with s_common.reqfile(dstpath) as dstfd:
self.eq(dstfd.read(), b'arbitrary data')
# Make sure it can't be overwritten
self.raises(FileExists, cdir.importFile, srcpath, ftype)
def test_certdir_valUserCert(self):
with self.getCertDir() as cdir: # type: s_certdir.CertDir
base = cdir._getPathJoin()
cdir.genCaCert('syntest')
cdir.genCaCert('newp')
cacerts = cdir.getCaCerts()
syntestca = cdir.getCaCert('syntest')
newpca = cdir.getCaCert('newp')
self.raises(crypto.Error, cdir.valUserCert, b'')
cdir.genUserCert('cool')
path = cdir.getUserCertPath('cool')
byts = cdir._getPathBytes(path)
self.raises(crypto.X509StoreContextError, cdir.valUserCert, byts)
cdir.genUserCert('cooler', signas='syntest')
path = cdir.getUserCertPath('cooler')
byts = cdir._getPathBytes(path)
self.nn(cdir.valUserCert(byts))
self.nn(cdir.valUserCert(byts, cacerts=(syntestca,)))
self.raises(crypto.X509StoreContextError, cdir.valUserCert, byts, cacerts=(newpca,))
self.raises(crypto.X509StoreContextError, cdir.valUserCert, byts, cacerts=())
cdir.genUserCert('coolest', signas='newp')
path = cdir.getUserCertPath('coolest')
byts = cdir._getPathBytes(path)
self.nn(cdir.valUserCert(byts))
self.nn(cdir.valUserCert(byts, cacerts=(newpca,)))
self.raises(crypto.X509StoreContextError, cdir.valUserCert, byts, cacerts=(syntestca,))
self.raises(crypto.X509StoreContextError, cdir.valUserCert, byts, cacerts=())
|
apache-2.0
|
TomNong/Project2-Intel-Edison
|
documentation/javadoc/com/amazonaws/services/cloudfront_2012_03_15/model/ListStreamingDistributionsResult.html
|
22084
|
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="en">
<head>
<!-- Generated by javadoc (version 1.7.0_80) on Thu Oct 15 19:27:02 UTC 2015 -->
<title>ListStreamingDistributionsResult (AWS SDK for Java - 1.10.27)</title>
<meta name="date" content="2015-10-15">
<link rel="stylesheet" type="text/css" href="../../../../../JavaDoc.css" title="Style">
</head>
<body>
<script type="text/javascript"><!--
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="ListStreamingDistributionsResult (AWS SDK for Java - 1.10.27)";
}
//-->
</script>
<noscript>
<div>JavaScript is disabled on your browser.</div>
</noscript>
<!-- ========= START OF TOP NAVBAR ======= -->
<div class="topNav"><a name="navbar_top">
<!-- -->
</a><a href="#skip-navbar_top" title="Skip navigation links"></a><a name="navbar_top_firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../overview-summary.html">Overview</a></li>
<li><a href="package-summary.html">Package</a></li>
<li class="navBarCell1Rev">Class</li>
<li><a href="../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../help-doc.html">Help</a></li>
</ul>
<div class="aboutLanguage"><em>
<!-- Scripts for Syntax Highlighter START-->
<script id="syntaxhighlight_script_core" type="text/javascript" src = "http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/resources/syntaxhighlight/scripts/shCore.js">
</script>
<script id="syntaxhighlight_script_java" type="text/javascript" src = "http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/resources/syntaxhighlight/scripts/shBrushJava.js">
</script>
<link id="syntaxhighlight_css_core" rel="stylesheet" type="text/css" href = "http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/resources/syntaxhighlight/styles/shCoreDefault.css"/>
<link id="syntaxhighlight_css_theme" rel="stylesheet" type="text/css" href = "http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/resources/syntaxhighlight/styles/shThemeDefault.css"/>
<!-- Scripts for Syntax Highlighter END-->
<div>
<!-- BEGIN-SECTION -->
<div id="divsearch" style="float:left;">
<span id="lblsearch" for="searchQuery">
<label>Search</label>
</span>
<form id="nav-search-form" target="_parent" method="get" action="http://docs.aws.amazon.com/search/doc-search.html#facet_doc_guide=API+Reference&facet_doc_product=AWS+SDK+for+Java">
<div id="nav-searchfield-outer" class="nav-sprite">
<div class="nav-searchfield-inner nav-sprite">
<div id="nav-searchfield-width">
<input id="nav-searchfield" name="searchQuery">
</div>
</div>
</div>
<div id="nav-search-button" class="nav-sprite">
<input alt="" id="nav-search-button-inner" type="image">
</div>
<input name="searchPath" type="hidden" value="documentation-guide" />
<input name="this_doc_product" type="hidden" value="AWS SDK for Java" />
<input name="this_doc_guide" type="hidden" value="API Reference" />
<input name="doc_locale" type="hidden" value="en_us" />
</form>
</div>
<!-- END-SECTION -->
<!-- BEGIN-FEEDBACK-SECTION -->
<div id="feedback-section">
<h3>Did this page help you?</h3>
<div id="feedback-link-sectioin">
<a id="feedback_yes" target="_blank" style="display:inline;">Yes</a>
<a id="feedback_no" target="_blank" style="display:inline;">No</a>
<a id="go_cti" target="_blank" style="display:inline;">Tell us about it...</a>
</div>
</div>
<script type="text/javascript">
window.onload = function(){
/* Dynamically add feedback links */
var javadoc_root_name = "/javadoc/";
var javadoc_path = location.href.substring(0, location.href.lastIndexOf(javadoc_root_name) + javadoc_root_name.length);
var file_path = location.href.substring(location.href.lastIndexOf(javadoc_root_name) + javadoc_root_name.length);
var feedback_yes_url = javadoc_path + "javadoc-resources/feedbackyes.html?topic_id=";
var feedback_no_url = javadoc_path + "javadoc-resources/feedbackno.html?topic_id=";
var feedback_tellmore_url = "https://aws-portal.amazon.com/gp/aws/html-forms-controller/documentation/aws_doc_feedback_04?service_name=Java-Ref&file_name=";
if(file_path != "overview-frame.html") {
var file_name = file_path.replace(/[/.]/g, '_');
document.getElementById("feedback_yes").setAttribute("href", feedback_yes_url + file_name);
document.getElementById("feedback_no").setAttribute("href", feedback_no_url + file_name);
document.getElementById("go_cti").setAttribute("href", feedback_tellmore_url + file_name);
} else {
// hide the search box and the feeback links in overview-frame page,
// show "AWS SDK for Java" instead.
document.getElementById("feedback-section").outerHTML = "AWS SDK for Java";
document.getElementById("divsearch").outerHTML = "";
}
};
</script>
<!-- END-FEEDBACK-SECTION -->
</div>
</em></div>
</div>
<div class="subNav">
<ul class="navList">
<li><a href="../../../../../com/amazonaws/services/cloudfront_2012_03_15/model/ListStreamingDistributionsRequest.html" title="class in com.amazonaws.services.cloudfront_2012_03_15.model"><span class="strong">Prev Class</span></a></li>
<li><a href="../../../../../com/amazonaws/services/cloudfront_2012_03_15/model/LoggingConfig.html" title="class in com.amazonaws.services.cloudfront_2012_03_15.model"><span class="strong">Next Class</span></a></li>
</ul>
<ul class="navList">
<li><a href="../../../../../index.html?com/amazonaws/services/cloudfront_2012_03_15/model/ListStreamingDistributionsResult.html" target="_top">Frames</a></li>
<li><a href="ListStreamingDistributionsResult.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_top">
<li><a href="../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_top");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<div>
<ul class="subNavList">
<li>Summary: </li>
<li>Nested | </li>
<li>Field | </li>
<li><a href="#constructor_summary">Constr</a> | </li>
<li><a href="#method_summary">Method</a></li>
</ul>
<ul class="subNavList">
<li>Detail: </li>
<li>Field | </li>
<li><a href="#constructor_detail">Constr</a> | </li>
<li><a href="#method_detail">Method</a></li>
</ul>
</div>
<a name="skip-navbar_top">
<!-- -->
</a></div>
<!-- ========= END OF TOP NAVBAR ========= -->
<!-- ======== START OF CLASS DATA ======== -->
<div class="header">
<div class="subTitle">com.amazonaws.services.cloudfront_2012_03_15.model</div>
<h2 title="Class ListStreamingDistributionsResult" class="title">Class ListStreamingDistributionsResult</h2>
</div>
<div class="contentContainer">
<ul class="inheritance">
<li>java.lang.Object</li>
<li>
<ul class="inheritance">
<li>com.amazonaws.services.cloudfront_2012_03_15.model.ListStreamingDistributionsResult</li>
</ul>
</li>
</ul>
<div class="description">
<ul class="blockList">
<li class="blockList">
<hr>
<br>
<pre>public class <span class="strong">ListStreamingDistributionsResult</span>
extends java.lang.Object</pre>
<div class="block"><p>
The returned result of the corresponding request.
</p></div>
</li>
</ul>
</div>
<div class="summary">
<ul class="blockList">
<li class="blockList">
<!-- ======== CONSTRUCTOR SUMMARY ======== -->
<ul class="blockList">
<li class="blockList"><a name="constructor_summary">
<!-- -->
</a>
<h3>Constructor Summary</h3>
<table class="overviewSummary" border="0" cellpadding="3" cellspacing="0" summary="Constructor Summary table, listing constructors, and an explanation">
<caption><span>Constructors</span><span class="tabEnd"> </span></caption>
<tr>
<th class="colOne" scope="col">Constructor and Description</th>
</tr>
<tr class="altColor">
<td class="colOne"><code><strong><a href="../../../../../com/amazonaws/services/cloudfront_2012_03_15/model/ListStreamingDistributionsResult.html#ListStreamingDistributionsResult()">ListStreamingDistributionsResult</a></strong>()</code> </td>
</tr>
</table>
</li>
</ul>
<!-- ========== METHOD SUMMARY =========== -->
<ul class="blockList">
<li class="blockList"><a name="method_summary">
<!-- -->
</a>
<h3>Method Summary</h3>
<table class="overviewSummary" border="0" cellpadding="3" cellspacing="0" summary="Method Summary table, listing methods, and an explanation">
<caption><span>Methods</span><span class="tabEnd"> </span></caption>
<tr>
<th class="colFirst" scope="col">Modifier and Type</th>
<th class="colLast" scope="col">Method and Description</th>
</tr>
<tr class="altColor">
<td class="colFirst"><code>boolean</code></td>
<td class="colLast"><code><strong><a href="../../../../../com/amazonaws/services/cloudfront_2012_03_15/model/ListStreamingDistributionsResult.html#equals(java.lang.Object)">equals</a></strong>(java.lang.Object obj)</code> </td>
</tr>
<tr class="rowColor">
<td class="colFirst"><code><a href="../../../../../com/amazonaws/services/cloudfront_2012_03_15/model/StreamingDistributionList.html" title="class in com.amazonaws.services.cloudfront_2012_03_15.model">StreamingDistributionList</a></code></td>
<td class="colLast"><code><strong><a href="../../../../../com/amazonaws/services/cloudfront_2012_03_15/model/ListStreamingDistributionsResult.html#getStreamingDistributionList()">getStreamingDistributionList</a></strong>()</code>
<div class="block">The StreamingDistributionList type.</div>
</td>
</tr>
<tr class="altColor">
<td class="colFirst"><code>int</code></td>
<td class="colLast"><code><strong><a href="../../../../../com/amazonaws/services/cloudfront_2012_03_15/model/ListStreamingDistributionsResult.html#hashCode()">hashCode</a></strong>()</code> </td>
</tr>
<tr class="rowColor">
<td class="colFirst"><code>void</code></td>
<td class="colLast"><code><strong><a href="../../../../../com/amazonaws/services/cloudfront_2012_03_15/model/ListStreamingDistributionsResult.html#setStreamingDistributionList(com.amazonaws.services.cloudfront_2012_03_15.model.StreamingDistributionList)">setStreamingDistributionList</a></strong>(<a href="../../../../../com/amazonaws/services/cloudfront_2012_03_15/model/StreamingDistributionList.html" title="class in com.amazonaws.services.cloudfront_2012_03_15.model">StreamingDistributionList</a> streamingDistributionList)</code>
<div class="block">The StreamingDistributionList type.</div>
</td>
</tr>
<tr class="altColor">
<td class="colFirst"><code>java.lang.String</code></td>
<td class="colLast"><code><strong><a href="../../../../../com/amazonaws/services/cloudfront_2012_03_15/model/ListStreamingDistributionsResult.html#toString()">toString</a></strong>()</code>
<div class="block">Returns a string representation of this object; useful for testing and
debugging.</div>
</td>
</tr>
<tr class="rowColor">
<td class="colFirst"><code><a href="../../../../../com/amazonaws/services/cloudfront_2012_03_15/model/ListStreamingDistributionsResult.html" title="class in com.amazonaws.services.cloudfront_2012_03_15.model">ListStreamingDistributionsResult</a></code></td>
<td class="colLast"><code><strong><a href="../../../../../com/amazonaws/services/cloudfront_2012_03_15/model/ListStreamingDistributionsResult.html#withStreamingDistributionList(com.amazonaws.services.cloudfront_2012_03_15.model.StreamingDistributionList)">withStreamingDistributionList</a></strong>(<a href="../../../../../com/amazonaws/services/cloudfront_2012_03_15/model/StreamingDistributionList.html" title="class in com.amazonaws.services.cloudfront_2012_03_15.model">StreamingDistributionList</a> streamingDistributionList)</code>
<div class="block">The StreamingDistributionList type.</div>
</td>
</tr>
</table>
<ul class="blockList">
<li class="blockList"><a name="methods_inherited_from_class_java.lang.Object">
<!-- -->
</a>
<h3>Methods inherited from class java.lang.Object</h3>
<code>getClass, notify, notifyAll, wait, wait, wait</code></li>
</ul>
</li>
</ul>
</li>
</ul>
</div>
<div class="details">
<ul class="blockList">
<li class="blockList">
<!-- ========= CONSTRUCTOR DETAIL ======== -->
<ul class="blockList">
<li class="blockList"><a name="constructor_detail">
<!-- -->
</a>
<h3>Constructor Detail</h3>
<a name="ListStreamingDistributionsResult()">
<!-- -->
</a>
<ul class="blockListLast">
<li class="blockList">
<h4>ListStreamingDistributionsResult</h4>
<pre>public ListStreamingDistributionsResult()</pre>
</li>
</ul>
</li>
</ul>
<!-- ============ METHOD DETAIL ========== -->
<ul class="blockList">
<li class="blockList"><a name="method_detail">
<!-- -->
</a>
<h3>Method Detail</h3>
<a name="getStreamingDistributionList()">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>getStreamingDistributionList</h4>
<pre>public <a href="../../../../../com/amazonaws/services/cloudfront_2012_03_15/model/StreamingDistributionList.html" title="class in com.amazonaws.services.cloudfront_2012_03_15.model">StreamingDistributionList</a> getStreamingDistributionList()</pre>
<div class="block">The StreamingDistributionList type.</div>
<dl><dt><span class="strong">Returns:</span></dt><dd>The StreamingDistributionList type.</dd></dl>
</li>
</ul>
<a name="setStreamingDistributionList(com.amazonaws.services.cloudfront_2012_03_15.model.StreamingDistributionList)">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>setStreamingDistributionList</h4>
<pre>public void setStreamingDistributionList(<a href="../../../../../com/amazonaws/services/cloudfront_2012_03_15/model/StreamingDistributionList.html" title="class in com.amazonaws.services.cloudfront_2012_03_15.model">StreamingDistributionList</a> streamingDistributionList)</pre>
<div class="block">The StreamingDistributionList type.</div>
<dl><dt><span class="strong">Parameters:</span></dt><dd><code>streamingDistributionList</code> - The StreamingDistributionList type.</dd></dl>
</li>
</ul>
<a name="withStreamingDistributionList(com.amazonaws.services.cloudfront_2012_03_15.model.StreamingDistributionList)">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>withStreamingDistributionList</h4>
<pre>public <a href="../../../../../com/amazonaws/services/cloudfront_2012_03_15/model/ListStreamingDistributionsResult.html" title="class in com.amazonaws.services.cloudfront_2012_03_15.model">ListStreamingDistributionsResult</a> withStreamingDistributionList(<a href="../../../../../com/amazonaws/services/cloudfront_2012_03_15/model/StreamingDistributionList.html" title="class in com.amazonaws.services.cloudfront_2012_03_15.model">StreamingDistributionList</a> streamingDistributionList)</pre>
<div class="block">The StreamingDistributionList type.
<p>
Returns a reference to this object so that method calls can be chained together.</div>
<dl><dt><span class="strong">Parameters:</span></dt><dd><code>streamingDistributionList</code> - The StreamingDistributionList type.</dd>
<dt><span class="strong">Returns:</span></dt><dd>A reference to this updated object so that method calls can be chained
together.</dd></dl>
</li>
</ul>
<a name="toString()">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>toString</h4>
<pre>public java.lang.String toString()</pre>
<div class="block">Returns a string representation of this object; useful for testing and
debugging.</div>
<dl>
<dt><strong>Overrides:</strong></dt>
<dd><code>toString</code> in class <code>java.lang.Object</code></dd>
<dt><span class="strong">Returns:</span></dt><dd>A string representation of this object.</dd><dt><span class="strong">See Also:</span></dt><dd><code>Object.toString()</code></dd></dl>
</li>
</ul>
<a name="hashCode()">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>hashCode</h4>
<pre>public int hashCode()</pre>
<dl>
<dt><strong>Overrides:</strong></dt>
<dd><code>hashCode</code> in class <code>java.lang.Object</code></dd>
</dl>
</li>
</ul>
<a name="equals(java.lang.Object)">
<!-- -->
</a>
<ul class="blockListLast">
<li class="blockList">
<h4>equals</h4>
<pre>public boolean equals(java.lang.Object obj)</pre>
<dl>
<dt><strong>Overrides:</strong></dt>
<dd><code>equals</code> in class <code>java.lang.Object</code></dd>
</dl>
</li>
</ul>
</li>
</ul>
</li>
</ul>
</div>
</div>
<!-- ========= END OF CLASS DATA ========= -->
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<div class="bottomNav"><a name="navbar_bottom">
<!-- -->
</a><a href="#skip-navbar_bottom" title="Skip navigation links"></a><a name="navbar_bottom_firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../overview-summary.html">Overview</a></li>
<li><a href="package-summary.html">Package</a></li>
<li class="navBarCell1Rev">Class</li>
<li><a href="../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../help-doc.html">Help</a></li>
</ul>
<div class="aboutLanguage"><em>
<div>
<!-- Script for Syntax Highlighter START -->
<script type="text/javascript">
SyntaxHighlighter.all()
</script>
<!-- Script for Syntax Highlighter END -->
</div>
<script src="http://a0.awsstatic.com/chrome/js/1.0.46/jquery.1.9.js" type="text/javascript"></script>
<script>jQuery.noConflict();</script>
<script>
jQuery(function ($) {
$("div.header").prepend('<!--REGION_DISCLAIMER_DO_NOT_REMOVE-->');
});
</script>
<!-- BEGIN-URCHIN-TRACKER -->
<script src="http://l0.awsstatic.com/js/urchin.js" type="text/javascript"></script>
<script type="text/javascript">urchinTracker();</script>
<!-- END-URCHIN-TRACKER -->
<!-- SiteCatalyst code version: H.25.2. Copyright 1996-2012 Adobe, Inc. All Rights Reserved.
More info available at http://www.omniture.com -->
<script language="JavaScript" type="text/javascript" src="https://media.amazonwebservices.com/js/sitecatalyst/s_code.min.js (view-source:https://media.amazonwebservices.com/js/sitecatalyst/s_code.min.js)" />
<script language="JavaScript" type="text/javascript">
<!--
// Documentation Service Name
s.prop66='AWS SDK for Java';
s.eVar66='D=c66';
// Documentation Guide Name
s.prop65='API Reference';
s.eVar65='D=c65';
var s_code=s.t();if(s_code)document.write(s_code)
//-->
</script>
<script language="JavaScript" type="text/javascript">
<!--if(navigator.appVersion.indexOf('MSIE')>=0)document.write(unescape('%3C')+'\!-'+'-')
//-->
</script>
<noscript>
<img src="http://amazonwebservices.d2.sc.omtrdc.net/b/ss/awsamazondev/1/H.25.2--NS/0" height="1" width="1" border="0" alt="" />
</noscript>
<!--/DO NOT REMOVE/-->
<!-- End SiteCatalyst code version: H.25.2. -->
</em></div>
</div>
<div class="subNav">
<ul class="navList">
<li><a href="../../../../../com/amazonaws/services/cloudfront_2012_03_15/model/ListStreamingDistributionsRequest.html" title="class in com.amazonaws.services.cloudfront_2012_03_15.model"><span class="strong">Prev Class</span></a></li>
<li><a href="../../../../../com/amazonaws/services/cloudfront_2012_03_15/model/LoggingConfig.html" title="class in com.amazonaws.services.cloudfront_2012_03_15.model"><span class="strong">Next Class</span></a></li>
</ul>
<ul class="navList">
<li><a href="../../../../../index.html?com/amazonaws/services/cloudfront_2012_03_15/model/ListStreamingDistributionsResult.html" target="_top">Frames</a></li>
<li><a href="ListStreamingDistributionsResult.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_bottom">
<li><a href="../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_bottom");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<div>
<ul class="subNavList">
<li>Summary: </li>
<li>Nested | </li>
<li>Field | </li>
<li><a href="#constructor_summary">Constr</a> | </li>
<li><a href="#method_summary">Method</a></li>
</ul>
<ul class="subNavList">
<li>Detail: </li>
<li>Field | </li>
<li><a href="#constructor_detail">Constr</a> | </li>
<li><a href="#method_detail">Method</a></li>
</ul>
</div>
<a name="skip-navbar_bottom">
<!-- -->
</a></div>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
<p class="legalCopy"><small>
Copyright © 2013 Amazon Web Services, Inc. All Rights Reserved.
</small></p>
</body>
</html>
|
apache-2.0
|
openpreserve/plato
|
plato/src/main/java/eu/scape_project/planning/plato/wfview/full/DefineBasisView.java
|
5051
|
/*******************************************************************************
* Copyright 2006 - 2012 Vienna University of Technology,
* Department of Software Technology and Interactive Systems, IFS
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package eu.scape_project.planning.plato.wfview.full;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;
import javax.enterprise.context.ConversationScoped;
import javax.inject.Inject;
import javax.inject.Named;
import eu.scape_project.planning.model.Plan;
import eu.scape_project.planning.model.PlanState;
import eu.scape_project.planning.model.PolicyNode;
import eu.scape_project.planning.model.policy.PreservationCase;
import eu.scape_project.planning.plato.bean.TreeHelperBean;
import eu.scape_project.planning.plato.wf.AbstractWorkflowStep;
import eu.scape_project.planning.plato.wf.DefineBasis;
import eu.scape_project.planning.plato.wfview.AbstractView;
import eu.scape_project.planning.policies.OrganisationalPolicies;
/**
 * Bean for the viewWorkflow step 'Define Basis'.
 *
 * <p>Lets the user pick one of the organisation's preservation cases and apply
 * it to the current plan's project basis. Conversation-scoped so the selection
 * survives across requests within one planning conversation.</p>
 */
@Named("defineBasis")
@ConversationScoped
public class DefineBasisView extends AbstractView implements Serializable {
    private static final long serialVersionUID = 8237053627553012469L;

    @Inject
    private DefineBasis defineBasis;

    @Inject
    private TreeHelperBean treeHelper;

    @Inject
    private OrganisationalPolicies policies;

    // Preservation case currently selected by the user; null when no case
    // matches the plan's stored URI or the chosen name.
    private PreservationCase selectedPreservationCase;

    // All preservation cases offered by the organisational policies,
    // loaded once per init().
    private List<PreservationCase> preservationCases;

    public DefineBasisView() {
        currentPlanState = PlanState.INITIALISED;
        name = "Define Basis";
        viewUrl = "/plan/definebasis.jsf";
        group = "menu.defineRequirements";
    }

    /**
     * Initializes the 'Define Basis' viewWorkflow step: loads the available
     * preservation cases, restores the previously selected case from the
     * plan's project basis, and expands the policy tree for display.
     *
     * @param plan the plan being edited
     * @see AbstractView#init(Plan)
     */
    @Override
    public void init(Plan plan) {
        super.init(plan);
        policies.init();
        preservationCases = policies.getPreservationCases();
        // Restore the case previously stored on the plan (may resolve to null).
        selectedPreservationCase =
            policies.getPreservationCase(plan.getProjectBasis().getSelectedPreservationCaseURI());
        // Expand all nodes of the displayed policy-tree (if existent).
        treeHelper.expandAll(plan.getProjectBasis().getPolicyTree().getRoot());
    }

    @Override
    protected AbstractWorkflowStep getWfStep() {
        return defineBasis;
    }

    /**
     * Method responsible for returning the policy-tree appropriate for
     * displaying with rich:treeModelRecursiveAdaptor. (This richfaces component
     * requires a list of nodes to be returned.)
     *
     * @return Policy-tree in list representation (for use in
     *         rich:treeModelRecursiveAdaptor); empty when the plan has no
     *         policy tree.
     */
    public List<PolicyNode> getPolicyRoot() {
        List<PolicyNode> roots = new ArrayList<PolicyNode>();
        if (plan.getProjectBasis().getPolicyTree() != null) {
            roots.add(plan.getProjectBasis().getPolicyTree().getRoot());
        }
        return roots;
    }

    // ---------- getter/setter ----------

    public OrganisationalPolicies getPolicies() {
        return policies;
    }

    public void setPolicies(OrganisationalPolicies policies) {
        this.policies = policies;
    }

    /**
     * @return the name of the currently selected preservation case, or
     *         {@code null} when none is selected
     */
    public String getSelectedPreservationCaseName() {
        return (selectedPreservationCase == null) ? null : selectedPreservationCase.getName();
    }

    /**
     * Selects the preservation case with the given name, or clears the
     * selection when no case matches.
     *
     * @param name display name of the case to select
     */
    public void setSelectedPreservationCaseName(String name) {
        selectedPreservationCase = null;
        for (PreservationCase preservationCase : preservationCases) {
            if (preservationCase.getName().equals(name)) {
                selectedPreservationCase = preservationCase;
                // Names are assumed unique — stop at the first match.
                break;
            }
        }
    }

    /**
     * Applies the currently selected preservation case to the plan's project
     * basis. No-op when nothing is selected.
     */
    public void useSelectedPreservationCase() {
        if (selectedPreservationCase != null) {
            plan.getProjectBasis().applyPreservationCase(selectedPreservationCase);
        }
    }

    public TreeHelperBean getTreeHelper() {
        return treeHelper;
    }

    public List<PreservationCase> getPreservationCases() {
        return preservationCases;
    }

    public PreservationCase getSelectedPreservationCase() {
        return selectedPreservationCase;
    }
}
|
apache-2.0
|
erisata/axb_config
|
README.md
|
1697
|
# AxB Config #
This application allows maintaining configuration parameters
by having a baseline configuration in the source code and
allowing individual parameters to be overridden. The configuration
values are taken from various sources:
* `default` -- are the values, that are common for all the
instances of the configuration. E.g. port=22 for all the
ssh connections by default.
* `static` -- are the values provided by the developer and
hardcoded into the source code (not including the sensitive
data). E.g. host="some.org". These are provided in order
to minimize the ammount of fields an administrator must
provide. It can be considered as a baseline configuration.
* `system` -- are the values read from the sys.config of the
release. It can be considered as a release-specific
static/default configuration provided by the packager.
* `environment` -- are the values read for the application
environment (file system). In most cases it will be a
file in the `/etc` directory. This file is edited by The
administrator.
* `runtime` -- are provided by the administrator at runtime,
to override the configuration for a short time.
The application components can retrieve the actual configuration.
The actual configuration is derived by merging all the sources
by priority as listed above (runtime values have highest priority).
## Modules ##
<table width="100%" border="0" summary="list of modules">
<tr><td><a href="http://github.com/erisata/axb_config/blob/master/doc/axb_config.md" class="module">axb_config</a></td></tr>
<tr><td><a href="http://github.com/erisata/axb_config/blob/master/doc/axb_config_console.md" class="module">axb_config_console</a></td></tr></table>
|
apache-2.0
|
mdoering/backbone
|
life/Plantae/Magnoliophyta/Magnoliopsida/Solanales/Convolvulaceae/Ipomoea/Ipomoea albidiflora/README.md
|
176
|
# Ipomoea albidiflora Matuda SPECIES
#### Status
ACCEPTED
#### According to
International Plant Names Index
#### Published in
null
#### Original name
null
### Remarks
null
|
apache-2.0
|
mdoering/backbone
|
life/Plantae/Magnoliophyta/Liliopsida/Poales/Poaceae/Melinis/Melinis repens/ Syn. Erianthus repens/README.md
|
191
|
# Erianthus repens (Willd.) P.Beauv. SPECIES
#### Status
SYNONYM
#### According to
The Catalogue of Life, 3rd January 2011
#### Published in
null
#### Original name
null
### Remarks
null
|
apache-2.0
|
mdoering/backbone
|
life/Plantae/Magnoliophyta/Liliopsida/Poales/Poaceae/Aegilops/Aegilops sharonensis/ Syn. Aegilops longissima aristata/README.md
|
195
|
# Aegilops longissima subsp. aristata SUBSPECIES
#### Status
SYNONYM
#### According to
The Catalogue of Life, 3rd January 2011
#### Published in
null
#### Original name
null
### Remarks
null
|
apache-2.0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.