repo_name
stringlengths 4
116
| path
stringlengths 3
942
| size
stringlengths 1
7
| content
stringlengths 3
1.05M
| license
stringclasses 15
values |
---|---|---|---|---|
jupeter/zf1 | library/Zend/Config/Writer.php | 2431 | <?php
/**
* Zend Framework
*
* LICENSE
*
* This source file is subject to the new BSD license that is bundled
* with this package in the file LICENSE.txt.
* It is also available through the world-wide-web at this URL:
* http://framework.zend.com/license/new-bsd
* If you did not receive a copy of the license and are unable to
* obtain it through the world-wide-web, please send an email
* to [email protected] so we can send you a copy immediately.
*
* @category Zend
* @package Zend_Config
* @copyright Copyright (c) 2005-2011 Zend Technologies USA Inc. (http://www.zend.com)
* @license http://framework.zend.com/license/new-bsd New BSD License
* @version $Id$
*/
/**
* @category Zend
* @package Zend_Config
* @copyright Copyright (c) 2005-2011 Zend Technologies USA Inc. (http://www.zend.com)
* @license http://framework.zend.com/license/new-bsd New BSD License
*/
abstract class Zend_Config_Writer
{
    /**
     * Option keys that setOptions() must ignore; without this list the
     * 'options' key would be dispatched back to setOptions() itself.
     *
     * @var array
     */
    protected $_skipOptions = array(
        'options'
    );

    /**
     * The Zend_Config instance that write() will serialize.
     *
     * @var Zend_Config
     */
    protected $_config = null;

    /**
     * Create a new writer adapter.
     *
     * $options can only be passed as array or be omitted
     *
     * @param null|array $options
     */
    public function __construct(array $options = null)
    {
        if ($options !== null) {
            $this->setOptions($options);
        }
    }

    /**
     * Set options via a Zend_Config instance
     *
     * @param  Zend_Config $config
     * @return Zend_Config_Writer Provides a fluent interface
     */
    public function setConfig(Zend_Config $config)
    {
        $this->_config = $config;

        return $this;
    }

    /**
     * Set options via an array: each entry is routed to a matching
     * set<Name>() method when one exists; keys listed in $_skipOptions
     * are ignored and unknown keys are silently dropped.
     *
     * @param  array $options
     * @return Zend_Config_Writer Provides a fluent interface
     */
    public function setOptions(array $options)
    {
        foreach ($options as $name => $option) {
            if (in_array(strtolower($name), $this->_skipOptions)) {
                continue;
            }

            $setter = 'set' . ucfirst($name);
            if (method_exists($this, $setter)) {
                $this->{$setter}($option);
            }
        }

        return $this;
    }

    /**
     * Write a Zend_Config object to its target
     *
     * @return void
     */
    abstract public function write();
}
| bsd-3-clause |
darkrsw/safe | tests/browser_extensions/js1_7/extensions/XXXregress-392308.js | 2774 | /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is JavaScript Engine testing utilities.
*
* The Initial Developer of the Original Code is
* Mozilla Foundation.
* Portions created by the Initial Developer are Copyright (C) 2007
* the Initial Developer. All Rights Reserved.
*
* Contributor(s): Norris Boyd
*
* Alternatively, the contents of this file may be used under the terms of
* either the GNU General Public License Version 2 or later (the "GPL"), or
* the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
//-----------------------------------------------------------------------------
// Test-harness globals: bug id and summary are printed by the harness;
// |actual| and |expect| are filled in by test() and compared by
// reportCompare().
var BUGNUMBER = 392308;
var summary = 'StopIteration should be catchable';
var actual = '';
var expect = '';
//-----------------------------------------------------------------------------
test();
//-----------------------------------------------------------------------------
// Verifies that the StopIteration thrown when a legacy (JS1.7) generator
// finishes can be caught by ordinary try/catch.
function test()
{
// Legacy SpiderMonkey harness bookkeeping.
enterFunc ('test');
printBugNumber(BUGNUMBER);
printStatus (summary);
function testStop() {
// JS1.7 dialect: any function containing `yield` is a generator
// (there is no function* keyword in this legacy syntax).
function yielder() {
actual += 'before, ';
yield;
actual += 'after, ';
}
expect = 'before, after, iteration terminated normally';
try {
var gen = yielder();
// First next() runs up to the yield; send() resumes past it, after
// which the generator completes and throws StopIteration.
// NOTE(review): |result| is an implicit global — presumably deliberate
// in this throwaway test; verify before reusing this pattern.
result = gen.next();
gen.send(result);
} catch (x if x instanceof StopIteration) {
// Non-standard SpiderMonkey conditional catch clause: entered only when
// the guard is true. Reaching here is the expected, normal outcome.
actual += 'iteration terminated normally';
} catch (x2) {
actual += 'unexpected throw: ' + x2;
}
}
testStop();
reportCompare(expect, actual, summary);
exitFunc ('test');
}
| bsd-3-clause |
js0701/chromium-crosswalk | third_party/WebKit/Source/platform/LifecycleNotifier.h | 4783 | /*
* Copyright (C) 2008 Apple Inc. All Rights Reserved.
* Copyright (C) 2013 Google Inc. All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef LifecycleNotifier_h
#define LifecycleNotifier_h
#include "platform/heap/Handle.h"
#include "wtf/HashSet.h"
#include "wtf/TemporaryChange.h"
namespace blink {
// Mixin that lets Observers register for a one-shot "context destroyed"
// notification from the observed context T (which derives from this class).
template<typename T, typename Observer>
class LifecycleNotifier : public virtual WillBeGarbageCollectedMixin {
public:
virtual ~LifecycleNotifier();
// Registration is forbidden while the observer set is being iterated
// (see the RELEASE_ASSERT in addObserver()); removal is always allowed.
void addObserver(Observer*);
void removeObserver(Observer*);
// notifyContextDestroyed() should be explicitly dispatched from an
// observed context to notify observers that contextDestroyed().
//
// When contextDestroyed() is called, the observer's lifecycleContext()
// is still valid and safe to use during the notification.
virtual void notifyContextDestroyed();
DEFINE_INLINE_VIRTUAL_TRACE()
{
#if ENABLE(OILPAN)
visitor->trace(m_observers);
#endif
}
// True while notifyContextDestroyed() (or the non-Oilpan destructor) is
// walking the observer set.
bool isIteratingOverObservers() const { return m_iterating != IteratingNone; }
protected:
LifecycleNotifier()
: m_iterating(IteratingNone)
, m_didCallContextDestroyed(false)
{
}
enum IterationType {
IteratingNone,
IteratingOverAll,
};
IterationType m_iterating;
protected:
using ObserverSet = WillBeHeapHashSet<RawPtrWillBeWeakMember<Observer>>;
ObserverSet m_observers;
#if ENABLE(ASSERT)
// The observed context; used only by assertions.
T* context() { return static_cast<T*>(this); }
#endif
private:
// Guards against dispatching contextDestroyed() more than once.
bool m_didCallContextDestroyed;
};
template<typename T, typename Observer>
inline LifecycleNotifier<T, Observer>::~LifecycleNotifier()
{
// FIXME: Enable the following ASSERT. Also see a FIXME in Document::detach().
// ASSERT(!m_observers.size() || m_didCallContextDestroyed);
#if !ENABLE(OILPAN)
// Without Oilpan, observers keep a raw pointer back to this notifier;
// clear it so they do not dangle once we are destroyed.
TemporaryChange<IterationType> scope(m_iterating, IteratingOverAll);
for (Observer* observer : m_observers) {
observer->clearLifecycleContext();
}
#endif
}
// Dispatches contextDestroyed() to every currently-registered observer,
// at most once per notifier lifetime.
template<typename T, typename Observer>
inline void LifecycleNotifier<T, Observer>::notifyContextDestroyed()
{
// Don't notify contextDestroyed() twice.
if (m_didCallContextDestroyed)
return;
TemporaryChange<IterationType> scope(m_iterating, IteratingOverAll);
// Iterate over a snapshot so observers may remove themselves during the
// callback without invalidating the iteration.
Vector<RawPtrWillBeUntracedMember<Observer>> snapshotOfObservers;
copyToVector(m_observers, snapshotOfObservers);
for (Observer* observer : snapshotOfObservers) {
// FIXME: Oilpan: At the moment, it's possible that the Observer is
// destructed during the iteration.
// Once we enable Oilpan by default for Observers *and*
// Observer::contextDestroyed() does not call removeObserver(),
// we can remove the hack by making m_observers
// a HeapHashSet<WeakMember<Observers>>. (i.e., we can just iterate
// m_observers without taking a snapshot).
// Skip observers that were removed since the snapshot was taken.
if (m_observers.contains(observer)) {
ASSERT(observer->lifecycleContext() == context());
observer->contextDestroyed();
}
}
m_didCallContextDestroyed = true;
}
// Registers |observer|; crashes (release assert) if called while the
// observer set is being iterated, since that would mutate it mid-walk.
template<typename T, typename Observer>
inline void LifecycleNotifier<T, Observer>::addObserver(Observer* observer)
{
RELEASE_ASSERT(m_iterating != IteratingOverAll);
m_observers.add(observer);
}
// Unregisters |observer|. Safe during iteration: notifyContextDestroyed()
// walks a snapshot and re-checks membership before each callback.
template<typename T, typename Observer>
inline void LifecycleNotifier<T, Observer>::removeObserver(Observer* observer)
{
m_observers.remove(observer);
}
} // namespace blink
#endif // LifecycleNotifier_h
| bsd-3-clause |
nwjs/chromium.src | third_party/google-closure-library/closure/goog/dom/savedcaretrange_test.js | 7324 | /**
* @license
* Copyright The Closure Library Authors.
* SPDX-License-Identifier: Apache-2.0
*/
goog.module('goog.dom.SavedCaretRangeTest');
goog.setTestOnly();
const Range = goog.require('goog.dom.Range');
const SavedCaretRange = goog.require('goog.dom.SavedCaretRange');
const dom = goog.require('goog.dom');
const testSuite = goog.require('goog.testing.testSuite');
const testingDom = goog.require('goog.testing.dom');
const userAgent = goog.require('goog.userAgent');
/*
TODO(user): Look into why removeCarets test doesn't pass.
function testRemoveCarets() {
var def = goog.dom.getElement('def');
var jkl = goog.dom.getElement('jkl');
var range = goog.dom.Range.createFromNodes(
def.firstChild, 1, jkl.firstChild, 2);
range.select();
var saved = range.saveUsingCarets();
assertHTMLEquals(
"d<span id="" + saved.startCaretId_ + ""></span>ef", def.innerHTML);
assertHTMLEquals(
"jk<span id="" + saved.endCaretId_ + ""></span>l", jkl.innerHTML);
saved.removeCarets();
assertHTMLEquals("def", def.innerHTML);
assertHTMLEquals("jkl", jkl.innerHTML);
var selection = goog.dom.Range.createFromWindow(window);
assertEquals('Wrong start node', def.firstChild, selection.getStartNode());
assertEquals('Wrong end node', jkl.firstChild, selection.getEndNode());
assertEquals('Wrong start offset', 1, selection.getStartOffset());
assertEquals('Wrong end offset', 2, selection.getEndOffset());
}
*/
/**
 * Clear the selection, then restore it from the saved range, asserting that
 * the selection is gone before the restore and present after it.
 * @param {Node} parent The node containing the current selection (currently
 *     unused; kept for call-site symmetry).
 * @param {dom.SavedRange} saved The saved range.
 * @return {dom.AbstractRange} Restored range.
 */
function clearSelectionAndRestoreSaved(parent, saved) {
Range.clearSelection();
assertFalse(Range.hasSelection(window));
const range = saved.restore();
assertTrue(Range.hasSelection(window));
return range;
}
testSuite({
setUp() {
// Merge adjacent text nodes left over from a previous test's caret spans.
document.body.normalize();
},
/** @bug 1480638 */
testSavedCaretRangeDoesntChangeSelection() {
// NOTE(nicksantos): We cannot detect this bug programatically. The only
// way to detect it is to run this test manually and look at the selection
// when it ends.
const div = dom.getElement('bug1480638');
const range = Range.createFromNodes(div.firstChild, 0, div.lastChild, 1);
range.select();
// Observe visible selection. Then move to next line and see it change.
// If the bug exists, it starts with "foo" selected and ends with
// it not selected.
// debugger;
// saveUsingCarets() is called for its side effect (inserting caret spans);
// the returned value is intentionally unused here.
const saved = range.saveUsingCarets();
},
/**
@suppress {strictMissingProperties} suppression added to enable type
checking
*/
testSavedCaretRange() {
if (userAgent.IE && !userAgent.isDocumentModeOrHigher(8)) {
// testSavedCaretRange fails in IE7 unless the source files are loaded in
// a certain order. Adding goog.require('goog.dom.classes') to dom.js or
// goog.require('goog.array') to savedcaretrange_test.js after the
// goog.require('goog.dom') line fixes the test, but it's better to not
// rely on such hacks without understanding the reason of the failure.
return;
}
const parent = dom.getElement('caretRangeTest');
let def = dom.getElement('def');
let jkl = dom.getElement('jkl');
const range = Range.createFromNodes(def.firstChild, 1, jkl.firstChild, 2);
assertFalse(range.isReversed());
range.select();
// Saving inserts hidden caret <span>s at the range boundaries.
const saved = range.saveUsingCarets();
assertHTMLEquals(
'd<span id="' + saved.startCaretId_ + '"></span>ef', def.innerHTML);
assertHTMLEquals(
'jk<span id="' + saved.endCaretId_ + '"></span>l', jkl.innerHTML);
testingDom.assertRangeEquals(
def.childNodes[1], 0, jkl.childNodes[1], 0, saved.toAbstractRange());
// Re-fetch the elements before restoring.
def = dom.getElement('def');
jkl = dom.getElement('jkl');
const restoredRange = clearSelectionAndRestoreSaved(parent, saved);
assertFalse(restoredRange.isReversed());
testingDom.assertRangeEquals(def, 1, jkl, 1, restoredRange);
const selection = Range.createFromWindow(window);
// Restoring must remove the caret spans again.
assertHTMLEquals('def', def.innerHTML);
assertHTMLEquals('jkl', jkl.innerHTML);
// def and jkl now contain fragmented text nodes.
const endNode = selection.getEndNode();
if (endNode == jkl.childNodes[0]) {
// Webkit (up to Chrome 57) and IE < 9.
testingDom.assertRangeEquals(
def.childNodes[1], 0, jkl.childNodes[0], 2, selection);
} else if (endNode == jkl.childNodes[1]) {
// Opera
testingDom.assertRangeEquals(
def.childNodes[1], 0, jkl.childNodes[1], 0, selection);
} else {
// Gecko, newer Chromes
testingDom.assertRangeEquals(def, 1, jkl, 1, selection);
}
},
// A reversed (anchor-after-focus) range must stay reversed across a
// save/restore round trip.
testReversedSavedCaretRange() {
const parent = dom.getElement('caretRangeTest');
const def = dom.getElement('def-5');
const jkl = dom.getElement('jkl-5');
const range = Range.createFromNodes(jkl.firstChild, 1, def.firstChild, 2);
assertTrue(range.isReversed());
range.select();
const saved = range.saveUsingCarets();
const restoredRange = clearSelectionAndRestoreSaved(parent, saved);
assertTrue(restoredRange.isReversed());
testingDom.assertRangeEquals(def, 1, jkl, 1, restoredRange);
},
// removeContents() on a restored range should delete exactly the content
// between the saved boundaries.
testRemoveContents() {
const def = dom.getElement('def-4');
const jkl = dom.getElement('jkl-4');
// Sanity check.
const container = dom.getElement('removeContentsTest');
assertEquals(7, container.childNodes.length);
assertEquals('def', def.innerHTML);
assertEquals('jkl', jkl.innerHTML);
const range = Range.createFromNodes(def.firstChild, 1, jkl.firstChild, 2);
range.select();
const saved = range.saveUsingCarets();
const restored = saved.restore();
restored.removeContents();
assertEquals(6, container.childNodes.length);
assertEquals('d', def.innerHTML);
assertEquals('l', jkl.innerHTML);
},
testHtmlEqual() {
const parent = dom.getElement('caretRangeTest-2');
const def = dom.getElement('def-2');
const jkl = dom.getElement('jkl-2');
const range = Range.createFromNodes(def.firstChild, 1, jkl.firstChild, 2);
range.select();
// Save the same selection twice; the generated caret ids differ, so the
// raw HTML differs, but htmlEqual() must treat the snapshots as equal.
const saved = range.saveUsingCarets();
const html1 = parent.innerHTML;
saved.removeCarets();
const saved2 = range.saveUsingCarets();
const html2 = parent.innerHTML;
saved2.removeCarets();
assertNotEquals(
'Same selection with different saved caret range carets ' +
'must have different html.',
html1, html2);
assertTrue(
'Same selection with different saved caret range carets must ' +
'be considered equal by htmlEqual',
SavedCaretRange.htmlEqual(html1, html2));
saved.dispose();
saved2.dispose();
},
testStartCaretIsAtEndOfParent() {
const parent = dom.getElement('caretRangeTest-3');
const def = dom.getElement('def-3');
const jkl = dom.getElement('jkl-3');
let range = Range.createFromNodes(def, 1, jkl, 1);
range.select();
const saved = range.saveUsingCarets();
clearSelectionAndRestoreSaved(parent, saved);
range = Range.createFromWindow();
// Strip whitespace: the restored selection should span 'ghi' and 'jkl'.
assertEquals('ghijkl', range.getText().replace(/\s/g, ''));
},
});
| bsd-3-clause |
olegkaliuga/yii | vendor/paypal/work/merchant-sdk-php/samples/MassPay/MassPay.html.php | 3396 | <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1">
<title>PayPal Merchant SDK - MassPay</title>
<link rel="stylesheet" href="../Common/sdk.css"/>
<script type="text/javascript" src="../Common/sdk.js"></script>
</head>
<body>
<div id="wrapper">
<img src="https://devtools-paypal.com/image/bdg_payments_by_pp_2line.png">
<div id="header">
<h3>MassPay</h3>
<div id="apidetails">MassPay API operation makes a payment to one or
more PayPal account holders.</div>
</div>
<!-- Form posts to MassPay.php, which reads receiverInfoCode plus the
parallel mail[]/id[]/phone[]/amount[]/currencyCode[] arrays. -->
<form method="POST" action="MassPay.php">
<div id="request_form">
<div class="params">
<div class="param_name">Receiver Info Code Type</div>
<div class="param_value">
<!-- Attribute values quoted for consistency with the rest of the markup. -->
<select name="receiverInfoCode">
<option value="EmailAddress">Email</option>
<option value="UserID">UserID</option>
<option value="PhoneNumber">Phone</option>
</select>
</div>
</div>
<table class="params">
<tr>
<th class="param_name">Mail</th>
<th class="param_name">UserID</th>
<th class="param_name">Phone Number</th>
<th class="param_name">Amount</th>
<th class="param_name">Currency Code</th>
</tr>
<!-- Three identical receiver rows; each column feeds one of the
parallel POST arrays. -->
<tr>
<td class="param_value"><input type="text" name="mail[]"
value="[email protected]" size="25" maxlength="260" /></td>
<td class="param_value"><input type="text" name="id[]" value=""
size="25" maxlength="260" /></td>
<td class="param_value"><input type="text" name="phone[]" value=""
size="25" maxlength="260" /></td>
<td class="param_value"><input type="text" name="amount[]"
value="3.00" size="25" maxlength="260" /></td>
<td class="param_value"><input type="text" name="currencyCode[]"
value="USD" size="25" maxlength="260" /></td>
</tr>
<tr>
<td class="param_value"><input type="text" name="mail[]"
value="[email protected]" size="25" maxlength="260" /></td>
<td class="param_value"><input type="text" name="id[]" value=""
size="25" maxlength="260" /></td>
<td class="param_value"><input type="text" name="phone[]" value=""
size="25" maxlength="260" /></td>
<td class="param_value"><input type="text" name="amount[]"
value="3.00" size="25" maxlength="260" /></td>
<td class="param_value"><input type="text" name="currencyCode[]"
value="USD" size="25" maxlength="260" /></td>
</tr>
<tr>
<td class="param_value"><input type="text" name="mail[]"
value="[email protected]" size="25" maxlength="260" /></td>
<td class="param_value"><input type="text" name="id[]" value=""
size="25" maxlength="260" /></td>
<td class="param_value"><input type="text" name="phone[]" value=""
size="25" maxlength="260" /></td>
<td class="param_value"><input type="text" name="amount[]"
value="3.00" size="25" maxlength="260" /></td>
<td class="param_value"><input type="text" name="currencyCode[]"
value="USD" size="25" maxlength="260" /></td>
</tr>
</table>
<?php
include('../Permissions/Permission.html.php');
?>
<input type="submit" name="MassPayBtn" value="MassPay" /><br />
</div>
<a href="../index.php">Home</a>
</form>
</div>
</body>
</html>
| bsd-3-clause |
youtube/cobalt | third_party/chromium/media/capture/video/android/java/src/org/chromium/media/VideoCapture.java | 11938 | // Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package org.chromium.media;
import android.content.Context;
import android.graphics.ImageFormat;
import android.hardware.display.DisplayManager;
import android.view.Display;
import android.view.Surface;
import org.chromium.base.ContextUtils;
import org.chromium.base.annotations.CalledByNative;
import org.chromium.base.annotations.JNINamespace;
import org.chromium.base.annotations.NativeMethods;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
/**
 * Video Capture Device base class, defines a set of methods that native code
 * needs to use to configure, start capture, and to be reached by callbacks and
 * provides some necessary data type(s) with accessors.
 **/
@JNINamespace("media")
public abstract class VideoCapture {
/**
 * Common class for storing a framerate range. Values should be multiplied by 1000.
 */
protected static class FramerateRange {
public int min;
public int max;
public FramerateRange(int min, int max) {
this.min = min;
this.max = max;
}
}
// The angle (0, 90, 180, 270) that the image needs to be rotated to show in
// the display's native orientation.
protected int mCameraNativeOrientation;
// In some occasions we need to invert the device rotation readings, see the
// individual implementations.
protected boolean mInvertDeviceOrientationReadings;
// Negotiated capture format; read by the query*()/getColorspace() accessors.
// Assumed to be set by subclasses during allocate() -- not visible here.
protected VideoCaptureFormat mCaptureFormat;
// Camera id this instance was constructed for.
protected final int mId;
// Native callback context variable.
protected final long mNativeVideoCaptureDeviceAndroid;
// Set via setTestMode(); how subclasses honor it is implementation-defined.
protected boolean mUseBackgroundThreadForTesting;
VideoCapture(int id, long nativeVideoCaptureDeviceAndroid) {
mId = id;
mNativeVideoCaptureDeviceAndroid = nativeVideoCaptureDeviceAndroid;
}
// Allocate necessary resources for capture.
@CalledByNative
public abstract boolean allocate(
int width, int height, int frameRate, boolean enableFaceDetection);
// Success is indicated by returning true and a callback to
// VideoCaptureJni.get().onStarted(, VideoCapture.this), which may occur synchronously or
// asynchronously. Failure can be indicated by one of the following:
// * Returning false. In this case no callback to VideoCaptureJni.get().onStarted() is made.
// * Returning true, and asynchronously invoking VideoCaptureJni.get().onError. In this case
// also no callback to VideoCaptureJni.get().onStarted() is made.
@CalledByNative
public abstract boolean startCaptureMaybeAsync();
// Blocks until it is guaranteed that no more frames are sent.
@CalledByNative
public abstract boolean stopCaptureAndBlockUntilStopped();
// Replies by calling VideoCaptureJni.get().onGetPhotoCapabilitiesReply(). Will pass |null|
// for parameter |result| to indicate failure.
@CalledByNative
public abstract void getPhotoCapabilitiesAsync(long callbackId);
/**
 * @param zoom Zoom level, should be ignored if 0.
 * @param focusMode Focus mode following AndroidMeteringMode enum.
 * @param focusDistance Desired distance to plane of sharpest focus.
 * @param exposureMode Exposure mode following AndroidMeteringMode enum.
 * @param pointsOfInterest2D 2D normalized points of interest, marshalled with
 * x coordinate first followed by the y coordinate.
 * @param hasExposureCompensation Indicates if |exposureCompensation| is set.
 * @param exposureCompensation Adjustment to auto exposure. 0 means not adjusted.
 * @param exposureTime Duration each pixel is exposed to light (in nanoseconds).
 * @param whiteBalanceMode White Balance mode following AndroidMeteringMode enum.
 * @param iso Sensitivity to light. 0, which would be invalid, means ignore.
 * @param hasRedEyeReduction Indicates if |redEyeReduction| is set.
 * @param redEyeReduction Value of red eye reduction for the auto flash setting.
 * @param fillLightMode Flash setting, following AndroidFillLightMode enum.
 * @param colorTemperature White Balance reference temperature, valid if whiteBalanceMode is
 * manual, and its value is larger than 0.
 * @param torch Torch setting, true meaning on.
 */
@CalledByNative
public abstract void setPhotoOptions(double zoom, int focusMode, double focusDistance,
int exposureMode, double width, double height, double[] pointsOfInterest2D,
boolean hasExposureCompensation, double exposureCompensation, double exposureTime,
int whiteBalanceMode, double iso, boolean hasRedEyeReduction, boolean redEyeReduction,
int fillLightMode, boolean hasTorch, boolean torch, double colorTemperature);
// Replies by calling VideoCaptureJni.get().onPhotoTaken().
@CalledByNative
public abstract void takePhotoAsync(long callbackId);
@CalledByNative
public abstract void deallocate();
// Accessors for the negotiated capture format; meaningful only after
// mCaptureFormat has been populated.
@CalledByNative
public final int queryWidth() {
return mCaptureFormat.mWidth;
}
@CalledByNative
public final int queryHeight() {
return mCaptureFormat.mHeight;
}
@CalledByNative
public final int queryFrameRate() {
return mCaptureFormat.mFramerate;
}
// Maps the Android ImageFormat of the capture format onto the
// AndroidImageFormat enum shared with native code.
@CalledByNative
public final int getColorspace() {
switch (mCaptureFormat.mPixelFormat) {
case ImageFormat.YV12:
return AndroidImageFormat.YV12;
case ImageFormat.YUV_420_888:
return AndroidImageFormat.YUV_420_888;
case ImageFormat.NV21:
return AndroidImageFormat.NV21;
case ImageFormat.UNKNOWN:
default:
return AndroidImageFormat.UNKNOWN;
}
}
@CalledByNative
public final void setTestMode() {
mUseBackgroundThreadForTesting = true;
}
// Combines the camera sensor's native orientation with the current display
// rotation (optionally inverted) into a 0-359 degree rotation.
protected final int getCameraRotation() {
int rotation = mInvertDeviceOrientationReadings ? (360 - getDeviceRotation())
: getDeviceRotation();
return (mCameraNativeOrientation + rotation) % 360;
}
// Reads the default display's rotation and converts it to degrees.
protected final int getDeviceRotation() {
final int orientation;
DisplayManager dm = (DisplayManager) ContextUtils.getApplicationContext().getSystemService(
Context.DISPLAY_SERVICE);
switch (dm.getDisplay(Display.DEFAULT_DISPLAY).getRotation()) {
case Surface.ROTATION_90:
orientation = 90;
break;
case Surface.ROTATION_180:
orientation = 180;
break;
case Surface.ROTATION_270:
orientation = 270;
break;
case Surface.ROTATION_0:
default:
orientation = 0;
break;
}
return orientation;
}
// {@link VideoCaptureJni.get().onPhotoTaken()} needs to be called back if there's any
// problem after {@link takePhotoAsync()} has returned true.
protected void notifyTakePhotoError(long callbackId) {
VideoCaptureJni.get().onPhotoTaken(
mNativeVideoCaptureDeviceAndroid, VideoCapture.this, callbackId, null);
}
/**
 * Finds the framerate range matching |targetFramerate|. Tries to find a range with as low of a
 * minimum value as possible to allow the camera adjust based on the lighting conditions.
 * Assumes that all framerate values are multiplied by 1000.
 *
 * This code is mostly copied from WebRTC:
 * CameraEnumerationAndroid.getClosestSupportedFramerateRange
 * in webrtc/api/android/java/src/org/webrtc/CameraEnumerationAndroid.java
 */
protected static FramerateRange getClosestFramerateRange(
final List<FramerateRange> framerateRanges, final int targetFramerate) {
return Collections.min(framerateRanges, new Comparator<FramerateRange>() {
// Threshold and penalty weights if the upper bound is further away than
// |MAX_FPS_DIFF_THRESHOLD| from requested.
private static final int MAX_FPS_DIFF_THRESHOLD = 5000;
private static final int MAX_FPS_LOW_DIFF_WEIGHT = 1;
private static final int MAX_FPS_HIGH_DIFF_WEIGHT = 3;
// Threshold and penalty weights if the lower bound is bigger than |MIN_FPS_THRESHOLD|.
private static final int MIN_FPS_THRESHOLD = 8000;
private static final int MIN_FPS_LOW_VALUE_WEIGHT = 1;
private static final int MIN_FPS_HIGH_VALUE_WEIGHT = 4;
// Use one weight for small |value| less than |threshold|, and another weight above.
private int progressivePenalty(
int value, int threshold, int lowWeight, int highWeight) {
return (value < threshold)
? value * lowWeight
: threshold * lowWeight + (value - threshold) * highWeight;
}
// Total penalty for a range: lower bound should be small, upper bound
// should be close to the target.
int diff(FramerateRange range) {
final int minFpsError = progressivePenalty(range.min, MIN_FPS_THRESHOLD,
MIN_FPS_LOW_VALUE_WEIGHT, MIN_FPS_HIGH_VALUE_WEIGHT);
final int maxFpsError = progressivePenalty(Math.abs(targetFramerate - range.max),
MAX_FPS_DIFF_THRESHOLD, MAX_FPS_LOW_DIFF_WEIGHT, MAX_FPS_HIGH_DIFF_WEIGHT);
return minFpsError + maxFpsError;
}
@Override
public int compare(FramerateRange range1, FramerateRange range2) {
return diff(range1) - diff(range2);
}
});
}
// Unboxes an ArrayList<Integer> into an int[] of the same length and order.
protected static int[] integerArrayListToArray(ArrayList<Integer> intArrayList) {
int[] intArray = new int[intArrayList.size()];
for (int i = 0; i < intArrayList.size(); i++) {
intArray[i] = intArrayList.get(i).intValue();
}
return intArray;
}
@NativeMethods
interface Natives {
// Method for VideoCapture implementations to call back native code.
void onFrameAvailable(long nativeVideoCaptureDeviceAndroid, VideoCapture caller,
byte[] data, int length, int rotation);
void onI420FrameAvailable(long nativeVideoCaptureDeviceAndroid, VideoCapture caller,
ByteBuffer yBuffer, int yStride, ByteBuffer uBuffer, ByteBuffer vBuffer,
int uvRowStride, int uvPixelStride, int width, int height, int rotation,
long timestamp);
// Method for VideoCapture implementations to signal an asynchronous error.
void onError(long nativeVideoCaptureDeviceAndroid, VideoCapture caller,
int androidVideoCaptureError, String message);
// Method for VideoCapture implementations to signal that a frame was dropped.
void onFrameDropped(long nativeVideoCaptureDeviceAndroid, VideoCapture caller,
int androidVideoCaptureFrameDropReason);
void onGetPhotoCapabilitiesReply(long nativeVideoCaptureDeviceAndroid, VideoCapture caller,
long callbackId, PhotoCapabilities result);
// Callback for calls to takePhoto(). This can indicate both success and
// failure. Failure is indicated by |data| being null.
void onPhotoTaken(long nativeVideoCaptureDeviceAndroid, VideoCapture caller,
long callbackId, byte[] data);
// Method for VideoCapture implementations to report device started event.
void onStarted(long nativeVideoCaptureDeviceAndroid, VideoCapture caller);
void dCheckCurrentlyOnIncomingTaskRunner(
long nativeVideoCaptureDeviceAndroid, VideoCapture caller);
}
}
| bsd-3-clause |
scheib/chromium | tools/perf/page_sets/maps_perf_test/config.js | 289 | // This file was generated by the _js_query_arg_file Skylark rule defined in
// maps/vectortown/performance/script/build_defs.bzl.
// Ampersand-separated flag=value query-arg string consumed by the maps perf
// test. Generated file: regenerate rather than hand-editing the string.
var testConfig = "overridePixelRatio=1&title=chrome_smoothness_performancetest_config&nobudget=false&nodraw=false&noprefetch=true&viewport=basic&wait=true";
| bsd-3-clause |
hujiajie/chromium-crosswalk | ui/app_list/search/history.cc | 1669 | // Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "ui/app_list/search/history.h"
#include <stddef.h>
#include "base/strings/string_util.h"
#include "base/strings/utf_string_conversions.h"
#include "ui/app_list/search/history_data.h"
#include "ui/app_list/search/history_data_store.h"
#include "ui/app_list/search/tokenized_string.h"
namespace app_list {
namespace {
// Normalize the given string by joining all its tokens with a space.
std::string NormalizeString(const std::string& utf8) {
TokenizedString tokenized(base::UTF8ToUTF16(utf8));
return base::UTF16ToUTF8(
base::JoinString(tokenized.tokens(), base::ASCIIToUTF16(" ")));
}
} // namespace
// Builds the query history on top of |store| and registers for its load
// notification; IsReady() stays false until OnHistoryDataLoadedFromStore().
History::History(scoped_refptr<HistoryDataStore> store)
: store_(store), data_loaded_(false) {
// Capacity limits passed to HistoryData for the in-memory history.
const size_t kMaxQueryEntries = 1000;
const size_t kMaxSecondaryQueries = 5;
data_.reset(
new HistoryData(store_.get(), kMaxQueryEntries, kMaxSecondaryQueries));
data_->AddObserver(this);
}
History::~History() {
data_->RemoveObserver(this);
}
bool History::IsReady() const {
return data_loaded_;
}
// Records that |result_id| was launched for |query|. The query is normalized
// so tokenization differences do not fragment the history.
void History::AddLaunchEvent(const std::string& query,
const std::string& result_id) {
DCHECK(IsReady());
data_->Add(NormalizeString(query), result_id);
}
// Returns results previously recorded for the normalized form of |query|.
scoped_ptr<KnownResults> History::GetKnownResults(
const std::string& query) const {
DCHECK(IsReady());
return data_->GetKnownResults(NormalizeString(query));
}
// HistoryData observer callback: the persisted data is now available.
void History::OnHistoryDataLoadedFromStore() {
data_loaded_ = true;
}
} // namespace app_list
| bsd-3-clause |
strahanjen/strahanjen.github.io | elasticsearch-master/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java | 9336 | /*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.indices.rollover;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import java.io.IOException;
import java.util.HashSet;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import static org.elasticsearch.action.ValidateActions.addValidationError;
/**
* Request class to swap index under an alias upon satisfying conditions
*/
public class RolloverRequest extends AcknowledgedRequest<RolloverRequest> implements IndicesRequest {
public static ObjectParser<RolloverRequest, ParseFieldMatcherSupplier> PARSER =
new ObjectParser<>("conditions", null);
static {
PARSER.declareField((parser, request, parseFieldMatcherSupplier) ->
Condition.PARSER.parse(parser, request.conditions, parseFieldMatcherSupplier),
new ParseField("conditions"), ObjectParser.ValueType.OBJECT);
PARSER.declareField((parser, request, parseFieldMatcherSupplier) ->
request.createIndexRequest.settings(parser.map()),
new ParseField("settings"), ObjectParser.ValueType.OBJECT);
PARSER.declareField((parser, request, parseFieldMatcherSupplier) -> {
for (Map.Entry<String, Object> mappingsEntry : parser.map().entrySet()) {
request.createIndexRequest.mapping(mappingsEntry.getKey(),
(Map<String, Object>) mappingsEntry.getValue());
}
}, new ParseField("mappings"), ObjectParser.ValueType.OBJECT);
PARSER.declareField((parser, request, parseFieldMatcherSupplier) ->
request.createIndexRequest.aliases(parser.map()),
new ParseField("aliases"), ObjectParser.ValueType.OBJECT);
}
private String alias;
private String newIndexName;
private boolean dryRun;
private Set<Condition> conditions = new HashSet<>(2);
private CreateIndexRequest createIndexRequest = new CreateIndexRequest("_na_");
RolloverRequest() {}
public RolloverRequest(String alias, String newIndexName) {
this.alias = alias;
this.newIndexName = newIndexName;
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = createIndexRequest == null ? null : createIndexRequest.validate();
if (alias == null) {
validationException = addValidationError("index alias is missing", validationException);
}
if (createIndexRequest == null) {
validationException = addValidationError("create index request is missing", validationException);
}
return validationException;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
alias = in.readString();
newIndexName = in.readOptionalString();
dryRun = in.readBoolean();
int size = in.readVInt();
for (int i = 0; i < size; i++) {
this.conditions.add(in.readNamedWriteable(Condition.class));
}
createIndexRequest = new CreateIndexRequest();
createIndexRequest.readFrom(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(alias);
out.writeOptionalString(newIndexName);
out.writeBoolean(dryRun);
out.writeVInt(conditions.size());
for (Condition condition : conditions) {
out.writeNamedWriteable(condition);
}
createIndexRequest.writeTo(out);
}
@Override
public String[] indices() {
return new String[] {alias};
}
@Override
public IndicesOptions indicesOptions() {
return IndicesOptions.strictSingleIndexNoExpandForbidClosed();
}
/**
* Sets the alias to rollover to another index
*/
public void setAlias(String alias) {
this.alias = alias;
}
/**
* Sets the alias to rollover to another index
*/
public void setNewIndexName(String newIndexName) {
this.newIndexName = newIndexName;
}
/**
* Sets if the rollover should not be executed when conditions are met
*/
public void dryRun(boolean dryRun) {
this.dryRun = dryRun;
}
/**
* Adds condition to check if the index is at least <code>age</code> old
*/
public void addMaxIndexAgeCondition(TimeValue age) {
this.conditions.add(new MaxAgeCondition(age));
}
/**
* Adds condition to check if the index has at least <code>numDocs</code>
*/
public void addMaxIndexDocsCondition(long numDocs) {
this.conditions.add(new MaxDocsCondition(numDocs));
}
/**
* Sets rollover index creation request to override index settings when
* the rolled over index has to be created
*/
public void setCreateIndexRequest(CreateIndexRequest createIndexRequest) {
this.createIndexRequest = Objects.requireNonNull(createIndexRequest, "create index request must not be null");;
}
boolean isDryRun() {
return dryRun;
}
Set<Condition> getConditions() {
return conditions;
}
String getAlias() {
return alias;
}
String getNewIndexName() {
return newIndexName;
}
CreateIndexRequest getCreateIndexRequest() {
return createIndexRequest;
}
public void source(BytesReference source) {
XContentType xContentType = XContentFactory.xContentType(source);
if (xContentType != null) {
try (XContentParser parser = XContentFactory.xContent(xContentType).createParser(source)) {
PARSER.parse(parser, this, () -> ParseFieldMatcher.EMPTY);
} catch (IOException e) {
throw new ElasticsearchParseException("failed to parse source for rollover index", e);
}
} else {
throw new ElasticsearchParseException("failed to parse content type for rollover index source");
}
}
/**
* Sets the number of shard copies that should be active for creation of the
* new rollover index to return. Defaults to {@link ActiveShardCount#DEFAULT}, which will
* wait for one shard copy (the primary) to become active. Set this value to
* {@link ActiveShardCount#ALL} to wait for all shards (primary and all replicas) to be active
* before returning. Otherwise, use {@link ActiveShardCount#from(int)} to set this value to any
* non-negative integer, up to the number of copies per shard (number of replicas + 1),
* to wait for the desired amount of shard copies to become active before returning.
* Index creation will only wait up until the timeout value for the number of shard copies
* to be active before returning. Check {@link RolloverResponse#isShardsAcked()} to
* determine if the requisite shard copies were all started before returning or timing out.
*
* @param waitForActiveShards number of active shard copies to wait on
*/
public void setWaitForActiveShards(ActiveShardCount waitForActiveShards) {
this.createIndexRequest.waitForActiveShards(waitForActiveShards);
}
/**
* A shortcut for {@link #setWaitForActiveShards(ActiveShardCount)} where the numerical
* shard count is passed in, instead of having to first call {@link ActiveShardCount#from(int)}
* to get the ActiveShardCount.
*/
public void setWaitForActiveShards(final int waitForActiveShards) {
setWaitForActiveShards(ActiveShardCount.from(waitForActiveShards));
}
}
| bsd-3-clause |
BlueHotDog/sails-migrations | test/fixtures/sample_apps/0.9.8/node_modules/sails/test/config/integration/load.test.js | 3853 | var assert = require('assert');
var appHelper = require('./helpers/appHelper');
var path = require('path');
var fs = require('fs');
describe('Configs', function() {
this.timeout(30000);
var appName = 'testApp';
var config;
var sailsserver;
var up = false;
describe('in production env', function () {
before(function(done) {
// build app
appHelper.build(function(err) {
if (err) return done(err);
process.chdir(appName);
// Start sails and pass it command line arguments
require(path.resolve('./../lib')).lift({
// Override memorystore with `{session:{adapter: null}}`
session: {
adapter: null
}
}, function(err, sails) {
if (err) return done(err);
up = true;
config = sails.config;
sailsserver = sails;
done();
});
});
});
after(function() {
sailsserver.lower(function() {
// Not sure why this runs multiple times, but checking "up" makes
// sure we only do chdir once
sailsserver.removeAllListeners();
if (up === true) {
up = false;
process.chdir('../');
appHelper.teardown();
}
});
});
it('should retain legacy `config.adapters` for backwards compat.', function() {
var legacyConfig = config.adapters;
assert(legacyConfig.custom && legacyConfig.custom.module === 'sails-disk');
assert(legacyConfig.sqlite.module === 'sails-sqlite');
assert(legacyConfig.sqlite.host === 'sqliteHOST');
assert(legacyConfig.sqlite.user === 'sqliteUSER');
});
// it('should load connection configs', function() {
// var connectionsConfig = config.connections;
// assert(config.model.adapter === 'sails-disk');
// assert(connectionsConfig.custom && connectionsConfig.custom.module === 'sails-disk');
// assert(connectionsConfig.sqlite.module === 'sails-sqlite');
// assert(connectionsConfig.sqlite.host === 'sqliteHOST');
// assert(connectionsConfig.sqlite.user === 'sqliteUSER');
// });
it('should load application configs', function() {
assert(config.port === 1702);
assert(config.host === 'localhost');
// this should have been overriden by the local conf file
assert(config.appName === 'portal2');
assert(config.environment === 'production');
assert(config.cache.maxAge === 9001);
assert(config.globals._ === false);
});
it('should load the controllers configs', function() {
var conf = config.controllers;
assert(conf.routes.actions === false);
assert(conf.routes.prefix === 'Z');
assert(conf.routes.expectIntegerId === true);
assert(conf.csrf === true);
});
it('should load the io configs', function() {
var conf = config.sockets;
assert(conf.adapter === 'disk');
assert(conf.transports[0] === 'websocket');
assert(conf.origins === '*:1337');
assert(conf.heartbeats === false);
assert(conf['close timeout'] === 10);
assert(conf.authorization === false);
assert(conf['log level'] === 'error');
assert(conf['log colors'] === true);
assert(conf.static === false);
assert(conf.resource === '/all/the/sockets');
});
it('should override configs with locals config', function() {
assert(config.appName === 'portal2');
});
it('should load the log configs', function() {
assert(config.log.level === 'error');
});
it('should load the poly configs', function() {
assert(config.policies['*'] === false);
});
it('should load the routes configs', function() {
assert(typeof config.routes['/'] === 'function');
});
it('should load the session configs', function() {
assert(config.session.secret === '1234567');
assert(config.session.key === 'sails.sid');
});
it('should load the views config', function() {
var conf = config.views;
assert(conf.engine.ext === 'ejs');
assert(conf.blueprints === false);
assert(conf.layout === false);
});
});
}); | mit |
grokcore/dev.lexycross | wordsmithed/src/gevent-socketio/tests/jstests/tests/suite.js | 1679 | testTransport = function(transports)
{
var prefix = "socketio - " + transports + ": ";
connect = function(transports)
{
// Force transport
io.transports = transports;
deepEqual(io.transports, transports, "Force transports");
var options = { 'force new connection': true }
return io.connect('/test', options);
}
asyncTest(prefix + "Connect", function() {
expect(4);
test = connect(transports);
test.on('connect', function () {
ok( true, "Connected with transport: " + test.socket.transport.name );
test.disconnect();
});
test.on('disconnect', function (reason) {
ok( true, "Disconnected - " + reason );
test.socket.disconnect();
start();
});
test.on('connect_failed', function () {
ok( false, "Connection failed");
start();
});
});
asyncTest(prefix + "Emit with ack", function() {
expect(3);
test = connect(transports);
test.emit('requestack', 1, function (val1, val2) {
equal(val1, 1);
equal(val2, "ack");
test.disconnect();
test.socket.disconnect();
start();
});
});
asyncTest(prefix + "Emit with ack one return value", function() {
expect(2);
test = connect(transports);
test.emit('requestackonevalue', 1, function (val1) {
equal(val1, 1);
test.disconnect();
test.socket.disconnect();
start();
});
});
}
transports = [io.transports];
// Validate individual transports
for(t in io.transports)
{
if(io.Transport[io.transports[t]].check()) {
transports.push([io.transports[t]]);
}
}
for(t in transports)
{
testTransport(transports[t])
}
| mit |
motherjones/congress-vote-tracker | syria/dev/mojo_site/mobile_files/theme.css | 26889 | /*
* Theme: Narcissus
*/
/* Global Reset */
#dsq-content ul,
#dsq-content li,
#dsq-content ol,
#dsq-content cite,
#dsq-content div,
#dsq-content table,
#dsq-content td,
#dsq-content th,
#dsq-content p,
#dsq-content a,
#dsq-content img,
#dsq-content caption {
border: 0;
padding: 0;
margin: 0;
float: none;
text-indent: 0;
background: none;
font-style: normal;
text-align: left;
}
a:focus {outline: 0;}
#dsq-content h3 {
float: none;
}
#dsq-content ul,
#dsq-content li,
#dsq-content ol,
#dsq-content cite,
#dsq-content a,
#dsq-content img,
#dsq-content select {
display: inline;
width: auto;
max-width: none;
min-width: none;
}
#dsq-content div,
#dsq-content p,
#dsq-content h3 {
display: block;
}
#dsq-content input {
background: transparent;
}
#dsq-content img {
max-width: none;
}
#dsq-content li,
#dsq-content ul,
#dsq-content ol {
list-style-type: none;
list-style-image: none;
background: none;
display: block;
}
#dsq-content li:before {
content: "";
}
#dsq-content table {
border-collapse: separate;
border-spacing: 0;
}
/* Shared */
#dsq-content {
font-size: 12px;
line-height: 1.5;
}
#dsq-content .dsq-panel {
background: #f5f5f5;
padding: 4px 8px 4px 4px;
border: 1px solid #999;
-webkit-border-radius: 5px;
-webkit-border-top-left-radius: 0;
-moz-border-radius: 0 5px 5px 5px;
border-radius: 0 5px 5px 5px;
color: #333;
z-index: 2;
}
#dsq-content iframe {
display: block;
border: 0;
background: transparent;
width: 100%;
height: 90px;
}
#dsq-content .dsq-remove-message,
#dsq-content .dsq-realtime-alert,
#dsq-content .dsq-missing-permissions p {
background-color:#FDF1D0;
border:1px solid #FAD163;
line-height: 1.25;
padding: 5px 8px;
color: #817134;
margin: 10px 0;
}
/* Options */
#dsq-content .dsq-options {
clear: left;
/* background: transparent url('../../../images/themes/narcissus/gray-50.png') repeat-x bottom;*/
margin-bottom: 1em;
}
#dsq-content .dsq-options span {
margin-right: 10px;
}
/* New Post */
#dsq-content .dsq-dc-logo {
float: right;
position: relative;
}
#dsq-content #dsq-community .dsq-dc-logo {
margin-top: 4px;
}
* html #dsq-content .dsq-dc-logo {
width: 90px; /* IE6 float fix. TODO: Figure out why it's needed */
}
/* Login */
#dsq-content .dsq-login-buttons {
padding: 1em 0 0 0;
margin-bottom: 1em;
}
#dsq-content .dsq-login-button {
float: left;
display: block;
margin: 0 3px 0 0;
background: url('../../../images/themes/narcissus/login-all.png');
/* can't let local styles override padding, or sprites appear off (#899) */
padding: 0 !important;
}
#dsq-content .dsq-login-button a {
display: block;
width:89px;
height:21px;
/* can't let local styles override padding, or sprites appear off (#899) */
padding: 0 !important;
margin: 0;
}
#dsq-content .dsq-login-button-disqus { background-position: 0px; }
#dsq-content .dsq-login-button-facebook { background-position: -89px; }
#dsq-content .dsq-login-button-twitter { background-position: -178px; }
#dsq-content .dsq-login-button-openid { background-position: -267px; }
#dsq-content .dsq-login-button-yahoo { background-position: -356px; }
#dsq-content .dsq-login-button-google { background-position: -445px; }
/* Request User Info */
#dsq-content .dsq-request-user-info {
/* background: transparent url('../../../images/themes/narcissus/gray-50.png') repeat-x bottom;*/
margin-bottom: 1em;
}
#dsq-content .dsq-request-user-logout {
float: right;
}
#dsq-content .dsq-request-user-info td {
vertical-align: top;
}
#dsq-content .dsq-request-user-info td a {
font-weight: bold;
}
#dsq-content .dsq-request-user-info td small a {
font-weight: normal;
}
#dsq-content .dsq-request-user-name {
width: 100%;
font-size: 1.167em;
padding: .25em 0 0 .5em;
line-height: 1;
vertical-align: top;
border: 0;
}
#dsq-content .dsq-request-user-name small {
font-size: 9px;
}
#dsq-content .dsq-request-user-stats {
padding: 0 0 0 .5em;
}
#dsq-content .dsq-request-user-stats span.dsq-request-user-stat {
margin-right: .75em;
}
#dsq-content .dsq-request-user-stats span.dsq-request-user-stat big {
font-weight: bold;
font-size: 1.167em;
}
#dsq-comments-title {
clear: left;
}
#dsq-content .dsq-request-user-info {
clear: right;
}
#dsq-content .dsq-request-user-avatar {
border: 1px solid #ccc;
padding: 3px !important;
-moz-box-shadow: 0 1px 3px rgba(0,0,0,.15);
-webkit-box-shadow: 0 1px 3px rgba(0,0,0,.15);
-moz-border-radius: 3px;
-webkit-border-radius: 3px;
margin: 0 4px 0 0;
}
/*
* User area in the reply box
*/
#dsq-content .dsq-append-post .dsq-request-user-stats {
display: none;
}
#dsq-content .dsq-append-post .dsq-dc-logo {
display: none;
}
#dsq-content .dsq-append-post img.dsq-request-user-avatar {
width: 18px;
height: 18px;
}
/* Form */
#dsq-content #dsq-form-area {
position: relative;
}
#dsq-content .dsq-textarea-wrapper {
min-height: 90px;
}
#dsq-content .dsq-textarea .dsq-textarea-wrapper,
#dsq-content .dsq-input-wrapper {
border: 1px solid #999999;
-webkit-border-radius: 3px;
-moz-border-radius: 3px;
border-radius: 3px;
background: #fff url('../../../images/themes/narcissus/inputshadow.gif') no-repeat top left;
padding: 3px 5px;
margin: 3px 0;
}
.dsq-textarea-wrapper.dsq-textarea-loading {
background: url('../../../img/dot-loader.gif') no-repeat 8px 8px,
url('../../../images/themes/narcissus/inputshadow.gif') no-repeat top left !important;
}
.dsq-textarea-wrapper.dsq-textarea-loading iframe {
/* visibility: hidden; */
}
#dsq-content .dsq-post-footer .dsq-input-wrapper {
float: left;
margin: 0;
}
#dsq-content .dsq-textarea .dsq-textarea-wrapper {
padding: 5px;
overflow: hidden;
}
#dsq-content .dsq-textarea textarea,
#dsq-content input {
font-size: 12px;
font-family: Arial, Helvetica, sans-serif;
padding: 0;
}
#dsq-content .dsq-textarea textarea {
border: 0;
width: 100%;
height: 36px;
overflow: auto;
}
#dsq-content .dsq-textarea textarea:focus,
#dsq-content input:focus {
outline: 0;
}
#dsq-content .dsq-iframe-loading {
height: 10px;
padding-left: 5px;
}
#dsq-content .dsq-post-fields table {
max-width:700px;
position:relative;
width:100%;
}
#dsq-content .dsq-post-fields td {
padding:0;
width:50%;
}
#dsq-content .dsq-post-fields .dsq-post-fields-left {
padding-right:0.25em;
}
#dsq-content .dsq-post-fields .dsq-post-fields-right {
padding-left:0.25em;
}
#dsq-content .dsq-post-fields input {
border:0 none;
width:100%;
}
#dsq-content .dsq-post-footer input {
width: 100%;
border: 0;
}
#dsq-content .dsq-trackback-url {
border: 1px solid #ccc;
padding: 4px;
-moz-box-shadow: inset 0 1px 2px rgba(0,0,0,.15);
-webkit-box-shadow: inset 0 1px 2px rgba(0,0,0,.15);
-moz-border-radius: 2px;
-webkit-border-radius: 2px;
border-radius: 2px;
background: #fff;
background: rgba(255,255,255, .85);
}
#dsq-content .dsq-subscribe-menu {
background: transparent url('../../../images/themes/narcissus/gray-15.png');
_background: #D0D0D0;
-webkit-border-radius: 3px;
-moz-border-radius: 3px;
border-radius: 3px;
}
#dsq-content .dsq-subscribe {
position: relative;
}
#dsq-content .dsq-subscribe a,
#dsq-content .dsq-subscribe-label {
text-decoration: none;
color: inherit;
display: block;
padding: 6px;
-webkit-border-radius: 3px;
-moz-border-radius: 3px;
border-radius: 3px;
text-indent: -99999px;
}
#dsq-content .dsq-subscribe-label input {
width: auto;
}
#dsq-content .dsq-subscribe a:hover {
background-color: #fafafa;
}
#dsq-content .dsq-subscribe ul {
display: none;
position: absolute;
top: -1px;
left: -1px;
width: 100%;
padding: 0;
background: #eee;
-webkit-border-radius: 3px;
-moz-border-radius: 3px;
border-radius: 3px;
}
#dsq-content .dsq-subscribe-email img, #dsq-content .dsq-subscribe-rs img {
width: 12px;
height: 12px;
vertical-align: middle;
}
/* Submit */
#dsq-content .dsq-post-footer {
position: relative;
margin: 8px 0 10px 0;
}
#dsq-content .dsq-sharing-options {
float: left;
margin: 1px 0 0 0;
}
#dsq-content .dsq-sharing-options input,
#dsq-content .dsq-sharing-options label {
display: inline;
padding: 0;
margin: 0;
}
#dsq-content .dsq-sharing-options label span {
text-transform: capitalize;
}
#dsq-content .dsq-sharing-options span.dsq-share-on-label {
display: none;
}
#dsq-content .dsq-sharing-options:hover span.dsq-share-on {
display: none;
}
#dsq-content .dsq-sharing-options span.dsq-share-on input {
margin: 0 4px;
}
#dsq-content .dsq-sharing-options span.dsq-share-on img {
width: 13px;
height: 13px;
margin-bottom: -2px;
}
#dsq-content .dsq-sharing-options:hover span.dsq-share-on-label {
display: inline;
}
#dsq-content .dsq-share-on div {
float: left;
}
/* Sharing icons */
#dsq-content .dsq-sharing-options-label {
vertical-align: middle;
padding: 0 5px 0 0;
}
#dsq-content .dsq-share-on .dsq-share-on-icon {
float: right;
margin: 0 4px;
}
#dsq-content .dsq-panel .dsq-share-on-icon {
float: left;
margin: 0 4px;
}
#dsq-content .dsq-share-on-icon {
width: 13px;
height: 13px;
background: url('../../../images/themes/narcissus/13x13-icons-sprite.png');
}
#dsq-content .dsq-share-on-twitter { background-position: 0px; }
#dsq-content .dsq-share-on-facebook { background-position: -13px; }
#dsq-content .dsq-share-on-yahoo { background-position: -26px; }
#dsq-content .dsq-share-on-openid { background-position: -39px; }
#dsq-content .dsq-share-on-tumblr { background-position: -52px; }
#dsq-content .dsq-share-on-wordpress { background-position: -65px; }
#dsq-content .dsq-share-on-movabletype { background-position: -78px; }
#dsq-content .dsq-share-on-typepad { background-position: -91px; }
#dsq-content .dsq-sharing-options .dsq-configure-options {
font-size: 11px;
}
#dsq-content .dsq-sharing-options span small,
#dsq-content .dsq-subscribe small {
font-size: 7px;
}
#dsq-content .dsq-sharing-options button.dsq-button-small {
background: transparent url('../../../images/themes/narcissus/gray-15.png');
_background: #F7F7F7;
border: 0;
padding: 4px 7px 5px;
text-shadow: 0;
min-width: 150px;
cursor: pointer;
text-align: left;
}
#dsq-content .dsq-sharing-options:hover button.dsq-button-small {
background: #f5f5f5 url('../../../images/themes/narcissus/button-small.gif') repeat-x top;
border: 1px solid #999;
padding: 3px 6px 5px;
-webkit-border-bottom-left-radius: 0;
-webkit-border-bottom-right-radius: 0;
-moz-border-radius: 5px 5px 0 0;
border-radius: 5px 5px 0 0;
position: relative;
z-index: 10;
border-bottom: 0;
color: #333;
}
#dsq-content .dsq-sharing-options {
position: relative;
}
#dsq-content .dsq-sharing-options .dsq-panel {
display: none;
position: absolute;
top: 20px;
left: 0;
width: 233px;
padding: 6px;
z-index: 9;
}
#dsq-content .dsq-sharing-options .dsq-panel div {
line-height: 1.33;
margin: .5em 0 0;
}
#dsq-content .dsq-sharing-options .dsq-panel div img {
width: 13px;
height: 13px;
margin-bottom: -2px;
}
#dsq-content .dsq-sharing-options .dsq-panel input {
margin-right: .25em;
}
#dsq-content .dsq-sharing-options:hover .dsq-panel {
display: block;
}
#dsq-content .dsq-post-footer .dsq-button {
float: right;
margin-left: 3px;
}
#dsq-content .dsq-post-footer .dsq-post-loading {
color: #666;
}
#dsq-content .dsq-post-footer .dsq-post-loading img {
height: 12px;
margin-bottom: -2px;
}
#dsq-content .dsq-placeholder {
color: #888;
}
/* New Reply */
#dsq-content .dsq-comment-footer-right button {
vertical-align: top;
}
#dsq-content .dsq-comment-footer-reply,
#dsq-content .dsq-comment-footer-reply-active {
position: relative;
}
#dsq-content .dsq-comment-footer-reply-active .dsq-comment-footer-reply-tab {
display: inline;
}
#dsq-content .dsq-comment-footer-reply-active .dsq-button-small {
display: none;
}
#dsq-content .dsq-comment-footer-reply-tab {
background: url("../../../images/themes/narcissus/header-grey.png");
background: rgba(0, 0, 0, 0.08);
_background: #E6E6E6;
_color: #333;
border: 0;
display: none;
padding: 2px 4px 9px;
#padding: 2px 4px 9px;
font-size: 11px;
-webkit-border-top-left-radius: 3px;
-webkit-border-top-right-radius: 3px;
-moz-border-radius: 3px 3px 0 0;
border-radius: 3px 3px 0 0;
color: inherit;
margin: 0 0 0 2px;
text-shadow: 0 1px 0 #fff;
font-family: Arial, Helvetica, Calibri, sans-serif;
border:1px solid rgba(0, 0, 0, 0.08);
border-bottom: none;
}
#dsq-content .dsq-comment-footer-reply-tab::-moz-focus-inner {
border: none; /* overrides extra padding in Firefox */
}
.dsq-comment-footer-reply-active span {
/*position: absolute;*/
bottom: -21px;
left: -10px;
width: 10px;
height: 10px;
background: url('../../../images/themes/narcissus/reply-corner.png') no-repeat bottom left;
background: none;
}
#dsq-content .dsq-append-post {
clear: both;
margin-top: -1em;
position: relative;
}
#dsq-content .dsq-append-post .dsq-post-area {
background: url("../../../images/themes/narcissus/header-grey.png");
_background: #E6E6E6;
_color: #333;
padding: 14px;
margin-bottom: 20px;
-webkit-border-radius: 5px;
-webkit-border-top-right-radius: 0;
-moz-border-radius: 5px 0 5px 5px;
border-radius: 5px 0 5px 5px 5px;
border:1px solid rgba(0, 0, 0, 0.08);
}
#dsq-content .dsq-append-post .dsq-post-area h3 {
margin-top: 0;
}
/* Comments */
#dsq-content #dsq-comments {
margin-bottom: 10px;
}
#dsq-content .dsq-comment-header {
background: url('../../../images/themes/narcissus/header-grey.png');
_background: #F7F7F7;
_color: #333;
line-height: normal;
height: auto;
border: 1px solid rgba(0,0,0,.08);
-moz-border-radius: 3px;
-webkit-border-radius: 3px;
}
#dsq-content .dsq-moderator .dsq-comment-header {
background-image: url('../../../images/themes/narcissus/header-blue.png');
_background: #EBF0F5;
}
#dsq-content .dsq-comment-header img.dsq-moderator-star {
display: none;
}
#dsq-content .dsq-moderator img.dsq-moderator-star {
display: inline;
margin-top: -3px;
vertical-align: middle;
}
#dsq-content .dsq-comment-outline .dsq-comment-header {
outline: 1px solid #FAD163;
}
#dsq-content .dsq-comment-header table {
width: 100%;
}
#dsq-content .dsq-comment-header td {
vertical-align: middle;
}
#dsq-content .dsq-header-avatar {
width: 1%;
padding: 0;
border: 0;
}
#dsq-content .dsq-header-avatar a.dsq-avatar {
display: block;
padding: 3px 0 3px 3px;
_background: #F7F7F7;
}
#dsq-content .dsq-comment .dsq-header-avatar a.dsq-avatar img {
padding: 3px;
background: #F5F5F5;
background: rgba(255,255,255, .40);
margin: 1px;
-webkit-border-radius: 3px;
-moz-border-radius: 3px;
-webkit-box-shadow: 0 1px 2px rgba(0,0,0,.12);
-moz-box-shadow: 0 1px 2px rgba(0,0,0,.12);
}
#dsq-content #dsq-reactions .dsq-comment .dsq-header-avatar a.dsq-avatar img {
width: 48px;
height: 48px;
}
#dsq-content .dsq-comment:hover .dsq-header-avatar a.dsq-avatar img {
padding: 3px;
background: #F5F5F5;
background: rgba(255,255,255, .70);
border: 1px solid #bbb;
-webkit-border-radius: 3px;
-moz-border-radius: 3px;
-webkit-box-shadow: 0 1px 3px rgba(0,0,0,.15);
-moz-box-shadow: 0 1px 3px rgba(0,0,0,.15);
margin: 0;
}
#dsq-content .dsq-header-avatar a.dsq-avatar img {
display: block;
}
#dsq-content .dsq-comment:hover .dsq-header-avatar:hover a.dsq-avatar {
-moz-border-radius: 3px;
-webkit-border-radius: 3px;
border-radius: 3px;
}
#dsq-content .dsq-header-avatar img {
padding: 0;
margin: 0;
/* Set in thread.js
width: {{ integration.avatar_size }}px;
height: {{ integration.avatar_size }}px; */
}
#dsq-content .dsq-comment-header cite {
font-weight: bold;
font-size: 110%;
}
#dsq-content .dsq-comment-header-meta {
padding: 0 8px;
}
#dsq-content .dsq-comment-header-meta-wrapper {
position: relative;
}
#dsq-content .dsq-comment-header-time {
padding: 0 8px;
}
#dsq-content a.dsq-reply-link {
color: inherit;
opacity: .6;
}
#dsq-content .dsq-comment-hide-thread,
#dsq-content .dsq-comment-show-thread {
display: none;
width: 14px;
height: 14px;
text-indent: -999em;
position: absolute;
right: 0px;
top: 0px;
}
#dsq-content .dsq-comment-show-thread {
display: block;
margin: 4px 7px 0 0;
background: url('../../../images/themes/narcissus/show-hide-icons.png') 0 0;
opacity: .65;
}
#dsq-content .dsq-comment-show-thread:hover {
opacity: 1;
}
#dsq-content .dsq-comment-hide-thread {
background: url('../../../images/themes/narcissus/show-hide-icons.png') 14px 0;
opacity: .65;
}
#dsq-content .dsq-comment-hide-thread:hover {
opacity: 1;
}
#dsq-content .dsq-collapsed-comment {
display: none;
}
#dsq-content .dsq-comment-is-collapsed .dsq-collapsed-comment {
display: block;
padding-bottom: 1em;
}
/** Reveal hide on hover */
#dsq-content .dsq-full-comment:hover .dsq-comment-hide-thread {
display: block;
}
#dsq-content .dsq-comment-hide-thread {
_display: block; /* IE6 reveals always */
}
/* Comment body: collapsed state, header widgets, message text. */
#dsq-content .dsq-comment-is-collapsed .dsq-full-comment {
	display: none;
}
#dsq-content .dsq-comment-header-time a {
	text-decoration: none;
	color: inherit;
}
#dsq-content .dsq-comment-header-time a:hover {
	border-bottom-width: 1px;
	border-bottom-style: dotted;
}
#dsq-content .dsq-comment-header .dsq-comment-header-likes {
	text-align: right;
	padding-right: 8px;
	font-size: .85em;
}
#dsq-content .dsq-editedtxt {
	margin-top: 1em;
	opacity: 0.6;
}
#dsq-content .dsq-comment-message {
	line-height: 1.42;
	margin: 1em 0;
}
#dsq-content .dsq-comment-text {
	display: inline;
}
#dsq-content .dsq-comment-truncate-expand,
#dsq-content .dsq-comment-truncate-collapse {
	display: none;
}
/* Comment footer: moderation links float left, vote buttons sit right. */
#dsq-content .dsq-comment-footer {
	_height: 1%; /* IE6 underscore hack: triggers hasLayout so the float is contained */
	overflow: hidden;
	margin: 1em 0;
}
#dsq-content .dsq-comment-footer-left {
	float: left;
	visibility: hidden; /* Moderate and Flag links hidden by default */
}
#dsq-content .dsq-comment:hover .dsq-comment-footer-left {
	visibility: visible; /* ...and revealed while hovering the comment */
}
#dsq-content .dsq-comment-footer-left a {
	margin-right: .5em;
}
#dsq-content .dsq-comment-footer-left .dsq-highlight {
	/* this is a hidden feature, users should overwrite this rule if they
	want to use it */
	display: none;
}
#dsq-content .dsq-comment-footer-left .dsq-highlight img {
	vertical-align: text-bottom;
}
#dsq-content .dsq-comment-footer-right {
	text-align: right;
}
#dsq-content .dsq-comment-footer-right button.dsq-button-small {
	margin-left: 2px;
}
#dsq-content .dsq-comment-footer-right .dsq-like {
	font-size: 11px;
}
#dsq-content .dsq-comment-footer-right .dsq-like img {
	margin-right: 3px;
}
/* "Liked by" popup */
.dsq-popup .dsq-liked-by-user {
	margin-bottom: 10px;
}
.dsq-popup .dsq-liked-by-user a {
	text-decoration: none;
}
.dsq-popup .dsq-liked-by-user img {
	width: 32px;
	height: 32px;
	margin-right: 8px;
	vertical-align: middle;
}
.dsq-popup-user-votes {
	width: 350px;
	max-height: 400px;
}
.dsq-popup-user-votes ul {
	margin-bottom: 15px;
	max-height: 400px;
	_height: 400px; /* IE6 underscore hack: stand-in for max-height */
	overflow-y: auto;
	overflow-x: hidden;
}
.dsq-popup-user-votes p {
	font-size: 11px;
	color: #666;
}
/* Likes */
#dsq-content .dsq-sub-header table {
	width: 100%;
/* background: url('../../../images/themes/narcissus/blue-20.png');*/
	_background: #EEF9FD;
	_color: #333;
}
#dsq-content .dsq-sub-header td {
	padding: .5em;
}
#dsq-content .dsq-sub-header-right {
	text-align: right;
}
#dsq-content .dsq-user-like {
	font-size: 11px;
	margin-left: 8px;
	padding-bottom: 3px;
}
#dsq-content .dsq-user-like a {
	color: inherit;
}
#dsq-content .dsq-user-like img {
	height: 16px;
	width: 16px;
	margin-right: 3px;
	vertical-align: middle;
}
#dsq-content .dsq-comment-buttons {
	white-space: nowrap;
	padding-left: 5px;
}
/* Reactions */
#dsq-content .dsq-h3-reactions {
	background: transparent url('../../../images/themes/narcissus/gray-50.png') repeat-x top;
	padding-top: 10px;
	margin-top: 20px;
}
#dsq-content .dsq-reaction-header table {
	width: 100%;
	background: url('../../../images/themes/narcissus/blue-20.png');
	_background: #EEF9FD;
	_color: #333;
}
#dsq-content .dsq-reaction-header td {
	padding: .5em;
}
#dsq-content .dsq-reaction-header-right {
	text-align: right;
}
#dsq-content .dsq-show-more-reactions {
	padding: 1em;
	text-align: center;
	margin: 1em 0;
}
#dsq-content .dsq-comment.dsq-reaction:hover .dsq-header-avatar a.dsq-avatar {
/*
	padding: 3px;
	background: none;
	border: 1px solid transparent;*/
}
#dsq-content .dsq-reaction .dsq-comment-cite {
	padding-left: 8px;
}
#dsq-content .dsq-reaction .dsq-service-name {
	text-transform: capitalize;
}
/* Pagination */
#dsq-content .dsq-pagination {
	text-align: center;
	padding: 1em;
	margin: 1em 0;
/* background: transparent url('../../../images/themes/narcissus/gray-50.png') repeat-x top;*/
}
#dsq-content .dsq-pagination table {
	background: transparent url('../../../images/themes/narcissus/gray-50.png') repeat-x bottom;
	width: 100%;
}
#dsq-content .dsq-pagination td {
	padding: .5em;
}
#dsq-content .dsq-pagination-links {
	width: 100%;
	text-align: center;
}
#dsq-content .dsq-pagination span,
#dsq-content .dsq-pagination a {
	margin: 0 .25em;
}
/* Trackbacks */
#dsq-content .dsq-item-trackback {
	padding: 1em 0;
	margin: 1em 0;
/* background: transparent url('../../../images/themes/narcissus/gray-50.png') repeat-x top;*/
}
/* Edit */
#dsq-content .dsq-edit {
	margin: 10px 0;
}
#dsq-content .dsq-textarea textarea.dsq-edit-textarea {
	height: 90px;
}
/* Extras */
#dsq-content .dsq-global-moderator-extras {
	font-size: 11px;
}
#dsq-content .dsq-global-moderator-extras strong:first-child {
	margin-left: 0;
}
#dsq-content .dsq-global-moderator-extras strong {
	margin-left: 10px;
}
/* Collapsed Thread */
#dsq-content #dsq-comments .dsq-collapsed-comment {
	position: relative;
}
#dsq-content #dsq-comments .dsq-collapsed-comment img {
	vertical-align: middle;
	margin: 0 12px 0 0;
	width: 24px;
	height: 24px;
	opacity: .55;
	filter: alpha(opacity=55); /* IE lt 8 */
	-ms-filter: "alpha(opacity=55)"; /* IE 8 */
	-khtml-opacity: .55; /* Safari 1.x */
	-moz-opacity: .55; /* FF lt 1.5, Netscape */
}
#dsq-content #dsq-comments .dsq-collapsed-comment .dsq-comment-show-thread {
	float: right;
}
#dsq-content #dsq-comments .dsq-collapsed-comment span {
	opacity: .55;
}
#dsq-content #dsq-comments .dsq-collapsed-comment a {
	opacity: .55;
}
#dsq-content #dsq-comments .dsq-collapsed-comment a:hover {
	opacity: 1;
	filter: alpha(opacity=100); /* IE lt 8 */
	-ms-filter: "alpha(opacity=100)"; /* IE 8 */
	-khtml-opacity: 1; /* Safari 1.x */
	-moz-opacity: 1; /* FF lt 1.5, Netscape */
}
/* Media embed */
#dsq-content .dsq-media-embed h4 {
	margin-top: 10px;
	background: transparent url('../../../images/themes/narcissus/gray-50.png') repeat-x scroll center top;
	text-transform: uppercase;
	font-weight: normal;
	font-size: 9px;
	font-family: Arial, sans-serif;
	letter-spacing: 1px;
	-moz-opacity: 0.6;
	opacity:.60;
	filter: alpha(opacity=60);
}
#dsq-content .dsq-media-embed a {
	margin-right: 5px;
}
#dsq-content .dsq-media-embed img {
	border-radius: 3px;
}
/* Community */
#dsq-community {
	/*border-bottom: 1px dotted #ccc !important;*/
	padding: 8px 0 !important;
	_height: 1%; /* IE6 underscore hack: triggers hasLayout */
	margin: 0 0 8px !important;
}
#dsq-community button {
	vertical-align: middle;
}
#dsq-community .dsq-button-last {
	margin-right: 10px;
}
#dsq-community .dsq-like-activity {
	white-space: nowrap;
	vertical-align: middle;
	line-height: 32px;
}
.dsq-like-thread span.dsq-icon {
	display: inline-block;
	width: 16px;
	height: 16px;
	background: url('../../../images/themes/narcissus/thumb-up.png') no-repeat;
	vertical-align: text-bottom;
	margin: 0 4px 0 0;
}
.dsq-dislike-thread span.dsq-icon {
	display: inline-block;
	width: 16px;
	height: 16px;
	background: url('../../../images/themes/narcissus/thumb-down.png') no-repeat;
	vertical-align: text-bottom;
}
/* Media Upload */
#dsq-content .dsq-attach-media {
	position: relative;
	float: left;
	height: 13px;
	width: 60px;
}
#dsq-content .dsq-attach-media .dsq-attach-media-container {
}
#dsq-content .dsq-attach-media .dsq-attach-media-container iframe {
	position: absolute;
	background: transparent url(../../../images/themes/narcissus/add-image.png) 0 0 no-repeat;
	height: 13px;
	width: 53px;
	margin: 4px 8px 0 0;
	vertical-align: middle;
	z-index: 1;
}
#dsq-content .dsq-attach-media .dsq-attach-media-border iframe {
	border-right: 1px dotted #ccc;
}
#dsq-content .dsq-attach-media .dsq-attach-media-container span {
	position: absolute;
	top: 1px;
	left: 16px;
	padding: 0;
	margin: 0;
	z-index: 0;
	vertical-align: middle;
}
#dsq-content .dsq-attach-media .dsq-attach-media-container iframe:hover {
	background: transparent url(../../../images/themes/narcissus/add-image.png) 0 -14px no-repeat; /* second sprite row = hover state */
}
#dsq-content .dsq-media-preview {
}
#dsq-content .dsq-media-preview .dsq-media-wrapper {
	position: relative;
	display: inline-block;
	height: 50px;
	width: 50px;
	margin: 10px;
}
#dsq-content .dsq-media-preview .dsq-media-wrapper .dsq-media-image {
	height: 50px;
	width: 50px;
	-moz-border-radius: 3px;
	-webkit-border-radius: 3px;
	border-radius: 3px;
	border: 0;
}
#dsq-content .dsq-media-preview .dsq-media-wrapper .dsq-media-image-close {
	position: absolute;
	top: -8px;
	right: -10px;
	height: 23px;
	width: 22px;
	background: url(../../../images/themes/houdini/icons-sprite.png) -73px 0 no-repeat;
}
| mit |
shahabhijeet/azure-sdk-for-net | src/AzureStack/Admin/ComputeAdmin/Compute.Admin.Tests/src/QuotaTests.cs | 11692 | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for
// license information.
//
using Microsoft.AzureStack.Management.Compute.Admin;
using Microsoft.AzureStack.Management.Compute.Admin.Models;
using System.Linq;
using System.Net;
using Xunit;
namespace Compute.Tests
{
public class QuotaTests : ComputeTestBase
{
// Helper
private Quota Create(int asc, int cl, int vssc, int vmc, int smds, int pmds) {
var newQuota = new Quota()
{
AvailabilitySetCount = asc,
CoresLimit = cl,
VmScaleSetCount = vssc,
VirtualMachineCount = vmc,
MaxAllocationStandardManagedDisksAndSnapshots =smds,
MaxAllocationPremiumManagedDisksAndSnapshots = pmds
};
return newQuota;
}
private void ValidateQuota(Quota quota) {
AssertValidResource(quota);
Assert.NotNull(quota);
Assert.NotNull(quota.AvailabilitySetCount);
Assert.NotNull(quota.CoresLimit);
Assert.NotNull(quota.VirtualMachineCount);
Assert.NotNull(quota.VmScaleSetCount);
Assert.NotNull(quota.MaxAllocationStandardManagedDisksAndSnapshots);
Assert.NotNull(quota.MaxAllocationPremiumManagedDisksAndSnapshots);
}
private void AssertSame(Quota expected, Quota given, bool resourceToo = true) {
if (resourceToo)
{
AssertSameResource(expected, given);
}
if (expected == null)
{
Assert.Null(given);
}
else
{
Assert.NotNull(given);
Assert.Equal(expected.AvailabilitySetCount, given.AvailabilitySetCount);
Assert.Equal(expected.CoresLimit, given.CoresLimit);
Assert.Equal(expected.VirtualMachineCount, given.VirtualMachineCount);
Assert.Equal(expected.VmScaleSetCount, given.VmScaleSetCount);
Assert.Equal(expected.MaxAllocationStandardManagedDisksAndSnapshots, given.MaxAllocationStandardManagedDisksAndSnapshots);
Assert.Equal(expected.MaxAllocationPremiumManagedDisksAndSnapshots, given.MaxAllocationPremiumManagedDisksAndSnapshots);
}
}
[Fact]
public void TestListQuotas() {
RunTest((client) => {
var quotas = client.Quotas.List("local");
Assert.NotNull(quotas);
quotas.ForEach(ValidateQuota);
});
}
[Fact]
public void TestGetQuota() {
RunTest((client) => {
var quota = client.Quotas.List("local").FirstOrDefault();
var result = client.Quotas.Get("local", quota.Name);
AssertSame(quota, result);
});
}
[Fact]
public void TestGetAllQuotas() {
RunTest((client) => {
var quotas = client.Quotas.List("local");
quotas.ForEach((quota) => {
var result = client.Quotas.Get("local", quota.Name);
AssertSame(quota, result);
});
});
}
private void ValidateAgainstData(Quota q, int[] d) {
Assert.Equal(q.AvailabilitySetCount, d[0]);
Assert.Equal(q.CoresLimit, d[1]);
Assert.Equal(q.VmScaleSetCount, d[2]);
Assert.Equal(q.VirtualMachineCount, d[3]);
Assert.Equal(q.MaxAllocationStandardManagedDisksAndSnapshots, d[4]);
Assert.Equal(q.MaxAllocationPremiumManagedDisksAndSnapshots, d[5]);
}
[Fact]
public void CreateUpdateDeleteQuota() {
RunTest((client) => {
var location = "local";
var name = "testQuotaCreateUpdateDelete";
var data = new int[]{1,1,1,1,1,1 };
var newQuota = Create(data[0], data[1], data[2], data[3], data[4], data[5]);
var quota = client.Quotas.CreateOrUpdate(location, name, newQuota);
ValidateAgainstData(quota, data);
AssertSame(newQuota, quota, false);
quota.VirtualMachineCount += 1;
quota = client.Quotas.CreateOrUpdate(location, name, quota);
data[3] += 1;
ValidateAgainstData(quota, data);
quota.AvailabilitySetCount += 1;
quota = client.Quotas.CreateOrUpdate(location, name, quota);
data[0] += 1;
ValidateAgainstData(quota, data);
quota.VmScaleSetCount += 1;
quota = client.Quotas.CreateOrUpdate(location, name, quota);
data[2] += 1;
ValidateAgainstData(quota, data);
quota.CoresLimit+= 1;
quota = client.Quotas.CreateOrUpdate(location, name, quota);
data[1] += 1;
ValidateAgainstData(quota, data);
quota.MaxAllocationStandardManagedDisksAndSnapshots+= 1;
quota = client.Quotas.CreateOrUpdate(location, name, quota);
data[4] += 1;
ValidateAgainstData(quota, data);
quota.MaxAllocationPremiumManagedDisksAndSnapshots += 1;
quota = client.Quotas.CreateOrUpdate(location, name, quota);
data[5] += 1;
ValidateAgainstData(quota, data);
client.Quotas.Delete(location, name);
});
}
[Fact]
public void TestCreateQuota() {
RunTest((client) => {
var location = "local";
var quotaNamePrefix = "testQuota";
var data = new System.Collections.Generic.List<int[]> {
new [] { 0, 0, 0, 0, 0, 0, 0 },
new [] { 1, 0, 0, 0, 0, 0, 1 },
new [] { 0, 1, 0, 0, 0, 0, 2 },
new [] { 0, 0, 1, 0, 0, 0, 3 },
new [] { 0, 0, 0, 1, 0, 0, 4 },
new [] { 0, 0, 0, 0, 1, 0, 5 },
new [] { 0, 0, 0, 0, 0, 1, 6 },
new [] { 100, 100, 100, 100 ,100, 100, 7 },
new [] { 1000, 1000, 1000, 1000, 1000, 1000, 8 }
};
data.ForEach((d) => {
var name = quotaNamePrefix + d[6];
var newQuota = Create(d[0], d[1], d[2], d[3], d[4], d[5]);
var quota = client.Quotas.CreateOrUpdate(location, name, newQuota);
ValidateAgainstData(quota, d);
var result = client.Quotas.Get(location, name);
AssertSame(quota, result, false);
});
data.ForEach((d) => {
var name = quotaNamePrefix + d[6];
var list = client.Quotas.List(location);
Assert.Equal(1, list.Count((q) => q.Name.Equals(name)));
});
data.ForEach((d) => {
var name = quotaNamePrefix + d[6];
client.Quotas.Delete(location, name);
ValidateExpectedReturnCode(
() => client.Quotas.Get(location, name),
HttpStatusCode.NotFound
);
});
});
}
#region Test With Invalid data
[Fact]
public void TestCreateInvalidQuota() {
RunTest((client) => {
var name = "myQuota";
Assert.ThrowsAny<System.Exception>(() => client.Quotas.CreateOrUpdate("local", name, Create(-1, 1, 1, 1, 1, 1)));
Assert.ThrowsAny<System.Exception>(() => client.Quotas.CreateOrUpdate("local", name, Create(1, -1, 1, 1, 1, 1)));
Assert.ThrowsAny<System.Exception>(() => client.Quotas.CreateOrUpdate("local", name, Create(1, 1, -1, 1, 1, 1)));
Assert.ThrowsAny<System.Exception>(() => client.Quotas.CreateOrUpdate("local", name, Create(1, 1, 1, -1, 1, 1)));
Assert.ThrowsAny<System.Exception>(() => client.Quotas.CreateOrUpdate("local", name, Create(1, 1, 1, 1, -1, 1)));
Assert.ThrowsAny<System.Exception>(() => client.Quotas.CreateOrUpdate("local", name, Create(1, 1, 1, 1, 1, -1)));
Assert.ThrowsAny<System.Exception>(() => client.Quotas.CreateOrUpdate("local", name, Create(-1, 0, 0, 0, 0, 0)));
Assert.ThrowsAny<System.Exception>(() => client.Quotas.CreateOrUpdate("local", name, Create(0, -1, 0, 0, 0, 0)));
Assert.ThrowsAny<System.Exception>(() => client.Quotas.CreateOrUpdate("local", name, Create(0, 0, -1, 0, 0, 0)));
Assert.ThrowsAny<System.Exception>(() => client.Quotas.CreateOrUpdate("local", name, Create(0, 0, 0, -1, 0, 0)));
Assert.ThrowsAny<System.Exception>(() => client.Quotas.CreateOrUpdate("local", name, Create(0, 0, 0, 0, -1, 0)));
Assert.ThrowsAny<System.Exception>(() => client.Quotas.CreateOrUpdate("local", name, Create(0, 0, 0, 0, 0, -1)));
Assert.ThrowsAny<System.Exception>(() => client.Quotas.CreateOrUpdate("local", name, Create(-1, -1, -1, -1, -1, -1)));
});
}
// Invalid Locations
[Fact(Skip = "CRP does not handle invalid locations correctly.")]
public void TestListInvalidLocation() {
RunTest((client) => {
var list = client.Quotas.List("thisisnotarealplace");
Assert.Empty(list);
});
}
[Fact]
public void TestDeleteNonExistingQuota() {
RunTest((client) => {
ValidateExpectedReturnCode(
() => client.Quotas.Delete("local", "thisdoesnotexistandifitdoesoops"),
HttpStatusCode.NotFound
);
});
}
[Fact(Skip = "CRP does not handle invalid locations correctly.")]
public void TestCreateQuotaOnInvalidLocation() {
RunTest((client) => {
var location = "thislocationdoesnotexist";
var quotaNamePrefix = "testQuota";
var data = new System.Collections.Generic.List<int[]> {
new [] { 0, 0, 0, 0, 0, 0, 0 },
new [] { 1, 0, 0, 0, 0, 0, 1 },
new [] { 0, 1, 0, 0, 0, 0, 2 },
new [] { 0, 0, 1, 0, 0, 0, 3 },
new [] { 0, 0, 0, 1, 0, 0, 4 },
new [] { 0, 0, 0, 0, 1, 0, 5 },
new [] { 0, 0, 0, 0, 0, 1, 6 },
new [] { 100, 100, 100, 100 ,100, 100, 7 },
new [] { 1000, 1000, 1000, 1000, 1000, 1000, 8 }
};
data.ForEach((d) => {
var name = quotaNamePrefix + d[6];
var newQuota = Create(d[0], d[1], d[2], d[3], d[4], d[5]);
var quota = client.Quotas.CreateOrUpdate(location, name, newQuota);
var result = client.Quotas.Get(location, name);
Assert.Null(quota);
Assert.Null(result);
});
data.ForEach((d) => {
var name = quotaNamePrefix + d[6];
var list = client.Quotas.List(location);
Assert.Equal(0, list.Count((q) => q.Name.Equals(name)));
});
});
}
#endregion
}
}
| mit |
owen-kellie-smith/mediawiki | wiki/extensions/SemanticMediaWiki/tests/phpunit/includes/DataValues/ImportValueTest.php | 805 | <?php
namespace SMW\Tests\DataValues;
use SMW\DataValues\ImportValue;
/**
* @covers \SMW\DataValues\ImportValue
*
* @group semantic-mediawiki
*
* @license GNU GPL v2+
* @since 2.2
*
* @author mwjames
*/
class ImportValueTest extends \PHPUnit_Framework_TestCase {

	public function testCanConstruct() {

		// Canonical class name.
		$dataValue = new ImportValue( '__imp' );
		$this->assertInstanceOf( '\SMW\DataValues\ImportValue', $dataValue );

		// FIXME Legacy naming remove in 3.x
		$legacyValue = new ImportValue( '__imp' );
		$this->assertInstanceOf( '\SMWImportValue', $legacyValue );
	}

	public function testErrorForInvalidUserValue() {

		$dataValue = new ImportValue( '__imp' );
		$dataValue->setUserValue( 'FooBar' );

		// The raw wiki text is kept even though the value is invalid ...
		$this->assertEquals( 'FooBar', $dataValue->getWikiValue() );

		// ... and the failure is surfaced through the error list.
		$this->assertNotEmpty( $dataValue->getErrors() );
	}

}
| mit |
ndykman/CodeContracts | Microsoft.Research/Contracts/MsCorlib/Sources/System.Security.Policy.Hash.cs | 3951 | // CodeContracts
//
// Copyright (c) Microsoft Corporation
//
// All rights reserved.
//
// MIT License
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
// File System.Security.Policy.Hash.cs
// Automatically generated contract file.
using System.Collections.Generic;
using System.IO;
using System.Text;
using System.Diagnostics.Contracts;
using System;
// Disable the "this variable is not used" warning as every field would imply it.
#pragma warning disable 0414
// Disable the "this variable is never assigned to".
#pragma warning disable 0067
// Disable the "this event is never assigned to".
#pragma warning disable 0649
// Disable the "this variable is never used".
#pragma warning disable 0169
// Disable the "new keyword not required" warning.
#pragma warning disable 0109
// Disable the "extern without DllImport" warning.
#pragma warning disable 0626
// Disable the "could hide other member" warning, can happen on certain properties.
#pragma warning disable 0108
namespace System.Security.Policy
{
  // Contract reference class for System.Security.Policy.Hash (auto-generated).
  // Method bodies are placeholders ("return default(...)"); only the Code
  // Contracts annotations (Contract.Ensures) carry meaning for the checker.
  sealed public partial class Hash : EvidenceBase, System.Runtime.Serialization.ISerializable
  {
    #region Methods and constructors
    public override EvidenceBase Clone()
    {
      return default(EvidenceBase);
    }
    // Factory contracts: every Create* method guarantees a non-null result.
    public static System.Security.Policy.Hash CreateMD5(byte[] md5)
    {
      Contract.Ensures(Contract.Result<System.Security.Policy.Hash>() != null);
      return default(System.Security.Policy.Hash);
    }
    public static System.Security.Policy.Hash CreateSHA1(byte[] sha1)
    {
      Contract.Ensures(Contract.Result<System.Security.Policy.Hash>() != null);
      return default(System.Security.Policy.Hash);
    }
    public static System.Security.Policy.Hash CreateSHA256(byte[] sha256)
    {
      Contract.Ensures(Contract.Result<System.Security.Policy.Hash>() != null);
      return default(System.Security.Policy.Hash);
    }
    public byte[] GenerateHash(System.Security.Cryptography.HashAlgorithm hashAlg)
    {
      Contract.Ensures(Contract.Result<byte[]>() != null);
      return default(byte[]);
    }
    public void GetObjectData(System.Runtime.Serialization.SerializationInfo info, System.Runtime.Serialization.StreamingContext context)
    {
    }
    // Postcondition: a Hash can only be built from a non-dynamic assembly.
    public Hash(System.Reflection.Assembly assembly)
    {
      Contract.Ensures(!assembly.IsDynamic);
    }
    public override string ToString()
    {
      return default(string);
    }
    #endregion
    #region Properties and indexers
    // Each hash property is contractually non-null.
    public byte[] MD5
    {
      get
      {
        Contract.Ensures(Contract.Result<byte[]>() != null);
        return default(byte[]);
      }
    }
    public byte[] SHA1
    {
      get
      {
        Contract.Ensures(Contract.Result<byte[]>() != null);
        return default(byte[]);
      }
    }
    public byte[] SHA256
    {
      get
      {
        Contract.Ensures(Contract.Result<byte[]>() != null);
        return default(byte[]);
      }
    }
    #endregion
  }
}
| mit |
ndykman/CodeContracts | Microsoft.Research/Contracts/System/Sources/System.ComponentModel.SingleConverter.cs | 2183 | // CodeContracts
//
// Copyright (c) Microsoft Corporation
//
// All rights reserved.
//
// MIT License
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
// File System.ComponentModel.SingleConverter.cs
// Automatically generated contract file.
using System.Collections.Generic;
using System.IO;
using System.Text;
using System.Diagnostics.Contracts;
using System;
// Disable the "this variable is not used" warning as every field would imply it.
#pragma warning disable 0414
// Disable the "this variable is never assigned to".
#pragma warning disable 0067
// Disable the "this event is never assigned to".
#pragma warning disable 0649
// Disable the "this variable is never used".
#pragma warning disable 0169
// Disable the "new keyword not required" warning.
#pragma warning disable 0109
// Disable the "extern without DllImport" warning.
#pragma warning disable 0626
// Disable the "could hide other member" warning, can happen on certain properties.
#pragma warning disable 0108
namespace System.ComponentModel
{
  // Contract reference class for System.ComponentModel.SingleConverter
  // (auto-generated). No contracts beyond the base class; the constructor
  // body is an empty placeholder.
  public partial class SingleConverter : BaseNumberConverter
  {
    #region Methods and constructors
    public SingleConverter()
    {
    }
    #endregion
  }
}
| mit |
casivaagustin/drupalcon-mentoring | src/core/modules/field/src/Entity/FieldConfig.php | 11622 | <?php
/**
* @file
* Contains \Drupal\field\Entity\FieldConfig.
*/
namespace Drupal\field\Entity;
use Drupal\Component\Utility\SafeMarkup;
use Drupal\Core\Entity\EntityStorageInterface;
use Drupal\Core\Field\FieldConfigBase;
use Drupal\Core\Field\FieldException;
use Drupal\field\FieldStorageConfigInterface;
use Drupal\field\FieldConfigInterface;
/**
* Defines the Field entity.
*
* @ConfigEntityType(
* id = "field_config",
* label = @Translation("Field"),
* handlers = {
* "access" = "Drupal\field\FieldConfigAccessControlHandler",
* "storage" = "Drupal\field\FieldConfigStorage"
* },
* config_prefix = "field",
* entity_keys = {
* "id" = "id",
* "label" = "label"
* }
* )
*/
class FieldConfig extends FieldConfigBase implements FieldConfigInterface {
  /**
   * Flag indicating whether the field is deleted.
   *
   * The delete() method marks the field as "deleted" and removes the
   * corresponding entry from the config storage, but keeps its definition in
   * the state storage while field data is purged by a separate
   * garbage-collection process.
   *
   * Deleted fields stay out of the regular entity lifecycle (notably, their
   * values are not populated in loaded entities, and are not saved back).
   *
   * @var bool
   */
  public $deleted = FALSE;
  /**
   * The associated FieldStorageConfig entity.
   *
   * Resolved lazily by getFieldStorageDefinition() and cached here.
   *
   * @var \Drupal\field\Entity\FieldStorageConfig
   */
  protected $fieldStorage;
  /**
   * Constructs a FieldConfig object.
   *
   * In most cases, Field entities are created via
   * entity_create('field_config', $values), where $values is the same
   * parameter as in this constructor.
   *
   * @param array $values
   *   An array of field properties, keyed by property name. The
   *   storage associated to the field can be specified either with:
   *   - field_storage: the FieldStorageConfigInterface object,
   *   or by referring to an existing field storage in the current configuration
   *   with:
   *   - field_name: The field name.
   *   - entity_type: The entity type.
   *   Additionally, a 'bundle' property is required to indicate the entity
   *   bundle to which the field is attached to. Other array elements will be
   *   used to set the corresponding properties on the class; see the class
   *   property documentation for details.
   *
   * @throws \Drupal\Core\Field\FieldException
   *   When the storage is non-configurable, or field_name/entity_type/bundle
   *   are missing.
   *
   * @see entity_create()
   */
  public function __construct(array $values, $entity_type = 'field_config') {
    // Allow either an injected FieldStorageConfig object, or a field_name and
    // entity_type.
    if (isset($values['field_storage'])) {
      if (!$values['field_storage'] instanceof FieldStorageConfigInterface) {
        throw new FieldException('Attempt to create a configurable field for a non-configurable field storage.');
      }
      $field_storage = $values['field_storage'];
      $values['field_name'] = $field_storage->getName();
      $values['entity_type'] = $field_storage->getTargetEntityTypeId();
      // The internal property is fieldStorage, not field_storage.
      unset($values['field_storage']);
      $values['fieldStorage'] = $field_storage;
    }
    else {
      if (empty($values['field_name'])) {
        throw new FieldException('Attempt to create a field without a field_name.');
      }
      if (empty($values['entity_type'])) {
        throw new FieldException(SafeMarkup::format('Attempt to create a field @field_name without an entity_type.', array('@field_name' => $values['field_name'])));
      }
    }
    // 'bundle' is required in either case.
    if (empty($values['bundle'])) {
      throw new FieldException(SafeMarkup::format('Attempt to create a field @field_name without a bundle.', array('@field_name' => $values['field_name'])));
    }
    parent::__construct($values, $entity_type);
  }
  /**
   * {@inheritdoc}
   */
  public function postCreate(EntityStorageInterface $storage) {
    parent::postCreate($storage);
    // Validate that we have a valid storage for this field. This throws an
    // exception if the storage is invalid.
    $this->getFieldStorageDefinition();
    // 'Label' defaults to the field name (mostly useful for fields created in
    // tests).
    if (empty($this->label)) {
      $this->label = $this->getName();
    }
  }
  /**
   * Overrides \Drupal\Core\Entity\Entity::preSave().
   *
   * @throws \Drupal\Core\Field\FieldException
   *   If the field definition is invalid.
   * @throws \Drupal\Core\Entity\EntityStorageException
   *   In case of failures at the configuration storage level.
   */
  public function preSave(EntityStorageInterface $storage) {
    $entity_manager = \Drupal::entityManager();
    $field_type_manager = \Drupal::service('plugin.manager.field.field_type');
    $storage_definition = $this->getFieldStorageDefinition();
    // Filter out unknown settings and make sure all settings are present, so
    // that a complete field definition is passed to the various hooks and
    // written to config.
    $default_settings = $field_type_manager->getDefaultFieldSettings($storage_definition->getType());
    $this->settings = array_intersect_key($this->settings, $default_settings) + $default_settings;
    if ($this->isNew()) {
      // Notify the entity storage.
      $entity_manager->getStorage($this->entity_type)->onFieldDefinitionCreate($this);
    }
    else {
      // Some updates are always disallowed.
      if ($this->entity_type != $this->original->entity_type) {
        throw new FieldException("Cannot change an existing field's entity_type.");
      }
      // NOTE(review): $bundleRenameAllowed is not declared on this class;
      // it appears to be set externally to permit deliberate bundle renames
      // — confirm against callers.
      if ($this->bundle != $this->original->bundle && empty($this->bundleRenameAllowed)) {
        throw new FieldException("Cannot change an existing field's bundle.");
      }
      if ($storage_definition->uuid() != $this->original->getFieldStorageDefinition()->uuid()) {
        throw new FieldException("Cannot change an existing field's storage.");
      }
      // Notify the entity storage.
      $entity_manager->getStorage($this->entity_type)->onFieldDefinitionUpdate($this, $this->original);
    }
    parent::preSave($storage);
  }
  /**
   * {@inheritdoc}
   */
  public function calculateDependencies() {
    parent::calculateDependencies();
    // Mark the field_storage_config as a dependency.
    $this->addDependency('config', $this->getFieldStorageDefinition()->getConfigDependencyName());
    return $this->dependencies;
  }
  /**
   * {@inheritdoc}
   */
  public static function preDelete(EntityStorageInterface $storage, array $fields) {
    $state = \Drupal::state();
    parent::preDelete($storage, $fields);
    // Keep the field definitions in the state storage so we can use them
    // later during field_purge_batch().
    $deleted_fields = $state->get('field.field.deleted') ?: array();
    foreach ($fields as $field) {
      if (!$field->deleted) {
        $config = $field->toArray();
        $config['deleted'] = TRUE;
        $config['field_storage_uuid'] = $field->getFieldStorageDefinition()->uuid();
        $deleted_fields[$field->uuid()] = $config;
      }
    }
    $state->set('field.field.deleted', $deleted_fields);
  }
  /**
   * {@inheritdoc}
   */
  public static function postDelete(EntityStorageInterface $storage, array $fields) {
    // Clear the cache upfront, to refresh the results of getBundles().
    \Drupal::entityManager()->clearCachedFieldDefinitions();
    // Notify the entity storage.
    foreach ($fields as $field) {
      if (!$field->deleted) {
        \Drupal::entityManager()->getStorage($field->entity_type)->onFieldDefinitionDelete($field);
      }
    }
    // If this is part of a configuration synchronization then the following
    // configuration updates are not necessary.
    $entity = reset($fields);
    if ($entity->isSyncing()) {
      return;
    }
    // Delete the associated field storages if they are not used anymore and are
    // not persistent.
    $storages_to_delete = array();
    foreach ($fields as $field) {
      $storage_definition = $field->getFieldStorageDefinition();
      if (!$field->deleted && !$field->isUninstalling() && $storage_definition->isDeletable()) {
        // Key by field UUID to avoid deleting the same storage twice.
        $storages_to_delete[$storage_definition->uuid()] = $storage_definition;
      }
    }
    if ($storages_to_delete) {
      \Drupal::entityManager()->getStorage('field_storage_config')->delete($storages_to_delete);
    }
  }
  /**
   * {@inheritdoc}
   */
  protected function linkTemplates() {
    $link_templates = parent::linkTemplates();
    // Field UI provides the edit/delete routes; only expose them when the
    // module is enabled.
    if (\Drupal::moduleHandler()->moduleExists('field_ui')) {
      $link_templates["{$this->entity_type}-field-edit-form"] = 'entity.field_config.' . $this->entity_type . '_field_edit_form';
      $link_templates["{$this->entity_type}-storage-edit-form"] = 'entity.field_config.' . $this->entity_type . '_storage_edit_form';
      $link_templates["{$this->entity_type}-field-delete-form"] = 'entity.field_config.' . $this->entity_type . '_field_delete_form';
      if (isset($link_templates['config-translation-overview'])) {
        $link_templates["config-translation-overview.{$this->entity_type}"] = "entity.field_config.config_translation_overview.{$this->entity_type}";
      }
    }
    return $link_templates;
  }
  /**
   * {@inheritdoc}
   */
  protected function urlRouteParameters($rel) {
    $parameters = parent::urlRouteParameters($rel);
    $entity_type = \Drupal::entityManager()->getDefinition($this->entity_type);
    $parameters[$entity_type->getBundleEntityType()] = $this->bundle;
    return $parameters;
  }
  /**
   * {@inheritdoc}
   */
  public function isDeleted() {
    return $this->deleted;
  }
  /**
   * {@inheritdoc}
   */
  public function getFieldStorageDefinition() {
    // Resolve lazily on first access and cache for subsequent calls.
    if (!$this->fieldStorage) {
      $fields = $this->entityManager()->getFieldStorageDefinitions($this->entity_type);
      if (!isset($fields[$this->field_name])) {
        throw new FieldException(SafeMarkup::format('Attempt to create a field @field_name that does not exist on entity type @entity_type.', array('@field_name' => $this->field_name, '@entity_type' => $this->entity_type))); }
      if (!$fields[$this->field_name] instanceof FieldStorageConfigInterface) {
        throw new FieldException(SafeMarkup::format('Attempt to create a configurable field of non-configurable field storage @field_name.', array('@field_name' => $this->field_name, '@entity_type' => $this->entity_type)));
      }
      $this->fieldStorage = $fields[$this->field_name];
    }
    return $this->fieldStorage;
  }
  /**
   * {@inheritdoc}
   */
  public function isDisplayConfigurable($context) {
    return TRUE;
  }
  /**
   * {@inheritdoc}
   */
  public function getDisplayOptions($display_context) {
    // Hide configurable fields by default.
    return array('type' => 'hidden');
  }
  /**
   * {@inheritdoc}
   */
  public function isReadOnly() {
    return FALSE;
  }
  /**
   * {@inheritdoc}
   */
  public function isComputed() {
    return FALSE;
  }
  /**
   * Loads a field config entity based on the entity type and field name.
   *
   * @param string $entity_type_id
   *   ID of the entity type.
   * @param string $bundle
   *   Bundle name.
   * @param string $field_name
   *   Name of the field.
   *
   * @return static
   *   The field config entity if one exists for the provided field
   *   name, otherwise NULL.
   */
  public static function loadByName($entity_type_id, $bundle, $field_name) {
    return \Drupal::entityManager()->getStorage('field_config')->load($entity_type_id . '.' . $bundle . '.' . $field_name);
  }
}
| mit |
1yvT0s/illacceptanything | data/france.code-civil/Livre Ier/Titre IV/Article 132.md | 115 | Article 132
----
Le mariage de l'absent reste dissous, même si le jugement déclaratif d'absence a
été annulé.
| mit |
jmwenda/feedbunch | vendor/assets/bower_components/bootlint/test/fixtures/grid/spacer-col/last-child.html | 1087 | <!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8" />
<meta http-equiv="X-UA-Compatible" content="IE=edge" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
<title>Test</title>
<!--[if lt IE 9]>
<script src="https://oss.maxcdn.com/html5shiv/3.7.2/html5shiv.min.js"></script>
<script src="https://oss.maxcdn.com/respond/1.4.2/respond.min.js"></script>
<![endif]-->
<script src="../../../lib/jquery.min.js"></script>
<link rel="stylesheet" href="../../../lib/qunit.css">
<script src="../../../lib/qunit.js"></script>
<script src="../../../../dist/browser/bootlint.js"></script>
<script src="../../generic-qunit.js"></script>
</head>
<body>
<div class="container">
<div class="row">
<div class="col-xs-6">Some content</div>
<div class="spacer col-xs-6"></div>
</div>
</div>
<div id="qunit"></div>
<ol id="bootlint"></ol>
</body>
</html>
| mit |
benishouga/DefinitelyTyped | types/angular-file-upload/index.d.ts | 8307 | // Type definitions for angular-file-upload 2.5
// Project: https://github.com/nervgh/angular-file-upload
// Definitions by: Cyril Gandon <https://github.com/cyrilgandon>
// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped
// TypeScript Version: 2.2
import * as angular from 'angular';
export interface FileUploaderFactory {
new(options?: Partial<FileUploaderOptions>): FileUploader;
}
export interface FileUploaderOptions {
/**
* Path on the server to upload files
* @default /
*/
url: string;
/**
* Name of the field which will contain the file, default is file
* @default file
*/
alias: string;
/**
* Headers to be sent along with the files. HTML5 browsers only.
* @default {}
*/
headers: Headers;
/**
* Items to be uploaded
* @default []
*/
queue: FileItem[];
/**
* Automatically upload files after adding them to the queue
* @default false
*/
autoUpload: boolean;
/**
* Remove files from the queue after uploading
* @default false
*/
removeAfterUpload: boolean;
/**
* It's a request method. HTML5 browsers only.
* @default POST
*/
method: string;
/**
* Filters to be applied to the files before adding them to the queue. If the filter returns true the file will be added to the queue
* @default [folderFilter, queueLimitFilter]
*/
filters: Filter[];
/**
* Data to be sent along with the files
* @default []
*/
formData: FormData[];
/**
* Maximum count of files.
* @default Number.MAX_VALUE
*/
queueLimit: number;
/**
* enable CORS. HTML5 browsers only.
* @default false
*/
withCredentials: boolean;
/**
* Disable multipart.
* @default false
*/
disableMultipart: boolean;
}
export interface FileUploader extends FileUploaderOptions {
/**
* Upload queue progress percentage. Read only.
*/
progress: number;
/**
* true if uploader is html5-uploader. Read only.
*/
isHTML5: boolean;
/**
* true if an upload is in progress. Read only.
*/
isUploading: boolean;
// **Methods**
/**
* Add items to the queue
*/
addToQueue(files: File | HTMLInputElement | object | FileList | object[], options: object, filters: Filter[] | string): void;
/**
* Remove an item from the queue, where value is {FileItem} or index of item.
*/
removeFromQueue(value: FileItem | number): void;
/**
* Removes all elements from the queue.
*/
clearQueue(): void;
/**
* Uploads an item, where value is {FileItem} or index of item.
*/
uploadItem(value: FileItem | number): void;
/**
* Cancels uploading of item, where value is {FileItem} or index of item.
*/
cancelItem(value: FileItem | number): void;
/**
* Upload all pending items on the queue.
*/
uploadAll(): void;
/**
* Cancels all current uploads.
*/
cancelAll(): void;
/**
* Destroys a uploader.
*/
destroy(): void;
/**
* Returns true if value is {File}.
*/
isFile(value: any): boolean;
/**
* Returns true if value is {FileLikeObject}.
*/
isFileLikeObject(value: any): boolean;
/**
* Returns the index of the {FileItem} queue element.
*/
getIndexOfItem(fileItem: FileItem): number;
/**
* Return items are ready to upload.
*/
getReadyItems(): FileItem[];
/**
* Return an array of all pending items on the queue
*/
getNotUploadedItems(): FileItem[];
// **Callbacks**
/**
* Fires after adding all the dragged or selected files to the queue.
*/
onAfterAddingAll(addedItems: FileItem[]): void;
/**
* When adding a file failed
*/
onWhenAddingFileFailed(item: FileItem, filter: Filter, options: object): void;
/**
* Fires after adding a single file to the queue.
*/
onAfterAddingFile(item: FileItem): void;
/**
* Fires before uploading an item.
*/
onBeforeUploadItem(item: FileItem): void;
/**
* On file upload progress.
*/
onProgressItem(item: FileItem, progress: number): void;
/**
* On file successfully uploaded
*/
onSuccessItem(item: FileItem, response: Response, status: number, headers: Headers): void;
/**
* On upload error
*/
onErrorItem(item: FileItem, response: Response, status: number, headers: Headers): void;
/**
* On cancel uploading
*/
onCancelItem(item: FileItem, response: Response, status: number, headers: Headers): void;
/**
* On file upload complete (independently of the sucess of the operation)
*/
onCompleteItem(item: FileItem, response: Response, status: number, headers: Headers): void;
/**
* On upload queue progress
*/
onProgressAll(progress: number): void;
/**
* On all loaded when uploading an entire queue, or on file loaded when uploading a single independent file
*/
onCompleteAll(): void;
}
export interface FileLikeObject {
/**
* Equals File.lastModifiedDate
*/
lastModifiedDate: any;
/**
* Equals File.name
*/
name: string;
/**
* Equals Blob.size, in octet
*/
size: number;
/**
* Equals Blob.type, in octet
*/
type: string;
}
export interface FileItem {
// **Properties**
file: FileLikeObject;
/**
* Path on the server in which this file will be uploaded
*/
url: string;
/**
* Name of the field which will contain the file, default is file
*/
alias: string;
/**
* Headers to be sent along with this file. HTML5 browsers only.
*/
headers: Headers;
/**
* Data to be sent along with this file
*/
formData: FormData[];
/**
* It's a request method. By default POST. HTML5 browsers only.
*/
method: string;
/**
* enable CORS. HTML5 browsers only.
*/
withCredentials: boolean;
/**
* Remove this file from the queue after uploading
*/
removeAfterUpload: boolean;
/**
* A sequence number upload. Read only.
*/
index: number;
/**
* File upload progress percentage. Read only.
*/
progress: number;
/**
* File is ready to upload. Read only.
*/
isReady: boolean;
/**
* true if the file is being uploaded. Read only.
*/
isUploading: boolean;
/**
* true if the file was uploaded. Read only.
*/
isUploaded: boolean;
/**
* true if the file was uploaded successfully. Read only.
*/
isSuccess: boolean;
/**
* true if uploading was canceled. Read only.
*/
isCancel: boolean;
/**
* true if occurred error while file uploading. Read only.
*/
isError: boolean;
/**
* Reference to the parent Uploader object for this file. Read only.
*/
uploader: FileUploader;
// **Methods**
/**
* Cancels uploading of this file
*/
cancel(): void;
/**
* Remove this file from the queue
*/
remove(): void;
/**
* Upload this file
*/
upload(): void;
// **Callbacks**
/**
* Fires before uploading an item.
*/
onBeforeUpload(): void;
/**
* On file upload progress.
*/
onProgress(progress: number): void;
/**
* On file successfully uploaded
*/
onSuccess(response: Response, status: number, headers: Headers): void;
/**
* On upload error
*/
onError(response: Response, status: number, headers: Headers): void;
/**
* On cancel uploading
*/
  onCancel(response: Response, status: number, headers: Headers): void;
  /**
   * On file upload complete (independently of the success of the operation)
   */
onComplete(response: Response, status: number, headers: Headers): void;
}
export type SyncFilter = (item: File | FileLikeObject, options?: object) => boolean;
export type AsyncFilter = (item: File | FileLikeObject, options: object | undefined, deferred: angular.IDeferred<any>) => void;
export interface Filter {
name: string;
fn: SyncFilter | AsyncFilter;
}
| mit |
loic425/Sylius | src/Sylius/Component/Product/Resolver/AvailableProductOptionValuesResolver.php | 1552 | <?php
/*
* This file is part of the Sylius package.
*
* (c) Paweł Jędrzejewski
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
declare(strict_types=1);
namespace Sylius\Component\Product\Resolver;
use Doctrine\Common\Collections\Collection;
use Sylius\Component\Product\Model\ProductInterface;
use Sylius\Component\Product\Model\ProductOptionInterface;
use Sylius\Component\Product\Model\ProductOptionValueInterface;
final class AvailableProductOptionValuesResolver implements AvailableProductOptionValuesResolverInterface
{
    /**
     * Returns the option values of $productOption that are used by at least
     * one enabled variant of $product.
     *
     * @throws \InvalidArgumentException when the option does not belong to the product.
     */
    public function resolve(ProductInterface $product, ProductOptionInterface $productOption): Collection
    {
        if (!$product->hasOption($productOption)) {
            // Bug fix: the message lists the option code first and the product
            // code second; the sprintf arguments were previously swapped.
            throw new \InvalidArgumentException(
                sprintf(
                    'Cannot resolve available product option values. Option "%s" does not belong to product "%s".',
                    $productOption->getCode(),
                    $product->getCode()
                )
            );
        }

        // Keep only option values that appear on at least one enabled variant.
        return $productOption->getValues()->filter(
            static function (ProductOptionValueInterface $productOptionValue) use ($product): bool {
                foreach ($product->getEnabledVariants() as $productVariant) {
                    if ($productVariant->hasOptionValue($productOptionValue)) {
                        return true;
                    }
                }

                return false;
            }
        );
    }
}
| mit |
cez81/gitea | vendor/github.com/go-git/go-billy/v5/helper/polyfill/polyfill.go | 2297 | package polyfill
import (
"os"
"path/filepath"
"github.com/go-git/go-billy/v5"
)
// Polyfill is a helper that implements all missing method from billy.Filesystem.
type Polyfill struct {
billy.Basic
c capabilities
}
type capabilities struct{ tempfile, dir, symlink, chroot bool }
// New creates a new filesystem wrapping up 'fs' the intercepts all the calls
// made and errors if fs doesn't implement any of the billy interfaces.
func New(fs billy.Basic) billy.Filesystem {
	// If the wrapped value already implements the full interface, use it as-is.
	if original, ok := fs.(billy.Filesystem); ok {
		return original
	}

	h := &Polyfill{Basic: fs}

	// Probe once, up front, which optional billy interfaces the wrapped
	// filesystem supports; each method below consults these cached flags.
	_, h.c.tempfile = h.Basic.(billy.TempFile)
	_, h.c.dir = h.Basic.(billy.Dir)
	_, h.c.symlink = h.Basic.(billy.Symlink)
	_, h.c.chroot = h.Basic.(billy.Chroot)
	return h
}
func (h *Polyfill) TempFile(dir, prefix string) (billy.File, error) {
if !h.c.tempfile {
return nil, billy.ErrNotSupported
}
return h.Basic.(billy.TempFile).TempFile(dir, prefix)
}
func (h *Polyfill) ReadDir(path string) ([]os.FileInfo, error) {
if !h.c.dir {
return nil, billy.ErrNotSupported
}
return h.Basic.(billy.Dir).ReadDir(path)
}
func (h *Polyfill) MkdirAll(filename string, perm os.FileMode) error {
if !h.c.dir {
return billy.ErrNotSupported
}
return h.Basic.(billy.Dir).MkdirAll(filename, perm)
}
func (h *Polyfill) Symlink(target, link string) error {
if !h.c.symlink {
return billy.ErrNotSupported
}
return h.Basic.(billy.Symlink).Symlink(target, link)
}
func (h *Polyfill) Readlink(link string) (string, error) {
if !h.c.symlink {
return "", billy.ErrNotSupported
}
return h.Basic.(billy.Symlink).Readlink(link)
}
func (h *Polyfill) Lstat(path string) (os.FileInfo, error) {
if !h.c.symlink {
return nil, billy.ErrNotSupported
}
return h.Basic.(billy.Symlink).Lstat(path)
}
func (h *Polyfill) Chroot(path string) (billy.Filesystem, error) {
if !h.c.chroot {
return nil, billy.ErrNotSupported
}
return h.Basic.(billy.Chroot).Chroot(path)
}
// Root returns the root path of the wrapped filesystem, falling back to the
// OS path separator when the filesystem does not support chroot.
func (h *Polyfill) Root() string {
	if h.c.chroot {
		return h.Basic.(billy.Chroot).Root()
	}
	return string(filepath.Separator)
}
func (h *Polyfill) Underlying() billy.Basic {
return h.Basic
}
// Capabilities implements the Capable interface.
func (h *Polyfill) Capabilities() billy.Capability {
return billy.Capabilities(h.Basic)
}
| mit |
hedgehogsbattle/First | README.md | 12 | First
=====
| mit |
lrosskamp/makealist-public | vendor/cache/ruby/2.3.0/gems/devise-4.3.0/test/support/webrat/integrations/rails.rb | 717 | require 'webrat/core/elements/form'
require 'action_dispatch/testing/integration'
module Webrat
Form.class_eval do
def self.parse_rails_request_params(params)
Rack::Utils.parse_nested_query(params)
end
end
module Logging
# Avoid RAILS_DEFAULT_LOGGER deprecation warning
def logger # :nodoc:
::Rails.logger
end
end
class RailsAdapter
protected
def do_request(http_method, url, data, headers)
update_protocol(url)
integration_session.send(http_method, normalize_url(url), params: data, headers: headers)
end
end
end
module ActionDispatch #:nodoc:
IntegrationTest.class_eval do
include Webrat::Methods
include Webrat::Matchers
end
end
| mit |
alphonsekurian/corefx | src/System.Reflection.Metadata/src/System/Reflection/Metadata/Internal/NamespaceCache.cs | 21688 | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
using System.Collections.Generic;
using System.Collections.Immutable;
using System.Diagnostics;
using System.Runtime.CompilerServices;
namespace System.Reflection.Metadata.Ecma335
{
internal class NamespaceCache
{
private readonly MetadataReader _metadataReader;
private readonly object _namespaceTableAndListLock = new object();
private Dictionary<NamespaceDefinitionHandle, NamespaceData> _namespaceTable;
private NamespaceData _rootNamespace;
private ImmutableArray<NamespaceDefinitionHandle> _namespaceList;
private uint _virtualNamespaceCounter;
internal NamespaceCache(MetadataReader reader)
{
Debug.Assert(reader != null);
_metadataReader = reader;
}
/// <summary>
/// Returns whether the namespaceTable has been created. If it hasn't, calling a GetXXX method
/// on this will probably have a very high amount of overhead.
/// </summary>
internal bool CacheIsRealized
{
get { return _namespaceTable != null; }
}
        /// <summary>
        /// Returns the dot-separated full name for a namespace handle that
        /// does not carry its own full name (i.e. a virtual handle).
        /// </summary>
        internal string GetFullName(NamespaceDefinitionHandle handle)
        {
            Debug.Assert(!handle.HasFullName); // we should not hit the cache in this case.
            NamespaceData data = GetNamespaceData(handle);
            return data.FullName;
        }
        /// <summary>
        /// Returns the cached data for the root (global) namespace,
        /// populating the namespace table on first use.
        /// </summary>
        internal NamespaceData GetRootNamespace()
        {
            EnsureNamespaceTableIsPopulated();
            Debug.Assert(_rootNamespace != null);
            return _rootNamespace;
        }
internal NamespaceData GetNamespaceData(NamespaceDefinitionHandle handle)
{
EnsureNamespaceTableIsPopulated();
NamespaceData result;
if (!_namespaceTable.TryGetValue(handle, out result))
{
Throw.InvalidHandle();
}
return result;
}
/// <summary>
/// This will return a StringHandle for the simple name of a namespace name at the given segment index.
/// If no segment index is passed explicitly or the "segment" index is greater than or equal to the number
/// of segments, then the last segment is used. "Segment" in this context refers to part of a namespace
/// name between dots.
///
/// Example: Given a NamespaceDefinitionHandle to "System.Collections.Generic.Test" called 'handle':
///
/// reader.GetString(GetSimpleName(handle)) == "Test"
/// reader.GetString(GetSimpleName(handle, 0)) == "System"
/// reader.GetString(GetSimpleName(handle, 1)) == "Collections"
/// reader.GetString(GetSimpleName(handle, 2)) == "Generic"
/// reader.GetString(GetSimpleName(handle, 3)) == "Test"
/// reader.GetString(GetSimpleName(handle, 1000)) == "Test"
/// </summary>
private StringHandle GetSimpleName(NamespaceDefinitionHandle fullNamespaceHandle, int segmentIndex = Int32.MaxValue)
{
StringHandle handleContainingSegment = fullNamespaceHandle.GetFullName();
Debug.Assert(!handleContainingSegment.IsVirtual);
int lastFoundIndex = fullNamespaceHandle.GetHeapOffset() - 1;
int currentSegment = 0;
while (currentSegment < segmentIndex)
{
int currentIndex = _metadataReader.StringHeap.IndexOfRaw(lastFoundIndex + 1, '.');
if (currentIndex == -1)
{
break;
}
lastFoundIndex = currentIndex;
++currentSegment;
}
Debug.Assert(lastFoundIndex >= 0 || currentSegment == 0);
// + 1 because lastFoundIndex will either "point" to a '.', or will be -1. Either way,
// we want the next char.
int resultIndex = lastFoundIndex + 1;
return StringHandle.FromOffset(resultIndex).WithDotTermination();
}
/// <summary>
/// Two distinct namespace handles represent the same namespace if their full names are the same. This
/// method merges builders corresponding to such namespace handles.
/// </summary>
private void PopulateNamespaceTable()
{
lock (_namespaceTableAndListLock)
{
if (_namespaceTable != null)
{
return;
}
var namespaceBuilderTable = new Dictionary<NamespaceDefinitionHandle, NamespaceDataBuilder>();
// Make sure to add entry for root namespace. The root namespace is special in that even
// though it might not have types of its own it always has an equivalent representation
// as a nil handle and we don't want to handle it below as dot-terminated virtual namespace.
// We use NamespaceDefinitionHandle.FromIndexOfFullName(0) instead of default(NamespaceDefinitionHandle) so
// that we never hand back a handle to the user that doesn't have a typeid as that prevents
// round-trip conversion to Handle and back. (We may discover other handle aliases for the
// root namespace (any nil/empty string will do), but we need this one to always be there.
NamespaceDefinitionHandle rootNamespace = NamespaceDefinitionHandle.FromFullNameOffset(0);
namespaceBuilderTable.Add(
rootNamespace,
new NamespaceDataBuilder(
rootNamespace,
rootNamespace.GetFullName(),
String.Empty));
PopulateTableWithTypeDefinitions(namespaceBuilderTable);
PopulateTableWithExportedTypes(namespaceBuilderTable);
Dictionary<string, NamespaceDataBuilder> stringTable;
MergeDuplicateNamespaces(namespaceBuilderTable, out stringTable);
List<NamespaceDataBuilder> virtualNamespaces;
ResolveParentChildRelationships(stringTable, out virtualNamespaces);
var namespaceTable = new Dictionary<NamespaceDefinitionHandle, NamespaceData>();
foreach (var group in namespaceBuilderTable)
{
// Freeze() caches the result, so any many-to-one relationships
// between keys and values will be preserved and efficiently handled.
namespaceTable.Add(group.Key, group.Value.Freeze());
}
if (virtualNamespaces != null)
{
foreach (var virtualNamespace in virtualNamespaces)
{
namespaceTable.Add(virtualNamespace.Handle, virtualNamespace.Freeze());
}
}
_namespaceTable = namespaceTable;
_rootNamespace = namespaceTable[rootNamespace];
}
}
/// <summary>
/// This will take 'table' and merge all of the NamespaceData instances that point to the same
/// namespace. It has to create 'stringTable' as an intermediate dictionary, so it will hand it
/// back to the caller should the caller want to use it.
/// </summary>
private void MergeDuplicateNamespaces(Dictionary<NamespaceDefinitionHandle, NamespaceDataBuilder> table, out Dictionary<string, NamespaceDataBuilder> stringTable)
{
var namespaces = new Dictionary<string, NamespaceDataBuilder>();
List<KeyValuePair<NamespaceDefinitionHandle, NamespaceDataBuilder>> remaps = null;
foreach (var group in table)
{
NamespaceDataBuilder data = group.Value;
NamespaceDataBuilder existingRecord;
if (namespaces.TryGetValue(data.FullName, out existingRecord))
{
// Children should not exist until the next step.
Debug.Assert(data.Namespaces.Count == 0);
data.MergeInto(existingRecord);
if (remaps == null)
{
remaps = new List<KeyValuePair<NamespaceDefinitionHandle, NamespaceDataBuilder>>();
}
remaps.Add(new KeyValuePair<NamespaceDefinitionHandle, NamespaceDataBuilder>(group.Key, existingRecord));
}
else
{
namespaces.Add(data.FullName, data);
}
}
// Needs to be done outside of foreach (var group in table) to avoid modifying the dictionary while foreach'ing over it.
if (remaps != null)
{
foreach (var tuple in remaps)
{
table[tuple.Key] = tuple.Value;
}
}
stringTable = namespaces;
}
/// <summary>
/// Creates a NamespaceDataBuilder instance that contains a synthesized NamespaceDefinitionHandle,
/// as well as the name provided.
/// </summary>
private NamespaceDataBuilder SynthesizeNamespaceData(string fullName, NamespaceDefinitionHandle realChild)
{
Debug.Assert(realChild.HasFullName);
int numberOfSegments = 0;
foreach (char c in fullName)
{
if (c == '.')
{
numberOfSegments++;
}
}
StringHandle simpleName = GetSimpleName(realChild, numberOfSegments);
var namespaceHandle = NamespaceDefinitionHandle.FromVirtualIndex(++_virtualNamespaceCounter);
return new NamespaceDataBuilder(namespaceHandle, simpleName, fullName);
}
/// <summary>
/// Quick convenience method that handles linking together child + parent
/// </summary>
private void LinkChildDataToParentData(NamespaceDataBuilder child, NamespaceDataBuilder parent)
{
Debug.Assert(child != null && parent != null);
Debug.Assert(!child.Handle.IsNil);
child.Parent = parent.Handle;
parent.Namespaces.Add(child.Handle);
}
/// <summary>
/// Links a child to its parent namespace. If the parent namespace doesn't exist, this will create a
/// virtual one. This will automatically link any virtual namespaces it creates up to its parents.
/// </summary>
private void LinkChildToParentNamespace(Dictionary<string, NamespaceDataBuilder> existingNamespaces,
NamespaceDataBuilder realChild,
ref List<NamespaceDataBuilder> virtualNamespaces)
{
Debug.Assert(realChild.Handle.HasFullName);
string childName = realChild.FullName;
var child = realChild;
// The condition for this loop is very complex -- essentially, we keep going
// until we:
// A. Encounter the root namespace as 'child'
// B. Find a preexisting namespace as 'parent'
while (true)
{
int lastIndex = childName.LastIndexOf('.');
string parentName;
if (lastIndex == -1)
{
if (childName.Length == 0)
{
return;
}
else
{
parentName = String.Empty;
}
}
else
{
parentName = childName.Substring(0, lastIndex);
}
NamespaceDataBuilder parentData;
if (existingNamespaces.TryGetValue(parentName, out parentData))
{
LinkChildDataToParentData(child, parentData);
return;
}
if (virtualNamespaces != null)
{
foreach (var data in virtualNamespaces)
{
if (data.FullName == parentName)
{
LinkChildDataToParentData(child, data);
return;
}
}
}
else
{
virtualNamespaces = new List<NamespaceDataBuilder>();
}
var virtualParent = SynthesizeNamespaceData(parentName, realChild.Handle);
LinkChildDataToParentData(child, virtualParent);
virtualNamespaces.Add(virtualParent);
childName = virtualParent.FullName;
child = virtualParent;
}
}
/// <summary>
/// This will link all parents/children in the given namespaces dictionary up to each other.
///
/// In some cases, we need to synthesize namespaces that do not have any type definitions or forwarders
/// of their own, but do have child namespaces. These are returned via the virtualNamespaces out
/// parameter.
/// </summary>
private void ResolveParentChildRelationships(Dictionary<string, NamespaceDataBuilder> namespaces, out List<NamespaceDataBuilder> virtualNamespaces)
{
virtualNamespaces = null;
foreach (var namespaceData in namespaces.Values)
{
LinkChildToParentNamespace(namespaces, namespaceData, ref virtualNamespaces);
}
}
/// <summary>
/// Loops through all type definitions in metadata, adding them to the given table
/// </summary>
private void PopulateTableWithTypeDefinitions(Dictionary<NamespaceDefinitionHandle, NamespaceDataBuilder> table)
{
Debug.Assert(table != null);
foreach (var typeHandle in _metadataReader.TypeDefinitions)
{
TypeDefinition type = _metadataReader.GetTypeDefinition(typeHandle);
if (type.Attributes.IsNested())
{
continue;
}
NamespaceDefinitionHandle namespaceHandle = _metadataReader.TypeDefTable.GetNamespaceDefinition(typeHandle);
NamespaceDataBuilder builder;
if (table.TryGetValue(namespaceHandle, out builder))
{
builder.TypeDefinitions.Add(typeHandle);
}
else
{
StringHandle name = GetSimpleName(namespaceHandle);
string fullName = _metadataReader.GetString(namespaceHandle);
var newData = new NamespaceDataBuilder(namespaceHandle, name, fullName);
newData.TypeDefinitions.Add(typeHandle);
table.Add(namespaceHandle, newData);
}
}
}
/// <summary>
/// Loops through all type forwarders in metadata, adding them to the given table
/// </summary>
private void PopulateTableWithExportedTypes(Dictionary<NamespaceDefinitionHandle, NamespaceDataBuilder> table)
{
Debug.Assert(table != null);
foreach (var exportedTypeHandle in _metadataReader.ExportedTypes)
{
ExportedType exportedType = _metadataReader.GetExportedType(exportedTypeHandle);
if (exportedType.Implementation.Kind == HandleKind.ExportedType)
{
continue; // skip nested exported types.
}
NamespaceDefinitionHandle namespaceHandle = exportedType.NamespaceDefinition;
NamespaceDataBuilder builder;
if (table.TryGetValue(namespaceHandle, out builder))
{
builder.ExportedTypes.Add(exportedTypeHandle);
}
else
{
Debug.Assert(namespaceHandle.HasFullName);
StringHandle simpleName = GetSimpleName(namespaceHandle);
string fullName = _metadataReader.GetString(namespaceHandle);
var newData = new NamespaceDataBuilder(namespaceHandle, simpleName, fullName);
newData.ExportedTypes.Add(exportedTypeHandle);
table.Add(namespaceHandle, newData);
}
}
}
/// <summary>
/// Populates namespaceList with distinct namespaces. No ordering is guaranteed.
/// </summary>
private void PopulateNamespaceList()
{
lock (_namespaceTableAndListLock)
{
if (_namespaceList != null)
{
return;
}
Debug.Assert(_namespaceTable != null);
var namespaceNameSet = new HashSet<string>();
var namespaceListBuilder = ImmutableArray.CreateBuilder<NamespaceDefinitionHandle>();
foreach (var group in _namespaceTable)
{
var data = group.Value;
if (namespaceNameSet.Add(data.FullName))
{
namespaceListBuilder.Add(group.Key);
}
}
_namespaceList = namespaceListBuilder.ToImmutable();
}
}
/// <summary>
/// If the namespace table doesn't exist, populates it!
/// </summary>
private void EnsureNamespaceTableIsPopulated()
{
// PERF: Branch will rarely be taken; do work in PopulateNamespaceList() so this can be inlined easily.
if (_namespaceTable == null)
{
PopulateNamespaceTable();
}
Debug.Assert(_namespaceTable != null);
}
/// <summary>
/// If the namespace list doesn't exist, populates it!
/// </summary>
private void EnsureNamespaceListIsPopulated()
{
if (_namespaceList == null)
{
PopulateNamespaceList();
}
Debug.Assert(_namespaceList != null);
}
/// <summary>
/// An intermediate class used to build NamespaceData instances. This was created because we wanted to
/// use ImmutableArrays in NamespaceData, but having ArrayBuilders and ImmutableArrays that served the
/// same purpose in NamespaceData got ugly. With the current design of how we create our Namespace
/// dictionary, this needs to be a class because we have a many-to-one mapping between NamespaceHandles
/// and NamespaceData. So, the pointer semantics must be preserved.
///
/// This class assumes that the builders will not be modified in any way after the first call to
/// Freeze().
/// </summary>
private class NamespaceDataBuilder
{
public readonly NamespaceDefinitionHandle Handle;
public readonly StringHandle Name;
public readonly string FullName;
public NamespaceDefinitionHandle Parent;
public ImmutableArray<NamespaceDefinitionHandle>.Builder Namespaces;
public ImmutableArray<TypeDefinitionHandle>.Builder TypeDefinitions;
public ImmutableArray<ExportedTypeHandle>.Builder ExportedTypes;
private NamespaceData _frozen;
public NamespaceDataBuilder(NamespaceDefinitionHandle handle, StringHandle name, string fullName)
{
Handle = handle;
Name = name;
FullName = fullName;
Namespaces = ImmutableArray.CreateBuilder<NamespaceDefinitionHandle>();
TypeDefinitions = ImmutableArray.CreateBuilder<TypeDefinitionHandle>();
ExportedTypes = ImmutableArray.CreateBuilder<ExportedTypeHandle>();
}
/// <summary>
/// Returns a NamespaceData that represents this NamespaceDataBuilder instance. After calling
/// this method, it is an error to use any methods or fields except Freeze() on the target
/// NamespaceDataBuilder.
/// </summary>
public NamespaceData Freeze()
{
// It is not an error to call this function multiple times. We cache the result
// because it's immutable.
if (_frozen == null)
{
var namespaces = Namespaces.ToImmutable();
Namespaces = null;
var typeDefinitions = TypeDefinitions.ToImmutable();
TypeDefinitions = null;
var exportedTypes = ExportedTypes.ToImmutable();
ExportedTypes = null;
_frozen = new NamespaceData(Name, FullName, Parent, namespaces, typeDefinitions, exportedTypes);
}
return _frozen;
}
public void MergeInto(NamespaceDataBuilder other)
{
Parent = default(NamespaceDefinitionHandle);
other.Namespaces.AddRange(this.Namespaces);
other.TypeDefinitions.AddRange(this.TypeDefinitions);
other.ExportedTypes.AddRange(this.ExportedTypes);
}
}
}
}
| mit |
ericleigh007/azure-stream-analytics | Samples/SensorDataAnalytics/SourceCode/Archived/PowerBIRestDemo-master/PowerBIRestDemo/Results/ChallengeResult.cs | 964 | using System;
using System.Collections.Generic;
using System.Linq;
using System.Net;
using System.Net.Http;
using System.Threading;
using System.Threading.Tasks;
using System.Web.Http;
namespace PowerBIRestDemo.Results
{
    /// <summary>
    /// An IHttpActionResult that triggers an OWIN authentication challenge
    /// for the given external login provider and replies 401 Unauthorized.
    /// </summary>
    public class ChallengeResult : IHttpActionResult
    {
        public ChallengeResult(string loginProvider, ApiController controller)
        {
            LoginProvider = loginProvider;
            Request = controller.Request;
        }

        // Name of the external authentication middleware to challenge.
        public string LoginProvider { get; set; }
        public HttpRequestMessage Request { get; set; }

        public Task<HttpResponseMessage> ExecuteAsync(CancellationToken cancellationToken)
        {
            // Ask the OWIN pipeline to issue the provider's challenge; the 401
            // response lets the middleware convert it into a redirect.
            Request.GetOwinContext().Authentication.Challenge(LoginProvider);
            HttpResponseMessage response = new HttpResponseMessage(HttpStatusCode.Unauthorized);
            response.RequestMessage = Request;
            return Task.FromResult(response);
        }
    }
}
| mit |
vishin-pavel/quick-app | www/protected/extensions/YiiMongoDbSuite/examples/MongoImage.php | 1364 | <?php
/**
* EMongoGridFS.php
*
* PHP version 5.2+
*
* @author Jose Martinez <[email protected]>
* @author Philippe Gaultier <[email protected]>
* @copyright 2010 Ibitux
* @license http://www.yiiframework.com/license/ BSD license
* @version SVN: $Revision: $
* @category ext
* @package ext.YiiMongoDbSuite
*/
/**
* EMongoGridFS
*
* Authorization management, dispatches actions and views on the system
*
* @author Jose Martinez <[email protected]>
* @author Philippe Gaultier <[email protected]>
* @copyright 2010 Ibitux
* @license http://www.yiiframework.com/license/ BSD license
* @version SVN: $Revision: $
* @category ext
* @package ext.YiiMongoDbSuite
*
*/
class MongoImage extends EMongoGridFS
{
    public $metadata;

    /**
     * this is similar to the get tableName() method. this returns tha name of the
     * document for this class. this should be in all lowercase.
     */
    public function getCollectionName()
    {
        return 'images';
    }

    /**
     * Returns the static model of the specified AR class.
     *
     * @param string $className class name
     *
     * @return MongoImage the static model class
     */
    public static function model($className=__CLASS__)
    {
        return parent::model($className);
    }

    /**
     * Validation rules: filename is required; metadata is mass-assignable.
     */
    public function rules()
    {
        return array(
            array('filename, metadata','safe'),
            array('filename','required'),
        );
    }
}
shyamalschandra/Mocha.jl | src/cuda/cudnn.jl | 21485 | export CuDNN
module CuDNN
using ..CUDA
# cudnnStatus_t
const CUDNN_STATUS_SUCCESS = 0
const CUDNN_STATUS_NOT_INITIALIZED = 1
const CUDNN_STATUS_ALLOC_FAILED = 2
const CUDNN_STATUS_BAD_PARAM = 3
const CUDNN_STATUS_INTERNAL_ERROR = 4
const CUDNN_STATUS_INVALID_VALUE = 5
const CUDNN_STATUS_ARCH_MISMATCH = 6
const CUDNN_STATUS_MAPPING_ERROR = 7
const CUDNN_STATUS_EXECUTION_FAILED = 8
const CUDNN_STATUS_NOT_SUPPORTED = 9
const CUDNN_STATUS_LICENSE_ERROR = 10
immutable CuDNNError <: Exception
code :: Int
end
using Compat
# Human-readable message for each cudnnStatus_t code; used by `show` below.
const cudnn_error_description = @compat(Dict(
    CUDNN_STATUS_SUCCESS => "Success",
    CUDNN_STATUS_NOT_INITIALIZED => "Not initialized",
    CUDNN_STATUS_ALLOC_FAILED => "Alloc failed",
    CUDNN_STATUS_BAD_PARAM => "Bad param",
    CUDNN_STATUS_INTERNAL_ERROR => "Internal error",
    CUDNN_STATUS_INVALID_VALUE => "Invalid value",
    CUDNN_STATUS_ARCH_MISMATCH => "Arch mismatch",
    CUDNN_STATUS_MAPPING_ERROR => "Mapping error",
    CUDNN_STATUS_EXECUTION_FAILED => "Execution failed",
    CUDNN_STATUS_NOT_SUPPORTED => "Not supported",
    CUDNN_STATUS_LICENSE_ERROR => "License error"
))

import Base.show
# Print the descriptive message instead of the raw integer code.
show(io::IO, error::CuDNNError) = print(io, cudnn_error_description[error.code])
# Invoke the libcudnn entry point named by the quoted symbol `fv` via ccall
# and throw a CuDNNError unless it returns CUDN N_STATUS_SUCCESS.
# `argtypes` is the ccall type tuple, `args...` the actual arguments.
# The symbol is resolved at macro-expansion time via eval(fv).
macro cudnncall(fv, argtypes, args...)
    f = eval(fv)
    quote
        # _curet is the raw cudnnStatus_t returned by the C function
        _curet = ccall( ($(Meta.quot(f)), "libcudnn"), Cint, $argtypes, $(args...) )
        if round(Int64, _curet) != CUDNN_STATUS_SUCCESS
            throw(CuDNNError(round(Int64, _curet)))
        end
    end
end
# Opaque cuDNN context handle and CUDA stream handle.
typealias Handle Ptr{Void}
typealias StreamHandle Ptr{Void}

# Create a cuDNN context (cudnnCreate) and return its handle.
function create()
    handle = Handle[0]
    @cudnncall(:cudnnCreate, (Ptr{Handle},), handle)
    return handle[1]
end

# Release a cuDNN context (cudnnDestroy).
function destroy(handle :: Handle)
    @cudnncall(:cudnnDestroy, (Handle,), handle)
end

# Associate a CUDA stream with the cuDNN context.
function set_stream(handle::Handle, stream::StreamHandle)
    @cudnncall(:cudnnSetStream, (Handle, StreamHandle), handle, stream)
end

# Query the CUDA stream currently associated with the cuDNN context.
function get_stream(handle::Handle)
    s_handle = StreamHandle[0]
    @cudnncall(:cudnnGetStream, (Handle, Ptr{StreamHandle}), handle, s_handle)
    return s_handle[1]
end
# Data structures to represent Image/Filter and the Neural Network Layer.
# All descriptors are opaque pointers managed by libcudnn.
typealias Tensor4dDescriptor Ptr{Void}
typealias ConvolutionDescriptor Ptr{Void}
typealias PoolingDescriptor Ptr{Void}
typealias FilterDescriptor Ptr{Void}

# cudnnDataType_t
const CUDNN_DATA_FLOAT = 0
const CUDNN_DATA_DOUBLE = 1

# Map a Julia floating-point type to the corresponding cudnnDataType_t code.
function cudnn_data_type{T<:FloatingPoint}(dtype::Type{T})
    if dtype == Float32
        return CUDNN_DATA_FLOAT
    elseif dtype == Float64
        return CUDNN_DATA_DOUBLE
    else
        error("Unsupported data type $(dtype)")
    end
end

# Inverse mapping: cudnnDataType_t code back to the Julia element type.
function cudnn_data_type(dtype :: Cint)
    if dtype == CUDNN_DATA_FLOAT
        return Float32
    elseif dtype == CUDNN_DATA_DOUBLE
        return Float64
    else
        error("Unknown CuDNN data code: $(dtype)")
    end
end

const CUDNN_TENSOR_NCHW = 0 # row major (wStride = 1, hStride = w)
const CUDNN_TENSOR_NHWC = 1 # feature maps interleaved ( cStride = 1 )
# Allocate a cuDNN tensor descriptor (cudnnCreateTensorDescriptor) and
# return its opaque handle.
#
# Fix: the C API writes the new handle through a cudnnTensorDescriptor_t*,
# so the ccall argument type is Ptr{Tensor4dDescriptor}.  The previous bare
# `Tensor4dDescriptor` only worked by accident of pointer conversion and was
# inconsistent with create_filter_descriptor / create_pooling_descriptor.
function create_tensor4d_descriptor()
    desc = Tensor4dDescriptor[0]
    @cudnncall(:cudnnCreateTensorDescriptor, (Ptr{Tensor4dDescriptor},), desc)
    return desc[1]
end
# Configure a tensor descriptor in NCHW layout.  `dims` is given as
# (w, h, c, n), fastest-varying dimension first; the call reorders the
# values into cuDNN's (n, c, h, w) argument order.
function set_tensor4d_descriptor{T<:FloatingPoint}(desc::Tensor4dDescriptor, dtype::Type{T}, dims :: NTuple{4, Int})
    w,h,c,n = dims
    @cudnncall(:cudnnSetTensor4dDescriptor, (Tensor4dDescriptor, Cint, Cint, Cint, Cint, Cint, Cint),
               desc, CUDNN_TENSOR_NCHW, cudnn_data_type(dtype), n, c, h, w)
end

# Variant with explicit per-dimension strides; `dims` and `stride` are both
# ordered (w, h, c, n).
function set_tensor4d_descriptor{T<:FloatingPoint}(desc::Tensor4dDescriptor, dtype::Type{T},
        dims :: NTuple{4, Int}, stride :: NTuple{4, Int})
    w, h, c, n = dims
    wStride, hStride, cStride, nStride = stride
    @cudnncall(:cudnnSetTensor4dDescriptorEx, (Tensor4dDescriptor, Cint, Cint, Cint, Cint, Cint, Cint, Cint, Cint, Cint),
               desc, cudnn_data_type(dtype), n,c,h,w,nStride,cStride,hStride,wStride)
end

# Convenience: allocate and configure a descriptor in one step.
function create_tensor4d_descriptor(dtype::Type, dims :: NTuple{4, Int})
    desc = create_tensor4d_descriptor()
    set_tensor4d_descriptor(desc, dtype, dims)
    return desc
end

# Convenience: allocate and configure with explicit strides.
function create_tensor4d_descriptor(dtype::Type, dims :: NTuple{4, Int}, stride :: NTuple{4, Int})
    desc = create_tensor4d_descriptor()
    set_tensor4d_descriptor(desc, dtype, dims, stride)
    return desc
end
# Read back (dtype, (w,h,c,n), (wStride,hStride,cStride,nStride)) from a
# tensor descriptor.  One-element Cint arrays act as C out-parameters.
function get_tensor4d_descriptor(desc::Tensor4dDescriptor)
    dtype = Cint[0]
    n = Cint[0]; c = Cint[0]; h = Cint[0]; w = Cint[0]
    nStride = Cint[0]; cStride = Cint[0]; hStride = Cint[0]; wStride = Cint[0]
    @cudnncall(:cudnnGetTensor4dDescriptor, (Tensor4dDescriptor, Ptr{Cint}, Ptr{Cint}, Ptr{Cint}, Ptr{Cint}, Ptr{Cint},
                                             Ptr{Cint}, Ptr{Cint}, Ptr{Cint}, Ptr{Cint}), desc, dtype, n,c,h,w,
               nStride, cStride, hStride, wStride)
    return (cudnn_data_type(dtype[1]), (w[1],h[1],c[1],n[1]), (wStride[1],hStride[1],cStride[1],nStride[1]))
end

# Free a tensor descriptor.
function destroy_tensor4d_descriptor(desc :: Tensor4dDescriptor)
    @cudnncall(:cudnnDestroyTensorDescriptor, (Tensor4dDescriptor,), desc)
end

# Copy/convert `src` into `dest` according to the two descriptors.
# NOTE(review): cuDNN v2 names this entry point cudnnTransformTensor, not
# cudnnTransformTensor4d -- confirm the symbol against the targeted cuDNN
# release before relying on this wrapper.
function transform_tensor4d(handle::Handle, src_desc::Tensor4dDescriptor, src::CuPtr, dest_desc::Tensor4dDescriptor, dest::CuPtr)
    @cudnncall(:cudnnTransformTensor4d, (Handle, Tensor4dDescriptor, Ptr{Void}, Tensor4dDescriptor, Ptr{Void}),
               handle, src_desc, src.p, dest_desc, dest.p)
end
# Tensor bias addition mode (cudnnAddMode_t)
const CUDNN_ADD_IMAGE = 0 # add one image to every feature maps of each input
const CUDNN_ADD_SAME_HW = 0
const CUDNN_ADD_FEATURE_MAP = 1 # add a set of feature maps to a batch of inputs : tensorBias has n=1 , same nb feature than Src/dest
const CUDNN_ADD_SAME_CHW = 1
const CUDNN_ADD_SAME_C = 2 # add a tensor of size 1,c,1,1 to every corresponding point of n,c,h,w input
const CUDNN_ADD_FULL_TENSOR = 3 # add 2 tensors with same n,c,h,w

# srcdst = alpha * bias + beta * srcdst, broadcast according to `mode`.
# The element type of the bias descriptor must match T.
function add_tensor4d{T<:FloatingPoint}(handle::Handle, mode::Int, alpha::T,
        bias_desc::Tensor4dDescriptor, bias::CuPtr,
        beta::T,
        srcdst_desc::Tensor4dDescriptor, srcdst::CuPtr)
    @assert CUDNN_ADD_IMAGE <= mode <= CUDNN_ADD_FULL_TENSOR
    # index 1 of get_tensor4d_descriptor's return tuple is the element type
    @assert typeof(alpha) == get_tensor4d_descriptor(bias_desc)[1]
    alpha_ptr = T[alpha]
    beta_ptr = T[beta]
    @cudnncall(:cudnnAddTensor, (Handle, Cint, Ptr{Void}, Tensor4dDescriptor, Ptr{Void}, Ptr{Void}, Tensor4dDescriptor, Ptr{Void}),
               handle, mode, alpha_ptr, bias_desc, bias.p, beta_ptr, srcdst_desc, srcdst.p)
end
# Fill every element of the tensor described by `desc` with `val`.
#
# Fix: get_tensor4d_descriptor returns (dtype, dims, stride) and Julia
# tuples are 1-indexed, so the element type is at index 1; the original
# `[0]` always raised a BoundsError before the cuDNN call was reached.
function set_tensor4d{T<:FloatingPoint}(handle::Handle, desc::Tensor4dDescriptor, data::CuPtr, val::T)
    @assert typeof(val) == get_tensor4d_descriptor(desc)[1]
    val_ptr = T[val]
    @cudnncall(:cudnnSetTensor4d, (Handle, Tensor4dDescriptor, Ptr{Void}, Ptr{Void}),
               handle, desc, data.p, val_ptr)
end
# Convolution Mode (cudnnConvolutionMode_t)
const CUDNN_CONVOLUTION = 0
const CUDNN_CROSS_CORRELATION = 1

# Convolution Path
const CUDNN_CONVOLUTION_FWD = 0 # Tensor Convolution function
const CUDNN_CONVOLUTION_WEIGHT_GRAD = 1 # Weight Gradient update function
const CUDNN_CONVOLUTION_DATA_GRAD = 2 # Data Gradient update function

# Allocate an (unconfigured) filter descriptor.
function create_filter_descriptor()
    desc = FilterDescriptor[0]
    @cudnncall(:cudnnCreateFilterDescriptor, (Ptr{FilterDescriptor},), desc)
    return desc[1]
end

# Configure a filter descriptor.  `dims` is (w, h, c, k) where k is the
# number of output feature maps; cuDNN takes them in (k, c, h, w) order.
function set_filter_descriptor{T<:FloatingPoint}(desc::FilterDescriptor, dtype::Type{T}, dims :: NTuple{4, Int})
    w,h,c,k = dims
    @cudnncall(:cudnnSetFilter4dDescriptor, (FilterDescriptor, Cint, Cint, Cint, Cint, Cint),
               desc, cudnn_data_type(dtype), k, c, h, w)
end

# Convenience: allocate and configure in one step.
function create_filter_descriptor(dtype::Type, dims :: NTuple{4, Int})
    desc = create_filter_descriptor()
    set_filter_descriptor(desc, dtype, dims)
    return desc
end

# Read back (dtype, w, h, c, k) from a filter descriptor.
# NOTE(review): cuDNN v2 names this accessor cudnnGetFilter4dDescriptor;
# confirm that :cudnnGetFilterDescriptor exists in the targeted release.
function get_filter_descriptor(desc::FilterDescriptor)
    k = Cint[0]; c = Cint[0]; h = Cint[0]; w = Cint[0]
    dtype = Cint[0]
    @cudnncall(:cudnnGetFilterDescriptor, (FilterDescriptor,Ptr{Cint},Ptr{Cint},Ptr{Cint},Ptr{Cint},Ptr{Cint}),
               desc, dtype, k, c, h, w)
    return (cudnn_data_type(dtype[1]), w[1], h[1], c[1], k[1])
end

# Free a filter descriptor.
function destroy_filter_descriptor(desc::FilterDescriptor)
    @cudnncall(:cudnnDestroyFilterDescriptor, (FilterDescriptor,), desc)
end
# Allocate an (unconfigured) convolution descriptor.
function create_convolution_descriptor()
    desc = ConvolutionDescriptor[0]
    @cudnncall(:cudnnCreateConvolutionDescriptor, (Ptr{ConvolutionDescriptor},), desc)
    return desc[1]
end

# Configure padding / stride / upscale / mode of a convolution descriptor.
# Pairs are given (w, h) and reordered so the vertical component comes
# first, matching cudnnSetConvolution2dDescriptor.
# NOTE(review): `input_desc` and `filter_desc` are accepted but never passed
# to the C call -- they appear to exist only for call-site symmetry.
function set_convolution_descriptor(desc::ConvolutionDescriptor, input_desc::Tensor4dDescriptor,
        filter_desc::FilterDescriptor, pad::NTuple{2, Int}, stride::NTuple{2, Int}, upscale::NTuple{2, Int},
        conv_mode :: Int)
    @assert CUDNN_CONVOLUTION <= conv_mode <= CUDNN_CROSS_CORRELATION
    pad_w, pad_h = pad
    v, u = stride
    upscalex, upscaley = upscale
    @cudnncall(:cudnnSetConvolution2dDescriptor, (ConvolutionDescriptor, Cint, Cint, Cint, Cint,
                                                  Cint, Cint, Cint),
               desc, pad_h, pad_w, u, v, upscalex, upscaley, conv_mode)
end

# Convenience: allocate and configure in one step.
function create_convolution_descriptor(input_desc::Tensor4dDescriptor,
        filter_desc::FilterDescriptor, pad::NTuple{2, Int}, stride::NTuple{2, Int}, upscale::NTuple{2, Int},
        conv_mode :: Int)
    desc = create_convolution_descriptor()
    set_convolution_descriptor(desc, input_desc, filter_desc, pad, stride, upscale, conv_mode)
    return desc
end

# Extended variant taking raw input dimensions (w,h,c,n), filter count and
# kernel size instead of pre-built descriptors.
function set_convolution_descriptor_ex(desc::ConvolutionDescriptor, dims::NTuple{4, Int},
        n_filter::Int, kernel_wh::NTuple{2, Int}, pad::NTuple{2, Int}, stride::NTuple{2, Int},
        upscale::NTuple{2, Int}, conv_mode::Int)
    @assert CUDNN_CONVOLUTION <= conv_mode <= CUDNN_CROSS_CORRELATION
    w,h,c,n = dims
    s,r = kernel_wh
    pad_w, pad_h = pad
    v,u = stride
    upscalex, upscaley = upscale
    @cudnncall(:cudnnSetConvolutionDescriptorEx, (ConvolutionDescriptor, Cint,Cint,Cint,Cint,
                                                  Cint,Cint,Cint, Cint,Cint, Cint,Cint, Cint,Cint, Cint),
               desc, n,c,h,w, n_filter, r,s, pad_h,pad_w, u,v, upscalex,upscaley, conv_mode)
end

# Free a convolution descriptor.
function destroy_convolution_descriptor(desc::ConvolutionDescriptor)
    @cudnncall(:cudnnDestroyConvolutionDescriptor, (ConvolutionDescriptor,), desc)
end
# Query the output dimensions a convolution would produce along `path`
# (forward / weight-grad / data-grad).  Returns (w, h, c, n).
# NOTE(review): the out-parameters are declared Ptr{Void} in the ccall while
# sibling accessors use Ptr{Cint}; both convert the Cint buffers to the same
# pointer, but Ptr{Cint} would be the consistent spelling.
function get_output_tensor4d_dim(desc::ConvolutionDescriptor, path::Int)
    @assert CUDNN_CONVOLUTION_FWD <= path <= CUDNN_CONVOLUTION_DATA_GRAD
    n = Cint[0]; c = Cint[0]; h = Cint[0]; w = Cint[0]
    @cudnncall(:cudnnGetOutputTensor4dDim, (ConvolutionDescriptor, Cint, Ptr{Void}, Ptr{Void}, Ptr{Void}, Ptr{Void}),
               desc, path, n, c, h, w)
    return (w[1], h[1], c[1], n[1])
end
# cudnnConvolutionFwdAlgo_t
const CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM = 0
const CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMPT_GEMM = 1
const CUDNN_CONVOLUTION_FWD_ALGO_GEMM = 2
const CUDNN_CONVOLUTION_FWD_ALGO_DIRECT = 3

# cudnnConvolutionFwdPreference_t
const CUDNN_CONVOLUTION_FWD_NO_WORKSPACE = 0
const CUDNN_CONVOLUTION_FWD_PREFER_FASTEST = 1
const CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT = 2

# Ask cuDNN to choose a forward-convolution algorithm honouring the given
# workspace preference and memory limit; returns the algorithm code.
# NOTE(review): `Int`/`Ptr{Int}` (64-bit on LP64) are used in the ccall where
# the C API takes/returns 32-bit enums; this happens to work on little-endian
# x86-64 because the one-element buffer is zero-initialised, but Cint would
# be the portable declaration -- confirm before running on other ABIs.
function get_convolution_forward_algorithm(handle::Handle, src_desc::Tensor4dDescriptor,
        filter_desc::FilterDescriptor, conv_desc::ConvolutionDescriptor, dest_desc::Tensor4dDescriptor,
        preference::Int, mem_limit_bytes::Int)
    @assert CUDNN_CONVOLUTION_FWD_NO_WORKSPACE <= preference <= CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT
    algor = Int[0]
    @cudnncall(:cudnnGetConvolutionForwardAlgorithm, (Handle, Tensor4dDescriptor, FilterDescriptor,
                                                      ConvolutionDescriptor, Tensor4dDescriptor, Int,
                                                      Csize_t, Ptr{Int}),
               handle, src_desc, filter_desc, conv_desc, dest_desc, preference, mem_limit_bytes, algor)
    algor = algor[1]
    @assert CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM <= algor <= CUDNN_CONVOLUTION_FWD_ALGO_DIRECT
    return algor
end

# Query the workspace size (bytes) needed by the given forward algorithm.
# NOTE(review): same Int-vs-Cint width concern as above for `algor`.
function get_convolution_forward_workspace_size(handle::Handle, src_desc::Tensor4dDescriptor,
        filter_desc::FilterDescriptor, conv_desc::ConvolutionDescriptor, dest_desc::Tensor4dDescriptor,
        algor::Int)
    @assert CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM <= algor <= CUDNN_CONVOLUTION_FWD_ALGO_DIRECT
    ws_size = Csize_t[0]
    @cudnncall(:cudnnGetConvolutionForwardWorkspaceSize, (Handle, Tensor4dDescriptor, FilterDescriptor,
                                                          ConvolutionDescriptor, Tensor4dDescriptor, Int, Ptr{Csize_t}),
               handle, src_desc, filter_desc, conv_desc, dest_desc, algor, ws_size)
    ws_size = ws_size[1]
    return ws_size
end
# dest = alpha * conv(src, filter) + beta * dest.
# `workspace` may be a null pointer when `algo` is
# CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM, which needs no workspace.
#
# Fix: the `algo` argument is a cudnnConvolutionFwdAlgo_t (a C enum, i.e. a
# 32-bit int), but the original ccall type tuple declared that slot as
# Ptr{Void}, silently converting the Int to a pointer value.  It only worked
# by accident of the x86-64 calling convention; the slot is now Cint.
function convolution_forward{T<:FloatingPoint}(handle::Handle, alpha::T, src_desc::Tensor4dDescriptor, src::CuPtr,
        filter_desc::FilterDescriptor, filter::CuPtr, conv::ConvolutionDescriptor,
        dest_desc::Tensor4dDescriptor, dest::CuPtr, workspace::CuPtr, workspace_size, algo::Int,
        beta::T)
    alpha_ptr = T[alpha]
    beta_ptr = T[beta]
    @assert CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM <= algo <= CUDNN_CONVOLUTION_FWD_ALGO_DIRECT
    @cudnncall(:cudnnConvolutionForward, (Handle, Ptr{Void}, Tensor4dDescriptor, Ptr{Void},
                                          FilterDescriptor, Ptr{Void}, ConvolutionDescriptor,
                                          Cint, Ptr{Void}, Csize_t, Ptr{Void},
                                          Tensor4dDescriptor, Ptr{Void}),
               handle, alpha_ptr, src_desc, src.p,
               filter_desc, filter.p, conv,
               algo, workspace.p, workspace_size, beta_ptr,
               dest_desc, dest.p)
end
# Bias gradient: dest = alpha * sum-over-(n,h,w)(src) + beta * dest.
function convolution_backward_bias{T<:FloatingPoint}(handle::Handle, alpha::T, src_desc::Tensor4dDescriptor, src::CuPtr,
        beta::T, dest_desc::Tensor4dDescriptor, dest::CuPtr)
    alpha_ptr = T[alpha]
    beta_ptr = T[beta]
    @cudnncall(:cudnnConvolutionBackwardBias, (Handle, Ptr{Void}, Tensor4dDescriptor, Ptr{Void},
                                               Ptr{Void}, Tensor4dDescriptor, Ptr{Void}), handle, alpha_ptr, src_desc, src.p, beta_ptr, dest_desc, dest.p)
end

# Filter (weight) gradient: grad = alpha * dW(src, diff) + beta * grad.
function convolution_backward_filter{T<:FloatingPoint}(handle::Handle, alpha::T, src_desc::Tensor4dDescriptor, src::CuPtr,
        diff_desc::Tensor4dDescriptor, diff::CuPtr, conv::ConvolutionDescriptor,
        beta::T, grad_desc::FilterDescriptor, grad::CuPtr)
    alpha_ptr = T[alpha]
    beta_ptr = T[beta]
    @cudnncall(:cudnnConvolutionBackwardFilter, (Handle, Ptr{Void}, Tensor4dDescriptor, Ptr{Void},
                                                 Tensor4dDescriptor, Ptr{Void},
                                                 ConvolutionDescriptor,
                                                 Ptr{Void}, FilterDescriptor, Ptr{Void}),
               handle, alpha_ptr, src_desc, src.p, diff_desc, diff.p, conv, beta_ptr, grad_desc, grad.p)
end

# Data (input) gradient: grad = alpha * dX(filter, diff) + beta * grad.
function convolution_backward_data{T<:FloatingPoint}(handle::Handle, alpha::T, filter_desc::FilterDescriptor, filter::CuPtr,
        diff_desc::Tensor4dDescriptor, diff::CuPtr, conv::ConvolutionDescriptor,
        beta::T, grad_desc::Tensor4dDescriptor, grad::CuPtr)
    alpha_ptr = T[alpha]
    beta_ptr = T[beta]
    @cudnncall(:cudnnConvolutionBackwardData, (Handle, Ptr{Void}, FilterDescriptor, Ptr{Void},
                                               Tensor4dDescriptor, Ptr{Void},
                                               ConvolutionDescriptor,
                                               Ptr{Void},Tensor4dDescriptor,
                                               Ptr{Void}),
               handle, alpha_ptr, filter_desc, filter.p, diff_desc, diff.p, conv, beta_ptr, grad_desc, grad.p)
end
# cudnnSoftmaxAlgorithm_t
const CUDNN_SOFTMAX_FAST = 0 # straightforward implementation
const CUDNN_SOFTMAX_ACCURATE = 1 # subtract max from every point to avoid overflow
# cudnnSoftmaxMode_t
const CUDNN_SOFTMAX_MODE_INSTANCE = 0 # compute the softmax over all C, H, W for each N
const CUDNN_SOFTMAX_MODE_CHANNEL = 1 # compute the softmax over all C for each H, W, N

# dest = alpha * softmax(src) + beta * dest.
function softmax_forward{T<:FloatingPoint}(handle::Handle, algorithm::Int, mode::Int,
        alpha::T, src_desc::Tensor4dDescriptor, src::CuPtr, beta::T, dest_desc::Tensor4dDescriptor, dest::CuPtr)
    @assert CUDNN_SOFTMAX_FAST <= algorithm <= CUDNN_SOFTMAX_ACCURATE
    @assert CUDNN_SOFTMAX_MODE_INSTANCE <= mode <= CUDNN_SOFTMAX_MODE_CHANNEL
    alpha_ptr = T[alpha]
    beta_ptr = T[beta]
    @cudnncall(:cudnnSoftmaxForward, (Handle, Cint, Cint, Ptr{Void}, Tensor4dDescriptor, Ptr{Void},
                                      Ptr{Void}, Tensor4dDescriptor, Ptr{Void}),
               handle, algorithm, mode, alpha_ptr, src_desc, src.p, beta_ptr, dest_desc, dest.p)
end

# Gradient of softmax: destdiff = alpha * dsoftmax(src, srcdiff) + beta * destdiff.
function softmax_backward{T<:FloatingPoint}(handle::Handle, algorithm::Int, mode::Int,
        alpha::T, src_desc::Tensor4dDescriptor, src::CuPtr, srcdiff_desc::Tensor4dDescriptor, srcdiff::CuPtr,
        beta::T, destdiff_desc::Tensor4dDescriptor, destdiff::CuPtr)
    @assert CUDNN_SOFTMAX_FAST <= algorithm <= CUDNN_SOFTMAX_ACCURATE
    @assert CUDNN_SOFTMAX_MODE_INSTANCE <= mode <= CUDNN_SOFTMAX_MODE_CHANNEL
    alpha_ptr = T[alpha]
    beta_ptr = T[beta]
    @cudnncall(:cudnnSoftmaxBackward, (Handle, Cint, Cint, Ptr{Void}, Tensor4dDescriptor, Ptr{Void},
                                       Tensor4dDescriptor, Ptr{Void}, Ptr{Void}, Tensor4dDescriptor, Ptr{Void}),
               handle, algorithm, mode, alpha_ptr, src_desc, src.p, srcdiff_desc, srcdiff.p,
               beta_ptr, destdiff_desc, destdiff.p)
end
# cudnnPoolingMode_t
const CUDNN_POOLING_MAX = 0
const CUDNN_POOLING_AVERAGE = 1

# Allocate an (unconfigured) pooling descriptor.
function create_pooling_descriptor()
    desc = PoolingDescriptor[0]
    @cudnncall(:cudnnCreatePoolingDescriptor, (Ptr{PoolingDescriptor},), desc)
    return desc[1]
end
# Configure a pooling descriptor.  `dims`, `stride` and `padding` are all
# given as (w, h) pairs.
#
# Fix: cudnnSetPooling2dDescriptor takes (windowHeight, windowWidth,
# verticalPadding, horizontalPadding, verticalStride, horizontalStride) --
# the vertical (h) component comes first in every pair.  The original code
# passed pad_w before pad_h, silently swapping the padding for asymmetric
# configurations.
function set_pooling_descriptor(desc::PoolingDescriptor, mode::Int, dims::NTuple{2, Int}, stride::NTuple{2, Int}, padding::NTuple{2, Int})
    @assert CUDNN_POOLING_MAX <= mode <= CUDNN_POOLING_AVERAGE
    w,h = dims
    pad_w, pad_h = padding
    stride_w, stride_h = stride
    @cudnncall(:cudnnSetPooling2dDescriptor, (PoolingDescriptor, Cint, Cint,Cint, Cint,Cint, Cint,Cint),
               desc, mode, h, w, pad_h, pad_w, stride_h, stride_w)
end
# Convenience: allocate and configure a pooling descriptor in one step.
function create_pooling_descriptor(mode::Int, dims::NTuple{2,Int}, stride::NTuple{2,Int}, padding::NTuple{2,Int})
    desc = create_pooling_descriptor()
    set_pooling_descriptor(desc, mode, dims, stride, padding)
    return desc
end
# Read back (mode, (w, h), (stride_w, stride_h)) from a pooling descriptor.
#
# Fixes: the ccall symbol was misspelled (:cudnGetPoolingDescriptor, missing
# an 'n'), and the return tuple contained the one-element Cint buffers
# instead of the dereferenced scalar values.
# NOTE(review): the argument list matches a legacy accessor; the cuDNN v2
# cudnnGetPooling2dDescriptor also returns the padding -- confirm against
# the targeted cuDNN release before relying on this function.
function get_pooling_descriptor(desc::PoolingDescriptor)
    mode = Cint[0]
    h = Cint[0]; w = Cint[0]; stride_h = Cint[0]; stride_w = Cint[0]
    @cudnncall(:cudnnGetPoolingDescriptor, (PoolingDescriptor, Ptr{Cint}, Ptr{Cint}, Ptr{Cint},
                                            Ptr{Cint}, Ptr{Cint}),
               desc, mode, h, w, stride_h, stride_w)
    return (mode[1], (w[1], h[1]), (stride_w[1], stride_h[1]))
end
# Free a pooling descriptor.
# NOTE(review): the function name is misspelled ("descriotpr"); it is kept
# as-is because renaming would break existing callers.
function destroy_pooling_descriotpr(desc::PoolingDescriptor)
    @cudnncall(:cudnnDestroyPoolingDescriptor, (PoolingDescriptor,), desc)
end
# dest = alpha * pool(src) + beta * dest.
function pooling_forward{T<:FloatingPoint}(handle::Handle, pooling::PoolingDescriptor, alpha::T,
        src_desc::Tensor4dDescriptor, src::CuPtr, beta::T,
        dest_desc::Tensor4dDescriptor, dest::CuPtr)
    alpha_ptr = T[alpha]
    beta_ptr = T[beta]
    @cudnncall(:cudnnPoolingForward, (Handle, PoolingDescriptor, Ptr{Void},
                                      Tensor4dDescriptor, Ptr{Void}, Ptr{Void},
                                      Tensor4dDescriptor, Ptr{Void}),
               handle, pooling, alpha_ptr,
               src_desc, src.p, beta_ptr,
               dest_desc, dest.p)
end

# Gradient of pooling; needs the forward pass's input (dest) and output
# (src) tensors in addition to the output gradient (srcdiff).
function pooling_backward{T<:FloatingPoint}(handle::Handle, pooling::PoolingDescriptor, alpha::T,
        src_desc::Tensor4dDescriptor, src::CuPtr, srcdiff_desc::Tensor4dDescriptor, srcdiff::CuPtr,
        dest_desc::Tensor4dDescriptor, dest::CuPtr, beta::T, destdiff_desc::Tensor4dDescriptor, destdiff::CuPtr)
    alpha_ptr = T[alpha]
    beta_ptr = T[beta]
    @cudnncall(:cudnnPoolingBackward, (Handle, PoolingDescriptor, Ptr{Void}, Tensor4dDescriptor, Ptr{Void},
                                       Tensor4dDescriptor, Ptr{Void},Tensor4dDescriptor, Ptr{Void},
                                       Ptr{Void}, Tensor4dDescriptor, Ptr{Void}),
               handle, pooling, alpha_ptr, src_desc, src.p, srcdiff_desc, srcdiff.p,
               dest_desc, dest.p, beta_ptr, destdiff_desc, destdiff.p)
end
# cudnnActivationMode_t
const CUDNN_ACTIVATION_SIGMOID = 0
const CUDNN_ACTIVATION_RELU = 1
const CUDNN_ACTIVATION_TANH = 2

# Apply the activation `mode` element-wise: dest = act(src).
#
# Fix: the range assertion used `<+`, which Julia parses as `< (+x)` and
# therefore wrongly rejected CUDN N_ACTIVATION_TANH itself; it is now `<=`.
function activation_forward(handle::Handle, mode::Int, src_desc::Tensor4dDescriptor, src::CuPtr,
        dest_desc::Tensor4dDescriptor, dest::CuPtr)
    @assert CUDNN_ACTIVATION_SIGMOID <= mode <= CUDNN_ACTIVATION_TANH
    @cudnncall(:cudnnActivationForward, (Handle, Cint, Tensor4dDescriptor, Ptr{Void},
                                         Tensor4dDescriptor, Ptr{Void}),
               handle, mode, src_desc, src.p, dest_desc, dest.p)
end
# Gradient of the activation; needs the forward input (dest), forward
# output (src) and the output gradient (srcdiff).
#
# Fix: same `<+` typo as activation_forward -- the assertion wrongly
# rejected CUDNN_ACTIVATION_TANH; it is now `<=`.
function activation_backward(handle::Handle, mode::Int,
        src_desc::Tensor4dDescriptor, src::CuPtr, srcdiff_desc::Tensor4dDescriptor, srcdiff::CuPtr,
        dest_desc::Tensor4dDescriptor, dest::CuPtr, destdiff_desc::Tensor4dDescriptor, destdiff::CuPtr)
    @assert CUDNN_ACTIVATION_SIGMOID <= mode <= CUDNN_ACTIVATION_TANH
    @cudnncall(:cudnnActivationBackward, (Handle, Cint, Tensor4dDescriptor, Ptr{Void},
                                          Tensor4dDescriptor, Ptr{Void},Tensor4dDescriptor, Ptr{Void},
                                          Tensor4dDescriptor, Ptr{Void}),
               handle, mode, src_desc, src.p, srcdiff_desc, srcdiff.p,
               dest_desc, dest.p, destdiff_desc, destdiff.p)
end
end # module
| mit |
xvadur01/BPIS | vendor/nette/reflection/src/Reflection/Property.php | 2431 | <?php
/**
* This file is part of the Nette Framework (http://nette.org)
* Copyright (c) 2004 David Grudl (http://davidgrudl.com)
*/
namespace Nette\Reflection;
use Nette,
Nette\Utils\ObjectMixin;
/**
* Reports information about a classes variable.
*
* @author David Grudl
* @property-read ClassType $declaringClass
* @property-read IAnnotation[][] $annotations
* @property-read string $description
* @property-read string $name
* @property mixed $value
* @property-read bool $public
* @property-read bool $private
* @property-read bool $protected
* @property-read bool $static
* @property-read bool $default
* @property-read int $modifiers
* @property-read string $docComment
* @property-write bool $accessible
*/
class Property extends \ReflectionProperty
{
	/**
	 * Returns the "Class::$property" notation for this property.
	 */
	public function __toString()
	{
		return sprintf('%s::$%s', parent::getDeclaringClass()->getName(), $this->getName());
	}


	/********************* Reflection layer ****************d*g**/


	/**
	 * Returns the declaring class wrapped in Nette's reflection type.
	 * @return ClassType
	 */
	public function getDeclaringClass()
	{
		$declaring = parent::getDeclaringClass();
		return new ClassType($declaring->getName());
	}


	/********************* Nette\Annotations support ****************d*g**/


	/**
	 * Has property specified annotation?
	 * @param string
	 * @return bool
	 */
	public function hasAnnotation($name)
	{
		$annotations = AnnotationsParser::getAll($this);
		return !empty($annotations[$name]);
	}


	/**
	 * Returns the last value of the given annotation, or NULL when absent.
	 * @param string
	 * @return IAnnotation
	 */
	public function getAnnotation($name)
	{
		$annotations = AnnotationsParser::getAll($this);
		if (!isset($annotations[$name])) {
			return NULL;
		}
		return end($annotations[$name]);
	}


	/**
	 * Returns all annotations.
	 * @return IAnnotation[][]
	 */
	public function getAnnotations()
	{
		return AnnotationsParser::getAll($this);
	}


	/**
	 * Returns value of annotation 'description'.
	 * @return string
	 */
	public function getDescription()
	{
		return $this->getAnnotation('description');
	}


	/********************* Nette\Object behaviour ****************d*g**/


	// The magic methods below delegate to ObjectMixin, which implements the
	// Nette\Object behaviour (dynamic getters/setters and extension methods).

	public function __call($name, $args)
	{
		return ObjectMixin::call($this, $name, $args);
	}


	public function &__get($name)
	{
		return ObjectMixin::get($this, $name);
	}


	public function __set($name, $value)
	{
		ObjectMixin::set($this, $name, $value);
	}


	public function __isset($name)
	{
		return ObjectMixin::has($this, $name);
	}


	public function __unset($name)
	{
		ObjectMixin::remove($this, $name);
	}

}
| mit |
StrellaGroup/frappe | frappe/docs/user/zh/guides/app-development/insert-a-document-via-api.md | 1320 | # Insert A Document Via Api
You can insert documents from a server-side script by using the `frappe.get_doc` method.
### Examples:
#### 1. Insert a ToDo
todo = frappe.get_doc({"doctype":"ToDo", "description": "test"})
todo.insert()
---
#### 2. Insert without the user's permissions being checked:
todo = frappe.get_doc({"doctype":"ToDo", "description": "test"})
todo.insert(ignore_permissions = True)
---
#### 3. Submit after inserting
todo = frappe.get_doc({"doctype":"ToDo", "description": "test"})
todo.insert(ignore_permissions=True)
todo.submit()
---
#### 4. Insert a document on saving of another document
class MyType(Document):
def on_update(self):
todo = frappe.get_doc({"doctype":"ToDo", "description": "test"})
todo.insert()
----
#### 5. Insert a document with child tables:
sales_order = frappe.get_doc({
"doctype": "Sales Order",
"company": "_Test Company",
"customer": "_Test Customer",
"delivery_date": "2013-02-23",
"sales_order_details": [
{
"item_code": "_Test Item Home Desktop 100",
"qty": 10.0,
"rate": 100.0,
"warehouse": "_Test Warehouse - _TC"
}
]
})
sales_order.insert()
| mit |
extend1994/cdnjs | ajax/libs/jQuery.mmenu/8.2.3/addons/navbars/_navbar.next.js | 1073 | import Mmenu from '../../core/oncanvas/mmenu.oncanvas';
import * as DOM from '../../core/_dom';
// DEPRECATED
// Will be removed in version 8.2
export default function (navbar) {
    // Append the "next" button to the navbar; its target is refreshed each
    // time a panel is opened.
    var next = DOM.create('a.mm-btn.mm-btn_next.mm-navbar__btn');
    navbar.append(next);
    // Update to opened panel
    var org;
    var _url, _txt;
    this.bind('openPanel:start', (panel) => {
        // The source anchor is the element inside the panel carrying the
        // configured "panelNext" classname (may be absent).
        org = panel.querySelector('.' + this.conf.classNames.navbars.panelNext);
        _url = org ? org.getAttribute('href') : '';
        _txt = org ? org.innerHTML : '';
        if (_url) {
            next.setAttribute('href', _url);
        }
        else {
            next.removeAttribute('href');
        }
        // Hide the button entirely when there is neither a link nor a label.
        next.classList[_url || _txt ? 'remove' : 'add']('mm-hidden');
        next.innerHTML = _txt;
    });
    // Add screenreader / aria support
    this.bind('openPanel:start:sr-aria', (panel) => {
        // Bug fix: Element.matches() takes a CSS selector, so testing for a
        // class needs a leading dot; the original 'mm-hidden' matched a
        // (nonexistent) tag name and always reported false.
        Mmenu.sr_aria(next, 'hidden', next.matches('.mm-hidden'));
        Mmenu.sr_aria(next, 'owns', (next.getAttribute('href') || '').slice(1));
    });
}
| mit |
paolodenti/openhab | bundles/io/org.openhab.io.caldav/src/main/java/org/openhab/io/caldav/internal/CalDavLoaderImpl.java | 23171 | /**
* Copyright (c) 2010-2016, openHAB.org and others.
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*/
package org.openhab.io.caldav.internal;
import static org.quartz.impl.matchers.GroupMatcher.jobGroupEquals;
import java.io.File;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.Date;
import java.util.Dictionary;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ScheduledExecutorService;
import org.apache.commons.lang3.BooleanUtils;
import org.joda.time.DateTimeZone;
import org.openhab.core.service.AbstractActiveService;
import org.openhab.io.caldav.CalDavEvent;
import org.openhab.io.caldav.CalDavLoader;
import org.openhab.io.caldav.CalDavQuery;
import org.openhab.io.caldav.EventNotifier;
import org.openhab.io.caldav.internal.EventStorage.CalendarRuntime;
import org.openhab.io.caldav.internal.EventStorage.EventContainer;
import org.openhab.io.caldav.internal.job.EventJob;
import org.openhab.io.caldav.internal.job.EventJob.EventTrigger;
import org.openhab.io.caldav.internal.job.EventReloaderJob;
import org.osgi.service.cm.ConfigurationException;
import org.osgi.service.cm.ManagedService;
import org.quartz.DateBuilder;
import org.quartz.DateBuilder.IntervalUnit;
import org.quartz.JobBuilder;
import org.quartz.JobDetail;
import org.quartz.JobKey;
import org.quartz.Scheduler;
import org.quartz.SchedulerException;
import org.quartz.SimpleScheduleBuilder;
import org.quartz.SimpleTrigger;
import org.quartz.Trigger;
import org.quartz.TriggerBuilder;
import org.quartz.TriggerKey;
import org.quartz.impl.StdSchedulerFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.github.sardine.Sardine;
import net.fortuna.ical4j.model.Calendar;
import net.fortuna.ical4j.util.CompatibilityHints;
/**
* Loads all events from the configured calDAV servers. This is done with an
* interval. All interesting events are hold in memory.
*
* @author Robert Delbrück
* @since 1.8.0
*
*/
public class CalDavLoaderImpl extends AbstractActiveService implements ManagedService, CalDavLoader {
    // Quartz job-group names partitioning reloader jobs from event start/end jobs.
    private static final String JOB_NAME_EVENT_RELOADER = "event-reloader";
    public static final String JOB_NAME_EVENT_START = "event-start";
    public static final String JOB_NAME_EVENT_END = "event-end";
    // Per-calendar configuration keys, supplied as "<calendar-id>:<key>".
    private static final String PROP_RELOAD_INTERVAL = "reloadInterval";
    private static final String PROP_PRELOAD_TIME = "preloadTime";
    private static final String PROP_HISTORIC_LOAD_TIME = "historicLoadTime";
    private static final String PROP_URL = "url";
    private static final String PROP_PASSWORD = "password";
    private static final String PROP_USERNAME = "username";
    // Global configuration key (no "<id>:" prefix).
    private static final String PROP_TIMEZONE = "timeZone";
    public static final String PROP_DISABLE_CERTIFICATE_VERIFICATION = "disableCertificateVerification";
    private static final String PROP_LAST_MODIFIED_TIMESTAMP_VALID = "lastModifiedFileTimeStampValid";
    // Default timezone applied to events; may be overridden via the timeZone property.
    public static DateTimeZone defaultTimeZone = DateTimeZone.getDefault();
    private static final Logger log = LoggerFactory.getLogger(CalDavLoaderImpl.class);
    // On-disk cache directory for downloaded calendar data.
    public static final String CACHE_PATH = "etc/caldav";
    // NOTE(review): not referenced in the visible part of the class.
    private ScheduledExecutorService execService;
    private List<EventNotifier> eventListenerList = new ArrayList<EventNotifier>();
    private Scheduler scheduler;
    // Singleton instance, published by the constructor.
    public static CalDavLoaderImpl instance;
    /**
     * Enforces that only one loader instance exists and publishes it through
     * the static {@link #instance} field.
     */
    public CalDavLoaderImpl() {
        if (instance != null) {
            throw new IllegalStateException("something went wrong, the loader service should be singleton");
        }
        instance = this;
    }
    /**
     * Starts the service: obtains the quartz scheduler, removes any jobs left
     * over from a previous run and kicks off the initial calendar loading.
     * Does nothing beyond the superclass start if the service is not yet
     * properly configured.
     */
    @Override
    public void start() {
        super.start();
        if (this.isProperlyConfigured()) {
            try {
                scheduler = new StdSchedulerFactory().getScheduler();
                this.removeAllJobs();
            } catch (SchedulerException e) {
                // without a scheduler the service cannot operate at all
                log.error("cannot get job-scheduler", e);
                throw new IllegalStateException("cannot get job-scheduler", e);
            }
            this.startLoading();
        }
    }
private void removeAllJobs() throws SchedulerException {
scheduler.deleteJobs(new ArrayList<JobKey>(scheduler.getJobKeys(jobGroupEquals(JOB_NAME_EVENT_RELOADER))));
scheduler.deleteJobs(new ArrayList<JobKey>(scheduler.getJobKeys(jobGroupEquals(JOB_NAME_EVENT_START))));
scheduler.deleteJobs(new ArrayList<JobKey>(scheduler.getJobKeys(jobGroupEquals(JOB_NAME_EVENT_END))));
}
    /**
     * Stops the service and removes all scheduled jobs.  Scheduler problems
     * are logged but deliberately do not abort the shutdown.
     */
    @Override
    public void shutdown() {
        super.shutdown();
        try {
            this.removeAllJobs();
        } catch (SchedulerException e) {
            log.error("cannot remove jobs: " + e.getMessage(), e);
        }
    }
    /**
     * ManagedService callback.  Parses the configuration dictionary, whose
     * keys are either the global "timeZone" option or per-calendar options of
     * the form "&lt;calendar-id&gt;:&lt;parameter&gt;", builds one
     * {@link CalDavConfig} per calendar id, validates the mandatory url /
     * username / password settings and initialises the per-calendar on-disk
     * event cache.
     *
     * @param config configuration supplied by ConfigAdmin; may be null (ignored)
     * @throws ConfigurationException on malformed keys, an invalid timezone
     *         or a missing mandatory value
     */
    @Override
    public void updated(Dictionary<String, ?> config) throws ConfigurationException {
        if (config != null) {
            CompatibilityHints.setHintEnabled(CompatibilityHints.KEY_RELAXED_PARSING, true);
            // just temporary
            Map<String, CalDavConfig> configMap = new HashMap<String, CalDavConfig>();
            Enumeration<String> iter = config.keys();
            while (iter.hasMoreElements()) {
                String key = iter.nextElement();
                log.trace("configuration parameter: " + key);
                if (key.equals("service.pid")) {
                    continue;
                } else if (key.equals(PROP_TIMEZONE)) {
                    // global option: override the default timezone for all calendars
                    log.debug("overriding default timezone {} with {}", defaultTimeZone, config.get(key));
                    defaultTimeZone = DateTimeZone.forID(config.get(key) + "");
                    if (defaultTimeZone == null) {
                        throw new ConfigurationException(PROP_TIMEZONE, "invalid timezone value: " + config.get(key));
                    }
                    log.debug("found timeZone: {}", defaultTimeZone);
                    continue;
                }
                // per-calendar options use the form "<calendar-id>:<parameter>"
                String[] keys = key.split(":");
                if (keys.length != 2) {
                    throw new ConfigurationException(key, "unknown identifier");
                }
                String id = keys[0];
                String paramKey = keys[1];
                // lazily create the config object for this calendar id
                CalDavConfig calDavConfig = configMap.get(id);
                if (calDavConfig == null) {
                    calDavConfig = new CalDavConfig();
                    configMap.put(id, calDavConfig);
                }
                String value = config.get(key) + "";
                calDavConfig.setKey(id);
                if (paramKey.equals(PROP_USERNAME)) {
                    calDavConfig.setUsername(value);
                } else if (paramKey.equals(PROP_PASSWORD)) {
                    calDavConfig.setPassword(value);
                } else if (paramKey.equals(PROP_URL)) {
                    calDavConfig.setUrl(value);
                } else if (paramKey.equals(PROP_RELOAD_INTERVAL)) {
                    calDavConfig.setReloadMinutes(Integer.parseInt(value));
                } else if (paramKey.equals(PROP_PRELOAD_TIME)) {
                    calDavConfig.setPreloadMinutes(Integer.parseInt(value));
                } else if (paramKey.equals(PROP_HISTORIC_LOAD_TIME)) {
                    calDavConfig.setHistoricLoadMinutes(Integer.parseInt(value));
                } else if (paramKey.equals(PROP_LAST_MODIFIED_TIMESTAMP_VALID)) {
                    calDavConfig.setLastModifiedFileTimeStampValid(BooleanUtils.toBoolean(value));
                } else if (paramKey.equals(PROP_DISABLE_CERTIFICATE_VERIFICATION)) {
                    calDavConfig.setDisableCertificateVerification(BooleanUtils.toBoolean(value));
                }
            }
            // verify if all required parameters are set
            for (String id : configMap.keySet()) {
                if (configMap.get(id).getUrl() == null) {
                    throw new ConfigurationException(PROP_URL, PROP_URL + " must be set");
                }
                if (configMap.get(id).getUsername() == null) {
                    throw new ConfigurationException(PROP_USERNAME, PROP_USERNAME + " must be set");
                }
                if (configMap.get(id).getPassword() == null) {
                    throw new ConfigurationException(PROP_PASSWORD, PROP_PASSWORD + " must be set");
                }
                log.trace("config for id '{}': {}", id, configMap.get(id));
            }
            // initialize event cache
            for (CalDavConfig calDavConfig : configMap.values()) {
                final CalendarRuntime eventRuntime = new CalendarRuntime();
                eventRuntime.setConfig(calDavConfig);
                File cachePath = Util.getCachePath(calDavConfig.getKey());
                if (!cachePath.exists() && !cachePath.mkdirs()) {
                    // calendar is skipped when its cache dir cannot be created
                    log.error("cannot create directory ({}) for calendar caching (missing rights?)",
                            cachePath.getAbsoluteFile());
                    continue;
                }
                EventStorage.getInstance().getEventCache().put(calDavConfig.getKey(), eventRuntime);
            }
            setProperlyConfigured(true);
        }
    }
    /** @return the live list of registered event listeners (not a copy) */
    public List<EventNotifier> getEventListenerList() {
        return eventListenerList;
    }

    /** @return the quartz scheduler used for event and reloader jobs */
    public Scheduler getScheduler() {
        return scheduler;
    }
    /**
     * Registers a listener and immediately replays every already-loaded event
     * to it, so late subscribers do not miss previously loaded events.
     *
     * @param notifier listener to add
     */
    @Override
    public void addListener(EventNotifier notifier) {
        this.eventListenerList.add(notifier);
        // notify for missing changes
        for (CalendarRuntime calendarRuntime : EventStorage.getInstance().getEventCache().values()) {
            for (EventContainer eventContainer : calendarRuntime.getEventMap().values()) {
                for (CalDavEvent event : eventContainer.getEventList()) {
                    notifier.eventLoaded(event);
                }
            }
        }
    }
    /**
     * Unregisters a previously added listener; no-op if it was never added.
     *
     * @param notifier listener to remove
     */
    @Override
    public void removeListener(EventNotifier notifier) {
        this.eventListenerList.remove(notifier);
    }
/**
 * Adds (or updates) an event container in the per-calendar event cache and
 * notifies all registered listeners. A container replaces an existing one
 * only when it is newer ({@code lastChanged}); in that case the old quartz
 * jobs are cancelled first. If {@code createTimer} is set, start/end jobs
 * are scheduled for all events which have not yet ended.
 *
 * @param eventContainer container holding the events to store
 * @param createTimer whether quartz start/end jobs should be (re-)scheduled
 */
public synchronized void addEventToMap(EventContainer eventContainer, boolean createTimer) {
    CalendarRuntime calendarRuntime = EventStorage.getInstance().getEventCache()
            .get(eventContainer.getCalendarId());
    ConcurrentHashMap<String, EventContainer> eventContainerMap = calendarRuntime.getEventMap();
    if (eventContainerMap.containsKey(eventContainer.getEventId())) {
        EventContainer eventContainerOld = eventContainerMap.get(eventContainer.getEventId());
        // event is already in map
        if (eventContainer.getLastChanged().isAfter(eventContainerOld.getLastChanged())) {
            log.debug("event is already in event map and newer -> delete the old one, reschedule timer");
            // cancel old jobs
            for (String timerKey : eventContainerOld.getTimerMap()) {
                try {
                    this.scheduler.deleteJob(JobKey.jobKey(timerKey));
                } catch (SchedulerException e) {
                    log.error("cannot cancel event with job-id: " + timerKey, e);
                }
            }
            eventContainerOld.getTimerMap().clear();
            // override event
            eventContainerMap.put(eventContainer.getEventId(), eventContainer);
            // tell listeners the old events are gone ...
            for (EventNotifier notifier : eventListenerList) {
                for (CalDavEvent event : eventContainerOld.getEventList()) {
                    log.trace("notify listener... {}", notifier);
                    try {
                        notifier.eventRemoved(event);
                    } catch (Exception e) {
                        // a broken listener must not keep the others from being notified
                        log.error("error while invoking listener", e);
                    }
                }
            }
            // ... and that the updated ones arrived
            for (EventNotifier notifier : eventListenerList) {
                for (CalDavEvent event : eventContainer.getEventList()) {
                    log.trace("notify listener... {}", notifier);
                    try {
                        notifier.eventLoaded(event);
                    } catch (Exception e) {
                        log.error("error while invoking listener", e);
                    }
                }
            }
            if (createTimer) {
                int index = 0;
                for (CalDavEvent event : eventContainer.getEventList()) {
                    if (event.getEnd().isAfterNow()) {
                        try {
                            createJob(eventContainer, event, index);
                        } catch (SchedulerException e) {
                            // FIX: the placeholder/argument count was mismatched
                            // ("...'{}': " with two args), so the detail message was
                            // dropped; log the full exception instead.
                            log.error("cannot create jobs for event '{}'", event.getShortName(), e);
                        }
                    }
                    index++;
                }
            }
        } else {
            // event is already in map and not updated, ignoring
        }
    } else {
        // event is new
        eventContainerMap.put(eventContainer.getEventId(), eventContainer);
        log.trace("listeners for events: {}", eventListenerList.size());
        for (EventNotifier notifier : eventListenerList) {
            for (CalDavEvent event : eventContainer.getEventList()) {
                log.trace("notify listener... {}", notifier);
                try {
                    notifier.eventLoaded(event);
                } catch (Exception e) {
                    log.error("error while invoking listener", e);
                }
            }
        }
        if (createTimer) {
            int index = 0;
            for (CalDavEvent event : eventContainer.getEventList()) {
                if (event.getEnd().isAfterNow()) {
                    try {
                        createJob(eventContainer, event, index);
                    } catch (SchedulerException e) {
                        // FIX: keep the stack trace instead of only the event name
                        log.error("cannot create jobs for event: " + event.getShortName(), e);
                    }
                }
                index++;
            }
        }
    }
}
/**
 * Schedules the quartz jobs which fire at the start and at the end of the
 * given event occurrence. Any previously scheduled job/trigger pair for the
 * same occurrence is removed first, so rescheduling is safe.
 *
 * @param eventContainer container the event belongs to (records trigger names)
 * @param event the event occurrence to schedule
 * @param index recurrence index of the occurrence inside the container
 * @throws SchedulerException if quartz refuses the job
 */
private synchronized void createJob(final EventContainer eventContainer, final CalDavEvent event, final int index)
        throws SchedulerException {
    // the start/end handling only differs in group name, trigger type and date
    scheduleEventJob(eventContainer, event, index, JOB_NAME_EVENT_START, EventTrigger.BEGIN,
            event.getStart().toDate());
    scheduleEventJob(eventContainer, event, index, JOB_NAME_EVENT_END, EventTrigger.END,
            event.getEnd().toDate());
}

/**
 * Removes a possibly stale job/trigger pair for this occurrence and schedules
 * a fresh one-shot {@link EventJob} firing at {@code fireDate}.
 */
private void scheduleEventJob(final EventContainer eventContainer, final CalDavEvent event, final int index,
        final String group, final EventTrigger eventTrigger, final Date fireDate) throws SchedulerException {
    final String triggerName = group + "-" + event.getShortName() + "-" + index;
    // clean up leftovers from an earlier scheduling of the same occurrence
    final boolean triggerDeleted = this.scheduler.unscheduleJob(TriggerKey.triggerKey(triggerName, group));
    final boolean jobDeleted = this.scheduler.deleteJob(JobKey.jobKey(triggerName, group));
    log.trace("old job ({}) deleted? {}/{}", triggerName, jobDeleted, triggerDeleted);

    JobDetail job = JobBuilder.newJob().ofType(EventJob.class)
            .usingJobData(EventJob.KEY_CONFIG, eventContainer.getCalendarId())
            .usingJobData(EventJob.KEY_EVENT, eventContainer.getEventId())
            .usingJobData(EventJob.KEY_REC_INDEX, index)
            .usingJobData(EventJob.KEY_EVENT_TRIGGER, eventTrigger.name()).storeDurably(false)
            .withIdentity(triggerName, group).build();
    Trigger trigger = TriggerBuilder.newTrigger().withIdentity(triggerName, group)
            .startAt(fireDate).build();
    this.scheduler.scheduleJob(job, trigger);
    // remember the trigger so it can be cancelled when the event changes
    eventContainer.getTimerMap().add(triggerName);
    log.debug("timer ({}) scheduled for event '{}' @ {}", group, event.getShortName(), fireDate);
}
/**
 * Schedules a recurring quartz {@code EventReloaderJob} for every configured
 * calendar. The first runs are staggered 10 seconds apart so the calendars
 * are not all fetched at once; afterwards each job repeats every
 * {@code reloadMinutes} minutes.
 */
public void startLoading() {
    // NOTE(review): execService is only read here and never assigned in the
    // visible code; presumably it guards against a second invocation -- verify.
    if (execService != null) {
        return;
    }
    log.trace("starting execution...");

    int i = 0;
    for (final CalendarRuntime eventRuntime : EventStorage.getInstance().getEventCache().values()) {
        try {
            // durable job so it survives without an attached trigger
            JobDetail job = JobBuilder.newJob().ofType(EventReloaderJob.class)
                    .usingJobData(EventReloaderJob.KEY_CONFIG, eventRuntime.getConfig().getKey())
                    .withIdentity(eventRuntime.getConfig().getKey(), JOB_NAME_EVENT_RELOADER).storeDurably()
                    .build();
            this.scheduler.addJob(job, false);
            SimpleTrigger jobTrigger = TriggerBuilder.newTrigger().forJob(job)
                    .withIdentity(eventRuntime.getConfig().getKey(), JOB_NAME_EVENT_RELOADER)
                    .startAt(DateBuilder.futureDate(10 + i, IntervalUnit.SECOND)).withSchedule(SimpleScheduleBuilder
                            .repeatMinutelyForever(eventRuntime.getConfig().getReloadMinutes()))
                    .build();
            this.scheduler.scheduleJob(jobTrigger);
            log.info("reload job scheduled for: {}", eventRuntime.getConfig().getKey());
        } catch (SchedulerException e) {
            log.error("cannot schedule calendar-reloader", e);
        }
        // next event 10 seconds later
        i += 10;
    }
}
@Override
protected void execute() {
    // intentionally empty: all work is driven by quartz jobs, not by the
    // periodic execute() cycle of the base service
}
/** @return refresh interval of the base service in milliseconds */
@Override
protected long getRefreshInterval() {
    return 1000;
}
/** @return human readable service name (used for logging/thread naming) */
@Override
protected String getName() {
    return "CalDav Loader";
}
/**
 * Creates or updates the given event on the remote CalDAV server (via
 * Sardine) and mirrors it into the local event cache. An existing ics file
 * for the same filename is deleted first.
 *
 * @param calDavEvent the event to persist
 */
@Override
public void addEvent(CalDavEvent calDavEvent) {
    final CalendarRuntime calendarRuntime = EventStorage.getInstance().getEventCache()
            .get(calDavEvent.getCalendarId());
    if (calendarRuntime == null) {
        // FIX: guard against an unknown calendar id (previously dereferenced null)
        log.error("cannot find calendar runtime for calendar id: {}", calDavEvent.getCalendarId());
        return;
    }
    CalDavConfig config = calendarRuntime.getConfig();
    if (config == null) {
        // FIX: abort here -- the config is required below; previously the error
        // was only logged and config.getUrl() then threw a NullPointerException
        log.error("cannot find config for calendar id: {}", calDavEvent.getCalendarId());
        return;
    }
    Sardine sardine = Util.getConnection(config);
    Calendar calendar = Util.createCalendar(calDavEvent, defaultTimeZone);
    try {
        final String fullIcsFile = config.getUrl() + "/" + calDavEvent.getFilename() + ".ics";
        if (calendarRuntime.getEventContainerByFilename(calDavEvent.getFilename()) != null) {
            log.debug("event will be updated: {}", fullIcsFile);
            try {
                sardine.delete(fullIcsFile);
            } catch (IOException e) {
                log.error("cannot remove old ics file: {}", fullIcsFile);
            }
        } else {
            log.debug("event is new: {}", fullIcsFile);
        }
        sardine.put(fullIcsFile, calendar.toString().getBytes("UTF-8"));
        EventContainer eventContainer = new EventContainer(calDavEvent.getCalendarId());
        eventContainer.setEventId(calDavEvent.getId());
        eventContainer.setFilename(Util.getFilename(calDavEvent.getFilename()));
        eventContainer.getEventList().add(calDavEvent);
        eventContainer.setLastChanged(calDavEvent.getLastChanged());
        this.addEventToMap(eventContainer, false);
    } catch (IOException e) {
        // UnsupportedEncodingException is an IOException; one handler suffices
        log.error("cannot write event", e);
    }
}
/**
 * Returns all cached events matching the given query. An event matches when
 * it overlaps the [from, to] range (missing bounds are open) and belongs to
 * one of the requested calendars. The result is optionally sorted by start
 * date.
 *
 * @param query filter (calendar ids, time range, sort order)
 * @return matching events; empty list when nothing matches
 */
@Override
public List<CalDavEvent> getEvents(final CalDavQuery query) {
    // FIX: typo in trace message ("quering")
    log.trace("querying events for filter: {}", query);
    final ArrayList<CalDavEvent> eventList = new ArrayList<CalDavEvent>();
    if (query.getCalendarIds() != null) {
        for (String calendarId : query.getCalendarIds()) {
            final CalendarRuntime eventRuntime = EventStorage.getInstance().getEventCache().get(calendarId);
            if (eventRuntime == null) {
                log.debug("calendar id {} not found", calendarId);
                continue;
            }
            for (EventContainer eventContainer : eventRuntime.getEventMap().values()) {
                for (CalDavEvent calDavEvent : eventContainer.getEventList()) {
                    // skip events which end before the requested range starts
                    if (query.getFrom() != null && calDavEvent.getEnd().isBefore(query.getFrom())) {
                        continue;
                    }
                    // skip events which start after the requested range ends
                    if (query.getTo() != null && calDavEvent.getStart().isAfter(query.getTo())) {
                        continue;
                    }
                    eventList.add(calDavEvent);
                }
            }
        }
    }
    if (query.getSort() != null) {
        Collections.sort(eventList, new Comparator<CalDavEvent>() {
            @Override
            public int compare(CalDavEvent arg0, CalDavEvent arg1) {
                if (query.getSort().equals(CalDavQuery.Sort.ASCENDING)) {
                    return (arg0.getStart().compareTo(arg1.getStart()));
                } else if (query.getSort().equals(CalDavQuery.Sort.DESCENDING)) {
                    return (arg1.getStart().compareTo(arg0.getStart()));
                } else {
                    // unknown sort mode: keep the original order
                    return 0;
                }
            }
        });
    }
    log.debug("return event list for {} with {} entries", query, eventList.size());
    return eventList;
}
}
| epl-1.0 |
mpranivong/golf-phpnuke | wp/wp-content/plugins/wordcents/src/io/apiCurlIO.php | 9202 | <?php
/*
* Copyright 2010 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Curl based implementation of apiIO.
*
* @author Chris Chabot <[email protected]>
* @author Chirag Shah <[email protected]>
*/
require_once 'apiCacheParser.php';
class apiCurlIO implements apiIO {
  // Status line a proxy emits once a CONNECT tunnel is up; it must be
  // stripped before the real HTTP response can be parsed.
  const CONNECTION_ESTABLISHED = "HTTP/1.0 200 Connection established\r\n\r\n";
  const FORM_URLENCODED = 'application/x-www-form-urlencoded';

  // HTTP methods which may carry an entity body (keys only; values unused).
  private static $ENTITY_HTTP_METHODS = array("POST" => null, "PUT" => null);

  // Hop-by-hop header names (RFC 2616 section 13.5.1), lowercased.
  private static $HOP_BY_HOP = array(
      'connection', 'keep-alive', 'proxy-authenticate', 'proxy-authorization',
      'te', 'trailers', 'transfer-encoding', 'upgrade');

  // Baseline curl options applied to every request.
  private static $DEFAULT_CURL_PARAMS = array (
      CURLOPT_RETURNTRANSFER => true,
      CURLOPT_FOLLOWLOCATION => 0,
      CURLOPT_FAILONERROR => false,
      CURLOPT_SSL_VERIFYPEER => true,
      CURLOPT_HEADER => true,
      CURLOPT_VERBOSE => false,
  );
/**
 * Perform an authenticated / signed apiHttpRequest.
 * The configured apiAuth implementation may rewrite the request in whatever
 * way fits its auth mechanism; the signed request is then executed via
 * makeRequest().
 *
 * @param apiHttpRequest $request
 * @return apiHttpRequest The resulting HTTP response including the
 * responseHttpCode, responseHeaders and responseBody.
 */
public function authenticatedRequest(apiHttpRequest $request) {
  $signedRequest = apiClient::$auth->sign($request);
  return $this->makeRequest($signedRequest);
}
/**
 * Execute a apiHttpRequest
 *
 * @param apiHttpRequest $request the http request to be executed
 * @return apiHttpRequest http request with the response http code, response
 * headers and response body filled in
 * @throws apiIOException on curl or IO error
 */
public function makeRequest(apiHttpRequest $request) {
  // First, check to see if we have a valid cached version.
  $cached = $this->getCachedRequest($request);
  if ($cached !== false) {
    if (apiCacheParser::mustRevalidate($cached)) {
      // Stale entry: make the request conditional so the origin can answer
      // with 304 Not Modified instead of resending the body.
      $addHeaders = array();
      if ($cached->getResponseHeader('etag')) {
        // [13.3.4] If an entity tag has been provided by the origin server,
        // we must use that entity tag in any cache-conditional request.
        $addHeaders['If-None-Match'] = $cached->getResponseHeader('etag');
      } elseif ($cached->getResponseHeader('date')) {
        $addHeaders['If-Modified-Since'] = $cached->getResponseHeader('date');
      }
      $request->setRequestHeaders($addHeaders);
    } else {
      // No need to revalidate the request, return it directly
      return $cached;
    }
  }

  // Normalize body/content-type/content-length for entity-bearing methods.
  if (array_key_exists($request->getRequestMethod(),
      self::$ENTITY_HTTP_METHODS)) {
    $request = $this->processEntityRequest($request);
  }

  $ch = curl_init();
  curl_setopt_array($ch, self::$DEFAULT_CURL_PARAMS);
  curl_setopt($ch, CURLOPT_URL, $request->getUrl());
  if ($request->getPostBody()) {
    curl_setopt($ch, CURLOPT_POSTFIELDS, $request->getPostBody());
  }

  $requestHeaders = $request->getRequestHeaders();
  if ($requestHeaders && is_array($requestHeaders)) {
    // curl expects headers as a flat list of "Name: value" strings.
    $parsed = array();
    foreach ($requestHeaders as $k => $v) {
      $parsed[] = "$k: $v";
    }
    curl_setopt($ch, CURLOPT_HTTPHEADER, $parsed);
  }

  curl_setopt($ch, CURLOPT_CUSTOMREQUEST, $request->getRequestMethod());
  curl_setopt($ch, CURLOPT_USERAGENT, $request->getUserAgent());
  $respData = curl_exec($ch);

  // Retry if certificates are missing.
  if (curl_errno($ch) == CURLE_SSL_CACERT) {
    error_log('SSL certificate problem, verify that the CA cert is OK.'
        . ' Retrying with the CA cert bundle from google-api-php-client.');
    curl_setopt($ch, CURLOPT_CAINFO, dirname(__FILE__) . '/cacerts.pem');
    $respData = curl_exec($ch);
  }

  $respHeaderSize = curl_getinfo($ch, CURLINFO_HEADER_SIZE);
  $respHttpCode = (int) curl_getinfo($ch, CURLINFO_HTTP_CODE);
  $curlErrorNum = curl_errno($ch);
  $curlError = curl_error($ch);
  curl_close($ch);
  if ($curlErrorNum != CURLE_OK) {
    throw new apiIOException("HTTP Error: ($respHttpCode) $curlError");
  }

  // Parse out the raw response into usable bits
  list($responseHeaders, $responseBody) =
      self::parseHttpResponse($respData, $respHeaderSize);

  if ($respHttpCode == 304 && $cached) {
    // If the server responded NOT_MODIFIED, return the cached request.
    if (isset($responseHeaders['connection'])) {
      // NOTE(review): despite the variable name, this copies the headers
      // listed in $HOP_BY_HOP (plus those named by the Connection header)
      // into $endToEnd -- verify intent against RFC 2616 13.5.3.
      $hopByHop = array_merge(
        self::$HOP_BY_HOP,
        explode(',', $responseHeaders['connection'])
      );

      $endToEnd = array();
      foreach($hopByHop as $key) {
        if (isset($responseHeaders[$key])) {
          $endToEnd[$key] = $responseHeaders[$key];
        }
      }
      $cached->setResponseHeaders($endToEnd);
    }
    return $cached;
  }

  // Fill in the apiHttpRequest with the response values
  $request->setResponseHttpCode($respHttpCode);
  $request->setResponseHeaders($responseHeaders);
  $request->setResponseBody($responseBody);
  // Store the request in cache (the function checks to see if the request
  // can actually be cached)
  $this->setCachedRequest($request);
  // And finally return it
  return $request;
}
/**
 * @visible for testing.
 * Cache the response to an HTTP request if it is cacheable.
 * @param apiHttpRequest $request
 * @return bool Returns true if the insertion was successful.
 * Otherwise, return false.
 */
public function setCachedRequest(apiHttpRequest $request) {
  // Uncacheable responses are simply skipped.
  if (!apiCacheParser::isResponseCacheable($request)) {
    return false;
  }
  apiClient::$cache->set($request->getCacheKey(), $request);
  return true;
}
/**
 * @visible for testing.
 * Look up a previously cached response for this request.
 * @param apiHttpRequest $request
 * @return apiHttpRequest|bool Returns the cached object or
 * false if the request is not cacheable or nothing is stored.
 */
public function getCachedRequest(apiHttpRequest $request) {
  if (false == apiCacheParser::isRequestCacheable($request)) {
    // FIX: this used to be the bare expression "false;" (a no-op), so
    // uncacheable requests still fell through to the cache lookup.
    return false;
  }
  return apiClient::$cache->get($request->getCacheKey());
}
/**
 * Split a raw curl response into its header array and body.
 *
 * @param string $respData raw response data (headers + body)
 * @param int $headerSize header length as reported by curl (may be 0)
 * @return array array($responseHeaders, $responseBody)
 */
public static function parseHttpResponse($respData, $headerSize) {
  // Drop the proxy CONNECT preamble, if present, before parsing.
  if (stripos($respData, self::CONNECTION_ESTABLISHED) !== false) {
    $respData = str_ireplace(self::CONNECTION_ESTABLISHED, '', $respData);
  }
  if ($headerSize) {
    $rawHeaders = substr($respData, 0, $headerSize);
    $responseBody = substr($respData, $headerSize);
  } else {
    // No header size available: split on the first blank line instead.
    list($rawHeaders, $responseBody) = explode("\r\n\r\n", $respData, 2);
  }
  return array(self::parseResponseHeaders($rawHeaders), $responseBody);
}
/**
 * Parse a raw HTTP header block into a lowercased associative array.
 * Repeated headers are concatenated with a newline between the values.
 *
 * @param string $rawHeaders
 * @return array map of lowercased header name => value
 */
public static function parseResponseHeaders($rawHeaders) {
  $headers = array();
  foreach (explode("\r\n", $rawHeaders) as $line) {
    // skip blank lines and lines without a name/value separator
    if (!$line || strpos($line, ':') === false) {
      continue;
    }
    list($name, $value) = explode(': ', $line, 2);
    $name = strtolower($name);
    $headers[$name] = isset($headers[$name])
        ? $headers[$name] . "\n" . $value
        : $value;
  }
  return $headers;
}
/**
 * @visible for testing
 * Process an http request that contains an enclosed entity.
 * Ensures the content-type, payload encoding and content-length header are
 * consistent before the request is handed to curl.
 * @param apiHttpRequest $request
 * @return apiHttpRequest Processed request with the enclosed entity.
 */
public function processEntityRequest(apiHttpRequest $request) {
  $postBody = $request->getPostBody();
  $contentType = $request->getRequestHeader("content-type");

  // Set the default content-type as application/x-www-form-urlencoded.
  if (false == $contentType) {
    $contentType = self::FORM_URLENCODED;
    $request->setRequestHeaders(array('content-type' => $contentType));
  }

  // Force the payload to match the content-type asserted in the header.
  if ($contentType == self::FORM_URLENCODED && is_array($postBody)) {
    $postBody = http_build_query($postBody, '', '&');
    $request->setPostBody($postBody);
  }

  // Make sure the content-length header is set.
  if (!$postBody || is_string($postBody)) {
    // FIX: only call strlen() on an actual string; an absent/empty body has
    // length 0 (strlen(null) is deprecated on PHP 8.1+).
    $postsLength = is_string($postBody) ? strlen($postBody) : 0;
    $request->setRequestHeaders(array('content-length' => $postsLength));
  }

  return $request;
}
} | gpl-2.0 |
apopple/linux | drivers/net/virtio_net.c | 86010 | // SPDX-License-Identifier: GPL-2.0-or-later
/* A network driver using virtio.
*
* Copyright 2007 Rusty Russell <[email protected]> IBM Corporation
*/
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/average.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <net/route.h>
#include <net/xdp.h>
#include <net/net_failover.h>
/* Module parameters, tunable via /sys/module/virtio_net/parameters/. */
static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);

static bool csum = true, gso = true, napi_tx = true;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);
module_param(napi_tx, bool, 0644);

/* FIXME: MTU in config. */
#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN 128

#define VIRTNET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

/* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
#define VIRTIO_XDP_HEADROOM 256

/* Separating two types of XDP xmit */
#define VIRTIO_XDP_TX BIT(0)
#define VIRTIO_XDP_REDIR BIT(1)

/* Low pointer bit used to tag xdp_frame pointers queued on the tx vq. */
#define VIRTIO_XDP_FLAG BIT(0)

/* RX packet size EWMA. The average packet size is used to determine the packet
 * buffer size when refilling RX rings. As the entire RX ring may be refilled
 * at once, the weight is chosen so that the EWMA will be insensitive to short-
 * term, transient changes in packet size.
 */
DECLARE_EWMA(pkt_len, 0, 64)

#define VIRTNET_DRIVER_VERSION "1.0.0"

/* Guest offload feature bits that can be toggled at runtime. */
static const unsigned long guest_offloads[] = {
	VIRTIO_NET_F_GUEST_TSO4,
	VIRTIO_NET_F_GUEST_TSO6,
	VIRTIO_NET_F_GUEST_ECN,
	VIRTIO_NET_F_GUEST_UFO,
	VIRTIO_NET_F_GUEST_CSUM
};

#define GUEST_OFFLOAD_LRO_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
				(1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
				(1ULL << VIRTIO_NET_F_GUEST_ECN) | \
				(1ULL << VIRTIO_NET_F_GUEST_UFO))

/* Maps an ethtool stat name to its offset inside a stats struct. */
struct virtnet_stat_desc {
	char desc[ETH_GSTRING_LEN];
	size_t offset;
};

/* Per-send-queue counters, guarded by syncp for 64-bit reads on 32-bit. */
struct virtnet_sq_stats {
	struct u64_stats_sync syncp;
	u64 packets;
	u64 bytes;
	u64 xdp_tx;
	u64 xdp_tx_drops;
	u64 kicks;
};

/* Per-receive-queue counters, guarded by syncp. */
struct virtnet_rq_stats {
	struct u64_stats_sync syncp;
	u64 packets;
	u64 bytes;
	u64 drops;
	u64 xdp_packets;
	u64 xdp_tx;
	u64 xdp_redirects;
	u64 xdp_drops;
	u64 kicks;
};

#define VIRTNET_SQ_STAT(m) offsetof(struct virtnet_sq_stats, m)
#define VIRTNET_RQ_STAT(m) offsetof(struct virtnet_rq_stats, m)

/* Descriptors used to expose the counters above through ethtool -S. */
static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
	{ "packets", VIRTNET_SQ_STAT(packets) },
	{ "bytes", VIRTNET_SQ_STAT(bytes) },
	{ "xdp_tx", VIRTNET_SQ_STAT(xdp_tx) },
	{ "xdp_tx_drops", VIRTNET_SQ_STAT(xdp_tx_drops) },
	{ "kicks", VIRTNET_SQ_STAT(kicks) },
};

static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
	{ "packets", VIRTNET_RQ_STAT(packets) },
	{ "bytes", VIRTNET_RQ_STAT(bytes) },
	{ "drops", VIRTNET_RQ_STAT(drops) },
	{ "xdp_packets", VIRTNET_RQ_STAT(xdp_packets) },
	{ "xdp_tx", VIRTNET_RQ_STAT(xdp_tx) },
	{ "xdp_redirects", VIRTNET_RQ_STAT(xdp_redirects) },
	{ "xdp_drops", VIRTNET_RQ_STAT(xdp_drops) },
	{ "kicks", VIRTNET_RQ_STAT(kicks) },
};

#define VIRTNET_SQ_STATS_LEN ARRAY_SIZE(virtnet_sq_stats_desc)
#define VIRTNET_RQ_STATS_LEN ARRAY_SIZE(virtnet_rq_stats_desc)
/* Internal representation of a send virtqueue */
struct send_queue {
	/* Virtqueue associated with this send _queue */
	struct virtqueue *vq;

	/* TX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Name of the send queue: output.$index */
	char name[40];

	/* Per-queue tx statistics. */
	struct virtnet_sq_stats stats;

	/* Tx NAPI context. */
	struct napi_struct napi;
};
/* Internal representation of a receive virtqueue */
struct receive_queue {
	/* Virtqueue associated with this receive_queue */
	struct virtqueue *vq;

	struct napi_struct napi;

	/* Attached XDP program, RCU-protected. */
	struct bpf_prog __rcu *xdp_prog;

	/* Per-queue rx statistics. */
	struct virtnet_rq_stats stats;

	/* Chain pages by the private ptr. */
	struct page *pages;

	/* Average packet length for mergeable receive buffers. */
	struct ewma_pkt_len mrg_avg_pkt_len;

	/* Page frag for packet buffer allocation. */
	struct page_frag alloc_frag;

	/* RX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Min single buffer size for mergeable buffers case. */
	unsigned int min_buf_len;

	/* Name of this receive queue: input.$index */
	char name[40];

	struct xdp_rxq_info xdp_rxq;
};
/* Control VQ buffers: protected by the rtnl lock */
struct control_buf {
	struct virtio_net_ctrl_hdr hdr;
	virtio_net_ctrl_ack status;
	struct virtio_net_ctrl_mq mq;
	u8 promisc;
	u8 allmulti;
	__virtio16 vid;
	__virtio64 offloads;
};
/* Per-device private state. */
struct virtnet_info {
	struct virtio_device *vdev;
	struct virtqueue *cvq;
	struct net_device *dev;
	/* Arrays of per-queue state; sq[i]/rq[i] form queue pair i. */
	struct send_queue *sq;
	struct receive_queue *rq;
	unsigned int status;

	/* Max # of queue pairs supported by the device */
	u16 max_queue_pairs;

	/* # of queue pairs currently used by the driver */
	u16 curr_queue_pairs;

	/* # of XDP queue pairs currently used by the driver */
	u16 xdp_queue_pairs;

	/* I like... big packets and I cannot lie! */
	bool big_packets;

	/* Host will merge rx buffers for big packets (shake it! shake it!) */
	bool mergeable_rx_bufs;

	/* Has control virtqueue */
	bool has_cvq;

	/* Host can handle any s/g split between our header and packet data */
	bool any_header_sg;

	/* Packet virtio header size */
	u8 hdr_len;

	/* Work struct for refilling if we run low on memory. */
	struct delayed_work refill;

	/* Work struct for config space updates */
	struct work_struct config_work;

	/* Does the affinity hint is set for virtqueues? */
	bool affinity_hint_set;

	/* CPU hotplug instances for online & dead */
	struct hlist_node node;
	struct hlist_node node_dead;

	struct control_buf *ctrl;

	/* Ethtool settings */
	u8 duplex;
	u32 speed;

	unsigned long guest_offloads;
	unsigned long guest_offloads_capable;

	/* failover when STANDBY feature enabled */
	struct failover *failover;
};
struct padded_vnet_hdr {
	struct virtio_net_hdr_mrg_rxbuf hdr;
	/*
	 * hdr is in a separate sg buffer, and data sg buffer shares same page
	 * with this header sg. This padding makes next sg 16 byte aligned
	 * after the header.
	 */
	char padding[4];
};
/* Buffers queued on the tx virtqueue are either sk_buffs or xdp_frames;
 * the low pointer bit (VIRTIO_XDP_FLAG) tags which one it is.
 */
static bool is_xdp_frame(void *ptr)
{
	return (unsigned long)ptr & VIRTIO_XDP_FLAG;
}

/* Tag an xdp_frame pointer before handing it to the virtqueue. */
static void *xdp_to_ptr(struct xdp_frame *ptr)
{
	return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
}

/* Strip the tag again when the buffer comes back from the virtqueue. */
static struct xdp_frame *ptr_to_xdp(void *ptr)
{
	return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
}
/* Converting between virtqueue no. and kernel tx/rx queue no.
 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
 */
/* tx virtqueues sit at odd indexes; this inverts txq2vq(). */
static int vq2txq(struct virtqueue *vq)
{
	return (vq->index - 1) / 2;
}
/* Map a kernel tx queue number to its virtqueue index (txN -> 2N+1). */
static int txq2vq(int txq)
{
	return 1 + 2 * txq;
}
/* rx virtqueues sit at even indexes; this inverts rxq2vq(). */
static int vq2rxq(struct virtqueue *vq)
{
	return vq->index / 2;
}
/* Map a kernel rx queue number to its virtqueue index (rxN -> 2N). */
static int rxq2vq(int rxq)
{
	return 2 * rxq;
}
/* The virtio-net header for a tx skb is stashed in the skb control buffer. */
static inline struct virtio_net_hdr_mrg_rxbuf *skb_vnet_hdr(struct sk_buff *skb)
{
	return (struct virtio_net_hdr_mrg_rxbuf *)skb->cb;
}
/*
 * private is used to chain pages for big packets, put the whole
 * most recent used list in the beginning for reuse
 */
static void give_pages(struct receive_queue *rq, struct page *page)
{
	struct page *end;

	/* Find end of list, sew whole thing into vi->rq.pages. */
	for (end = page; end->private; end = (struct page *)end->private);
	end->private = (unsigned long)rq->pages;
	rq->pages = page;
}
/* Pop a page from the rq page pool, or allocate one when the pool is empty. */
static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
{
	struct page *p = rq->pages;

	if (p) {
		rq->pages = (struct page *)p->private;
		/* clear private here, it is used to chain pages */
		p->private = 0;
	} else
		p = alloc_page(gfp_mask);
	return p;
}
/* Schedule NAPI for this virtqueue; callbacks stay disabled while polling. */
static void virtqueue_napi_schedule(struct napi_struct *napi,
				    struct virtqueue *vq)
{
	if (napi_schedule_prep(napi)) {
		virtqueue_disable_cb(vq);
		__napi_schedule(napi);
	}
}
/* Finish a NAPI poll: re-arm virtqueue callbacks and, if buffers raced in
 * meanwhile (virtqueue_poll on the prepared token), reschedule immediately
 * so they are not lost.
 */
static void virtqueue_napi_complete(struct napi_struct *napi,
				    struct virtqueue *vq, int processed)
{
	int opaque;

	opaque = virtqueue_enable_cb_prepare(vq);
	if (napi_complete_done(napi, processed)) {
		if (unlikely(virtqueue_poll(vq, opaque)))
			virtqueue_napi_schedule(napi, vq);
	} else {
		virtqueue_disable_cb(vq);
	}
}
/* Tx virtqueue completion callback. */
static void skb_xmit_done(struct virtqueue *vq)
{
	struct virtnet_info *vi = vq->vdev->priv;
	struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;

	/* Suppress further interrupts. */
	virtqueue_disable_cb(vq);

	if (napi->weight)
		/* tx napi is in use: reap completed buffers from softirq */
		virtqueue_napi_schedule(napi, vq);
	else
		/* We were probably waiting for more output buffers. */
		netif_wake_subqueue(vi->dev, vq2txq(vq));
}
#define MRG_CTX_HEADER_SHIFT 22

/* Pack (truesize, headroom) into the opaque per-buffer context pointer:
 * headroom occupies the bits above MRG_CTX_HEADER_SHIFT, truesize the bits
 * below it.
 */
static void *mergeable_len_to_ctx(unsigned int truesize,
				  unsigned int headroom)
{
	unsigned long ctx = (unsigned long)headroom << MRG_CTX_HEADER_SHIFT;

	ctx |= truesize;
	return (void *)ctx;
}

/* Recover the headroom encoded by mergeable_len_to_ctx(). */
static unsigned int mergeable_ctx_to_headroom(void *mrg_ctx)
{
	unsigned long ctx = (unsigned long)mrg_ctx;

	return ctx >> MRG_CTX_HEADER_SHIFT;
}

/* Recover the truesize encoded by mergeable_len_to_ctx(). */
static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
{
	unsigned long ctx = (unsigned long)mrg_ctx;

	return ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
}
/* Called from bottom half context */
/* Build an sk_buff from a received page chain: the first GOOD_COPY_LEN-ish
 * bytes are copied into the skb linear area so the pages can be reused for
 * small packets; the remainder is attached as page frags.
 */
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
				   struct receive_queue *rq,
				   struct page *page, unsigned int offset,
				   unsigned int len, unsigned int truesize,
				   bool hdr_valid, unsigned int metasize)
{
	struct sk_buff *skb;
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	unsigned int copy, hdr_len, hdr_padded_len;
	char *p;

	p = page_address(page) + offset;

	/* copy small packet so we can reuse these pages for small data */
	skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
	if (unlikely(!skb))
		return NULL;

	hdr = skb_vnet_hdr(skb);

	hdr_len = vi->hdr_len;
	if (vi->mergeable_rx_bufs)
		hdr_padded_len = sizeof(*hdr);
	else
		hdr_padded_len = sizeof(struct padded_vnet_hdr);

	/* hdr_valid means no XDP, so we can copy the vnet header */
	if (hdr_valid)
		memcpy(hdr, p, hdr_len);

	len -= hdr_len;
	offset += hdr_padded_len;
	p += hdr_padded_len;

	copy = len;
	if (copy > skb_tailroom(skb))
		copy = skb_tailroom(skb);
	skb_put_data(skb, p, copy);

	if (metasize) {
		/* XDP metadata was prepended to the payload; expose it. */
		__skb_pull(skb, metasize);
		skb_metadata_set(skb, metasize);
	}

	len -= copy;
	offset += copy;

	if (vi->mergeable_rx_bufs) {
		if (len)
			skb_add_rx_frag(skb, 0, page, offset, len, truesize);
		else
			put_page(page);
		return skb;
	}

	/*
	 * Verify that we can indeed put this data into a skb.
	 * This is here to handle cases when the device erroneously
	 * tries to receive more than is possible. This is usually
	 * the case of a broken device.
	 */
	if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
		net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
		dev_kfree_skb(skb);
		return NULL;
	}
	BUG_ON(offset >= PAGE_SIZE);
	while (len) {
		unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
				frag_size, truesize);
		len -= frag_size;
		/* follow the page chain linked through page->private */
		page = (struct page *)page->private;
		offset = 0;
	}

	if (page)
		give_pages(rq, page);

	return skb;
}
/* Prepend a (zeroed) virtio-net header to an xdp_frame and queue it on sq.
 * Returns -EOVERFLOW when the frame lacks headroom for the header, -ENOSPC
 * when the virtqueue is full; the caller keeps frame ownership on error.
 */
static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
				  struct send_queue *sq,
				  struct xdp_frame *xdpf)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	int err;

	if (unlikely(xdpf->headroom < vi->hdr_len))
		return -EOVERFLOW;

	/* Make room for virtqueue hdr (also change xdpf->headroom?) */
	xdpf->data -= vi->hdr_len;
	/* Zero header and leave csum up to XDP layers */
	hdr = xdpf->data;
	memset(hdr, 0, vi->hdr_len);
	xdpf->len += vi->hdr_len;

	sg_init_one(sq->sg, xdpf->data, xdpf->len);

	err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp_to_ptr(xdpf),
				   GFP_ATOMIC);
	if (unlikely(err))
		return -ENOSPC; /* Caller handle free/refcnt */

	return 0;
}
/* Pick the send queue used for XDP transmissions on this CPU.
 * NOTE(review): assumes the XDP tx queues occupy the tail of sq[] and that
 * there is one per CPU -- confirm against the queue setup code.
 */
static struct send_queue *virtnet_xdp_sq(struct virtnet_info *vi)
{
	unsigned int qp;

	qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
	return &vi->sq[qp];
}
/* ndo_xdp_xmit: transmit a batch of n xdp_frames. Frames which cannot be
 * queued are returned to the XDP layer and counted as drops. Returns the
 * number of frames accepted, or a negative errno.
 */
static int virtnet_xdp_xmit(struct net_device *dev,
			    int n, struct xdp_frame **frames, u32 flags)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct receive_queue *rq = vi->rq;
	struct bpf_prog *xdp_prog;
	struct send_queue *sq;
	unsigned int len;
	int packets = 0;
	int bytes = 0;
	int drops = 0;
	int kicks = 0;
	int ret, err;
	void *ptr;
	int i;

	/* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
	 * indicate XDP resources have been successfully allocated.
	 */
	/* NOTE(review): only rq[0]'s program is checked here -- presumably the
	 * attach path installs the program on every rq; verify.
	 */
	xdp_prog = rcu_access_pointer(rq->xdp_prog);
	if (!xdp_prog)
		return -ENXIO;

	sq = virtnet_xdp_sq(vi);

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
		ret = -EINVAL;
		drops = n;
		goto out;
	}

	/* Free up any pending old buffers before queueing new ones. */
	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
		if (likely(is_xdp_frame(ptr))) {
			struct xdp_frame *frame = ptr_to_xdp(ptr);

			bytes += frame->len;
			xdp_return_frame(frame);
		} else {
			/* the queue may also hold completed skbs (tagged) */
			struct sk_buff *skb = ptr;

			bytes += skb->len;
			napi_consume_skb(skb, false);
		}
		packets++;
	}

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];

		err = __virtnet_xdp_xmit_one(vi, sq, xdpf);
		if (err) {
			xdp_return_frame_rx_napi(xdpf);
			drops++;
		}
	}
	ret = n - drops;

	if (flags & XDP_XMIT_FLUSH) {
		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
			kicks = 1;
	}
out:
	u64_stats_update_begin(&sq->stats.syncp);
	sq->stats.bytes += bytes;
	sq->stats.packets += packets;
	sq->stats.xdp_tx += n;
	sq->stats.xdp_tx_drops += drops;
	sq->stats.kicks += kicks;
	u64_stats_update_end(&sq->stats.syncp);

	return ret;
}
/* Extra rx headroom is only reserved while XDP queues are configured. */
static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
{
	return vi->xdp_queue_pairs ? VIRTIO_XDP_HEADROOM : 0;
}
/* We copy the packet for XDP in the following cases:
 *
 * 1) Packet is scattered across multiple rx buffers.
 * 2) Headroom space is insufficient.
 *
 * This is inefficient but it's a temporary condition that
 * we hit right after XDP is enabled and until queue is refilled
 * with large buffers with sufficient headroom - so it should affect
 * at most queue size packets.
 * Afterwards, the conditions to enable
 * XDP should preclude the underlying device from sending packets
 * across multiple buffers (num_buf > 1), and we make sure buffers
 * have enough headroom.
 */
static struct page *xdp_linearize_page(struct receive_queue *rq,
				       u16 *num_buf,
				       struct page *p,
				       int offset,
				       int page_off,
				       unsigned int *len)
{
	struct page *page = alloc_page(GFP_ATOMIC);

	if (!page)
		return NULL;

	/* copy the first buffer, then drain the remaining num_buf-1 buffers
	 * from the virtqueue and append them to the same page
	 */
	memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
	page_off += *len;

	while (--*num_buf) {
		int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
		unsigned int buflen;
		void *buf;
		int off;

		buf = virtqueue_get_buf(rq->vq, &buflen);
		if (unlikely(!buf))
			goto err_buf;

		p = virt_to_head_page(buf);
		off = buf - page_address(p);

		/* guard against a misconfigured or uncooperative backend that
		 * is sending packet larger than the MTU.
		 */
		if ((page_off + buflen + tailroom) > PAGE_SIZE) {
			put_page(p);
			goto err_buf;
		}

		memcpy(page_address(page) + page_off,
		       page_address(p) + off, buflen);
		page_off += buflen;
		put_page(p);
	}

	/* Headroom does not contribute to packet length */
	*len = page_off - VIRTIO_XDP_HEADROOM;
	return page;
err_buf:
	__free_pages(page, 0);
	return NULL;
}
/* Receive path for "small" (single page-fragment) rx buffers, with optional
 * XDP processing.  Returns the skb to hand up the stack, or NULL when the
 * packet was consumed by XDP (TX/REDIRECT) or dropped.
 *
 * Fix: the XDP_ABORTED case fell through to XDP_DROP without a
 * `fallthrough;` annotation, unlike the identical switch in
 * receive_mergeable(); this trips -Wimplicit-fallthrough.
 */
static struct sk_buff *receive_small(struct net_device *dev,
				     struct virtnet_info *vi,
				     struct receive_queue *rq,
				     void *buf, void *ctx,
				     unsigned int len,
				     unsigned int *xdp_xmit,
				     struct virtnet_rq_stats *stats)
{
	struct sk_buff *skb;
	struct bpf_prog *xdp_prog;
	/* ctx carries the headroom this buffer was posted with, see
	 * add_recvbuf_small().
	 */
	unsigned int xdp_headroom = (unsigned long)ctx;
	unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom;
	unsigned int headroom = vi->hdr_len + header_offset;
	unsigned int buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	struct page *page = virt_to_head_page(buf);
	unsigned int delta = 0;
	struct page *xdp_page;
	int err;
	unsigned int metasize = 0;

	len -= vi->hdr_len;
	stats->bytes += len;

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (xdp_prog) {
		struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
		struct xdp_frame *xdpf;
		struct xdp_buff xdp;
		void *orig_data;
		u32 act;

		/* XDP cannot handle GSO packets. */
		if (unlikely(hdr->hdr.gso_type))
			goto err_xdp;

		/* Buffer was posted before XDP was attached and lacks the
		 * required headroom: copy it into a fresh page that has it.
		 */
		if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
			int offset = buf - page_address(page) + header_offset;
			unsigned int tlen = len + vi->hdr_len;
			u16 num_buf = 1;

			xdp_headroom = virtnet_get_headroom(vi);
			header_offset = VIRTNET_RX_PAD + xdp_headroom;
			headroom = vi->hdr_len + header_offset;
			buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
			xdp_page = xdp_linearize_page(rq, &num_buf, page,
						      offset, header_offset,
						      &tlen);
			if (!xdp_page)
				goto err_xdp;

			buf = page_address(xdp_page);
			put_page(page);
			page = xdp_page;
		}

		xdp.data_hard_start = buf + VIRTNET_RX_PAD + vi->hdr_len;
		xdp.data = xdp.data_hard_start + xdp_headroom;
		xdp.data_end = xdp.data + len;
		xdp.data_meta = xdp.data;
		xdp.rxq = &rq->xdp_rxq;
		xdp.frame_sz = buflen;
		orig_data = xdp.data;
		act = bpf_prog_run_xdp(xdp_prog, &xdp);
		stats->xdp_packets++;

		switch (act) {
		case XDP_PASS:
			/* Recalculate length in case bpf program changed it */
			delta = orig_data - xdp.data;
			len = xdp.data_end - xdp.data;
			metasize = xdp.data - xdp.data_meta;
			break;
		case XDP_TX:
			stats->xdp_tx++;
			xdpf = xdp_convert_buff_to_frame(&xdp);
			if (unlikely(!xdpf))
				goto err_xdp;
			err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
			if (unlikely(err < 0)) {
				trace_xdp_exception(vi->dev, xdp_prog, act);
				goto err_xdp;
			}
			*xdp_xmit |= VIRTIO_XDP_TX;
			rcu_read_unlock();
			goto xdp_xmit;
		case XDP_REDIRECT:
			stats->xdp_redirects++;
			err = xdp_do_redirect(dev, &xdp, xdp_prog);
			if (err)
				goto err_xdp;
			*xdp_xmit |= VIRTIO_XDP_REDIR;
			rcu_read_unlock();
			goto xdp_xmit;
		default:
			bpf_warn_invalid_xdp_action(act);
			fallthrough;
		case XDP_ABORTED:
			trace_xdp_exception(vi->dev, xdp_prog, act);
			fallthrough;
		case XDP_DROP:
			goto err_xdp;
		}
	}
	rcu_read_unlock();

	skb = build_skb(buf, buflen);
	if (!skb) {
		put_page(page);
		goto err;
	}
	skb_reserve(skb, headroom - delta);
	skb_put(skb, len);
	if (!xdp_prog) {
		buf += header_offset;
		memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len);
	} /* keep zeroed vnet hdr since XDP is loaded */

	if (metasize)
		skb_metadata_set(skb, metasize);

err:
	return skb;

err_xdp:
	rcu_read_unlock();
	stats->xdp_drops++;
	stats->drops++;
	put_page(page);
xdp_xmit:
	return NULL;
}
/* Receive path for "big" (page-chain) rx buffers: the whole packet lives in
 * a chain of pages handed over as @buf.  Returns the built skb, or NULL on
 * failure (pages are returned to the rx pool and the drop counted).
 */
static struct sk_buff *receive_big(struct net_device *dev,
				   struct virtnet_info *vi,
				   struct receive_queue *rq,
				   void *buf,
				   unsigned int len,
				   struct virtnet_rq_stats *stats)
{
	struct page *page = buf;
	struct sk_buff *skb;

	stats->bytes += len - vi->hdr_len;

	skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, true, 0);
	if (likely(skb))
		return skb;

	stats->drops++;
	give_pages(rq, page);
	return NULL;
}
/* Receive path for mergeable rx buffers: one packet may span num_buffers
 * separately-posted buffers (per the vnet header).  Optionally runs XDP on
 * a linearized copy first, then assembles a (possibly frag-listed) skb.
 * Returns NULL when the packet was consumed by XDP or dropped; any unused
 * trailing buffers are drained from the virtqueue on error.
 */
static struct sk_buff *receive_mergeable(struct net_device *dev,
					 struct virtnet_info *vi,
					 struct receive_queue *rq,
					 void *buf,
					 void *ctx,
					 unsigned int len,
					 unsigned int *xdp_xmit,
					 struct virtnet_rq_stats *stats)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
	u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
	struct page *page = virt_to_head_page(buf);
	int offset = buf - page_address(page);
	struct sk_buff *head_skb, *curr_skb;
	struct bpf_prog *xdp_prog;
	/* truesize and headroom were packed into ctx when the buffer was
	 * posted, see add_recvbuf_mergeable().
	 */
	unsigned int truesize = mergeable_ctx_to_truesize(ctx);
	unsigned int headroom = mergeable_ctx_to_headroom(ctx);
	unsigned int metasize = 0;
	unsigned int frame_sz;
	int err;

	head_skb = NULL;
	stats->bytes += len - vi->hdr_len;

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (xdp_prog) {
		struct xdp_frame *xdpf;
		struct page *xdp_page;
		struct xdp_buff xdp;
		void *data;
		u32 act;

		/* Transient failure which in theory could occur if
		 * in-flight packets from before XDP was enabled reach
		 * the receive path after XDP is loaded.
		 */
		if (unlikely(hdr->hdr.gso_type))
			goto err_xdp;

		/* Buffers with headroom use PAGE_SIZE as alloc size,
		 * see add_recvbuf_mergeable() + get_mergeable_buf_len()
		 */
		frame_sz = headroom ? PAGE_SIZE : truesize;

		/* This happens when rx buffer size is underestimated
		 * or headroom is not enough because of the buffer
		 * was refilled before XDP is set. This should only
		 * happen for the first several packets, so we don't
		 * care much about its performance.
		 */
		if (unlikely(num_buf > 1 ||
			     headroom < virtnet_get_headroom(vi))) {
			/* linearize data for XDP */
			xdp_page = xdp_linearize_page(rq, &num_buf,
						      page, offset,
						      VIRTIO_XDP_HEADROOM,
						      &len);
			frame_sz = PAGE_SIZE;

			if (!xdp_page)
				goto err_xdp;
			offset = VIRTIO_XDP_HEADROOM;
		} else {
			xdp_page = page;
		}

		/* Allow consuming headroom but reserve enough space to push
		 * the descriptor on if we get an XDP_TX return code.
		 */
		data = page_address(xdp_page) + offset;
		xdp.data_hard_start = data - VIRTIO_XDP_HEADROOM + vi->hdr_len;
		xdp.data = data + vi->hdr_len;
		xdp.data_end = xdp.data + (len - vi->hdr_len);
		xdp.data_meta = xdp.data;
		xdp.rxq = &rq->xdp_rxq;
		xdp.frame_sz = frame_sz - vi->hdr_len;

		act = bpf_prog_run_xdp(xdp_prog, &xdp);
		stats->xdp_packets++;

		switch (act) {
		case XDP_PASS:
			metasize = xdp.data - xdp.data_meta;

			/* recalculate offset to account for any header
			 * adjustments and minus the metasize to copy the
			 * metadata in page_to_skb(). Note other cases do not
			 * build an skb and avoid using offset
			 */
			offset = xdp.data - page_address(xdp_page) -
				 vi->hdr_len - metasize;

			/* recalculate len if xdp.data, xdp.data_end or
			 * xdp.data_meta were adjusted
			 */
			len = xdp.data_end - xdp.data + vi->hdr_len + metasize;
			/* We can only create skb based on xdp_page. */
			if (unlikely(xdp_page != page)) {
				rcu_read_unlock();
				put_page(page);
				head_skb = page_to_skb(vi, rq, xdp_page, offset,
						       len, PAGE_SIZE, false,
						       metasize);
				return head_skb;
			}
			break;
		case XDP_TX:
			stats->xdp_tx++;
			xdpf = xdp_convert_buff_to_frame(&xdp);
			if (unlikely(!xdpf))
				goto err_xdp;
			err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
			if (unlikely(err < 0)) {
				trace_xdp_exception(vi->dev, xdp_prog, act);
				if (unlikely(xdp_page != page))
					put_page(xdp_page);
				goto err_xdp;
			}
			*xdp_xmit |= VIRTIO_XDP_TX;
			/* xdp_page now owns the data; drop the original page
			 * if we linearized.
			 */
			if (unlikely(xdp_page != page))
				put_page(page);
			rcu_read_unlock();
			goto xdp_xmit;
		case XDP_REDIRECT:
			stats->xdp_redirects++;
			err = xdp_do_redirect(dev, &xdp, xdp_prog);
			if (err) {
				if (unlikely(xdp_page != page))
					put_page(xdp_page);
				goto err_xdp;
			}
			*xdp_xmit |= VIRTIO_XDP_REDIR;
			if (unlikely(xdp_page != page))
				put_page(page);
			rcu_read_unlock();
			goto xdp_xmit;
		default:
			bpf_warn_invalid_xdp_action(act);
			fallthrough;
		case XDP_ABORTED:
			trace_xdp_exception(vi->dev, xdp_prog, act);
			fallthrough;
		case XDP_DROP:
			if (unlikely(xdp_page != page))
				__free_pages(xdp_page, 0);
			goto err_xdp;
		}
	}
	rcu_read_unlock();

	/* Device must not claim more bytes than the buffer's truesize. */
	if (unlikely(len > truesize)) {
		pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
			 dev->name, len, (unsigned long)ctx);
		dev->stats.rx_length_errors++;
		goto err_skb;
	}

	head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog,
			       metasize);
	curr_skb = head_skb;

	if (unlikely(!curr_skb))
		goto err_skb;
	/* Append the remaining buffers of this packet as frags (or a
	 * frag_list once MAX_SKB_FRAGS is reached).
	 */
	while (--num_buf) {
		int num_skb_frags;

		buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);
		if (unlikely(!buf)) {
			pr_debug("%s: rx error: %d buffers out of %d missing\n",
				 dev->name, num_buf,
				 virtio16_to_cpu(vi->vdev,
						 hdr->num_buffers));
			dev->stats.rx_length_errors++;
			goto err_buf;
		}

		stats->bytes += len;
		page = virt_to_head_page(buf);

		truesize = mergeable_ctx_to_truesize(ctx);
		if (unlikely(len > truesize)) {
			pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
				 dev->name, len, (unsigned long)ctx);
			dev->stats.rx_length_errors++;
			goto err_skb;
		}

		num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
		if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
			struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);

			if (unlikely(!nskb))
				goto err_skb;
			if (curr_skb == head_skb)
				skb_shinfo(curr_skb)->frag_list = nskb;
			else
				curr_skb->next = nskb;
			curr_skb = nskb;
			head_skb->truesize += nskb->truesize;
			num_skb_frags = 0;
		}
		if (curr_skb != head_skb) {
			head_skb->data_len += len;
			head_skb->len += len;
			head_skb->truesize += truesize;
		}
		offset = buf - page_address(page);
		if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
			put_page(page);
			skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
					     len, truesize);
		} else {
			skb_add_rx_frag(curr_skb, num_skb_frags, page,
					offset, len, truesize);
		}
	}

	ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
	return head_skb;

err_xdp:
	rcu_read_unlock();
	stats->xdp_drops++;
err_skb:
	put_page(page);
	/* Drain and free the rest of this packet's buffers. */
	while (num_buf-- > 1) {
		buf = virtqueue_get_buf(rq->vq, &len);
		if (unlikely(!buf)) {
			pr_debug("%s: rx error: %d buffers missing\n",
				 dev->name, num_buf);
			dev->stats.rx_length_errors++;
			break;
		}
		stats->bytes += len;
		page = virt_to_head_page(buf);
		put_page(page);
	}
err_buf:
	stats->drops++;
	dev_kfree_skb(head_skb);
xdp_xmit:
	return NULL;
}
/* Top-level rx dispatch: validate the buffer, hand it to the mode-specific
 * receiver (mergeable / big / small), then finish the skb (checksum state,
 * GSO metadata from the vnet header) and feed it to GRO.
 */
static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
			void *buf, unsigned int len, void **ctx,
			unsigned int *xdp_xmit,
			struct virtnet_rq_stats *stats)
{
	struct net_device *dev = vi->dev;
	struct sk_buff *skb;
	struct virtio_net_hdr_mrg_rxbuf *hdr;

	/* Runt frame: too short to even hold the vnet header + ethernet
	 * header.  Free the buffer the same way its mode allocated it.
	 */
	if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
		pr_debug("%s: short packet %i\n", dev->name, len);
		dev->stats.rx_length_errors++;
		if (vi->mergeable_rx_bufs) {
			put_page(virt_to_head_page(buf));
		} else if (vi->big_packets) {
			give_pages(rq, buf);
		} else {
			put_page(virt_to_head_page(buf));
		}
		return;
	}

	if (vi->mergeable_rx_bufs)
		skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
					stats);
	else if (vi->big_packets)
		skb = receive_big(dev, vi, rq, buf, len, stats);
	else
		skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats);

	/* NULL means consumed by XDP or dropped; stats already updated. */
	if (unlikely(!skb))
		return;

	hdr = skb_vnet_hdr(skb);

	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
				  virtio_is_little_endian(vi->vdev))) {
		net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n",
				     dev->name, hdr->hdr.gso_type,
				     hdr->hdr.gso_size);
		goto frame_err;
	}

	skb_record_rx_queue(skb, vq2rxq(rq->vq));
	skb->protocol = eth_type_trans(skb, dev);
	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
		 ntohs(skb->protocol), skb->len, skb->pkt_type);

	napi_gro_receive(&rq->napi, skb);
	return;

frame_err:
	dev->stats.rx_frame_errors++;
	dev_kfree_skb(skb);
}
/* Unlike mergeable buffers, all buffers are allocated to the
* same size, except for the headroom. For this reason we do
* not need to use mergeable_len_to_ctx here - it is enough
* to store the headroom as the context ignoring the truesize.
*/
static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
			     gfp_t gfp)
{
	/* Post one small rx buffer carved from the per-queue page frag.
	 * The current XDP headroom is stashed in the ctx pointer so
	 * receive_small() knows how this buffer was laid out.
	 * Returns 0 or a negative errno from virtqueue_add_inbuf_ctx().
	 */
	struct page_frag *alloc_frag = &rq->alloc_frag;
	char *buf;
	unsigned int xdp_headroom = virtnet_get_headroom(vi);
	void *ctx = (void *)(unsigned long)xdp_headroom;
	int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom;
	int err;

	/* Round up so build_skb() later has room for skb_shared_info. */
	len = SKB_DATA_ALIGN(len) +
	      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp)))
		return -ENOMEM;

	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
	/* Hold a page ref for the buffer handed to the device. */
	get_page(alloc_frag->page);
	alloc_frag->offset += len;
	sg_init_one(rq->sg, buf + VIRTNET_RX_PAD + xdp_headroom,
		    vi->hdr_len + GOOD_PACKET_LEN);
	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
	if (err < 0)
		put_page(virt_to_head_page(buf));
	return err;
}
static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
			   gfp_t gfp)
{
	/* Post one "big packet" rx buffer: a chain of MAX_SKB_FRAGS + 2
	 * pages linked through page->private, described by an sg list with
	 * a dedicated first entry for the vnet header.  Returns 0 or a
	 * negative errno; on failure the whole chain is returned to the
	 * queue's page pool.
	 */
	struct page *first, *list = NULL;
	char *p;
	int i, err, offset;

	sg_init_table(rq->sg, MAX_SKB_FRAGS + 2);

	/* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */
	for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
		first = get_a_page(rq, gfp);
		if (!first) {
			if (list)
				give_pages(rq, list);
			return -ENOMEM;
		}
		sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);

		/* chain new page in list head to match sg */
		first->private = (unsigned long)list;
		list = first;
	}

	first = get_a_page(rq, gfp);
	if (!first) {
		give_pages(rq, list);
		return -ENOMEM;
	}
	p = page_address(first);

	/* rq->sg[0], rq->sg[1] share the same page */
	/* a separated rq->sg[0] for header - required in case !any_header_sg */
	sg_set_buf(&rq->sg[0], p, vi->hdr_len);

	/* rq->sg[1] for data packet, from offset */
	offset = sizeof(struct padded_vnet_hdr);
	sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);

	/* chain first in list head */
	first->private = (unsigned long)list;
	err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2,
				  first, gfp);
	if (err < 0)
		give_pages(rq, first);

	return err;
}
/* Pick the size of the next mergeable rx buffer.  With XDP headroom in use
 * (@room != 0) hand out page-sized buffers minus that room; otherwise size
 * by the EWMA of recent packet lengths, clamped to the queue's minimum and
 * to what fits in a page next to the header, cache-line aligned.
 */
static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
					  struct ewma_pkt_len *avg_pkt_len,
					  unsigned int room)
{
	const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	unsigned int avg;

	if (room)
		return PAGE_SIZE - room;

	avg = clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
		      rq->min_buf_len, PAGE_SIZE - hdr_len);

	return ALIGN(hdr_len + avg, L1_CACHE_BYTES);
}
static int add_recvbuf_mergeable(struct virtnet_info *vi,
				 struct receive_queue *rq, gfp_t gfp)
{
	/* Post one mergeable rx buffer from the per-queue page frag; the
	 * buffer's truesize and headroom are packed into the ctx cookie for
	 * receive_mergeable().  Returns 0 or a negative errno.
	 */
	struct page_frag *alloc_frag = &rq->alloc_frag;
	unsigned int headroom = virtnet_get_headroom(vi);
	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
	unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
	char *buf;
	void *ctx;
	int err;
	unsigned int len, hole;

	/* Extra tailroom is needed to satisfy XDP's assumption. This
	 * means rx frags coalescing won't work, but consider we've
	 * disabled GSO for XDP, it won't be a big issue.
	 */
	len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room);
	if (unlikely(!skb_page_frag_refill(len + room, alloc_frag, gfp)))
		return -ENOMEM;

	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
	buf += headroom; /* advance address leaving hole at front of pkt */
	get_page(alloc_frag->page);
	alloc_frag->offset += len + room;
	hole = alloc_frag->size - alloc_frag->offset;
	if (hole < len + room) {
		/* To avoid internal fragmentation, if there is very likely not
		 * enough space for another buffer, add the remaining space to
		 * the current buffer.
		 */
		len += hole;
		alloc_frag->offset += hole;
	}

	sg_init_one(rq->sg, buf, len);
	ctx = mergeable_len_to_ctx(len, headroom);
	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
	if (err < 0)
		put_page(virt_to_head_page(buf));

	return err;
}
/*
* Returns false if we couldn't fill entirely (OOM).
*
* Normally run in the receive path, but can also be run from ndo_open
* before we're receiving packets, or from refill_work which is
* careful to disable receiving (using napi_disable).
*/
static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
			  gfp_t gfp)
{
	/* Keep posting rx buffers (mode-appropriate) until the ring is full
	 * or an allocation fails; kick the device once at the end.  Returns
	 * false only on OOM (caller should retry via the refill workqueue).
	 */
	int err;
	bool oom;

	do {
		if (vi->mergeable_rx_bufs)
			err = add_recvbuf_mergeable(vi, rq, gfp);
		else if (vi->big_packets)
			err = add_recvbuf_big(vi, rq, gfp);
		else
			err = add_recvbuf_small(vi, rq, gfp);

		oom = err == -ENOMEM;
		if (err)
			break;
	} while (rq->vq->num_free);
	/* Single kick after the batch; count it for stats. */
	if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) {
		unsigned long flags;

		flags = u64_stats_update_begin_irqsave(&rq->stats.syncp);
		rq->stats.kicks++;
		u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
	}

	return !oom;
}
/* Rx virtqueue interrupt callback: schedule NAPI for the matching queue. */
static void skb_recv_done(struct virtqueue *rvq)
{
	struct virtnet_info *vi = rvq->vdev->priv;
	struct receive_queue *rq;

	rq = &vi->rq[vq2rxq(rvq)];
	virtqueue_napi_schedule(&rq->napi, rvq);
}
static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
{
	napi_enable(napi);

	/* If all buffers were filled by other side before we napi_enabled, we
	 * won't get another interrupt, so process any outstanding packets now.
	 * Call local_bh_enable after to trigger softIRQ processing.
	 */
	local_bh_disable();
	virtqueue_napi_schedule(napi, vq);
	local_bh_enable();
}
/* Enable tx NAPI for a send queue.  Tx NAPI only pays off when tx
 * interrupts are affine with the transmit path, so without affinity hints
 * it is permanently disabled by zeroing the weight.
 */
static void virtnet_napi_tx_enable(struct virtnet_info *vi,
				   struct virtqueue *vq,
				   struct napi_struct *napi)
{
	/* Weight 0 means tx napi is switched off entirely. */
	if (!napi->weight)
		return;

	/* Tx napi touches cachelines on the cpu handling tx interrupts. Only
	 * enable the feature if this is likely affine with the transmit path.
	 */
	if (!vi->affinity_hint_set) {
		napi->weight = 0;
		return;
	}

	virtnet_napi_enable(vq, napi);
}
/* Disable tx NAPI, but only if it was actually enabled (nonzero weight). */
static void virtnet_napi_tx_disable(struct napi_struct *napi)
{
	if (!napi->weight)
		return;

	napi_disable(napi);
}
static void refill_work(struct work_struct *work)
{
	/* Deferred rx-buffer refill, run when try_fill_recv() hit OOM in
	 * atomic context.  NAPI is disabled around each queue's refill so
	 * the ring isn't touched concurrently; reschedules itself if a
	 * queue still could not be filled.
	 */
	struct virtnet_info *vi =
		container_of(work, struct virtnet_info, refill.work);
	bool still_empty;
	int i;

	for (i = 0; i < vi->curr_queue_pairs; i++) {
		struct receive_queue *rq = &vi->rq[i];

		napi_disable(&rq->napi);
		still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
		virtnet_napi_enable(rq->vq, &rq->napi);

		/* In theory, this can happen: if we don't get any buffers in
		 * we will *never* try to fill again.
		 */
		if (still_empty)
			schedule_delayed_work(&vi->refill, HZ/2);
	}
}
static int virtnet_receive(struct receive_queue *rq, int budget,
			   unsigned int *xdp_xmit)
{
	/* NAPI rx worker: drain up to @budget completed buffers, refill the
	 * ring when it runs low, then fold the local per-poll counters into
	 * the queue's stats under one u64_stats section.  Returns the number
	 * of packets processed.
	 */
	struct virtnet_info *vi = rq->vq->vdev->priv;
	struct virtnet_rq_stats stats = {};
	unsigned int len;
	void *buf;
	int i;

	/* Mergeable and small modes post per-buffer ctx cookies; big mode
	 * does not, hence the two get_buf variants.
	 */
	if (!vi->big_packets || vi->mergeable_rx_bufs) {
		void *ctx;

		while (stats.packets < budget &&
		       (buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) {
			receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
			stats.packets++;
		}
	} else {
		while (stats.packets < budget &&
		       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
			receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats);
			stats.packets++;
		}
	}

	/* Refill inline when more than half the (budget-capped) ring is
	 * free; fall back to the workqueue on atomic-allocation failure.
	 */
	if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
		if (!try_fill_recv(vi, rq, GFP_ATOMIC))
			schedule_delayed_work(&vi->refill, 0);
	}

	u64_stats_update_begin(&rq->stats.syncp);
	for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) {
		size_t offset = virtnet_rq_stats_desc[i].offset;
		u64 *item;

		item = (u64 *)((u8 *)&rq->stats + offset);
		*item += *(u64 *)((u8 *)&stats + offset);
	}
	u64_stats_update_end(&rq->stats.syncp);

	return stats.packets;
}
/* Reclaim completed tx descriptors: free sent skbs (or return XDP frames)
 * and account bytes/packets.  Stats are only touched when something was
 * actually reclaimed, since this is also called speculatively.
 */
static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
{
	unsigned int total_bytes = 0;
	unsigned int pkts = 0;
	unsigned int len;
	void *ptr;

	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
		if (is_xdp_frame(ptr)) {
			struct xdp_frame *frame = ptr_to_xdp(ptr);

			total_bytes += frame->len;
			xdp_return_frame(frame);
		} else {
			struct sk_buff *skb = ptr;

			pr_debug("Sent skb %p\n", skb);
			total_bytes += skb->len;
			napi_consume_skb(skb, in_napi);
		}
		pkts++;
	}

	/* Avoid the stats-seqcount overhead when nothing completed (the
	 * common case for speculative calls from start_xmit).
	 */
	if (!pkts)
		return;

	u64_stats_update_begin(&sq->stats.syncp);
	sq->stats.bytes += total_bytes;
	sq->stats.packets += pkts;
	u64_stats_update_end(&sq->stats.syncp);
}
/* A queue index q is an XDP raw-buffer queue iff it falls in the last
 * xdp_queue_pairs slots of the currently active queue pairs.
 */
static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
{
	return q >= vi->curr_queue_pairs - vi->xdp_queue_pairs &&
	       q < vi->curr_queue_pairs;
}
static void virtnet_poll_cleantx(struct receive_queue *rq)
{
	/* Opportunistically reclaim the paired send queue's completed tx
	 * buffers from the rx NAPI context, and restart the stack's tx
	 * queue if enough ring space was freed.  Skipped when tx napi is
	 * off or the queue belongs to XDP.
	 */
	struct virtnet_info *vi = rq->vq->vdev->priv;
	unsigned int index = vq2rxq(rq->vq);
	struct send_queue *sq = &vi->sq[index];
	struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);

	if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index))
		return;

	/* trylock: don't spin against a concurrent transmitter. */
	if (__netif_tx_trylock(txq)) {
		free_old_xmit_skbs(sq, true);
		__netif_tx_unlock(txq);
	}

	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
		netif_tx_wake_queue(txq);
}
static int virtnet_poll(struct napi_struct *napi, int budget)
{
	/* Rx NAPI poll: clean the paired tx queue, receive up to @budget
	 * packets, then flush any XDP redirects and kick the XDP tx queue
	 * accumulated during this poll.
	 */
	struct receive_queue *rq =
		container_of(napi, struct receive_queue, napi);
	struct virtnet_info *vi = rq->vq->vdev->priv;
	struct send_queue *sq;
	unsigned int received;
	unsigned int xdp_xmit = 0;

	virtnet_poll_cleantx(rq);

	received = virtnet_receive(rq, budget, &xdp_xmit);

	/* Out of packets? */
	if (received < budget)
		virtqueue_napi_complete(napi, rq->vq, received);

	if (xdp_xmit & VIRTIO_XDP_REDIR)
		xdp_do_flush();

	if (xdp_xmit & VIRTIO_XDP_TX) {
		sq = virtnet_xdp_sq(vi);
		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
			u64_stats_update_begin(&sq->stats.syncp);
			sq->stats.kicks++;
			u64_stats_update_end(&sq->stats.syncp);
		}
	}

	return received;
}
/* ndo_open: refill rx rings, register XDP rxq info and enable NAPI for
 * every queue pair.
 *
 * Fix: the original returned directly on a mid-loop xdp_rxq registration
 * failure, leaving earlier queues with NAPI enabled and xdp_rxq info
 * registered (and the refill work possibly queued).  Unwind them so a
 * failed open leaves the device fully quiesced.
 */
static int virtnet_open(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i, err;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		if (i < vi->curr_queue_pairs)
			/* Make sure we have some buffers: if oom use wq. */
			if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
				schedule_delayed_work(&vi->refill, 0);

		err = xdp_rxq_info_reg(&vi->rq[i].xdp_rxq, dev, i, vi->rq[i].napi.napi_id);
		if (err < 0)
			goto err_unwind;

		err = xdp_rxq_info_reg_mem_model(&vi->rq[i].xdp_rxq,
						 MEM_TYPE_PAGE_SHARED, NULL);
		if (err < 0) {
			xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
			goto err_unwind;
		}

		virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
		virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi);
	}

	return 0;

err_unwind:
	/* Stop the refill work first so it can't re-enable NAPI on the
	 * queues we are about to tear down (mirrors virtnet_close()).
	 */
	cancel_delayed_work_sync(&vi->refill);

	/* Tear down the queue pairs that were already brought up. */
	while (--i >= 0) {
		virtnet_napi_tx_disable(&vi->sq[i].napi);
		napi_disable(&vi->rq[i].napi);
		xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
	}

	return err;
}
static int virtnet_poll_tx(struct napi_struct *napi, int budget)
{
	/* Tx NAPI poll: reclaim completed tx buffers under the tx queue
	 * lock and wake the stack's queue when ring space is available.
	 * Always reports 0 work so NAPI completes immediately.
	 */
	struct send_queue *sq = container_of(napi, struct send_queue, napi);
	struct virtnet_info *vi = sq->vq->vdev->priv;
	unsigned int index = vq2txq(sq->vq);
	struct netdev_queue *txq;

	if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
		/* We don't need to enable cb for XDP */
		napi_complete_done(napi, 0);
		return 0;
	}

	txq = netdev_get_tx_queue(vi->dev, index);
	__netif_tx_lock(txq, raw_smp_processor_id());
	free_old_xmit_skbs(sq, true);
	__netif_tx_unlock(txq);

	virtqueue_napi_complete(napi, sq->vq, 0);

	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
		netif_tx_wake_queue(txq);

	return 0;
}
static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
{
	/* Build the vnet header and sg list for @skb and queue it on the
	 * send virtqueue.  If the device supports ANY_HEADER_SG and the skb
	 * has aligned, uncloned headroom, the header is pushed in front of
	 * the skb data (one sg entry saved); otherwise the header lives in
	 * the skb's cb and gets its own sg entry.  Returns 0 or a negative
	 * errno from skb_to_sgvec()/virtqueue_add_outbuf().
	 */
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
	struct virtnet_info *vi = sq->vq->vdev->priv;
	int num_sg;
	unsigned hdr_len = vi->hdr_len;
	bool can_push;

	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);

	can_push = vi->any_header_sg &&
		!((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
		!skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
	/* Even if we can, don't push here yet as this would skew
	 * csum_start offset below. */
	if (can_push)
		hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len);
	else
		hdr = skb_vnet_hdr(skb);

	if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
				    virtio_is_little_endian(vi->vdev), false,
				    0))
		BUG();

	if (vi->mergeable_rx_bufs)
		hdr->num_buffers = 0;

	sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2));
	if (can_push) {
		__skb_push(skb, hdr_len);
		num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
		if (unlikely(num_sg < 0))
			return num_sg;
		/* Pull header back to avoid skew in tx bytes calculations. */
		__skb_pull(skb, hdr_len);
	} else {
		sg_set_buf(sq->sg, hdr, hdr_len);
		num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
		if (unlikely(num_sg < 0))
			return num_sg;
		/* +1 for the separate header sg entry. */
		num_sg++;
	}
	return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
}
static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* ndo_start_xmit: reclaim finished tx buffers, queue the skb, stop
	 * the stack's queue when the ring is nearly full, and kick the
	 * device only at the end of an xmit_more batch (or when stopped).
	 * Always returns NETDEV_TX_OK — failed skbs are dropped, never
	 * requeued.
	 */
	struct virtnet_info *vi = netdev_priv(dev);
	int qnum = skb_get_queue_mapping(skb);
	struct send_queue *sq = &vi->sq[qnum];
	int err;
	struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
	bool kick = !netdev_xmit_more();
	bool use_napi = sq->napi.weight;

	/* Free up any pending old buffers before queueing new ones. */
	free_old_xmit_skbs(sq, false);

	if (use_napi && kick)
		virtqueue_enable_cb_delayed(sq->vq);

	/* timestamp packet in software */
	skb_tx_timestamp(skb);

	/* Try to transmit */
	err = xmit_skb(sq, skb);

	/* This should not happen! */
	if (unlikely(err)) {
		dev->stats.tx_fifo_errors++;
		if (net_ratelimit())
			dev_warn(&dev->dev,
				 "Unexpected TXQ (%d) queue failure: %d\n",
				 qnum, err);
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Don't wait up for transmitted skbs to be freed. */
	if (!use_napi) {
		skb_orphan(skb);
		nf_reset_ct(skb);
	}

	/* If running out of space, stop queue to avoid getting packets that we
	 * are then unable to transmit.
	 * An alternative would be to force queuing layer to requeue the skb by
	 * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
	 * returned in a normal path of operation: it means that driver is not
	 * maintaining the TX queue stop/start state properly, and causes
	 * the stack to do a non-trivial amount of useless work.
	 * Since most packets only take 1 or 2 ring slots, stopping the queue
	 * early means 16 slots are typically wasted.
	 */
	if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
		netif_stop_subqueue(dev, qnum);
		if (!use_napi &&
		    unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
			/* More just got used, free them then recheck. */
			free_old_xmit_skbs(sq, false);
			if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
				netif_start_subqueue(dev, qnum);
				virtqueue_disable_cb(sq->vq);
			}
		}
	}

	if (kick || netif_xmit_stopped(txq)) {
		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
			u64_stats_update_begin(&sq->stats.syncp);
			sq->stats.kicks++;
			u64_stats_update_end(&sq->stats.syncp);
		}
	}

	return NETDEV_TX_OK;
}
/*
* Send command via the control virtqueue and check status. Commands
* supported by the hypervisor, as indicated by feature bits, should
* never fail unless improperly formatted.
*/
static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
				 struct scatterlist *out)
{
	/* Submit one command (header sg + optional @out sg + status sg) on
	 * the control virtqueue and busy-wait for the device's status byte.
	 * Returns true iff the device reported VIRTIO_NET_OK.
	 */
	struct scatterlist *sgs[4], hdr, stat;
	unsigned out_num = 0, tmp;

	/* Caller should know better */
	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));

	/* Pre-set status to "failed"; the device overwrites it on success. */
	vi->ctrl->status = ~0;
	vi->ctrl->hdr.class = class;
	vi->ctrl->hdr.cmd = cmd;
	/* Add header */
	sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr));
	sgs[out_num++] = &hdr;

	if (out)
		sgs[out_num++] = out;

	/* Add return status. */
	sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status));
	sgs[out_num] = &stat;

	BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
	/* NOTE(review): virtqueue_add_sgs() return value is ignored here;
	 * an add failure would be caught only via the stale status byte.
	 */
	virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);

	if (unlikely(!virtqueue_kick(vi->cvq)))
		return vi->ctrl->status == VIRTIO_NET_OK;

	/* Spin for a response, the kick causes an ioport write, trapping
	 * into the hypervisor, so the request should be handled immediately.
	 */
	while (!virtqueue_get_buf(vi->cvq, &tmp) &&
	       !virtqueue_is_broken(vi->cvq))
		cpu_relax();

	return vi->ctrl->status == VIRTIO_NET_OK;
}
static int virtnet_set_mac_address(struct net_device *dev, void *p)
{
	/* ndo_set_mac_address: push the new MAC to the device, preferring
	 * the CTRL_MAC_ADDR control command; legacy devices get byte-wise
	 * config-space writes.  Refused entirely in standby (failover)
	 * mode.  @p is copied first because eth_prepare_mac_addr_change()
	 * may be given unaligned caller storage.
	 */
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;
	int ret;
	struct sockaddr *addr;
	struct scatterlist sg;

	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
		return -EOPNOTSUPP;

	addr = kmemdup(p, sizeof(*addr), GFP_KERNEL);
	if (!addr)
		return -ENOMEM;

	ret = eth_prepare_mac_addr_change(dev, addr);
	if (ret)
		goto out;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
		sg_init_one(&sg, addr->sa_data, dev->addr_len);
		if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
					  VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
			dev_warn(&vdev->dev,
				 "Failed to set mac address by vq command.\n");
			ret = -EINVAL;
			goto out;
		}
	} else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
		   !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
		unsigned int i;

		/* Naturally, this has an atomicity problem. */
		for (i = 0; i < dev->addr_len; i++)
			virtio_cwrite8(vdev,
				       offsetof(struct virtio_net_config, mac) +
				       i, addr->sa_data[i]);
	}

	eth_commit_mac_addr_change(dev, p);
	ret = 0;

out:
	kfree(addr);
	return ret;
}
static void virtnet_stats(struct net_device *dev,
			  struct rtnl_link_stats64 *tot)
{
	/* ndo_get_stats64: sum the per-queue tx/rx counters (read under
	 * their u64_stats seqcounts) and copy the device-level error
	 * counters into @tot.
	 */
	struct virtnet_info *vi = netdev_priv(dev);
	unsigned int start;
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		u64 tpackets, tbytes, rpackets, rbytes, rdrops;
		struct receive_queue *rq = &vi->rq[i];
		struct send_queue *sq = &vi->sq[i];

		do {
			start = u64_stats_fetch_begin_irq(&sq->stats.syncp);
			tpackets = sq->stats.packets;
			tbytes   = sq->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start));

		do {
			start = u64_stats_fetch_begin_irq(&rq->stats.syncp);
			rpackets = rq->stats.packets;
			rbytes   = rq->stats.bytes;
			rdrops   = rq->stats.drops;
		} while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start));

		tot->rx_packets += rpackets;
		tot->tx_packets += tpackets;
		tot->rx_bytes   += rbytes;
		tot->tx_bytes   += tbytes;
		tot->rx_dropped += rdrops;
	}

	tot->tx_dropped = dev->stats.tx_dropped;
	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
	tot->rx_length_errors = dev->stats.rx_length_errors;
	tot->rx_frame_errors = dev->stats.rx_frame_errors;
}
static void virtnet_ack_link_announce(struct virtnet_info *vi)
{
rtnl_lock();
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
rtnl_unlock();
}
static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
{
	/* Ask the device (via the MQ control command) to use @queue_pairs
	 * active queue pairs.  Caller must hold RTNL.  A no-op success when
	 * the device lacks a control vq or the MQ feature.  Returns 0 or
	 * -EINVAL on device refusal.
	 */
	struct scatterlist sg;
	struct net_device *dev = vi->dev;

	if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
		return 0;

	vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
	sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
		dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n",
			 queue_pairs);
		return -EINVAL;
	} else {
		vi->curr_queue_pairs = queue_pairs;
		/* virtnet_open() will refill when device is going to up. */
		if (dev->flags & IFF_UP)
			schedule_delayed_work(&vi->refill, 0);
	}

	return 0;
}
/* RTNL-locked wrapper around _virtnet_set_queues(). */
static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
{
	int ret;

	rtnl_lock();
	ret = _virtnet_set_queues(vi, queue_pairs);
	rtnl_unlock();

	return ret;
}
static int virtnet_close(struct net_device *dev)
{
	/* ndo_stop: quiesce all queue pairs.  The refill work is cancelled
	 * FIRST, since refill_work() re-enables NAPI and would race with
	 * the napi_disable() calls below.
	 */
	struct virtnet_info *vi = netdev_priv(dev);
	int i;

	/* Make sure refill_work doesn't re-enable napi! */
	cancel_delayed_work_sync(&vi->refill);

	for (i = 0; i < vi->max_queue_pairs; i++) {
		xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
		napi_disable(&vi->rq[i].napi);
		virtnet_napi_tx_disable(&vi->sq[i].napi);
	}

	return 0;
}
static void virtnet_set_rx_mode(struct net_device *dev)
{
	/* ndo_set_rx_mode: push promisc/allmulti flags and the full
	 * unicast + multicast MAC filter tables to the device via the
	 * control virtqueue.  Both tables are packed into one allocation
	 * and described by a two-entry sg list.
	 */
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg[2];
	struct virtio_net_ctrl_mac *mac_data;
	struct netdev_hw_addr *ha;
	int uc_count;
	int mc_count;
	void *buf;
	int i;

	/* We can't dynamically set ndo_set_rx_mode, so return gracefully */
	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
		return;

	vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
	vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);

	sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_PROMISC, sg))
		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
			 vi->ctrl->promisc ? "en" : "dis");

	sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
			 vi->ctrl->allmulti ? "en" : "dis");

	uc_count = netdev_uc_count(dev);
	mc_count = netdev_mc_count(dev);
	/* MAC filter - use one buffer for both lists */
	buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
		      (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
	mac_data = buf;
	if (!buf)
		return;

	sg_init_table(sg, 2);

	/* Store the unicast list and count in the front of the buffer */
	mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count);
	i = 0;
	netdev_for_each_uc_addr(ha, dev)
		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

	sg_set_buf(&sg[0], mac_data,
		   sizeof(mac_data->entries) + (uc_count * ETH_ALEN));

	/* multicast list and count fill the end */
	mac_data = (void *)&mac_data->macs[uc_count][0];

	mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count);
	i = 0;
	netdev_for_each_mc_addr(ha, dev)
		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

	sg_set_buf(&sg[1], mac_data,
		   sizeof(mac_data->entries) + (mc_count * ETH_ALEN));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
				  VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
		dev_warn(&dev->dev, "Failed to set MAC filter table.\n");

	kfree(buf);
}
/* ndo_vlan_rx_add_vid: tell the device to accept @vid.  Failure is only
 * logged; the core expects 0 here.
 */
static int virtnet_vlan_rx_add_vid(struct net_device *dev,
				   __be16 proto, u16 vid)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg;
	bool ok;

	vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
	sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));

	ok = virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
				  VIRTIO_NET_CTRL_VLAN_ADD, &sg);
	if (!ok)
		dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
	return 0;
}
/* ndo_vlan_rx_kill_vid: tell the device to stop accepting @vid.  Failure
 * is only logged; the core expects 0 here.
 */
static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
				    __be16 proto, u16 vid)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg;
	bool ok;

	vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
	sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));

	ok = virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
				  VIRTIO_NET_CTRL_VLAN_DEL, &sg);
	if (!ok)
		dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
	return 0;
}
/* Drop any previously-set CPU affinity hints on every rx/tx virtqueue.
 * No-op unless virtnet_set_affinity() marked affinity_hint_set earlier.
 */
static void virtnet_clean_affinity(struct virtnet_info *vi)
{
    int i;

    if (vi->affinity_hint_set) {
        for (i = 0; i < vi->max_queue_pairs; i++) {
            /* NULL mask clears the hint for that vq. */
            virtqueue_set_affinity(vi->rq[i].vq, NULL);
            virtqueue_set_affinity(vi->sq[i].vq, NULL);
        }
        vi->affinity_hint_set = false;
    }
}
/* Spread the active queue pairs across the online CPUs.
 *
 * Each queue pair i gets a contiguous group of CPUs of size
 * stride (= num_cpu / curr_queue_pairs, at least 1); the first
 * 'stragglers' (= num_cpu % curr_queue_pairs, when CPUs outnumber
 * queue pairs) groups take one extra CPU so the remainder is absorbed.
 * The same mask is used both for the virtqueue affinity hint and for
 * the XPS mapping of the corresponding tx queue.
 *
 * If the temporary cpumask cannot be allocated, fall back to clearing
 * all hints instead of leaving stale ones behind.
 */
static void virtnet_set_affinity(struct virtnet_info *vi)
{
    cpumask_var_t mask;
    int stragglers;
    int group_size;
    int i, j, cpu;
    int num_cpu;
    int stride;

    if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
        virtnet_clean_affinity(vi);
        return;
    }

    num_cpu = num_online_cpus();
    stride = max_t(int, num_cpu / vi->curr_queue_pairs, 1);
    stragglers = num_cpu >= vi->curr_queue_pairs ?
            num_cpu % vi->curr_queue_pairs :
            0;
    /* cpumask_next(-1, ...) yields the first online CPU. */
    cpu = cpumask_next(-1, cpu_online_mask);

    for (i = 0; i < vi->curr_queue_pairs; i++) {
        group_size = stride + (i < stragglers ? 1 : 0);

        for (j = 0; j < group_size; j++) {
            cpumask_set_cpu(cpu, mask);
            /* Wrap so excess queue pairs reuse CPUs from the start. */
            cpu = cpumask_next_wrap(cpu, cpu_online_mask,
                        nr_cpu_ids, false);
        }
        virtqueue_set_affinity(vi->rq[i].vq, mask);
        virtqueue_set_affinity(vi->sq[i].vq, mask);
        __netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, false);
        cpumask_clear(mask);
    }

    vi->affinity_hint_set = true;
    free_cpumask_var(mask);
}
/* cpuhp online callback: a CPU came up, so redistribute queue affinity. */
static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node)
{
    virtnet_set_affinity(hlist_entry_safe(node, struct virtnet_info,
                                          node));
    return 0;
}
/* cpuhp dead callback: a CPU went away for good; rebalance affinity
 * over the remaining online CPUs (instance registered via node_dead).
 */
static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
    virtnet_set_affinity(hlist_entry_safe(node, struct virtnet_info,
                                          node_dead));
    return 0;
}
/* cpuhp down-prep callback: a CPU is about to go offline, so drop the
 * affinity hints rather than point a vq at a vanishing CPU.
 */
static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
{
    virtnet_clean_affinity(hlist_entry_safe(node, struct virtnet_info,
                                            node));
    return 0;
}
/* Dynamic cpuhp state allocated in virtio_net_driver_init() for the
 * online/down-prep callbacks; shared by all virtio-net instances. */
static enum cpuhp_state virtionet_online;
/* Register this device instance with both cpu-hotplug multi states.
 * Returns 0 on success; on failure of the second registration the first
 * one is rolled back so the instance is never half-registered.
 */
static int virtnet_cpu_notif_add(struct virtnet_info *vi)
{
    int ret;

    ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node);
    if (ret)
        return ret;
    ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD,
                           &vi->node_dead);
    /* NB: inverted test — !ret means both adds succeeded, return 0 here;
     * on error fall through to unwind the first registration. */
    if (!ret)
        return ret;
    cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
    return ret;
}
/* Unregister this instance from both cpu-hotplug states (reverse of
 * virtnet_cpu_notif_add). */
static void virtnet_cpu_notif_remove(struct virtnet_info *vi)
{
    cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
    cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD,
                        &vi->node_dead);
}
/* ethtool get_ringparam: report the vring sizes of queue pair 0.
 * Ring resizing is not supported, so current == max for both directions.
 */
static void virtnet_get_ringparam(struct net_device *dev,
                                  struct ethtool_ringparam *ring)
{
    struct virtnet_info *vi = netdev_priv(dev);
    unsigned int rx_size = virtqueue_get_vring_size(vi->rq[0].vq);
    unsigned int tx_size = virtqueue_get_vring_size(vi->sq[0].vq);

    ring->rx_max_pending = rx_size;
    ring->rx_pending = rx_size;
    ring->tx_max_pending = tx_size;
    ring->tx_pending = tx_size;
}
/* ethtool get_drvinfo: module name, driver version string, and the name
 * of the virtio bus the device sits on. */
static void virtnet_get_drvinfo(struct net_device *dev,
                                struct ethtool_drvinfo *info)
{
    struct virtnet_info *vi = netdev_priv(dev);
    struct virtio_device *vdev = vi->vdev;

    strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
    strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
    strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
}
/* TODO: Eliminate OOO packets during switching */
/* ethtool set_channels: change the number of active queue pairs.
 * Only 'combined' channels are supported; requests for rx/tx/other-only
 * channels, zero queues, more than the device maximum, or while an XDP
 * program is attached are rejected with -EINVAL.
 */
static int virtnet_set_channels(struct net_device *dev,
                                struct ethtool_channels *channels)
{
    struct virtnet_info *vi = netdev_priv(dev);
    u16 queue_pairs = channels->combined_count;
    int err;

    /* We don't support separate rx/tx channels.
     * We don't allow setting 'other' channels.
     */
    if (channels->rx_count || channels->tx_count || channels->other_count)
        return -EINVAL;

    if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0)
        return -EINVAL;

    /* For now we don't support modifying channels while XDP is loaded
     * also when XDP is loaded all RX queues have XDP programs so we only
     * need to check a single RX queue.
     */
    if (vi->rq[0].xdp_prog)
        return -EINVAL;

    /* Hold off CPU hotplug while the queue count and the per-CPU
     * affinity derived from it are updated together. */
    get_online_cpus();
    err = _virtnet_set_queues(vi, queue_pairs);
    if (err) {
        put_online_cpus();
        goto err;
    }
    virtnet_set_affinity(vi);
    put_online_cpus();

    netif_set_real_num_tx_queues(dev, queue_pairs);
    netif_set_real_num_rx_queues(dev, queue_pairs);
 err:
    return err;
}
/* ethtool get_strings: emit per-queue stat names ("rx_queue_N_<stat>" for
 * every rx queue, then "tx_queue_N_<stat>" for every tx queue).  Ordering
 * must match virtnet_get_ethtool_stats(), which fills values in the same
 * rx-first, queue-major order.
 */
static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
    struct virtnet_info *vi = netdev_priv(dev);
    char *p = (char *)data;
    unsigned int i, j;

    switch (stringset) {
    case ETH_SS_STATS:
        for (i = 0; i < vi->curr_queue_pairs; i++) {
            for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) {
                snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_%s",
                     i, virtnet_rq_stats_desc[j].desc);
                p += ETH_GSTRING_LEN;
            }
        }

        for (i = 0; i < vi->curr_queue_pairs; i++) {
            for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) {
                snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_%s",
                     i, virtnet_sq_stats_desc[j].desc);
                p += ETH_GSTRING_LEN;
            }
        }
        break;
    }
}
/* ethtool get_sset_count: number of stat strings exported per the layout
 * produced by virtnet_get_strings() (rx + tx stats for each active pair).
 */
static int virtnet_get_sset_count(struct net_device *dev, int sset)
{
    struct virtnet_info *vi = netdev_priv(dev);

    if (sset != ETH_SS_STATS)
        return -EOPNOTSUPP;

    return vi->curr_queue_pairs * (VIRTNET_RQ_STATS_LEN +
                                   VIRTNET_SQ_STATS_LEN);
}
/* ethtool get_ethtool_stats: snapshot per-queue counters into @data.
 * Each queue's stat block is read under a u64_stats seqcount retry loop so
 * a consistent snapshot is taken even while the datapath updates counters.
 * Fill order (rx queues first, then tx) matches virtnet_get_strings().
 */
static void virtnet_get_ethtool_stats(struct net_device *dev,
                                      struct ethtool_stats *stats, u64 *data)
{
    struct virtnet_info *vi = netdev_priv(dev);
    unsigned int idx = 0, start, i, j;
    const u8 *stats_base;
    size_t offset;

    for (i = 0; i < vi->curr_queue_pairs; i++) {
        struct receive_queue *rq = &vi->rq[i];

        stats_base = (u8 *)&rq->stats;
        do {
            start = u64_stats_fetch_begin_irq(&rq->stats.syncp);
            for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) {
                /* Each descriptor records the field's byte offset
                 * within the stats struct. */
                offset = virtnet_rq_stats_desc[j].offset;
                data[idx + j] = *(u64 *)(stats_base + offset);
            }
        } while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start));
        idx += VIRTNET_RQ_STATS_LEN;
    }

    for (i = 0; i < vi->curr_queue_pairs; i++) {
        struct send_queue *sq = &vi->sq[i];

        stats_base = (u8 *)&sq->stats;
        do {
            start = u64_stats_fetch_begin_irq(&sq->stats.syncp);
            for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) {
                offset = virtnet_sq_stats_desc[j].offset;
                data[idx + j] = *(u64 *)(stats_base + offset);
            }
        } while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start));
        idx += VIRTNET_SQ_STATS_LEN;
    }
}
/* ethtool get_channels: only combined channels exist on virtio-net, so
 * report current/max queue pairs there and zero everywhere else.
 */
static void virtnet_get_channels(struct net_device *dev,
                                 struct ethtool_channels *channels)
{
    struct virtnet_info *vi = netdev_priv(dev);

    channels->rx_count = 0;
    channels->tx_count = 0;
    channels->other_count = 0;
    channels->max_other = 0;
    channels->combined_count = vi->curr_queue_pairs;
    channels->max_combined = vi->max_queue_pairs;
}
/* ethtool set_link_ksettings: delegate to the common virtual-device helper,
 * which stores the requested speed/duplex into vi->speed / vi->duplex. */
static int virtnet_set_link_ksettings(struct net_device *dev,
                                      const struct ethtool_link_ksettings *cmd)
{
    struct virtnet_info *vi = netdev_priv(dev);

    return ethtool_virtdev_set_link_ksettings(dev, cmd,
                          &vi->speed, &vi->duplex);
}
/* ethtool get_link_ksettings: report the cached speed/duplex; the port
 * type is always PORT_OTHER for a paravirtual NIC.
 */
static int virtnet_get_link_ksettings(struct net_device *dev,
                                      struct ethtool_link_ksettings *cmd)
{
    struct virtnet_info *vi = netdev_priv(dev);

    cmd->base.port = PORT_OTHER;
    cmd->base.speed = vi->speed;
    cmd->base.duplex = vi->duplex;

    return 0;
}
/* ethtool set_coalesce: the only tunable is tx-frames (0 or 1), which maps
 * onto enabling/disabling tx napi (napi weight 0 vs NAPI_POLL_WEIGHT).
 * rx-frames must stay 1.  The napi weight may not be flipped while the
 * interface is up, hence -EBUSY in that case.
 */
static int virtnet_set_coalesce(struct net_device *dev,
                                struct ethtool_coalesce *ec)
{
    struct virtnet_info *vi = netdev_priv(dev);
    int i, napi_weight;

    if (ec->tx_max_coalesced_frames > 1 ||
        ec->rx_max_coalesced_frames != 1)
        return -EINVAL;

    napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0;
    /* XOR detects a requested change of tx-napi state. */
    if (napi_weight ^ vi->sq[0].napi.weight) {
        if (dev->flags & IFF_UP)
            return -EBUSY;
        for (i = 0; i < vi->max_queue_pairs; i++)
            vi->sq[i].napi.weight = napi_weight;
    }

    return 0;
}
/* ethtool get_coalesce: rx-frames is always 1; tx-frames reflects whether
 * tx napi is enabled (non-zero napi weight on queue 0).  All other fields
 * are reported as zero.
 */
static int virtnet_get_coalesce(struct net_device *dev,
                                struct ethtool_coalesce *ec)
{
    struct virtnet_info *vi = netdev_priv(dev);

    *ec = (struct ethtool_coalesce) {
        .cmd = ETHTOOL_GCOALESCE,
        .rx_max_coalesced_frames = 1,
    };

    if (vi->sq[0].napi.weight)
        ec->tx_max_coalesced_frames = 1;

    return 0;
}
/* Start with unknown link parameters; virtnet_update_settings() fills in
 * real values if the device offers VIRTIO_NET_F_SPEED_DUPLEX.
 */
static void virtnet_init_settings(struct net_device *dev)
{
    struct virtnet_info *vi = netdev_priv(dev);

    vi->duplex = DUPLEX_UNKNOWN;
    vi->speed = SPEED_UNKNOWN;
}
/* Refresh vi->speed / vi->duplex from device config space.  Only done when
 * VIRTIO_NET_F_SPEED_DUPLEX is negotiated, and each value is validated
 * before being accepted, so bogus device-supplied values are ignored.
 */
static void virtnet_update_settings(struct virtnet_info *vi)
{
    u32 speed;
    u8 duplex;

    if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX))
        return;

    virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed);

    if (ethtool_validate_speed(speed))
        vi->speed = speed;

    virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex);

    if (ethtool_validate_duplex(duplex))
        vi->duplex = duplex;
}
/* ethtool operations table.  supported_coalesce_params limits the coalesce
 * fields the ethtool core will pass through to {set,get}_coalesce. */
static const struct ethtool_ops virtnet_ethtool_ops = {
    .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES,
    .get_drvinfo = virtnet_get_drvinfo,
    .get_link = ethtool_op_get_link,
    .get_ringparam = virtnet_get_ringparam,
    .get_strings = virtnet_get_strings,
    .get_sset_count = virtnet_get_sset_count,
    .get_ethtool_stats = virtnet_get_ethtool_stats,
    .set_channels = virtnet_set_channels,
    .get_channels = virtnet_get_channels,
    .get_ts_info = ethtool_op_get_ts_info,
    .get_link_ksettings = virtnet_get_link_ksettings,
    .set_link_ksettings = virtnet_set_link_ksettings,
    .set_coalesce = virtnet_set_coalesce,
    .get_coalesce = virtnet_get_coalesce,
};
/* Quiesce the device for suspend/freeze: stop the config worker, detach
 * the netdev under the tx lock, cancel deferred rx refills, and disable
 * all napi contexts if the interface was running.  Ordering matters:
 * detaching before disabling napi prevents new tx/rx work from arriving
 * while the queues are being stopped.
 */
static void virtnet_freeze_down(struct virtio_device *vdev)
{
    struct virtnet_info *vi = vdev->priv;
    int i;

    /* Make sure no work handler is accessing the device */
    flush_work(&vi->config_work);

    netif_tx_lock_bh(vi->dev);
    netif_device_detach(vi->dev);
    netif_tx_unlock_bh(vi->dev);
    cancel_delayed_work_sync(&vi->refill);

    if (netif_running(vi->dev)) {
        for (i = 0; i < vi->max_queue_pairs; i++) {
            napi_disable(&vi->rq[i].napi);
            virtnet_napi_tx_disable(&vi->sq[i].napi);
        }
    }
}
/* Forward declaration: defined below, needed by virtnet_restore_up(). */
static int init_vqs(struct virtnet_info *vi);
/* Reverse of virtnet_freeze_down() for resume/restore: rebuild the
 * virtqueues, mark the device ready, refill rx buffers and re-enable napi
 * if the interface was running, then re-attach the netdev.
 *
 * Return: 0 on success or the init_vqs() error code.
 */
static int virtnet_restore_up(struct virtio_device *vdev)
{
    struct virtnet_info *vi = vdev->priv;
    int err, i;

    err = init_vqs(vi);
    if (err)
        return err;

    virtio_device_ready(vdev);

    if (netif_running(vi->dev)) {
        for (i = 0; i < vi->curr_queue_pairs; i++)
            /* If a queue could not be fully refilled, let the
             * refill worker retry asynchronously. */
            if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
                schedule_delayed_work(&vi->refill, 0);

        for (i = 0; i < vi->max_queue_pairs; i++) {
            virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
            virtnet_napi_tx_enable(vi, vi->sq[i].vq,
                           &vi->sq[i].napi);
        }
    }

    netif_tx_lock_bh(vi->dev);
    netif_device_attach(vi->dev);
    netif_tx_unlock_bh(vi->dev);
    return err;
}
/* Program the guest-offload bitmap into the device via the control queue.
 * The bitmap is staged in vi->ctrl so the buffer outlives the command.
 *
 * Return: 0 on success, -EINVAL if the device rejects the command.
 */
static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
{
    struct scatterlist sg;

    vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads);

    sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads));

    if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
                  VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
        dev_warn(&vi->dev->dev, "Fail to set guest offload.\n");
        return -EINVAL;
    }

    return 0;
}
/* Turn off all guest offloads (used when attaching an XDP program).
 * A no-op success when none were enabled to begin with.
 */
static int virtnet_clear_guest_offloads(struct virtnet_info *vi)
{
    if (!vi->guest_offloads)
        return 0;

    return virtnet_set_guest_offloads(vi, 0);
}
/* Re-enable the previously active guest offloads (used when detaching an
 * XDP program).  A no-op success if none were ever enabled.
 */
static int virtnet_restore_guest_offloads(struct virtnet_info *vi)
{
    if (!vi->guest_offloads)
        return 0;

    return virtnet_set_guest_offloads(vi, vi->guest_offloads);
}
/* Attach (@prog != NULL) or detach (@prog == NULL) an XDP program.
 *
 * Preconditions enforced here: guest offloads (LRO/CSUM) must be
 * switchable off, headers must not be split across buffers, and the MTU
 * must fit a single page.  Attaching reserves one extra tx queue per CPU
 * for XDP_TX, so enough spare queue pairs must exist.
 *
 * The update sequence is order-critical: napi is disabled first so no RX
 * path observes a half-installed program; on detach the program pointers
 * are cleared and offloads restored *before* the queue count changes,
 * with synchronize_net() ensuring no reader still sees the old program.
 */
static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
               struct netlink_ext_ack *extack)
{
    /* XDP needs headroom for its metadata within one page. */
    unsigned long int max_sz = PAGE_SIZE - sizeof(struct padded_vnet_hdr);
    struct virtnet_info *vi = netdev_priv(dev);
    struct bpf_prog *old_prog;
    u16 xdp_qp = 0, curr_qp;
    int i, err;

    if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)
        && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
            virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
            virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
            virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
            virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))) {
        NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing LRO/CSUM, disable LRO/CSUM first");
        return -EOPNOTSUPP;
    }

    if (vi->mergeable_rx_bufs && !vi->any_header_sg) {
        NL_SET_ERR_MSG_MOD(extack, "XDP expects header/data in single page, any_header_sg required");
        return -EINVAL;
    }

    if (dev->mtu > max_sz) {
        NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP");
        netdev_warn(dev, "XDP requires MTU less than %lu\n", max_sz);
        return -EINVAL;
    }

    curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs;
    if (prog)
        /* One dedicated XDP_TX queue per possible CPU. */
        xdp_qp = nr_cpu_ids;

    /* XDP requires extra queues for XDP_TX */
    if (curr_qp + xdp_qp > vi->max_queue_pairs) {
        NL_SET_ERR_MSG_MOD(extack, "Too few free TX rings available");
        netdev_warn(dev, "request %i queues but max is %i\n",
                curr_qp + xdp_qp, vi->max_queue_pairs);
        return -ENOMEM;
    }

    old_prog = rtnl_dereference(vi->rq[0].xdp_prog);
    if (!prog && !old_prog)
        return 0;

    if (prog)
        /* One reference per additional rx queue that will hold it. */
        bpf_prog_add(prog, vi->max_queue_pairs - 1);

    /* Make sure NAPI is not using any XDP TX queues for RX. */
    if (netif_running(dev)) {
        for (i = 0; i < vi->max_queue_pairs; i++) {
            napi_disable(&vi->rq[i].napi);
            virtnet_napi_tx_disable(&vi->sq[i].napi);
        }
    }

    if (!prog) {
        for (i = 0; i < vi->max_queue_pairs; i++) {
            rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
            if (i == 0)
                virtnet_restore_guest_offloads(vi);
        }
        /* Wait until no RCU reader can still see old_prog. */
        synchronize_net();
    }

    err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
    if (err)
        goto err;
    netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
    vi->xdp_queue_pairs = xdp_qp;

    if (prog) {
        for (i = 0; i < vi->max_queue_pairs; i++) {
            rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
            /* Only clear offloads on a fresh attach, not on a
             * program replacement. */
            if (i == 0 && !old_prog)
                virtnet_clear_guest_offloads(vi);
        }
    }

    for (i = 0; i < vi->max_queue_pairs; i++) {
        if (old_prog)
            bpf_prog_put(old_prog);
        if (netif_running(dev)) {
            virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
            virtnet_napi_tx_enable(vi, vi->sq[i].vq,
                           &vi->sq[i].napi);
        }
    }

    return 0;

err:
    /* Roll back: re-install old state and re-enable napi. */
    if (!prog) {
        virtnet_clear_guest_offloads(vi);
        for (i = 0; i < vi->max_queue_pairs; i++)
            rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog);
    }

    if (netif_running(dev)) {
        for (i = 0; i < vi->max_queue_pairs; i++) {
            virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
            virtnet_napi_tx_enable(vi, vi->sq[i].vq,
                           &vi->sq[i].napi);
        }
    }
    if (prog)
        bpf_prog_sub(prog, vi->max_queue_pairs - 1);
    return err;
}
/* ndo_bpf entry point: only program setup is supported. */
static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
    if (xdp->command == XDP_SETUP_PROG)
        return virtnet_xdp_set(dev, xdp->prog, xdp->extack);

    return -EINVAL;
}
/* ndo_get_phys_port_name: when acting as the standby leg of a failover
 * pair, expose the fixed name "sby".  -EOPNOTSUPP otherwise, or when the
 * caller's buffer cannot hold the name.
 */
static int virtnet_get_phys_port_name(struct net_device *dev, char *buf,
                                      size_t len)
{
    struct virtnet_info *vi = netdev_priv(dev);
    int written;

    if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
        return -EOPNOTSUPP;

    written = snprintf(buf, len, "sby");
    if (written >= len)
        return -EOPNOTSUPP;

    return 0;
}
/* ndo_set_features: only NETIF_F_LRO toggling is handled here.  LRO may
 * not change while XDP is active (xdp_queue_pairs != 0).  The offload
 * bitmap is pushed to the device first; vi->guest_offloads is updated
 * only after the device accepted the change.
 */
static int virtnet_set_features(struct net_device *dev,
                                netdev_features_t features)
{
    struct virtnet_info *vi = netdev_priv(dev);
    u64 offloads;
    int err;

    if ((dev->features ^ features) & NETIF_F_LRO) {
        if (vi->xdp_queue_pairs)
            return -EBUSY;

        if (features & NETIF_F_LRO)
            offloads = vi->guest_offloads_capable;
        else
            offloads = vi->guest_offloads_capable &
                   ~GUEST_OFFLOAD_LRO_MASK;

        err = virtnet_set_guest_offloads(vi, offloads);
        if (err)
            return err;
        vi->guest_offloads = offloads;
    }

    return 0;
}
/* Network device operations for virtio-net. */
static const struct net_device_ops virtnet_netdev = {
    .ndo_open            = virtnet_open,
    .ndo_stop            = virtnet_close,
    .ndo_start_xmit      = start_xmit,
    .ndo_validate_addr   = eth_validate_addr,
    .ndo_set_mac_address = virtnet_set_mac_address,
    .ndo_set_rx_mode     = virtnet_set_rx_mode,
    .ndo_get_stats64     = virtnet_stats,
    .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
    .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
    .ndo_bpf        = virtnet_xdp,
    .ndo_xdp_xmit        = virtnet_xdp_xmit,
    .ndo_features_check  = passthru_features_check,
    .ndo_get_phys_port_name = virtnet_get_phys_port_name,
    .ndo_set_features    = virtnet_set_features,
};
/* Deferred handler for config-space change interrupts: process link
 * announce requests, then reconcile the cached link state with the
 * device's status field, toggling carrier and tx queues accordingly.
 */
static void virtnet_config_changed_work(struct work_struct *work)
{
    struct virtnet_info *vi =
        container_of(work, struct virtnet_info, config_work);
    u16 v;

    if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS,
                 struct virtio_net_config, status, &v) < 0)
        return;

    if (v & VIRTIO_NET_S_ANNOUNCE) {
        /* Host asked us to announce ourselves (e.g. after
         * migration); send gratuitous packets and ack. */
        netdev_notify_peers(vi->dev);
        virtnet_ack_link_announce(vi);
    }

    /* Ignore unknown (future) status bits */
    v &= VIRTIO_NET_S_LINK_UP;

    if (vi->status == v)
        return;

    vi->status = v;

    if (vi->status & VIRTIO_NET_S_LINK_UP) {
        virtnet_update_settings(vi);
        netif_carrier_on(vi->dev);
        netif_tx_wake_all_queues(vi->dev);
    } else {
        netif_carrier_off(vi->dev);
        netif_tx_stop_all_queues(vi->dev);
    }
}
/* virtio config-change callback (interrupt context): defer the real work
 * to virtnet_config_changed_work(). */
static void virtnet_config_changed(struct virtio_device *vdev)
{
    struct virtnet_info *vi = vdev->priv;

    schedule_work(&vi->config_work);
}
/* Delete all napi instances and free the queue arrays and ctrl buffer.
 * __netif_napi_del() defers some teardown to RCU, so an explicit
 * synchronize_net() is required before kfree()ing the arrays the napi
 * structs are embedded in.
 */
static void virtnet_free_queues(struct virtnet_info *vi)
{
    int i;

    for (i = 0; i < vi->max_queue_pairs; i++) {
        __netif_napi_del(&vi->rq[i].napi);
        __netif_napi_del(&vi->sq[i].napi);
    }

    /* We called __netif_napi_del(),
     * we need to respect an RCU grace period before freeing vi->rq
     */
    synchronize_net();

    kfree(vi->rq);
    kfree(vi->sq);
    kfree(vi->ctrl);
}
/* Release each rx queue's page list and drop its XDP program reference.
 * Caller must hold rtnl (rtnl_dereference of xdp_prog).
 */
static void _free_receive_bufs(struct virtnet_info *vi)
{
    struct bpf_prog *old_prog;
    int i;

    for (i = 0; i < vi->max_queue_pairs; i++) {
        while (vi->rq[i].pages)
            __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);

        old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
        RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL);
        if (old_prog)
            bpf_prog_put(old_prog);
    }
}
/* rtnl-acquiring wrapper around _free_receive_bufs(). */
static void free_receive_bufs(struct virtnet_info *vi)
{
    rtnl_lock();
    _free_receive_bufs(vi);
    rtnl_unlock();
}
/* Drop the page-fragment allocator page held by each rx queue, if any. */
static void free_receive_page_frags(struct virtnet_info *vi)
{
    int i;

    for (i = 0; i < vi->max_queue_pairs; i++) {
        struct page *page = vi->rq[i].alloc_frag.page;

        if (page)
            put_page(page);
    }
}
/* Reclaim every buffer still queued on the virtqueues after the device
 * has been reset.  Tx entries are either skbs or tagged XDP frames;
 * rx entries are freed according to the active buffer scheme
 * (mergeable / big-packets page chains / small buffers).
 */
static void free_unused_bufs(struct virtnet_info *vi)
{
    void *buf;
    int i;

    for (i = 0; i < vi->max_queue_pairs; i++) {
        struct virtqueue *vq = vi->sq[i].vq;
        while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
            /* Pointer tagging distinguishes skbs from xdp frames. */
            if (!is_xdp_frame(buf))
                dev_kfree_skb(buf);
            else
                xdp_return_frame(ptr_to_xdp(buf));
        }
    }

    for (i = 0; i < vi->max_queue_pairs; i++) {
        struct virtqueue *vq = vi->rq[i].vq;

        while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
            if (vi->mergeable_rx_bufs) {
                put_page(virt_to_head_page(buf));
            } else if (vi->big_packets) {
                give_pages(&vi->rq[i], buf);
            } else {
                put_page(virt_to_head_page(buf));
            }
        }
    }
}
/* Tear down all virtqueues: clear affinity hints first, then let the
 * transport delete the vqs, and finally free the queue bookkeeping. */
static void virtnet_del_vqs(struct virtnet_info *vi)
{
    struct virtio_device *vdev = vi->vdev;

    virtnet_clean_affinity(vi);

    vdev->config->del_vqs(vdev);

    virtnet_free_queues(vi);
}
/* How large should a single buffer be so a queue full of these can fit at
 * least one full packet?
 * Logic below assumes the mergeable buffer header is used.
 */
static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq)
{
    const unsigned int hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
    unsigned int rq_size = virtqueue_get_vring_size(vq);
    unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu;
    unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len;
    unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size);
    unsigned int payload_len;

    /* Headroom for the header is accounted separately; never let the
     * payload portion drop below GOOD_PACKET_LEN. */
    payload_len = min_buf_len > hdr_len ? min_buf_len - hdr_len : 0;

    return max(payload_len, (unsigned int)GOOD_PACKET_LEN);
}
/* Allocate and locate all virtqueues for this device.
 *
 * Builds the parallel vqs/callbacks/names (and optional per-vq ctx)
 * parameter arrays expected by the transport's find_vqs(), then wires the
 * resulting virtqueues back into vi->rq[]/vi->sq[] (and vi->cvq if the
 * device has a control queue).  The tail of the function is a shared
 * cleanup ladder: the temporary arrays are always freed, success or not.
 *
 * Return: 0 on success, negative errno on allocation or find_vqs failure.
 */
static int virtnet_find_vqs(struct virtnet_info *vi)
{
    vq_callback_t **callbacks;
    struct virtqueue **vqs;
    int ret = -ENOMEM;
    int i, total_vqs;
    const char **names;
    bool *ctx;

    /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
     * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by
     * possible control vq.
     */
    total_vqs = vi->max_queue_pairs * 2 +
            virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ);

    /* Allocate space for find_vqs parameters */
    vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
    if (!vqs)
        goto err_vq;
    callbacks = kmalloc_array(total_vqs, sizeof(*callbacks), GFP_KERNEL);
    if (!callbacks)
        goto err_callback;
    names = kmalloc_array(total_vqs, sizeof(*names), GFP_KERNEL);
    if (!names)
        goto err_names;
    if (!vi->big_packets || vi->mergeable_rx_bufs) {
        /* Per-vq ctx lets rx buffers carry out-of-band metadata. */
        ctx = kcalloc(total_vqs, sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
            goto err_ctx;
    } else {
        ctx = NULL;
    }

    /* Parameters for control virtqueue, if any */
    if (vi->has_cvq) {
        callbacks[total_vqs - 1] = NULL;
        names[total_vqs - 1] = "control";
    }

    /* Allocate/initialize parameters for send/receive virtqueues */
    for (i = 0; i < vi->max_queue_pairs; i++) {
        callbacks[rxq2vq(i)] = skb_recv_done;
        callbacks[txq2vq(i)] = skb_xmit_done;
        sprintf(vi->rq[i].name, "input.%d", i);
        sprintf(vi->sq[i].name, "output.%d", i);
        names[rxq2vq(i)] = vi->rq[i].name;
        names[txq2vq(i)] = vi->sq[i].name;
        if (ctx)
            ctx[rxq2vq(i)] = true;
    }

    ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks,
                     names, ctx, NULL);
    if (ret)
        goto err_find;

    if (vi->has_cvq) {
        vi->cvq = vqs[total_vqs - 1];
        if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
            vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
    }

    for (i = 0; i < vi->max_queue_pairs; i++) {
        vi->rq[i].vq = vqs[rxq2vq(i)];
        vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq);
        vi->sq[i].vq = vqs[txq2vq(i)];
    }

    /* run here: ret == 0. */

err_find:
    kfree(ctx);
err_ctx:
    kfree(names);
err_names:
    kfree(callbacks);
err_callback:
    kfree(vqs);
err_vq:
    return ret;
}
/* Allocate the ctrl buffer and the per-queue-pair send/receive arrays,
 * then initialize each pair's napi contexts, scatterlists, stats sync
 * structures and the rx refill work item.  On any allocation failure,
 * everything allocated so far is unwound and -ENOMEM is returned.
 */
static int virtnet_alloc_queues(struct virtnet_info *vi)
{
    int i;

    vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL);
    if (!vi->ctrl)
        goto err_ctrl;
    vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL);
    if (!vi->sq)
        goto err_sq;
    vi->rq = kcalloc(vi->max_queue_pairs, sizeof(*vi->rq), GFP_KERNEL);
    if (!vi->rq)
        goto err_rq;

    INIT_DELAYED_WORK(&vi->refill, refill_work);
    for (i = 0; i < vi->max_queue_pairs; i++) {
        vi->rq[i].pages = NULL;
        netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
                   napi_weight);
        /* tx napi starts disabled (weight 0) unless napi_tx is set. */
        netif_tx_napi_add(vi->dev, &vi->sq[i].napi, virtnet_poll_tx,
                  napi_tx ? napi_weight : 0);

        sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
        ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
        sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));

        u64_stats_init(&vi->rq[i].stats.syncp);
        u64_stats_init(&vi->sq[i].stats.syncp);
    }

    return 0;

err_rq:
    kfree(vi->sq);
err_sq:
    kfree(vi->ctrl);
err_ctrl:
    return -ENOMEM;
}
/* Full queue bring-up: allocate the queue bookkeeping, create the
 * virtqueues, and set the initial CPU affinity (under the hotplug lock
 * so the online-CPU set cannot change underneath).
 *
 * Return: 0 on success; on failure everything allocated is freed.
 */
static int init_vqs(struct virtnet_info *vi)
{
    int ret;

    /* Allocate send & receive queues */
    ret = virtnet_alloc_queues(vi);
    if (ret)
        goto err;

    ret = virtnet_find_vqs(vi);
    if (ret)
        goto err_free;

    get_online_cpus();
    virtnet_set_affinity(vi);
    put_online_cpus();

    return 0;

err_free:
    virtnet_free_queues(vi);
err:
    return ret;
}
#ifdef CONFIG_SYSFS
/* sysfs: per-rx-queue read-only attribute exposing the buffer size the
 * mergeable-buffer allocator would currently pick, derived from the
 * queue's EWMA of recent packet lengths plus headroom/tailroom.
 */
static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
        char *buf)
{
    struct virtnet_info *vi = netdev_priv(queue->dev);
    unsigned int queue_index = get_netdev_rx_queue_index(queue);
    unsigned int headroom = virtnet_get_headroom(vi);
    unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
    struct ewma_pkt_len *avg;

    BUG_ON(queue_index >= vi->max_queue_pairs);
    avg = &vi->rq[queue_index].mrg_avg_pkt_len;
    return sprintf(buf, "%u\n",
               get_mergeable_buf_len(&vi->rq[queue_index], avg,
                       SKB_DATA_ALIGN(headroom + tailroom)));
}

static struct rx_queue_attribute mergeable_rx_buffer_size_attribute =
    __ATTR_RO(mergeable_rx_buffer_size);

/* Attribute group registered via dev->sysfs_rx_queue_group in probe
 * when mergeable rx buffers are in use. */
static struct attribute *virtio_net_mrg_rx_attrs[] = {
    &mergeable_rx_buffer_size_attribute.attr,
    NULL
};

static const struct attribute_group virtio_net_mrg_rx_group = {
    .name = "virtio_net",
    .attrs = virtio_net_mrg_rx_attrs
};
#endif
/* Feature-dependency check: if the device advertises @fbit without the
 * feature named @dname that it depends on, log the inconsistency and
 * return true (i.e. "fail").  Returns false when the bit is absent.
 */
static bool virtnet_fail_on_feature(struct virtio_device *vdev,
                                    unsigned int fbit,
                                    const char *fname, const char *dname)
{
    if (virtio_has_feature(vdev, fbit)) {
        dev_err(&vdev->dev, "device advertises feature %s but not %s",
            fname, dname);
        return true;
    }

    return false;
}

/* Stringify the feature bit so the log names it without boilerplate. */
#define VIRTNET_FAIL_ON(vdev, fbit, dbit)           \
    virtnet_fail_on_feature(vdev, fbit, #fbit, dbit)
/* Sanity-check feature dependencies: every control-queue-based feature
 * (rx filtering, vlan, announce, multiqueue, mac setting) requires
 * VIRTIO_NET_F_CTRL_VQ.  Returns false if any dependency is violated.
 */
static bool virtnet_validate_features(struct virtio_device *vdev)
{
    if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) &&
        (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX,
                 "VIRTIO_NET_F_CTRL_VQ") ||
         VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN,
                 "VIRTIO_NET_F_CTRL_VQ") ||
         VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE,
                 "VIRTIO_NET_F_CTRL_VQ") ||
         VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") ||
         VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR,
                 "VIRTIO_NET_F_CTRL_VQ"))) {
        return false;
    }

    return true;
}
/* Ethernet MTU bounds used both for validation and netdev limits. */
#define MIN_MTU ETH_MIN_MTU
#define MAX_MTU ETH_MAX_MTU

/* virtio .validate callback, run before probe with features not yet
 * finalized: require config-space access, check feature dependencies,
 * and drop VIRTIO_NET_F_MTU if the advertised MTU is below the legal
 * minimum (rather than failing the whole device).
 */
static int virtnet_validate(struct virtio_device *vdev)
{
    if (!vdev->config->get) {
        dev_err(&vdev->dev, "%s failure: config access disabled\n",
            __func__);
        return -EINVAL;
    }

    if (!virtnet_validate_features(vdev))
        return -EINVAL;

    if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
        int mtu = virtio_cread16(vdev,
                     offsetof(struct virtio_net_config,
                          mtu));
        if (mtu < MIN_MTU)
            __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
    }

    return 0;
}
/* Device probe: allocate the netdev, translate negotiated virtio features
 * into netdev feature flags, read MAC/MTU from config space, build the
 * virtqueues, optionally create a failover slave, register the netdev and
 * cpu-hotplug callbacks, and finally establish link state.  The error
 * ladder at the bottom unwinds in strict reverse order of setup.
 */
static int virtnet_probe(struct virtio_device *vdev)
{
    int i, err = -ENOMEM;
    struct net_device *dev;
    struct virtnet_info *vi;
    u16 max_queue_pairs;
    int mtu;

    /* Find if host supports multiqueue virtio_net device */
    err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ,
                   struct virtio_net_config,
                   max_virtqueue_pairs, &max_queue_pairs);

    /* We need at least 2 queue's */
    if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
        max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
        !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
        max_queue_pairs = 1;

    /* Allocate ourselves a network device with room for our info */
    dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs);
    if (!dev)
        return -ENOMEM;

    /* Set up network device as normal. */
    dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
    dev->netdev_ops = &virtnet_netdev;
    dev->features = NETIF_F_HIGHDMA;

    dev->ethtool_ops = &virtnet_ethtool_ops;
    SET_NETDEV_DEV(dev, &vdev->dev);

    /* Do we support "hardware" checksums? */
    if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
        /* This opens up the world of extra features. */
        dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
        if (csum)
            dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;

        if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
            dev->hw_features |= NETIF_F_TSO
                | NETIF_F_TSO_ECN | NETIF_F_TSO6;
        }
        /* Individual feature bits: what can host handle? */
        if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
            dev->hw_features |= NETIF_F_TSO;
        if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
            dev->hw_features |= NETIF_F_TSO6;
        if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
            dev->hw_features |= NETIF_F_TSO_ECN;

        dev->features |= NETIF_F_GSO_ROBUST;

        if (gso)
            dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
        /* (!csum && gso) case will be fixed by register_netdev() */
    }
    if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
        dev->features |= NETIF_F_RXCSUM;
    if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
        virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6))
        dev->features |= NETIF_F_LRO;
    if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS))
        dev->hw_features |= NETIF_F_LRO;

    dev->vlan_features = dev->features;

    /* MTU range: 68 - 65535 */
    dev->min_mtu = MIN_MTU;
    dev->max_mtu = MAX_MTU;

    /* Configuration may specify what MAC to use.  Otherwise random. */
    if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
        virtio_cread_bytes(vdev,
                   offsetof(struct virtio_net_config, mac),
                   dev->dev_addr, dev->addr_len);
    else
        eth_hw_addr_random(dev);

    /* Set up our device-specific information */
    vi = netdev_priv(dev);
    vi->dev = dev;
    vi->vdev = vdev;
    vdev->priv = vi;

    INIT_WORK(&vi->config_work, virtnet_config_changed_work);

    /* If we can receive ANY GSO packets, we must allocate large ones. */
    if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
        virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
        virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
        virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
        vi->big_packets = true;

    if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
        vi->mergeable_rx_bufs = true;

    /* Modern (v1) devices always use the mergeable-sized header. */
    if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
        virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
        vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
    else
        vi->hdr_len = sizeof(struct virtio_net_hdr);

    if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
        virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
        vi->any_header_sg = true;

    if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
        vi->has_cvq = true;

    if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
        mtu = virtio_cread16(vdev,
                     offsetof(struct virtio_net_config,
                          mtu));
        if (mtu < dev->min_mtu) {
            /* Should never trigger: MTU was previously validated
             * in virtnet_validate.
             */
            dev_err(&vdev->dev,
                "device MTU appears to have changed it is now %d < %d",
                mtu, dev->min_mtu);
            err = -EINVAL;
            goto free;
        }

        dev->mtu = mtu;
        dev->max_mtu = mtu;

        /* TODO: size buffers correctly in this case. */
        if (dev->mtu > ETH_DATA_LEN)
            vi->big_packets = true;
    }

    if (vi->any_header_sg)
        dev->needed_headroom = vi->hdr_len;

    /* Enable multiqueue by default */
    if (num_online_cpus() >= max_queue_pairs)
        vi->curr_queue_pairs = max_queue_pairs;
    else
        vi->curr_queue_pairs = num_online_cpus();
    vi->max_queue_pairs = max_queue_pairs;

    /* Allocate/initialize the rx/tx queues, and invoke find_vqs */
    err = init_vqs(vi);
    if (err)
        goto free;

#ifdef CONFIG_SYSFS
    if (vi->mergeable_rx_bufs)
        dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group;
#endif
    netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
    netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);

    virtnet_init_settings(dev);

    if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY)) {
        vi->failover = net_failover_create(vi->dev);
        if (IS_ERR(vi->failover)) {
            err = PTR_ERR(vi->failover);
            goto free_vqs;
        }
    }

    err = register_netdev(dev);
    if (err) {
        pr_debug("virtio_net: registering device failed\n");
        goto free_failover;
    }

    virtio_device_ready(vdev);

    err = virtnet_cpu_notif_add(vi);
    if (err) {
        pr_debug("virtio_net: registering cpu notifier failed\n");
        goto free_unregister_netdev;
    }

    virtnet_set_queues(vi, vi->curr_queue_pairs);

    /* Assume link up if device can't report link status,
       otherwise get link status from config. */
    netif_carrier_off(dev);
    if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
        schedule_work(&vi->config_work);
    } else {
        vi->status = VIRTIO_NET_S_LINK_UP;
        virtnet_update_settings(vi);
        netif_carrier_on(dev);
    }

    for (i = 0; i < ARRAY_SIZE(guest_offloads); i++)
        if (virtio_has_feature(vi->vdev, guest_offloads[i]))
            set_bit(guest_offloads[i], &vi->guest_offloads);
    vi->guest_offloads_capable = vi->guest_offloads;

    pr_debug("virtnet: registered device %s with %d RX and TX vq's\n",
         dev->name, max_queue_pairs);

    return 0;

free_unregister_netdev:
    /* Reset the device before unregistering so no vq activity remains. */
    vi->vdev->config->reset(vdev);

    unregister_netdev(dev);
free_failover:
    net_failover_destroy(vi->failover);
free_vqs:
    cancel_delayed_work_sync(&vi->refill);
    free_receive_page_frags(vi);
    virtnet_del_vqs(vi);
free:
    free_netdev(dev);
    return err;
}
/* Common teardown for remove/freeze: reset the device first so the host
 * stops touching buffers, then reclaim and free everything vq-related. */
static void remove_vq_common(struct virtnet_info *vi)
{
    vi->vdev->config->reset(vi->vdev);

    /* Free unused buffers in both send and recv, if any. */
    free_unused_bufs(vi);

    free_receive_bufs(vi);

    free_receive_page_frags(vi);

    virtnet_del_vqs(vi);
}
/* Device removal: reverse of virtnet_probe() — cpuhp callbacks, config
 * worker, netdev registration, failover, vqs, then the netdev itself. */
static void virtnet_remove(struct virtio_device *vdev)
{
    struct virtnet_info *vi = vdev->priv;

    virtnet_cpu_notif_remove(vi);

    /* Make sure no work handler is accessing the device. */
    flush_work(&vi->config_work);

    unregister_netdev(vi->dev);

    net_failover_destroy(vi->failover);

    remove_vq_common(vi);

    free_netdev(vi->dev);
}
/* PM freeze hook: detach cpuhp callbacks, quiesce the datapath, and tear
 * down the vqs (they are rebuilt on restore). */
static __maybe_unused int virtnet_freeze(struct virtio_device *vdev)
{
    struct virtnet_info *vi = vdev->priv;

    virtnet_cpu_notif_remove(vi);
    virtnet_freeze_down(vdev);
    remove_vq_common(vi);

    return 0;
}
/* PM restore hook: rebuild vqs and datapath, re-request the active queue
 * pair count from the device, and re-register the cpuhp callbacks. */
static __maybe_unused int virtnet_restore(struct virtio_device *vdev)
{
    struct virtnet_info *vi = vdev->priv;
    int err;

    err = virtnet_restore_up(vdev);
    if (err)
        return err;
    virtnet_set_queues(vi, vi->curr_queue_pairs);

    err = virtnet_cpu_notif_add(vi);
    if (err)
        return err;

    return 0;
}
/* Match any virtio device with the network device ID. */
static struct virtio_device_id id_table[] = {
    { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
    { 0 },
};

/* Feature bits negotiated for both modern and legacy devices. */
#define VIRTNET_FEATURES \
    VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \
    VIRTIO_NET_F_MAC, \
    VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \
    VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \
    VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \
    VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \
    VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
    VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
    VIRTIO_NET_F_CTRL_MAC_ADDR, \
    VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
    VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY

static unsigned int features[] = {
    VIRTNET_FEATURES,
};

/* Legacy (pre-1.0) devices additionally negotiate GSO and ANY_LAYOUT. */
static unsigned int features_legacy[] = {
    VIRTNET_FEATURES,
    VIRTIO_NET_F_GSO,
    VIRTIO_F_ANY_LAYOUT,
};
/* Driver registration record wiring the probe/remove/PM callbacks above. */
static struct virtio_driver virtio_net_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.feature_table_legacy = features_legacy,
	.feature_table_size_legacy = ARRAY_SIZE(features_legacy),
	.driver.name =	KBUILD_MODNAME,
	.driver.owner =	THIS_MODULE,
	.id_table =	id_table,
	.validate =	virtnet_validate,
	.probe =	virtnet_probe,
	.remove =	virtnet_remove,
	.config_changed = virtnet_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze =	virtnet_freeze,
	.restore =	virtnet_restore,
#endif
};
/*
 * Module init: register two CPU-hotplug multi-instance states (one for
 * CPUs coming online, one for CPUs going dead) and then the virtio
 * driver itself. On failure each step is unwound in reverse order via
 * the goto chain below.
 */
static __init int virtio_net_driver_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online",
				      virtnet_cpu_online,
				      virtnet_cpu_down_prep);
	if (ret < 0)
		goto out;
	/* With CPUHP_AP_ONLINE_DYN, a non-negative return is the
	 * dynamically allocated state number; remember it for teardown. */
	virtionet_online = ret;

	ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "virtio/net:dead",
				      NULL, virtnet_cpu_dead);
	if (ret)
		goto err_dead;

	ret = register_virtio_driver(&virtio_net_driver);
	if (ret)
		goto err_virtio;
	return 0;

err_virtio:
	cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
err_dead:
	cpuhp_remove_multi_state(virtionet_online);
out:
	return ret;
}
module_init(virtio_net_driver_init);
/* Module exit: unwind virtio_net_driver_init() in reverse order. */
static __exit void virtio_net_driver_exit(void)
{
	unregister_virtio_driver(&virtio_net_driver);
	cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
	cpuhp_remove_multi_state(virtionet_online);
}
module_exit(virtio_net_driver_exit);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
reuk/waveguide | wayverb/JuceLibraryCode/modules/juce_graphics/contexts/juce_GraphicsContext.h | 35638 | /*
==============================================================================
This file is part of the JUCE library.
Copyright (c) 2015 - ROLI Ltd.
Permission is granted to use this software under the terms of either:
a) the GPL v2 (or any later version)
b) the Affero GPL v3
Details of these licenses can be found at: www.gnu.org/licenses
JUCE is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
------------------------------------------------------------------------------
To release a closed-source product which uses JUCE, commercial licenses are
available: visit www.juce.com for more information.
==============================================================================
*/
#ifndef JUCE_GRAPHICSCONTEXT_H_INCLUDED
#define JUCE_GRAPHICSCONTEXT_H_INCLUDED
//==============================================================================
/**
A graphics context, used for drawing a component or image.
When a Component needs painting, a Graphics context is passed to its
Component::paint() method, and this you then call methods within this
object to actually draw the component's content.
A Graphics can also be created from an image, to allow drawing directly onto
that image.
@see Component::paint
*/
class JUCE_API Graphics
{
public:
//==============================================================================
/** Creates a Graphics object to draw directly onto the given image.
The graphics object that is created will be set up to draw onto the image,
with the context's clipping area being the entire size of the image, and its
origin being the image's origin. To draw into a subsection of an image, use the
reduceClipRegion() and setOrigin() methods.
Obviously you shouldn't delete the image before this context is deleted.
*/
explicit Graphics (const Image& imageToDrawOnto);
/** Destructor. */
~Graphics();
//==============================================================================
/** Changes the current drawing colour.
This sets the colour that will now be used for drawing operations - it also
sets the opacity to that of the colour passed-in.
If a brush is being used when this method is called, the brush will be deselected,
and any subsequent drawing will be done with a solid colour brush instead.
@see setOpacity
*/
void setColour (Colour newColour);
/** Changes the opacity to use with the current colour.
If a solid colour is being used for drawing, this changes its opacity
to this new value (i.e. it doesn't multiply the colour's opacity by this amount).
If a gradient is being used, this will have no effect on it.
A value of 0.0 is completely transparent, 1.0 is completely opaque.
*/
void setOpacity (float newOpacity);
/** Sets the context to use a gradient for its fill pattern.
*/
void setGradientFill (const ColourGradient& gradient);
/** Sets the context to use a tiled image pattern for filling.
Make sure that you don't delete this image while it's still being used by
this context!
*/
void setTiledImageFill (const Image& imageToUse,
int anchorX, int anchorY,
float opacity);
/** Changes the current fill settings.
@see setColour, setGradientFill, setTiledImageFill
*/
void setFillType (const FillType& newFill);
//==============================================================================
/** Changes the font to use for subsequent text-drawing functions.
Note there's also a setFont (float, int) method to quickly change the size and
style of the current font.
@see drawSingleLineText, drawMultiLineText, drawText, drawFittedText
*/
void setFont (const Font& newFont);
/** Changes the size of the currently-selected font.
This is a convenient shortcut that changes the context's current font to a
different size. The typeface won't be changed.
@see Font
*/
void setFont (float newFontHeight);
/** Returns the currently selected font. */
Font getCurrentFont() const;
/** Draws a one-line text string.
This will use the current colour (or brush) to fill the text. The font is the last
one specified by setFont().
@param text the string to draw
@param startX the position to draw the left-hand edge of the text
@param baselineY the position of the text's baseline
@param justification the horizontal flags indicate which end of the text string is
anchored at the specified point.
@see drawMultiLineText, drawText, drawFittedText, GlyphArrangement::addLineOfText
*/
void drawSingleLineText (const String& text,
int startX, int baselineY,
Justification justification = Justification::left) const;
/** Draws text across multiple lines.
This will break the text onto a new line where there's a new-line or
carriage-return character, or at a word-boundary when the text becomes wider
than the size specified by the maximumLineWidth parameter.
@see setFont, drawSingleLineText, drawFittedText, GlyphArrangement::addJustifiedText
*/
void drawMultiLineText (const String& text,
int startX, int baselineY,
int maximumLineWidth) const;
/** Draws a line of text within a specified rectangle.
The text will be positioned within the rectangle based on the justification
flags passed-in. If the string is too long to fit inside the rectangle, it will
either be truncated or will have ellipsis added to its end (if the useEllipsesIfTooBig
flag is true).
@see drawSingleLineText, drawFittedText, drawMultiLineText, GlyphArrangement::addJustifiedText
*/
void drawText (const String& text,
int x, int y, int width, int height,
Justification justificationType,
bool useEllipsesIfTooBig = true) const;
/** Draws a line of text within a specified rectangle.
The text will be positioned within the rectangle based on the justification
flags passed-in. If the string is too long to fit inside the rectangle, it will
either be truncated or will have ellipsis added to its end (if the useEllipsesIfTooBig
flag is true).
@see drawSingleLineText, drawFittedText, drawMultiLineText, GlyphArrangement::addJustifiedText
*/
void drawText (const String& text,
Rectangle<int> area,
Justification justificationType,
bool useEllipsesIfTooBig = true) const;
/** Draws a line of text within a specified rectangle.
The text will be positioned within the rectangle based on the justification
flags passed-in. If the string is too long to fit inside the rectangle, it will
either be truncated or will have ellipsis added to its end (if the useEllipsesIfTooBig
flag is true).
@see drawSingleLineText, drawFittedText, drawMultiLineText, GlyphArrangement::addJustifiedText
*/
void drawText (const String& text,
Rectangle<float> area,
Justification justificationType,
bool useEllipsesIfTooBig = true) const;
/** Tries to draw a text string inside a given space.
This does its best to make the given text readable within the specified rectangle,
so it is useful for labelling things.
If the text is too big, it'll be squashed horizontally or broken over multiple lines
if the maximumLinesToUse value allows this. If the text just won't fit into the space,
it'll cram as much as possible in there, and put some ellipsis at the end to show that
it's been truncated.
A Justification parameter lets you specify how the text is laid out within the rectangle,
both horizontally and vertically.
The minimumHorizontalScale parameter specifies how much the text can be squashed horizontally
to try to squeeze it into the space. If you don't want any horizontal scaling to occur, you
can set this value to 1.0f. Pass 0 if you want it to use a default value.
@see GlyphArrangement::addFittedText
*/
void drawFittedText (const String& text,
int x, int y, int width, int height,
Justification justificationFlags,
int maximumNumberOfLines,
float minimumHorizontalScale = 0.0f) const;
/** Tries to draw a text string inside a given space.
This does its best to make the given text readable within the specified rectangle,
so it is useful for labelling things.
If the text is too big, it'll be squashed horizontally or broken over multiple lines
if the maximumLinesToUse value allows this. If the text just won't fit into the space,
it'll cram as much as possible in there, and put some ellipsis at the end to show that
it's been truncated.
A Justification parameter lets you specify how the text is laid out within the rectangle,
both horizontally and vertically.
The minimumHorizontalScale parameter specifies how much the text can be squashed horizontally
to try to squeeze it into the space. If you don't want any horizontal scaling to occur, you
can set this value to 1.0f. Pass 0 if you want it to use a default value.
@see GlyphArrangement::addFittedText
*/
void drawFittedText (const String& text,
Rectangle<int> area,
Justification justificationFlags,
int maximumNumberOfLines,
float minimumHorizontalScale = 0.0f) const;
//==============================================================================
/** Fills the context's entire clip region with the current colour or brush.
(See also the fillAll (Colour) method which is a quick way of filling
it with a given colour).
*/
void fillAll() const;
/** Fills the context's entire clip region with a given colour.
This leaves the context's current colour and brush unchanged, it just
uses the specified colour temporarily.
*/
void fillAll (Colour colourToUse) const;
//==============================================================================
/** Fills a rectangle with the current colour or brush.
@see drawRect, fillRoundedRectangle
*/
void fillRect (Rectangle<int> rectangle) const;
/** Fills a rectangle with the current colour or brush.
@see drawRect, fillRoundedRectangle
*/
void fillRect (Rectangle<float> rectangle) const;
/** Fills a rectangle with the current colour or brush.
@see drawRect, fillRoundedRectangle
*/
void fillRect (int x, int y, int width, int height) const;
/** Fills a rectangle with the current colour or brush.
@see drawRect, fillRoundedRectangle
*/
void fillRect (float x, float y, float width, float height) const;
/** Fills a set of rectangles using the current colour or brush.
If you have a lot of rectangles to draw, it may be more efficient
to create a RectangleList and use this method than to call fillRect()
multiple times.
*/
void fillRectList (const RectangleList<float>& rectangles) const;
/** Fills a set of rectangles using the current colour or brush.
If you have a lot of rectangles to draw, it may be more efficient
to create a RectangleList and use this method than to call fillRect()
multiple times.
*/
void fillRectList (const RectangleList<int>& rectangles) const;
/** Uses the current colour or brush to fill a rectangle with rounded corners.
@see drawRoundedRectangle, Path::addRoundedRectangle
*/
void fillRoundedRectangle (float x, float y, float width, float height,
float cornerSize) const;
/** Uses the current colour or brush to fill a rectangle with rounded corners.
@see drawRoundedRectangle, Path::addRoundedRectangle
*/
void fillRoundedRectangle (Rectangle<float> rectangle,
float cornerSize) const;
/** Fills a rectangle with a checkerboard pattern, alternating between two colours. */
void fillCheckerBoard (Rectangle<int> area,
int checkWidth, int checkHeight,
Colour colour1, Colour colour2) const;
/** Draws a rectangular outline, using the current colour or brush.
The lines are drawn inside the given rectangle, and greater line thicknesses extend inwards.
@see fillRect
*/
void drawRect (int x, int y, int width, int height, int lineThickness = 1) const;
/** Draws a rectangular outline, using the current colour or brush.
The lines are drawn inside the given rectangle, and greater line thicknesses extend inwards.
@see fillRect
*/
void drawRect (float x, float y, float width, float height, float lineThickness = 1.0f) const;
/** Draws a rectangular outline, using the current colour or brush.
The lines are drawn inside the given rectangle, and greater line thicknesses extend inwards.
@see fillRect
*/
void drawRect (Rectangle<int> rectangle, int lineThickness = 1) const;
/** Draws a rectangular outline, using the current colour or brush.
The lines are drawn inside the given rectangle, and greater line thicknesses extend inwards.
@see fillRect
*/
void drawRect (Rectangle<float> rectangle, float lineThickness = 1.0f) const;
/** Uses the current colour or brush to draw the outline of a rectangle with rounded corners.
@see fillRoundedRectangle, Path::addRoundedRectangle
*/
void drawRoundedRectangle (float x, float y, float width, float height,
float cornerSize, float lineThickness) const;
/** Uses the current colour or brush to draw the outline of a rectangle with rounded corners.
@see fillRoundedRectangle, Path::addRoundedRectangle
*/
void drawRoundedRectangle (Rectangle<float> rectangle,
float cornerSize, float lineThickness) const;
/** Fills a 1x1 pixel using the current colour or brush.
Note that because the context may be transformed, this is effectively the same as
calling fillRect (x, y, 1, 1), and the actual result may involve multiple pixels.
*/
void setPixel (int x, int y) const;
//==============================================================================
/** Fills an ellipse with the current colour or brush.
The ellipse is drawn to fit inside the given rectangle.
@see drawEllipse, Path::addEllipse
*/
void fillEllipse (float x, float y, float width, float height) const;
/** Fills an ellipse with the current colour or brush.
The ellipse is drawn to fit inside the given rectangle.
@see drawEllipse, Path::addEllipse
*/
void fillEllipse (Rectangle<float> area) const;
/** Draws an elliptical stroke using the current colour or brush.
@see fillEllipse, Path::addEllipse
*/
void drawEllipse (float x, float y, float width, float height,
float lineThickness) const;
/** Draws an elliptical stroke using the current colour or brush.
@see fillEllipse, Path::addEllipse
*/
void drawEllipse (Rectangle<float> area, float lineThickness) const;
//==============================================================================
/** Draws a line between two points.
The line is 1 pixel wide and drawn with the current colour or brush.
TIP: If you're trying to draw horizontal or vertical lines, don't use this -
it's better to use fillRect() instead unless you really need an angled line.
*/
void drawLine (float startX, float startY, float endX, float endY) const;
/** Draws a line between two points with a given thickness.
TIP: If you're trying to draw horizontal or vertical lines, don't use this -
it's better to use fillRect() instead unless you really need an angled line.
@see Path::addLineSegment
*/
void drawLine (float startX, float startY, float endX, float endY, float lineThickness) const;
/** Draws a line between two points.
The line is 1 pixel wide and drawn with the current colour or brush.
TIP: If you're trying to draw horizontal or vertical lines, don't use this -
it's better to use fillRect() instead unless you really need an angled line.
*/
void drawLine (const Line<float>& line) const;
/** Draws a line between two points with a given thickness.
@see Path::addLineSegment
TIP: If you're trying to draw horizontal or vertical lines, don't use this -
it's better to use fillRect() instead unless you really need an angled line.
*/
void drawLine (const Line<float>& line, float lineThickness) const;
/** Draws a dashed line using a custom set of dash-lengths.
@param line the line to draw
@param dashLengths a series of lengths to specify the on/off lengths - e.g.
{ 4, 5, 6, 7 } will draw a line of 4 pixels, skip 5 pixels,
draw 6 pixels, skip 7 pixels, and then repeat.
@param numDashLengths the number of elements in the array (this must be an even number).
@param lineThickness the thickness of the line to draw
@param dashIndexToStartFrom the index in the dash-length array to use for the first segment
@see PathStrokeType::createDashedStroke
*/
void drawDashedLine (const Line<float>& line,
const float* dashLengths, int numDashLengths,
float lineThickness = 1.0f,
int dashIndexToStartFrom = 0) const;
/** Draws a vertical line of pixels at a given x position.
The x position is an integer, but the top and bottom of the line can be sub-pixel
positions, and these will be anti-aliased if necessary.
The bottom parameter must be greater than or equal to the top parameter.
*/
void drawVerticalLine (int x, float top, float bottom) const;
/** Draws a horizontal line of pixels at a given y position.
The y position is an integer, but the left and right ends of the line can be sub-pixel
positions, and these will be anti-aliased if necessary.
The right parameter must be greater than or equal to the left parameter.
*/
void drawHorizontalLine (int y, float left, float right) const;
//==============================================================================
/** Fills a path using the currently selected colour or brush. */
void fillPath (const Path& path) const;
/** Fills a path using the currently selected colour or brush, and adds a transform. */
void fillPath (const Path& path, const AffineTransform& transform) const;
/** Draws a path's outline using the currently selected colour or brush. */
void strokePath (const Path& path,
const PathStrokeType& strokeType,
const AffineTransform& transform = AffineTransform()) const;
/** Draws a line with an arrowhead at its end.
@param line the line to draw
@param lineThickness the thickness of the line
@param arrowheadWidth the width of the arrow head (perpendicular to the line)
@param arrowheadLength the length of the arrow head (along the length of the line)
*/
void drawArrow (const Line<float>& line,
float lineThickness,
float arrowheadWidth,
float arrowheadLength) const;
//==============================================================================
/** Types of rendering quality that can be specified when drawing images.
@see Graphics::setImageResamplingQuality
*/
enum ResamplingQuality
{
lowResamplingQuality = 0, /**< Just uses a nearest-neighbour algorithm for resampling. */
mediumResamplingQuality = 1, /**< Uses bilinear interpolation for upsampling and area-averaging for downsampling. */
highResamplingQuality = 2, /**< Uses bicubic interpolation for upsampling and area-averaging for downsampling. */
};
/** Changes the quality that will be used when resampling images.
By default a Graphics object will be set to mediumResamplingQuality.
@see Graphics::drawImage, Graphics::drawImageTransformed, Graphics::drawImageWithin
*/
void setImageResamplingQuality (const ResamplingQuality newQuality);
/** Draws an image.
This will draw the whole of an image, positioning its top-left corner at the
given coordinates, and keeping its size the same. This is the simplest image
drawing method - the others give more control over the scaling and clipping
of the images.
Images are composited using the context's current opacity, so if you
don't want it to be drawn semi-transparently, be sure to call setOpacity (1.0f)
(or setColour() with an opaque colour) before drawing images.
*/
void drawImageAt (const Image& imageToDraw, int topLeftX, int topLeftY,
bool fillAlphaChannelWithCurrentBrush = false) const;
/** Draws part of an image, rescaling it to fit in a given target region.
The specified area of the source image is rescaled and drawn to fill the
specified destination rectangle.
Images are composited using the context's current opacity, so if you
don't want it to be drawn semi-transparently, be sure to call setOpacity (1.0f)
(or setColour() with an opaque colour) before drawing images.
@param imageToDraw the image to overlay
@param destX the left of the destination rectangle
@param destY the top of the destination rectangle
@param destWidth the width of the destination rectangle
@param destHeight the height of the destination rectangle
@param sourceX the left of the rectangle to copy from the source image
@param sourceY the top of the rectangle to copy from the source image
@param sourceWidth the width of the rectangle to copy from the source image
@param sourceHeight the height of the rectangle to copy from the source image
@param fillAlphaChannelWithCurrentBrush if true, then instead of drawing the source image's pixels,
the source image's alpha channel is used as a mask with
which to fill the destination using the current colour
or brush. (If the source has no alpha channel, then
it will just fill the target with a solid rectangle)
@see setImageResamplingQuality, drawImageAt, drawImageWithin, fillAlphaMap
*/
void drawImage (const Image& imageToDraw,
int destX, int destY, int destWidth, int destHeight,
int sourceX, int sourceY, int sourceWidth, int sourceHeight,
bool fillAlphaChannelWithCurrentBrush = false) const;
/** Draws an image, having applied an affine transform to it.
This lets you throw the image around in some wacky ways, rotate it, shear,
scale it, etc.
Images are composited using the context's current opacity, so if you
don't want it to be drawn semi-transparently, be sure to call setOpacity (1.0f)
(or setColour() with an opaque colour) before drawing images.
If fillAlphaChannelWithCurrentBrush is set to true, then the image's RGB channels
are ignored and it is filled with the current brush, masked by its alpha channel.
If you want to render only a subsection of an image, use Image::getClippedImage() to
create the section that you need.
@see setImageResamplingQuality, drawImage
*/
void drawImageTransformed (const Image& imageToDraw,
const AffineTransform& transform,
bool fillAlphaChannelWithCurrentBrush = false) const;
/** Draws an image to fit within a designated rectangle.
@param imageToDraw the source image to draw
@param targetArea the target rectangle to fit it into
@param placementWithinTarget this specifies how the image should be positioned
within the target rectangle - see the RectanglePlacement
class for more details about this.
@param fillAlphaChannelWithCurrentBrush if true, then instead of drawing the image, just its
alpha channel will be used as a mask with which to
draw with the current brush or colour. This is
similar to fillAlphaMap(), and see also drawImage()
@see drawImage, drawImageTransformed, drawImageAt, RectanglePlacement
*/
void drawImage (const Image& imageToDraw, Rectangle<float> targetArea,
RectanglePlacement placementWithinTarget = RectanglePlacement::stretchToFit,
bool fillAlphaChannelWithCurrentBrush = false) const;
/** Draws an image to fit within a designated rectangle.
If the image is too big or too small for the space, it will be rescaled
to fit as nicely as it can do without affecting its aspect ratio. It will
then be placed within the target rectangle according to the justification flags
specified.
@param imageToDraw the source image to draw
@param destX top-left of the target rectangle to fit it into
@param destY top-left of the target rectangle to fit it into
@param destWidth size of the target rectangle to fit the image into
@param destHeight size of the target rectangle to fit the image into
@param placementWithinTarget this specifies how the image should be positioned
within the target rectangle - see the RectanglePlacement
class for more details about this.
@param fillAlphaChannelWithCurrentBrush if true, then instead of drawing the image, just its
alpha channel will be used as a mask with which to
draw with the current brush or colour. This is
similar to fillAlphaMap(), and see also drawImage()
@see setImageResamplingQuality, drawImage, drawImageTransformed, drawImageAt, RectanglePlacement
*/
void drawImageWithin (const Image& imageToDraw,
int destX, int destY, int destWidth, int destHeight,
RectanglePlacement placementWithinTarget,
bool fillAlphaChannelWithCurrentBrush = false) const;
//==============================================================================
/** Returns the position of the bounding box for the current clipping region.
@see getClipRegion, clipRegionIntersects
*/
Rectangle<int> getClipBounds() const;
/** Checks whether a rectangle overlaps the context's clipping region.
If this returns false, no part of the given area can be drawn onto, so this
method can be used to optimise a component's paint() method, by letting it
avoid drawing complex objects that aren't within the region being repainted.
*/
bool clipRegionIntersects (Rectangle<int> area) const;
/** Intersects the current clipping region with another region.
@returns true if the resulting clipping region is non-zero in size
@see setOrigin, clipRegionIntersects
*/
bool reduceClipRegion (int x, int y, int width, int height);
/** Intersects the current clipping region with another region.
@returns true if the resulting clipping region is non-zero in size
@see setOrigin, clipRegionIntersects
*/
bool reduceClipRegion (Rectangle<int> area);
/** Intersects the current clipping region with a rectangle list region.
@returns true if the resulting clipping region is non-zero in size
@see setOrigin, clipRegionIntersects
*/
bool reduceClipRegion (const RectangleList<int>& clipRegion);
/** Intersects the current clipping region with a path.
@returns true if the resulting clipping region is non-zero in size
@see reduceClipRegion
*/
bool reduceClipRegion (const Path& path, const AffineTransform& transform = AffineTransform());
/** Intersects the current clipping region with an image's alpha-channel.
The current clipping path is intersected with the area covered by this image's
alpha-channel, after the image has been transformed by the specified matrix.
@param image the image whose alpha-channel should be used. If the image doesn't
have an alpha-channel, it is treated as entirely opaque.
@param transform a matrix to apply to the image
@returns true if the resulting clipping region is non-zero in size
@see reduceClipRegion
*/
bool reduceClipRegion (const Image& image, const AffineTransform& transform);
/** Excludes a rectangle to stop it being drawn into. */
void excludeClipRegion (Rectangle<int> rectangleToExclude);
/** Returns true if no drawing can be done because the clip region is zero. */
bool isClipEmpty() const;
//==============================================================================
/** Saves the current graphics state on an internal stack.
To restore the state, use restoreState().
@see ScopedSaveState
*/
void saveState();
/** Restores a graphics state that was previously saved with saveState().
@see ScopedSaveState
*/
void restoreState();
/** Uses RAII to save and restore the state of a graphics context.
On construction, this calls Graphics::saveState(), and on destruction it calls
Graphics::restoreState() on the Graphics object that you supply.
*/
class ScopedSaveState
{
public:
ScopedSaveState (Graphics&);
~ScopedSaveState();
private:
Graphics& context;
JUCE_DECLARE_NON_COPYABLE (ScopedSaveState)
};
//==============================================================================
/** Begins rendering to an off-screen bitmap which will later be flattened onto the current
context with the given opacity.
The context uses an internal stack of temporary image layers to do this. When you've
finished drawing to the layer, call endTransparencyLayer() to complete the operation and
composite the finished layer. Every call to beginTransparencyLayer() MUST be matched
by a corresponding call to endTransparencyLayer()!
This call also saves the current state, and endTransparencyLayer() restores it.
*/
void beginTransparencyLayer (float layerOpacity);
/** Completes a drawing operation to a temporary semi-transparent buffer.
See beginTransparencyLayer() for more details.
*/
void endTransparencyLayer();
/** Moves the position of the context's origin.
This changes the position that the context considers to be (0, 0) to
the specified position.
So if you call setOrigin with (100, 100), then the position that was previously
referred to as (100, 100) will subsequently be considered to be (0, 0).
@see reduceClipRegion, addTransform
*/
void setOrigin (Point<int> newOrigin);
/** Moves the position of the context's origin.
This changes the position that the context considers to be (0, 0) to
the specified position.
So if you call setOrigin (100, 100), then the position that was previously
referred to as (100, 100) will subsequently be considered to be (0, 0).
@see reduceClipRegion, addTransform
*/
void setOrigin (int newOriginX, int newOriginY);
/** Adds a transformation which will be performed on all the graphics operations that
the context subsequently performs.
After calling this, all the coordinates that are passed into the context will be
transformed by this matrix.
@see setOrigin
*/
void addTransform (const AffineTransform& transform);
/** Resets the current colour, brush, and font to default settings. */
void resetToDefaultState();
/** Returns true if this context is drawing to a vector-based device, such as a printer. */
bool isVectorDevice() const;
//==============================================================================
/** Create a graphics that draws with a given low-level renderer.
This method is intended for use only by people who know what they're doing.
Note that the LowLevelGraphicsContext will NOT be deleted by this object.
*/
Graphics (LowLevelGraphicsContext&) noexcept;
/** @internal */
LowLevelGraphicsContext& getInternalContext() const noexcept { return context; }
private:
//==============================================================================
LowLevelGraphicsContext& context;
ScopedPointer<LowLevelGraphicsContext> contextToDelete;
bool saveStatePending;
void saveStateIfPending();
JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (Graphics)
};
#endif // JUCE_GRAPHICSCONTEXT_H_INCLUDED
| gpl-2.0 |
GargoyleSoftware/voip-client-ios | submodules/linphone/mediastreamer2/src/msqueue.c | 2949 | /*
mediastreamer2 library - modular sound and video processing and streaming
Copyright (C) 2006 Simon MORLAT ([email protected])
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#if defined(HAVE_CONFIG_H)
#include "mediastreamer-config.h"
#endif
#include "mediastreamer2/msqueue.h"
#include "mediastreamer2/msvideo.h"
#include <string.h>
#ifdef WIN32
#include <malloc.h> /* for alloca */
#endif
/* Allocate a queue linking output pin1 of filter f1 to input pin2 of filter f2.
 * The returned queue is owned by the caller and must be released with
 * ms_queue_destroy(). */
MSQueue * ms_queue_new(struct _MSFilter *f1, int pin1, struct _MSFilter *f2, int pin2 ){
MSQueue *q=(MSQueue*)ms_new(MSQueue,1);
qinit(&q->q);
q->prev.filter=f1; /* upstream endpoint (producer side) */
q->prev.pin=pin1;
q->next.filter=f2; /* downstream endpoint (consumer side) */
q->next.pin=pin2;
return q;
}
/* Initialize an in-place MSQueue with no attached filters on either end. */
void ms_queue_init(MSQueue *q){
q->prev.filter=0;
q->prev.pin=0;
q->next.filter=0;
q->next.pin=0;
qinit(&q->q);
}
/* Drop all queued messages and free the queue itself. */
void ms_queue_destroy(MSQueue *q){
flushq(&q->q,0);
ms_free(q);
}
/* Drop all queued messages; the queue remains usable. */
void ms_queue_flush(MSQueue *q){
flushq(&q->q,0);
}
/* Initialize an in-place bufferizer: empty internal queue, zero byte count. */
void ms_bufferizer_init(MSBufferizer *obj){
qinit(&obj->q);
obj->size=0;
}
/* Heap-allocate and initialize a bufferizer; release with ms_bufferizer_destroy(). */
MSBufferizer * ms_bufferizer_new(){
MSBufferizer *obj=(MSBufferizer *)ms_new(MSBufferizer,1);
ms_bufferizer_init(obj);
return obj;
}
/* Append message m (ownership transferred) and account for its payload size. */
void ms_bufferizer_put(MSBufferizer *obj, mblk_t *m){
obj->size+=msgdsize(m);
putq(&obj->q,m);
}
/* Drain every message from q into the bufferizer, leaving q empty. */
void ms_bufferizer_put_from_queue(MSBufferizer *obj, MSQueue *q){
mblk_t *m;
while((m=ms_queue_get(q))!=NULL){
ms_bufferizer_put(obj,m);
}
}
/* Copy exactly datalen bytes of buffered payload into data.
 * All-or-nothing: returns datalen on success, or 0 (copying nothing) when
 * fewer than datalen bytes are buffered. Fully consumed message chains are
 * dequeued and freed; a partially consumed chain stays at the queue head
 * with its read pointers advanced. */
int ms_bufferizer_read(MSBufferizer *obj, uint8_t *data, int datalen){
if (obj->size>=datalen){
int sz=0;
int cplen;
mblk_t *m=peekq(&obj->q);
/* enough data buffered: we can satisfy the whole request */
while(sz<datalen){
/* copy at most what remains in the current block */
cplen=MIN(m->b_wptr-m->b_rptr,datalen-sz);
memcpy(data+sz,m->b_rptr,cplen);
sz+=cplen;
m->b_rptr+=cplen;
if (m->b_rptr==m->b_wptr){
/* current block exhausted: follow the continuation chain if any */
if (m->b_cont!=NULL) {
m=m->b_cont;
}
else{
/* whole chain consumed: dequeue and free it, move to next message */
mblk_t *remove=getq(&obj->q);
freemsg(remove);
m=peekq(&obj->q);
}
}
}
obj->size-=datalen;
return datalen;
}
return 0;
}
/* Discard the next `bytes` bytes of buffered data by reading them into a
 * throwaway stack buffer.
 * NOTE(review): alloca() with a caller-supplied size — large values would
 * overflow the stack; callers are presumably bounded, verify at call sites. */
void ms_bufferizer_skip_bytes(MSBufferizer *obj, int bytes){
uint8_t *tmp=(uint8_t*)alloca(bytes);
ms_bufferizer_read(obj,tmp,bytes);
}
/* Drop all buffered data and reset the byte counter. */
void ms_bufferizer_flush(MSBufferizer *obj){
obj->size=0;
flushq(&obj->q,0);
}
/* Release queued messages of an in-place bufferizer (counterpart of _init). */
void ms_bufferizer_uninit(MSBufferizer *obj){
flushq(&obj->q,0);
}
/* Uninitialize and free a heap-allocated bufferizer. */
void ms_bufferizer_destroy(MSBufferizer *obj){
ms_bufferizer_uninit(obj);
ms_free(obj);
}
| gpl-2.0 |
chancegrissom/qmk_firmware | keyboards/handwired/dactyl_manuform/5x6/keymaps/default/keymap.c | 3031 | /* A standard layout for the Dactyl Manuform 5x6 Keyboard */
#include QMK_KEYBOARD_H
extern keymap_config_t keymap_config;
#define _QWERTY 0
#define _LOWER 1
#define _RAISE 2
#define RAISE MO(_RAISE)
#define LOWER MO(_LOWER)
/* Keymap for the Dactyl Manuform 5x6: base QWERTY layer plus two momentary
 * layers (LOWER = numpad/symbols, RAISE = function keys/media) reached via
 * the MO() thumb keys defined above. */
const uint16_t PROGMEM keymaps[][MATRIX_ROWS][MATRIX_COLS] = {
/* Base layer: standard QWERTY with thumb-cluster modifiers. */
[_QWERTY] = LAYOUT_5x6(
KC_ESC , KC_1 , KC_2 , KC_3 , KC_4 , KC_5 , KC_6 , KC_7 , KC_8 , KC_9 , KC_0 ,KC_BSPC,
KC_TAB , KC_Q , KC_W , KC_E , KC_R , KC_T , KC_Y , KC_U , KC_I , KC_O , KC_P ,KC_MINS,
KC_LSFT, KC_A , KC_S , KC_D , KC_F , KC_G , KC_H , KC_J , KC_K , KC_L ,KC_SCLN,KC_QUOT,
KC_LCTL, KC_Z , KC_X , KC_C , KC_V , KC_B , KC_N , KC_M ,KC_COMM,KC_DOT ,KC_SLSH,KC_BSLASH,
KC_LBRC,KC_RBRC, KC_PLUS, KC_EQL,
RAISE,KC_SPC, KC_ENT, LOWER,
KC_TAB,KC_HOME, KC_END, KC_DEL,
KC_BSPC, KC_GRV, KC_LGUI, KC_LALT
),
/* Lower layer: symbols on the top row, numpad on the right hand,
 * navigation cluster (home/pgup/pgdn/end) on the left. */
[_LOWER] = LAYOUT_5x6(
KC_TILD,KC_EXLM, KC_AT ,KC_HASH,KC_DLR ,KC_PERC, KC_CIRC,KC_AMPR,KC_ASTR,KC_LPRN,KC_RPRN,KC_DEL,
_______,_______,_______,_______,_______,KC_LBRC, KC_RBRC, KC_P7 , KC_P8 , KC_P9 ,_______,KC_PLUS,
_______,KC_HOME,KC_PGUP,KC_PGDN,KC_END ,KC_LPRN, KC_RPRN, KC_P4 , KC_P5 , KC_P6 ,KC_MINS,KC_PIPE,
_______,_______,_______,_______,_______,_______, _______, KC_P1 , KC_P2 , KC_P3 ,KC_EQL ,KC_UNDS,
_______,KC_PSCR, _______, KC_P0,
_______,_______, _______,_______,
_______,_______, _______,_______,
_______,_______, _______,_______
),
/* Raise layer: function keys on the top row, arrows on the left hand,
 * media/volume controls on the right. */
[_RAISE] = LAYOUT_5x6(
KC_F12 , KC_F1 , KC_F2 , KC_F3 , KC_F4 , KC_F5 , KC_F6 , KC_F7 , KC_F8 , KC_F9 ,KC_F10 ,KC_F11 ,
_______,_______,_______,_______,_______,KC_LBRC, KC_RBRC,_______,KC_NLCK,KC_INS ,KC_SLCK,KC_MUTE,
_______,KC_LEFT,KC_UP ,KC_DOWN,KC_RGHT,KC_LPRN, KC_RPRN,KC_MPRV,KC_MPLY,KC_MNXT,_______,KC_VOLU,
_______,_______,_______,_______,_______,_______, _______,_______,_______,_______,_______,KC_VOLD,
_______,_______, KC_EQL ,_______,
_______,_______, _______,_______,
_______,_______, _______,_______,
_______,_______, _______,_______
),
};
| gpl-2.0 |
kenorb/mangalashribhuti | tmp/omega-tools/jalu-d672bf72/css/footer.css | 473 | /* ----------- styles for Footer Zone ---------------------- */
.footer-outer-wrapper { /* mauve bg */
padding-top: 20px;
padding-bottom: 20px;
}
#zone-footer {
font: 11px verdana, arial, sans-serif;
color: #fff;
}
#zone-footer a:link { color: #fff; text-decoration:none; }
#zone-footer a:visited { color: #fff; text-decoration:none; }
#zone-footer a:hover { color:#fbcb60; text-decoration: underline; }
#zone-footer a:active { color: #fff; text-decoration:none; } | gpl-2.0 |
dwihn0r/keepassx | src/dialogs/TrashCanDlg.cpp | 3752 | /***************************************************************************
* Copyright (C) 2007 by Tarek Saidi *
* [email protected] *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; version 2 of the License. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program; if not, write to the *
* Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
***************************************************************************/
#include <QTreeWidget>
#include <QPainter>
#include <QPaintEvent>
#include <QResizeEvent>
#include "main.h"
#include "TrashCanDlg.h"
/* Build the recycle-bin dialog: one tree row per trashed entry showing its
 * group, title, username and expiry date, with the group/entry icons taken
 * from the database's icon set. Double-click and right-click handlers are
 * wired here. */
TrashCanDialog::TrashCanDialog(QWidget* parent,IDatabase* database,const QList<IEntryHandle*>& TrashItems):QDialog(parent){
setupUi(this);
Entries=TrashItems;
for(int i=0;i<Entries.size();i++){
QTreeWidgetItem* item=new QTreeWidgetItem(treeWidget);
item->setData(0,Qt::UserRole,i); /* row -> index into Entries */
item->setText(0,Entries[i]->group()->title());
item->setText(1,Entries[i]->title());
item->setText(2,Entries[i]->username());
item->setText(3,Entries[i]->expire().dateToString(Qt::LocalDate));
item->setIcon(0,database->icon(Entries[i]->group()->image()));
item->setIcon(1,database->icon(Entries[i]->image()));
}
connect(treeWidget,SIGNAL(itemDoubleClicked(QTreeWidgetItem*,int)),this,SLOT(OnItemDoubleClicked(QTreeWidgetItem*)));
connect(treeWidget,SIGNAL(customContextMenuRequested(const QPoint&)),this,SLOT(OnContextMenu(const QPoint&)));
/* NOTE(review): menu actions are created but no triggered() handler is
 * connected in this chunk, and the labels are not wrapped in tr() —
 * presumably handled/intentional elsewhere, verify. */
ContextMenu=new QMenu(this);
ContextMenu->addAction(getIcon("restore"),"Restore");
ContextMenu->addAction(getIcon("deleteentry"),"Delete");
}
/* Paint the dialog, then draw the banner pixmap in the top-left corner,
 * clipped to the region being repainted. */
void TrashCanDialog::paintEvent(QPaintEvent* event){
QDialog::paintEvent(event);
QPainter painter(this);
painter.setClipRegion(event->region());
painter.drawPixmap(QPoint(0,0),BannerPixmap);
}
/* Rebuild the banner at the new dialog width before the base class lays
 * the widgets out. */
void TrashCanDialog::resizeEvent(QResizeEvent* event){
createBanner(&BannerPixmap,getPixmap("trashcan"),tr("Recycle Bin"),width());
QDialog::resizeEvent(event);
}
/* Double-click accepts the dialog with the clicked entry as the result;
 * the Entries index was stashed in the item's UserRole data. */
void TrashCanDialog::OnItemDoubleClicked(QTreeWidgetItem* item){
SelectedEntry=Entries[item->data(0,Qt::UserRole).toInt()];
accept();
}
/* Right-click handler: normalize the selection before showing the menu.
 * Clicking an unselected item makes it the sole selection; clicking empty
 * space clears the selection entirely. The menu is then shown at the
 * cursor position (popup() is non-blocking). */
void TrashCanDialog::OnContextMenu(const QPoint& pos){
if(treeWidget->itemAt(pos)){
QTreeWidgetItem* item=treeWidget->itemAt(pos);
if(treeWidget->selectedItems().size()==0){
/* nothing selected yet: select the clicked item */
treeWidget->setItemSelected(item,true);
}
else{
if(!treeWidget->isItemSelected(item)){
/* clicked outside the current selection: restart from scratch */
while(treeWidget->selectedItems().size()){
treeWidget->setItemSelected(treeWidget->selectedItems()[0],false);
}
treeWidget->setItemSelected(item,true);
}
}
}
else
{
/* clicked on empty space: clear any selection */
while(treeWidget->selectedItems().size())
treeWidget->setItemSelected(treeWidget->selectedItems()[0],false);
}
ContextMenu->popup(treeWidget->viewport()->mapToGlobal(pos));
}
///TODO 0.2.3 locale aware string/date compare for correct sorting
| gpl-2.0 |
zhuowei/dolphin | Source/Core/Core/HW/Sram.cpp | 1852 | // Copyright 2013 Dolphin Emulator Project
// Licensed under GPLv2
// Refer to the license.txt file included.
#include "Core/ConfigManager.h"
#include "Core/HW/Sram.h"
// english
SRAM sram_dump = {{
0xFF, 0x6B,
0x00, 0x91,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0xFF, 0xFF, 0xFF, 0x40,
0x00,
0x00,
0x00,
0x2C,
0x44, 0x4F, 0x4C, 0x50, 0x48, 0x49, 0x4E, 0x53, 0x4C, 0x4F, 0x54, 0x41,
0x44, 0x4F, 0x4C, 0x50, 0x48, 0x49, 0x4E, 0x53, 0x4C, 0x4F, 0x54, 0x42,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00,
0x00, 0x00,
0x00, 0x00,
0x00, 0x00,
0x00,
0x00,
0x6E, 0x6D,
0x00, 0x00,
0x00, 0x00
}};
#if 0
// german
SRAM sram_dump_german = {{
0x1F, 0x66,
0xE0, 0x96,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x04, 0xEA, 0x19, 0x40,
0x00,
0x00,
0x01,
0x3C,
0x12, 0xD5, 0xEA, 0xD3, 0x00, 0xFA, 0x2D, 0x33, 0x13, 0x41, 0x26, 0x03,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00,
0x00, 0x00,
0x00, 0x00,
0x00, 0x00,
0x00,
0x00,
0x84, 0xFF,
0x00, 0x00,
0x00, 0x00
}};
#endif
// Load SRAM contents from the configured SRAM file. If the file is missing
// or cannot be read in full, fall back to the built-in English dump.
void InitSRAM()
{
	File::IOFile file(SConfig::GetInstance().m_LocalCoreStartupParameter.m_strSRAM, "rb");

	// Assume failure until a complete read of the SRAM image succeeds.
	bool loaded = false;
	if (file)
	{
		loaded = file.ReadArray(&g_SRAM, 1);
		if (!loaded)
		{
			// The file exists but is truncated/corrupt: log it, then fall back.
			ERROR_LOG(EXPANSIONINTERFACE, "EXI IPL-DEV: Could not read all of SRAM");
		}
	}
	if (!loaded)
		g_SRAM = sram_dump;
}
/* Decode a memory card's 12-byte flash ID from the buffer and store it in
 * g_SRAM, along with its checksum. Bytes 12..19 of the buffer seed a linear
 * congruential generator (same constants as the classic ANSI C rand():
 * multiplier 0x41c64e6d, increment 0x3039) whose stream is subtracted from
 * each stored byte. */
void SetCardFlashID(u8* buffer, u8 card_index)
{
u64 rand = Common::swap64( *(u64*)&(buffer[12]));
u8 csum=0;
for (int i = 0; i < 12; i++)
{
rand = (((rand * (u64)0x0000000041c64e6dULL) + (u64)0x0000000000003039ULL) >> 16);
/* de-obfuscate the byte, store it, and fold it into the checksum */
csum += g_SRAM.flash_id[card_index][i] = buffer[i] - ((u8)rand&0xff);
rand = (((rand * (u64)0x0000000041c64e6dULL) + (u64)0x0000000000003039ULL) >> 16);
rand &= (u64)0x0000000000007fffULL;
}
/* checksum is stored inverted */
g_SRAM.flashID_chksum[card_index] = csum^0xFF;
}
| gpl-2.0 |
clone2727/cabal | engines/wintermute/debugger.cpp | 2600 | /* ScummVM - Graphic Adventure Engine
*
* ScummVM is the legal property of its developers, whose names
* are too numerous to list here. Please refer to the COPYRIGHT
* file distributed with this source distribution.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
*/
#include "engines/wintermute/debugger.h"
#include "engines/wintermute/wintermute.h"
#include "engines/wintermute/base/base_engine.h"
#include "engines/wintermute/base/base_file_manager.h"
#include "engines/wintermute/base/base_game.h"
namespace Wintermute {
// Debugger console for the Wintermute engine: registers the engine-specific
// commands on top of the common GUI::Debugger command set.
Console::Console(WintermuteEngine *vm) : GUI::Debugger(), _engineRef(vm) {
	registerCmd("show_fps", WRAP_METHOD(Console, Cmd_ShowFps));
	registerCmd("dump_file", WRAP_METHOD(Console, Cmd_DumpFile));
}
// Nothing to release: _engineRef is non-owning.
Console::~Console(void) {
}
bool Console::Cmd_ShowFps(int argc, const char **argv) {
if (argc > 1) {
if (Common::String(argv[1]) == "true") {
_engineRef->_game->setShowFPS(true);
} else if (Common::String(argv[1]) == "false") {
_engineRef->_game->setShowFPS(false);
}
}
return true;
}
// "dump_file <file path> <output file name>": copy a resource from the
// game's virtual file system to a dump file on the host.
// Fix vs. original: the DumpFile::open() result was never checked (on
// failure the code went on writing to an unopened file) and inFile leaked
// on that path. Returns true to keep the debugger console open.
bool Console::Cmd_DumpFile(int argc, const char **argv) {
	if (argc != 3) {
		debugPrintf("Usage: %s <file path> <output file name>\n", argv[0]);
		return true;
	}

	Common::String filePath = argv[1];
	Common::String outFileName = argv[2];

	BaseFileManager *fileManager = BaseEngine::instance().getFileManager();
	Common::SeekableReadStream *inFile = fileManager->openFile(filePath);
	if (!inFile) {
		debugPrintf("File '%s' not found\n", argv[1]);
		return true;
	}

	Common::DumpFile *outFile = new Common::DumpFile();
	if (!outFile->open(outFileName)) {
		debugPrintf("Could not open output file '%s'\n", argv[2]);
		delete outFile;
		delete inFile;
		return true;
	}

	// Slurp the whole resource and write it out in one go.
	byte *data = new byte[inFile->size()];
	inFile->read(data, inFile->size());
	outFile->write(data, inFile->size());
	outFile->finalize();
	outFile->close();

	delete[] data;
	delete outFile;
	delete inFile;

	debugPrintf("Resource file '%s' dumped to file '%s'\n", argv[1], argv[2]);
	return true;
}
} // End of namespace Wintermute
| gpl-2.0 |
lexical/backports | drivers/net/wireless/iwlwifi/iwl-io.c | 6861 | /******************************************************************************
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* Intel Linux Wireless <[email protected]>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
*****************************************************************************/
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/export.h>
#include "iwl-drv.h"
#include "iwl-io.h"
#include "iwl-csr.h"
#include "iwl-debug.h"
#include "iwl-prph.h"
#include "iwl-fh.h"
#define IWL_POLL_INTERVAL 10 /* microseconds */
/* Poll a CSR register until (value & mask) == (bits & mask) or the timeout
 * (microseconds) elapses. Always samples at least once, so a zero timeout
 * still performs one check. Returns the elapsed time on success or
 * -ETIMEDOUT. */
int iwl_poll_bit(struct iwl_trans *trans, u32 addr,
u32 bits, u32 mask, int timeout)
{
int t = 0;
do {
if ((iwl_read32(trans, addr) & mask) == (bits & mask))
return t;
udelay(IWL_POLL_INTERVAL);
t += IWL_POLL_INTERVAL;
} while (t < timeout);
return -ETIMEDOUT;
}
IWL_EXPORT_SYMBOL(iwl_poll_bit);
/* Read a register while holding NIC access (device awake). Returns the
 * poison pattern 0x5a5a5a5a if access could not be grabbed. */
u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg)
{
u32 value = 0x5a5a5a5a;
unsigned long flags;
if (iwl_trans_grab_nic_access(trans, false, &flags)) {
value = iwl_read32(trans, reg);
iwl_trans_release_nic_access(trans, &flags);
}
return value;
}
IWL_EXPORT_SYMBOL(iwl_read_direct32);
/* Write a register while holding NIC access; silently a no-op if access
 * could not be grabbed. */
void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value)
{
unsigned long flags;
if (iwl_trans_grab_nic_access(trans, false, &flags)) {
iwl_write32(trans, reg, value);
iwl_trans_release_nic_access(trans, &flags);
}
}
IWL_EXPORT_SYMBOL(iwl_write_direct32);
/* Like iwl_poll_bit() but reads through iwl_read_direct32() (NIC access per
 * sample) and waits for all bits in mask to be set. */
int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask,
int timeout)
{
int t = 0;
do {
if ((iwl_read_direct32(trans, addr) & mask) == mask)
return t;
udelay(IWL_POLL_INTERVAL);
t += IWL_POLL_INTERVAL;
} while (t < timeout);
return -ETIMEDOUT;
}
IWL_EXPORT_SYMBOL(iwl_poll_direct_bit);
/* Raw periphery-register read; caller must already hold NIC access.
 * Traced for debugging. */
u32 __iwl_read_prph(struct iwl_trans *trans, u32 ofs)
{
u32 val = iwl_trans_read_prph(trans, ofs);
trace_iwlwifi_dev_ioread_prph32(trans->dev, ofs, val);
return val;
}
/* Raw periphery-register write; caller must already hold NIC access. */
void __iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
{
trace_iwlwifi_dev_iowrite_prph32(trans->dev, ofs, val);
iwl_trans_write_prph(trans, ofs, val);
}
/* Periphery read that grabs/releases NIC access itself. Returns the poison
 * pattern 0x5a5a5a5a if access could not be grabbed. */
u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs)
{
unsigned long flags;
u32 val = 0x5a5a5a5a;
if (iwl_trans_grab_nic_access(trans, false, &flags)) {
val = __iwl_read_prph(trans, ofs);
iwl_trans_release_nic_access(trans, &flags);
}
return val;
}
IWL_EXPORT_SYMBOL(iwl_read_prph);
/* Periphery write that grabs/releases NIC access itself; no-op on failure. */
void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
{
unsigned long flags;
if (iwl_trans_grab_nic_access(trans, false, &flags)) {
__iwl_write_prph(trans, ofs, val);
iwl_trans_release_nic_access(trans, &flags);
}
}
IWL_EXPORT_SYMBOL(iwl_write_prph);
/* Poll a periphery register until (value & mask) == (bits & mask) or the
 * timeout (microseconds) elapses; at least one sample is taken. Returns the
 * elapsed time on success or -ETIMEDOUT. */
int iwl_poll_prph_bit(struct iwl_trans *trans, u32 addr,
u32 bits, u32 mask, int timeout)
{
int t = 0;
do {
if ((iwl_read_prph(trans, addr) & mask) == (bits & mask))
return t;
udelay(IWL_POLL_INTERVAL);
t += IWL_POLL_INTERVAL;
} while (t < timeout);
return -ETIMEDOUT;
}
/* Read-modify-write: OR mask into a periphery register. The RMW is done
 * under a single NIC-access grab, but note it is not atomic against other
 * contexts doing their own grabs. No-op if access cannot be grabbed. */
void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
{
unsigned long flags;
if (iwl_trans_grab_nic_access(trans, false, &flags)) {
__iwl_write_prph(trans, ofs,
__iwl_read_prph(trans, ofs) | mask);
iwl_trans_release_nic_access(trans, &flags);
}
}
IWL_EXPORT_SYMBOL(iwl_set_bits_prph);
/* Read-modify-write: keep only the bits in mask, then OR in bits. */
void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs,
u32 bits, u32 mask)
{
unsigned long flags;
if (iwl_trans_grab_nic_access(trans, false, &flags)) {
__iwl_write_prph(trans, ofs,
(__iwl_read_prph(trans, ofs) & mask) | bits);
iwl_trans_release_nic_access(trans, &flags);
}
}
IWL_EXPORT_SYMBOL(iwl_set_bits_mask_prph);
/* Read-modify-write: clear the bits in mask. */
void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
{
unsigned long flags;
u32 val;
if (iwl_trans_grab_nic_access(trans, false, &flags)) {
val = __iwl_read_prph(trans, ofs);
__iwl_write_prph(trans, ofs, (val & ~mask));
iwl_trans_release_nic_access(trans, &flags);
}
}
IWL_EXPORT_SYMBOL(iwl_clear_bits_prph);
/* Force an NMI in the device firmware. Family 8000 uses a dedicated
 * register/value pair; older families write the driver value followed by
 * the HW value to the same register. */
void iwl_force_nmi(struct iwl_trans *trans)
{
if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
iwl_write_prph(trans, DEVICE_SET_NMI_REG,
DEVICE_SET_NMI_VAL_DRV);
iwl_write_prph(trans, DEVICE_SET_NMI_REG,
DEVICE_SET_NMI_VAL_HW);
} else {
iwl_write_prph(trans, DEVICE_SET_NMI_8000_REG,
DEVICE_SET_NMI_8000_VAL);
iwl_write_prph(trans, DEVICE_SET_NMI_REG,
DEVICE_SET_NMI_VAL_DRV);
}
}
IWL_EXPORT_SYMBOL(iwl_force_nmi);
/* Map an FH register offset to its symbolic name for diagnostics. */
static const char *get_fh_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
switch (cmd) {
IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
IWL_CMD(FH_RSCSR_CHNL0_WPTR);
IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
IWL_CMD(FH_TSSR_TX_STATUS_REG);
IWL_CMD(FH_TSSR_TX_ERROR_REG);
default:
return "UNKNOWN";
}
#undef IWL_CMD
}
/* Dump a fixed set of FH (flow handler) registers.
 * With debugfs support and buf != NULL: allocate *buf (caller frees with
 * kfree) and return the number of bytes written, or -ENOMEM.
 * Otherwise: print the registers to the error log and return 0. */
int iwl_dump_fh(struct iwl_trans *trans, char **buf)
{
int i;
static const u32 fh_tbl[] = {
FH_RSCSR_CHNL0_STTS_WPTR_REG,
FH_RSCSR_CHNL0_RBDCB_BASE_REG,
FH_RSCSR_CHNL0_WPTR,
FH_MEM_RCSR_CHNL0_CONFIG_REG,
FH_MEM_RSSR_SHARED_CTRL_REG,
FH_MEM_RSSR_RX_STATUS_REG,
FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
FH_TSSR_TX_STATUS_REG,
FH_TSSR_TX_ERROR_REG
};
#ifdef CPTCFG_IWLWIFI_DEBUGFS
if (buf) {
int pos = 0;
/* 48 bytes per register line plus a header */
size_t bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
*buf = kmalloc(bufsz, GFP_KERNEL);
if (!*buf)
return -ENOMEM;
pos += scnprintf(*buf + pos, bufsz - pos,
"FH register values:\n");
for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
pos += scnprintf(*buf + pos, bufsz - pos,
" %34s: 0X%08x\n",
get_fh_string(fh_tbl[i]),
iwl_read_direct32(trans, fh_tbl[i]));
return pos;
}
#endif
IWL_ERR(trans, "FH register values:\n");
for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
IWL_ERR(trans, " %34s: 0X%08x\n",
get_fh_string(fh_tbl[i]),
iwl_read_direct32(trans, fh_tbl[i]));
return 0;
}
| gpl-2.0 |
embecosm/avr32-gcc | libstdc++-v3/testsuite/23_containers/vector/types/1.cc | 1294 | // 2005-12-01 Paolo Carlini <[email protected]>
// Copyright (C) 2005, 2009 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING3. If not see
// <http://www.gnu.org/licenses/>.
// { dg-do compile }
#include <vector>
// Namespace with deliberately greedy operator templates. If libstdc++'s
// vector internals used unqualified operators on types associated with N,
// argument-dependent lookup could find these and break or ambiguate the
// code — this test compiles only if the library avoids that.
namespace N
{
struct X { };
template<typename T>
X operator+(T, std::size_t)
{ return X(); }
template<typename T>
X operator-(T, T)
{ return X(); }
}
// Compile-only regression test: exercise the vector operations (indexing,
// size/capacity, resize, the insert overloads, assignment) that internally
// perform iterator/pointer arithmetic.
int main()
{
std::vector<N::X> v(5);
const std::vector<N::X> w(1);
v[0];
w[0];
v.size();
v.capacity();
v.resize(1);
v.insert(v.begin(), N::X());
v.insert(v.begin(), 1, N::X());
v.insert(v.begin(), w.begin(), w.end());
v = w;
return 0;
}
| gpl-2.0 |
Tate-ad/revive-adserver | lib/max/language/bg/index.lang.php | 723 | <?php
/*
+---------------------------------------------------------------------------+
| Revive Adserver |
| http://www.revive-adserver.com |
| |
| Copyright: See the COPYRIGHT.txt file. |
| License: GPLv2 or later, see the LICENSE.txt file. |
+---------------------------------------------------------------------------+
*/
// Meta information describing this translation pack.
$translation_readable = "Bulgarian"; // human-readable language name
$translation_maintainer = "Revive Adserver Team"; // credited maintainer
$translation_contact = "[email protected]"; // contact for translation issues
| gpl-2.0 |
Distrotech/clamav | libclamav/7z/7zDec.c | 12759 | /* 7zDec.c -- Decoding from 7z folder
2010-11-02 : Igor Pavlov : Public domain */
#include <string.h>
#if defined(_WIN32)
#include <WinSock2.h>
#include <Windows.h>
#endif
#define _7ZIP_PPMD_SUPPPORT
#include "7z.h"
#include "Bcj2.h"
#include "Bra.h"
#include "CpuArch.h"
#include "LzmaDec.h"
#include "Lzma2Dec.h"
#ifdef _7ZIP_PPMD_SUPPPORT
#include "Ppmd7.h"
#endif
#define k_Copy 0
#define k_LZMA2 0x21
#define k_LZMA 0x30101
#define k_BCJ 0x03030103
#define k_PPC 0x03030205
#define k_ARM 0x03030501
#define k_ARMT 0x03030701
#define k_SPARC 0x03030805
#define k_BCJ2 0x0303011B
#ifdef _7ZIP_PPMD_SUPPPORT
#define k_PPMD 0x30401
/* Adapter exposing an ILookInStream through the byte-at-a-time IByteIn
 * interface required by the PPMd range decoder. */
typedef struct
{
IByteIn p; /* must be first: callbacks cast back to CByteInToLook */
const Byte *cur; /* next byte within the current look-ahead window */
const Byte *end; /* one past the end of the window */
const Byte *begin; /* start of the window */
UInt64 processed; /* bytes consumed from windows already skipped */
Bool extra; /* set once we read past the available input */
SRes res; /* first stream error encountered, if any */
ILookInStream *inStream;
} CByteInToLook;
/* IByteIn callback: return the next input byte, refilling the look-ahead
 * window from the underlying ILookInStream when it runs dry. On stream
 * failure or end of input, sets p->extra and returns 0.
 * Fixes vs. original: a Skip() failure was overwritten by the subsequent
 * Look() result (silently losing the error when Look succeeded), and there
 * was a stray double semicolon. Now the first error is kept in p->res and
 * Look() is only attempted after a successful Skip(). */
static Byte ReadByte(void *pp)
{
  CByteInToLook *p = (CByteInToLook *)pp;
  if (p->cur != p->end)
    return *p->cur++;
  if (p->res == SZ_OK)
  {
    /* release the fully consumed window before requesting the next one */
    size_t size = p->cur - p->begin;
    p->processed += size;
    p->res = p->inStream->Skip(p->inStream, size);
    if (p->res == SZ_OK)
    {
      size = (1 << 25);
      p->res = p->inStream->Look(p->inStream, (const void **)&p->begin, &size);
      p->cur = p->begin;
      p->end = p->begin + size;
      if (size != 0)
        return *p->cur++;
    }
  }
  p->extra = True;
  return 0;
}
/* Decode a PPMd-compressed stream of exactly outSize bytes into outBuffer.
 * The 5-byte codec properties carry the model order (1 byte) and memory
 * size (4 bytes LE). Verifies that the decoder consumed exactly inSize
 * bytes and that the range coder finished cleanly. */
static SRes SzDecodePpmd(CSzCoderInfo *coder, UInt64 inSize, ILookInStream *inStream,
Byte *outBuffer, SizeT outSize, ISzAlloc *allocMain)
{
CPpmd7 ppmd;
CByteInToLook s;
SRes res = SZ_OK;
s.p.Read = ReadByte;
s.inStream = inStream;
s.begin = s.end = s.cur = NULL;
s.extra = False;
s.res = SZ_OK;
s.processed = 0;
if (coder->Props.size != 5)
return SZ_ERROR_UNSUPPORTED;
{
/* parse and validate the model parameters from the codec properties */
unsigned order = coder->Props.data[0];
UInt32 memSize = GetUi32(coder->Props.data + 1);
if (order < PPMD7_MIN_ORDER ||
order > PPMD7_MAX_ORDER ||
memSize < PPMD7_MIN_MEM_SIZE ||
memSize > PPMD7_MAX_MEM_SIZE)
return SZ_ERROR_UNSUPPORTED;
Ppmd7_Construct(&ppmd);
if (!Ppmd7_Alloc(&ppmd, memSize, allocMain))
return SZ_ERROR_MEM;
Ppmd7_Init(&ppmd, order);
}
{
CPpmd7z_RangeDec rc;
Ppmd7z_RangeDec_CreateVTable(&rc);
rc.Stream = &s.p;
if (!Ppmd7z_RangeDec_Init(&rc))
res = SZ_ERROR_DATA;
else if (s.extra)
res = (s.res != SZ_OK ? s.res : SZ_ERROR_DATA);
else
{
SizeT i;
for (i = 0; i < outSize; i++)
{
int sym = Ppmd7_DecodeSymbol(&ppmd, &rc.p);
if (s.extra || sym < 0)
break; /* ran out of input or decode error */
outBuffer[i] = (Byte)sym;
}
if (i != outSize)
res = (s.res != SZ_OK ? s.res : SZ_ERROR_DATA);
/* require exact input consumption and a clean range-coder finish */
else if (s.processed + (s.cur - s.begin) != inSize || !Ppmd7z_RangeDec_IsFinishedOK(&rc))
res = SZ_ERROR_DATA;
}
}
Ppmd7_Free(&ppmd, allocMain);
return res;
}
#endif
/* Decode an LZMA stream directly into outBuffer (used as the dictionary).
 * Input is pulled from the look-in stream in windows of up to 256 KiB.
 * Succeeds only if exactly outSize bytes are produced with no input left
 * over and the decoder reports a valid finish status. */
static SRes SzDecodeLzma(CSzCoderInfo *coder, UInt64 inSize, ILookInStream *inStream,
Byte *outBuffer, SizeT outSize, ISzAlloc *allocMain)
{
CLzmaDec state;
SRes res = SZ_OK;
LzmaDec_Construct(&state);
RINOK(LzmaDec_AllocateProbs(&state, coder->Props.data, (unsigned)coder->Props.size, allocMain));
/* decode straight into the caller's buffer instead of an internal dic */
state.dic = outBuffer;
state.dicBufSize = outSize;
LzmaDec_Init(&state);
for (;;)
{
Byte *inBuf = NULL;
size_t lookahead = (1 << 18);
if (lookahead > inSize)
lookahead = (size_t)inSize;
res = inStream->Look((void *)inStream, (const void **)&inBuf, &lookahead);
if (res != SZ_OK)
break;
{
SizeT inProcessed = (SizeT)lookahead, dicPos = state.dicPos;
ELzmaStatus status;
res = LzmaDec_DecodeToDic(&state, outSize, inBuf, &inProcessed, LZMA_FINISH_END, &status);
lookahead -= inProcessed;
inSize -= inProcessed;
if (res != SZ_OK)
break;
/* stop on full output, or on no progress (input exhausted) */
if (state.dicPos == state.dicBufSize || (inProcessed == 0 && dicPos == state.dicPos))
{
if (state.dicBufSize != outSize || lookahead != 0 ||
(status != LZMA_STATUS_FINISHED_WITH_MARK &&
status != LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK))
res = SZ_ERROR_DATA;
break;
}
res = inStream->Skip((void *)inStream, inProcessed);
if (res != SZ_OK)
break;
}
}
LzmaDec_FreeProbs(&state, allocMain);
return res;
}
/* LZMA2 variant of SzDecodeLzma; properties are a single byte, and unlike
 * plain LZMA only an explicit finish mark is accepted. */
static SRes SzDecodeLzma2(CSzCoderInfo *coder, UInt64 inSize, ILookInStream *inStream,
Byte *outBuffer, SizeT outSize, ISzAlloc *allocMain)
{
CLzma2Dec state;
SRes res = SZ_OK;
Lzma2Dec_Construct(&state);
if (coder->Props.size != 1)
return SZ_ERROR_DATA;
RINOK(Lzma2Dec_AllocateProbs(&state, coder->Props.data[0], allocMain));
state.decoder.dic = outBuffer;
state.decoder.dicBufSize = outSize;
Lzma2Dec_Init(&state);
for (;;)
{
Byte *inBuf = NULL;
size_t lookahead = (1 << 18);
if (lookahead > inSize)
lookahead = (size_t)inSize;
res = inStream->Look((void *)inStream, (const void **)&inBuf, &lookahead);
if (res != SZ_OK)
break;
{
SizeT inProcessed = (SizeT)lookahead, dicPos = state.decoder.dicPos;
ELzmaStatus status;
res = Lzma2Dec_DecodeToDic(&state, outSize, inBuf, &inProcessed, LZMA_FINISH_END, &status);
lookahead -= inProcessed;
inSize -= inProcessed;
if (res != SZ_OK)
break;
if (state.decoder.dicPos == state.decoder.dicBufSize || (inProcessed == 0 && dicPos == state.decoder.dicPos))
{
if (state.decoder.dicBufSize != outSize || lookahead != 0 ||
(status != LZMA_STATUS_FINISHED_WITH_MARK))
res = SZ_ERROR_DATA;
break;
}
res = inStream->Skip((void *)inStream, inProcessed);
if (res != SZ_OK)
break;
}
}
Lzma2Dec_FreeProbs(&state, allocMain);
return res;
}
/* "Copy" codec: transfer exactly inSize bytes from the look-in stream to
 * outBuffer in chunks of up to 256 KiB. The caller guarantees outBuffer can
 * hold inSize bytes. Returns SZ_ERROR_INPUT_EOF if the stream ends early. */
static SRes SzDecodeCopy(UInt64 inSize, ILookInStream *inStream, Byte *outBuffer)
{
while (inSize > 0)
{
void *inBuf;
size_t curSize = (1 << 18);
if (curSize > inSize)
curSize = (size_t)inSize;
RINOK(inStream->Look((void *)inStream, (const void **)&inBuf, &curSize));
if (curSize == 0)
return SZ_ERROR_INPUT_EOF;
memcpy(outBuffer, inBuf, curSize);
outBuffer += curSize;
inSize -= curSize;
RINOK(inStream->Skip((void *)inStream, curSize));
}
return SZ_OK;
}
/* True if method id m is one of the primary (output-producing) codecs this
 * decoder supports; PPMd counts only when compiled in. */
static Bool IS_MAIN_METHOD(UInt32 m)
{
  if (m == k_Copy || m == k_LZMA || m == k_LZMA2)
    return True;
#ifdef _7ZIP_PPMD_SUPPPORT
  if (m == k_PPMD)
    return True;
#endif
  return False;
}
static Bool IS_SUPPORTED_CODER(const CSzCoderInfo *c)
{
return
c->NumInStreams == 1 &&
c->NumOutStreams == 1 &&
c->MethodID <= (UInt32)0xFFFFFFFF &&
IS_MAIN_METHOD((UInt32)c->MethodID);
}
#define IS_BCJ2(c) ((c)->MethodID == k_BCJ2 && (c)->NumInStreams == 4 && (c)->NumOutStreams == 1)
/* Validate that a folder's coder graph matches one of the three topologies
 * this decoder can handle: a single main coder, a main coder followed by a
 * BCJ/ARM branch filter, or the 4-coder BCJ2 layout with its fixed stream
 * wiring. Anything else is rejected as unsupported. */
static SRes CheckSupportedFolder(const CSzFolder *f)
{
if (f->NumCoders < 1 || f->NumCoders > 4)
return SZ_ERROR_UNSUPPORTED;
if (f->Coders[0].MethodID == 0x06F10701) /* 7z AES codec id: report as encrypted */
return SZ_ERROR_ENCRYPTED;
if (!IS_SUPPORTED_CODER(&f->Coders[0]))
return SZ_ERROR_UNSUPPORTED;
if (f->NumCoders == 1)
{
/* single coder: exactly one pack stream, no bind pairs */
if (f->NumPackStreams != 1 || f->PackStreams[0] != 0 || f->NumBindPairs != 0)
return SZ_ERROR_UNSUPPORTED;
return SZ_OK;
}
if (f->NumCoders == 2)
{
/* main coder + branch filter (BCJ or ARM) bound to its output */
CSzCoderInfo *c = &f->Coders[1];
if (c->MethodID > (UInt32)0xFFFFFFFF ||
c->NumInStreams != 1 ||
c->NumOutStreams != 1 ||
f->NumPackStreams != 1 ||
f->PackStreams[0] != 0 ||
f->NumBindPairs != 1 ||
f->BindPairs[0].InIndex != 1 ||
f->BindPairs[0].OutIndex != 0)
return SZ_ERROR_UNSUPPORTED;
switch ((UInt32)c->MethodID)
{
case k_BCJ:
case k_ARM:
break;
default:
return SZ_ERROR_UNSUPPORTED;
}
return SZ_OK;
}
if (f->NumCoders == 4)
{
/* canonical BCJ2 layout: two auxiliary main coders feeding the BCJ2
 * mixer, with the exact pack-stream order and bind pairs 7z produces */
if (!IS_SUPPORTED_CODER(&f->Coders[1]) ||
!IS_SUPPORTED_CODER(&f->Coders[2]) ||
!IS_BCJ2(&f->Coders[3]))
return SZ_ERROR_UNSUPPORTED;
if (f->NumPackStreams != 4 ||
f->PackStreams[0] != 2 ||
f->PackStreams[1] != 6 ||
f->PackStreams[2] != 1 ||
f->PackStreams[3] != 0 ||
f->NumBindPairs != 3 ||
f->BindPairs[0].InIndex != 5 || f->BindPairs[0].OutIndex != 0 ||
f->BindPairs[1].InIndex != 4 || f->BindPairs[1].OutIndex != 1 ||
f->BindPairs[2].InIndex != 3 || f->BindPairs[2].OutIndex != 2)
return SZ_ERROR_UNSUPPORTED;
return SZ_OK;
}
return SZ_ERROR_UNSUPPORTED;
}
/* Sum of the first `index` entries of `values` (offset of pack stream
 * `index` when applied to the pack-size table). */
static UInt64 GetSum(const UInt64 *values, UInt32 index)
{
  UInt64 total = 0;
  const UInt64 *p = values;
  const UInt64 *stop = values + index;
  while (p != stop)
    total += *p++;
  return total;
}
#define CASE_BRA_CONV(isa) case k_ ## isa: isa ## _Convert(outBuffer, outSize, 0, 0); break;
/* Decode one folder into outBuffer, assuming CheckSupportedFolder() has
 * validated the topology. For the 4-coder BCJ2 layout, the two auxiliary
 * coder outputs go to tempBuf[0]/tempBuf[1] and the raw control stream to
 * tempBuf[2]; the main output is decoded into the tail of outBuffer and
 * BCJ2-mixed in place. tempBuf[] entries are allocated here and freed by
 * the caller (SzFolder_Decode) even on error. */
static SRes SzFolder_Decode2(const CSzFolder *folder, const UInt64 *packSizes,
ILookInStream *inStream, UInt64 startPos,
Byte *outBuffer, SizeT outSize, ISzAlloc *allocMain,
Byte *tempBuf[])
{
UInt32 ci;
SizeT tempSizes[3] = { 0, 0, 0};
SizeT tempSize3 = 0;
Byte *tempBuf3 = 0;
RINOK(CheckSupportedFolder(folder));
for (ci = 0; ci < folder->NumCoders; ci++)
{
CSzCoderInfo *coder = &folder->Coders[ci];
if (IS_MAIN_METHOD((UInt32)coder->MethodID))
{
UInt32 si = 0;
UInt64 offset;
UInt64 inSize;
Byte *outBufCur = outBuffer;
SizeT outSizeCur = outSize;
if (folder->NumCoders == 4)
{
/* BCJ2 case: pick this coder's pack stream and output target */
UInt32 indices[] = { 3, 2, 0 };
UInt64 unpackSize = folder->UnpackSizes[ci];
si = indices[ci];
if (ci < 2)
{
/* auxiliary outputs go to temporary buffers */
Byte *temp;
outSizeCur = (SizeT)unpackSize;
if (outSizeCur != unpackSize)
return SZ_ERROR_MEM;
temp = (Byte *)IAlloc_Alloc(allocMain, outSizeCur);
if (temp == 0 && outSizeCur != 0)
return SZ_ERROR_MEM;
outBufCur = tempBuf[1 - ci] = temp;
tempSizes[1 - ci] = outSizeCur;
}
else if (ci == 2)
{
/* main output is decoded into the tail of outBuffer so the
 * in-place BCJ2 mix cannot overwrite undecoded data */
if (unpackSize > outSize) /* check it */
return SZ_ERROR_PARAM;
tempBuf3 = outBufCur = outBuffer + (outSize - (size_t)unpackSize);
tempSize3 = outSizeCur = (SizeT)unpackSize;
}
else
return SZ_ERROR_UNSUPPORTED;
}
offset = GetSum(packSizes, si);
inSize = packSizes[si];
RINOK(LookInStream_SeekTo(inStream, startPos + offset));
if (coder->MethodID == k_Copy)
{
if (inSize != outSizeCur) /* check it */
return SZ_ERROR_DATA;
RINOK(SzDecodeCopy(inSize, inStream, outBufCur));
}
else if (coder->MethodID == k_LZMA)
{
RINOK(SzDecodeLzma(coder, inSize, inStream, outBufCur, outSizeCur, allocMain));
}
else if (coder->MethodID == k_LZMA2)
{
RINOK(SzDecodeLzma2(coder, inSize, inStream, outBufCur, outSizeCur, allocMain));
}
else
{
#ifdef _7ZIP_PPMD_SUPPPORT
RINOK(SzDecodePpmd(coder, inSize, inStream, outBufCur, outSizeCur, allocMain));
#else
return SZ_ERROR_UNSUPPORTED;
#endif
}
}
else if (coder->MethodID == k_BCJ2)
{
/* read the raw BCJ2 control stream, then mix the three decoded
 * streams into the final output */
UInt64 offset = GetSum(packSizes, 1);
UInt64 s3Size = packSizes[1];
SRes res;
if (ci != 3)
return SZ_ERROR_UNSUPPORTED;
RINOK(LookInStream_SeekTo(inStream, startPos + offset));
tempSizes[2] = (SizeT)s3Size;
if (tempSizes[2] != s3Size)
return SZ_ERROR_MEM;
tempBuf[2] = (Byte *)IAlloc_Alloc(allocMain, tempSizes[2]);
if (tempBuf[2] == 0 && tempSizes[2] != 0)
return SZ_ERROR_MEM;
res = SzDecodeCopy(s3Size, inStream, tempBuf[2]);
RINOK(res)
res = Bcj2_Decode(
tempBuf3, tempSize3,
tempBuf[0], tempSizes[0],
tempBuf[1], tempSizes[1],
tempBuf[2], tempSizes[2],
outBuffer, outSize);
RINOK(res)
}
else
{
/* branch filter (coder 1 in the 2-coder layout), applied in place */
if (ci != 1)
return SZ_ERROR_UNSUPPORTED;
switch(coder->MethodID)
{
case k_BCJ:
{
UInt32 state;
x86_Convert_Init(state);
x86_Convert(outBuffer, outSize, 0, &state, 0);
break;
}
CASE_BRA_CONV(ARM)
default:
return SZ_ERROR_UNSUPPORTED;
}
}
}
return SZ_OK;
}
/* Public entry point: decode a folder into outBuffer, owning the lifetime
 * of the temporary buffers SzFolder_Decode2 may allocate (they are freed
 * here on both success and failure). */
SRes SzFolder_Decode(const CSzFolder *folder, const UInt64 *packSizes,
ILookInStream *inStream, UInt64 startPos,
Byte *outBuffer, size_t outSize, ISzAlloc *allocMain)
{
Byte *tempBuf[3] = { 0, 0, 0};
int i;
SRes res = SzFolder_Decode2(folder, packSizes, inStream, startPos,
outBuffer, (SizeT)outSize, allocMain, tempBuf);
for (i = 0; i < 3; i++)
IAlloc_Free(allocMain, tempBuf[i]);
return res;
}
| gpl-2.0 |
ForAEdesWeb/AEW11 | components/com_djcatalog2/themes/bootstrapped/css/theme.css | 16626 |
/* general */
/* Classic clearfix: generated content clears floats; the inline-block /
 * block toggle and the "* html" rule are IE6/7 hasLayout workarounds. */
.djc_clearfix:after {content: "."; display: block; height: 0; clear: both; visibility: hidden;}
.djc_clearfix {min-width: 0; display: inline-block; /* \*/ display: block;} /* trigger hasLayout for IE7. Hide from IE Mac */
* html .djc_clearfix {height: 1%;} /* Hide from IE Mac */
.clear,
.djc_clear {
clear: both;
}
/* Lightbox navigation links: keep their backgrounds transparent. */
#lbPrevLink,
#lbNextLink {
background-color: transparent !important;
}
/* Component root: positioning context for absolutely positioned children. */
#djcatalog {
position: relative;
}
#djcatalog a img {
/*border: none;*/
outline: none;
}
#djcatalog .djc_images a:link,
#djcatalog .djc_images a:visited,
#djcatalog .djc_images a:hover,
#djcatalog .djc_images a:focus,
#djcatalog .djc_image a:link,
#djcatalog .djc_image a:visited,
#djcatalog .djc_image a:hover,
#djcatalog .djc_image a:focus {
outline: none;
background: none;
zoom: 1;
}
/* single item/category/producer images */
#djcatalog .djc_images {
float: right;
margin-left: 20px;
margin-bottom: 20px;
}
#djcatalog .djc_mainimage {
text-align: center;
overflow: hidden;
}
#djcatalog .djc_thumbnails {
clear: both;
}
#djcatalog .djc_thumbnail {
float: left;
text-align: center;
}
#djcatalog .djc_mainimage img {
display: block;
}
#djcatalog .djc_mainimage a,
#djcatalog .djc_thumbnail a{
display: block !important;
}
#djcatalog .djc_mainimage img,
#djcatalog .djc_thumbnail img {
/* background: url(../images/bg2.png) repeat;
border-radius: 4px 4px 4px 4px;
-moz-border-radius: 4px;
-webkit-border-radius: 4px;
vertical-align: middle;
display: inline;*/
}
#djcatalog.djc_item .djc_featured_image {
float: left;
margin: 0 0 3px;
}
#djcatalog.djc_item .djc_producer_items_link {
margin: 0 5px;
}
#djcatalog.djc_item .djc_fulltext {
padding-top: 10px;
}
/* subcategories */
#djcatalog .djc_subcategories_grid .djc_subcategory {
float: left;
}
#djcatalog .djc_subcategories_grid .djc_subcategory_bg {
margin: 2px;
}
#djcatalog .djc_subcategories_grid .djc_subcategory_col_first .djc_subcategory_bg {
margin-left: 0;
}
#djcatalog .djc_subcategories_grid .djc_subcategory_col_last .djc_subcategory_bg {
margin-right: 0;
}
#djcatalog .djc_subcategories_grid .djc_subcategory_in {
padding: 10px 20px;
}
#djcatalog .djc_subcategories_grid {
margin-left: -10px;
}
#djcatalog .djc_subcategories_grid .djc_subcategory_bg {
margin: 0 0 10px 10px;
}
#djcatalog .djc_subcategories_grid .djc_image {
text-align: center;
margin: 0 0 10px;
overflow: hidden;
}
#djcatalog .djc_subcategories_grid .djc_title {
text-align: center;
}
/*#djcatalog .djc_subcategories_grid .djc_image img {
background: url(../images/bg1.png) repeat;
border-radius: 4px 4px 4px 4px;
-moz-border-radius: 4px;
-webkit-border-radius: 4px;
}*/
/* items - general */
/*#djcatalog .djc_items .djc_image img,
#djcatalog .djc_related_items .djc_image img {
background: url(../images/bg1.png) repeat;
border-radius: 4px 4px 4px 4px;
-moz-border-radius: 4px;
-webkit-border-radius: 4px;
}*/
#djcatalog .thumbnail {
padding: 19px;
margin-bottom: 10px;
}
#djcatalog .djc_items .djc_image,
#djcatalog .djc_related_items .djc_image {
text-align: center;
margin: 0 0 10px 0;
overflow: hidden;
}
#djcatalog .djc_items .djc2_cols_1 .djc_image,
#djcatalog .djc_items .djc2_cols_2 .djc_image {
float: left;
margin: 0 10px 10px 0px;
}
#djcatalog .djc_items .djc2_cols_1 .djc_introtext,
#djcatalog .djc_items .djc2_cols_2 .djc_introtext {
clear: both;
}
#djcatalog .djc_producers .djc_producer_item .djc_image {
float: none;
text-align: center;
}
#djcatalog .djc_producers .djc_producer_item .djc_title h3 {
text-align: center;
}
/* items filters/toolbar */
/*#djcatalog .djc_filters .djc_filters_in,
#djcatalog .djc_order .djc_order_in {
padding: 10px;
background: url(../images/bg2.png) repeat;
border-radius: 8px 8px 8px 8px;
-moz-border-radius: 8px;
-webkit-border-radius: 8px;
}*/
#djcatalog .djc_filters_in {
margin-bottom: 10px;
margin-top: 10px;
}
/* Reset form spacing inside the filters toolbar.
   (The original rule declared "margin: 0;" twice; the duplicate was
   redundant and has been removed.) */
#djcatalog .djc_filters form {
	margin: 0;
}
#djcatalog .djc_filters select {
margin: 0;
}
#djcatalog .djc_filters input.inputbox {
margin: 0;
}
#djcatalog .djc_filter_list,
#djcatalog .djc_filter_search,
#djcatalog .djc_order_buttons {
list-style: none;
margin: 0;
padding: 0;
}
#djcatalog .djc_filter_list {
margin: 0 0 10px 0;
}
#djcatalog .djc_atoz .djc_atoz_in {
padding: 10px 0;
}
#djcatalog .djc_atoz .djc_atoz_in {
margin-bottom: 10px;
}
#djcatalog .djc_atoz_list {
text-align: center;
margin: 0;
padding: 0;
}
#djcatalog .djc_atoz_list li {
display: block;
float: left;
margin: 0 0.2%;
padding: 0;
list-style: none;
background: none;
width: 3.44%;
text-align: center;
overflow: hidden;
}
#djcatalog .djc_atoz_list li a span,
#djcatalog .djc_atoz_list li span span {
display: block;
padding: 2px 0;
text-transform: uppercase;
}
/*#djcatalog .djc_atoz_list li a span {
background: url(../images/bg2.png) repeat;
}
#djcatalog .djc_atoz_list li span span {
background: url(../images/bg1.png) repeat;
}
#djcatalog .djc_atoz_list li a:hover span {
background: url(../images/bg3.png) repeat;
}*/
#djcatalog .djc_filter_list li,
#djcatalog .djc_filter_search li,
#djcatalog .djc_order_buttons li {
display: block;
float: left;
list-style: none;
padding: 0;
margin: 0 10px 0 0;
background: none;
}
#djcatalog .djc_filter_list li span,
#djcatalog .djc_filter_search li span,
#djcatalog .djc_order_buttons li span {
font-weight: bold;
}
#djcatalog .djc_order_in {
margin-bottom: 10px;
}
/*#djcatalog .djc_readon {
margin: 0.5em 0;
}*/
#djcatalog .djc_layout_switch {
margin: 10px 0;
}
#djcatalog .djc_layout_buttons {
list-style: none;
margin: 0;
padding: 0;
float: right;
}
#djcatalog .djc_layout_buttons li {
list-style: none;
background: none;
padding: 0;
margin: 0 0 0 4px;
float: right;
}
#djcatalog .djc_layout_buttons li a {
display: block;
}
#djcatalog .djc_layout_buttons li a.active,
#djcatalog .djc_layout_buttons li a:hover {
}
/* items / related items - grid */
#djcatalog .djc_items .djc_item,
#djcatalog .djc_related_items .djc_item {
float: left;
}
#djcatalog .djc_items .djc_item_bg,
#djcatalog .djc_related_items .djc_item_bg {
/* background: url(../images/bg1.png) repeat;
margin: 2px;*/
position: relative;
}
#djcatalog .well {
background-color: #fff;
}
#djcatalog .featured_item .djc_item_bg {
background-color: #f5f5f5;
}
#djcatalog .djc_item_row {
margin-left: -10px;
}
#djcatalog .djc_item_bg {
margin: 0 0 10px 10px;
}
#djcatalog .djc_items .djc_item .djc_featured_image,
#djcatalog .featured_item .djc_featured_image {
position: absolute;
top: 4px;
right: 4px;
left: auto;
z-index: 2;
}
#djcatalog .djc_item .djc_title h3 {
margin-top: 0;
margin-bottom: 0;
}
#djcatalog .djc_item .djc_introtext {
padding-top: 10px;
}
#djcatalog .djc_item .djc_price span {
font-weight: bold;
}
/* discount price */
.djc_price span.djc_price_old {
text-decoration: line-through;
}
#djcatalog .djc_item_col_first .djc_item_bg {
margin-left: 0;
}
#djcatalog .djc_item_col_last .djc_item_bg {
margin-right: 0;
}
#djcatalog .djc_items .djc_item_in,
#djcatalog .djc_related_items .djc_item_in {
padding: 10px 20px;
}
/* items - table */
/*#djcatalog .djc_items_table td,
#djcatalog .djc_items_table th {
padding: 4px 8px;
}
*/
#djcatalog tr.featured_item {
background: url(../images/bg1.png) repeat;
}
#djcatalog .djc_items_table .djc_featured_image {
position: static;
margin: 0 0 0 4px;
left: auto;
right: auto;
top: auto;
}
.djc_items_table .djc_price {
font-weight: bold;
}
/* mod_djc2items module */
/*div.mod_djc_item .djc_category_info,
div.mod_djc_item .djc_producer_info,
div.mod_djc_item .djc_price {
font-size: 90%;
opacity: 0.7;
margin: 0.5em 0;
}*/
div.mod_djc_item {
margin-top: 20px;
}
div.mod_djc_item:first-child {
margin-top: 0;
}
div.mod_djc_item .djc_price span {
font-weight: bold;
}
div.mod_djc_item .djc_introtext {
padding-top: 10px;
}
/* mod_djc2filters module */
div.mod_djc2filters dl {
margin: 1em 0 ;
padding: 0;
}
div.mod_djc2filters dl dt {
font-weight: bold;
margin: 0.5em 0;
padding: 0;
}
div.mod_djc2filters dl dd {
margin: 0;
padding: 0;
}
/* mod producers */
select#mod_djc2producers_pid {
max-width: 100%;
box-sizing: border-box;
-moz-box-sizing: border-box;
-webkit-box-sizing: border-box;
}
/* mod search */
.djc_mod_search input#mod_djcatsearch {
max-width: 110px;
}
.djc_mod_search button {
margin-bottom: 9px;
}
/* custom attributes */
div.djc_attributes {
margin: 1em 0;
clear: both;
}
.djc_attributes table {
border-collapse: collapse;
border-spacing: 0;
width: 100%;
border: none !important;
-webkit-box-shadow: 0px 0px 0px 1px rgba(0, 0, 0, 0.1);
box-shadow: 0px 0px 0px 1px rgba(0, 0, 0, 0.1);
}
.djc_attributes table tr.djc_attribute {
border: none;
}
.djc_attributes table tr.djc_attribute td {
padding: 10px;
border: none;
vertical-align: middle;
}
.djc_attributes table tr.djc_attribute td.djc_label {
font-weight: bold;
background: url(../images/bg1.png) repeat;
width: 25%;
-webkit-box-shadow: inset -10px 0px 10px -10px rgba(0, 0, 0, 0.2);
box-shadow: inset -10px 0px 10px -10px rgba(0, 0, 0, 0.2);
}
.djc_attributes table tr.djc_attribute:nth-child(2n+1) td.djc_label {
background: url(../images/bg2.png) repeat;
}
.djc_attributes table tr.djc_attribute:nth-child(2n+1) td.djc_value {
background: url(../images/bg1.png) repeat;
}
#djcatalog .djc_items_table .djc_attributes {
padding: 0;
vertical-align: top;
}
.djc_items_table .djc_attributes table {
box-shadow: none;
-webkit-box-shadow: none;
}
.djc_items_table .djc_attributes table td {
display: block !important;
}
.djc_items_table .djc_attributes table tr.djc_attribute td.djc_label {
width: auto;
-webkit-box-shadow: none;
box-shadow: none;
}
.djc_items_table .djc_attributes table tr.djc_attribute td.djc_label {
background: url(../images/bg2.png) repeat;
}
.djc_items_table .djc_attributes table tr.djc_attribute td.djc_value {
background: none;
}
#djcatalog .nav-tabs>li>a {
cursor: pointer;
}
#djcatalog .accordion-heading .accordion-toggle {
cursor: pointer;
}
#djcatalog .djc_contact_form {
clear: both;
}
/* social buttons code: top, after title, after desc, bottom */
.djc_social_t {
margin: 0 0 1em 0;
}
.djc_social_at {
display: table-cell;
}
.djc_social_ad {
margin: 0.5em 0;
}
.djc_social_b {
margin: 0.5em 0 0 0;
}
.djc_comments {
clear: both;
margin: 0.5em 0;
}
/* navigation */
#djcatalog .djc_product_top_nav {
clear: both;
margin: 0 0 0.5em 0;
}
#djcatalog .djc_product_bottom_nav {
clear: both;
margin: 0.5em 0 0 0;
text-align: center;
}
#djcatalog .djc_product_top_nav a.djc_prev_btn {
float: left;
}
#djcatalog .djc_product_top_nav a.djc_next_btn {
float: right;
}
#djcatalog .djc_product_top_nav a.djc_prev_btn,
#djcatalog .djc_product_top_nav a.djc_next_btn {
display: block;
}
#djcatalog .djc_product_bottom_nav a.djc_prev_btn,
#djcatalog .djc_product_bottom_nav a.djc_next_btn {
display: inline-block;
}
/* item submission */
#djcatalog #djc_my_items_table .djc_formbutton {
display: inline-block;
line-height: 2em;
float: left;
clear: both;
padding-left: 4px;
outline: none;
}
#djcatalog #djc_my_items_table .djc_formbutton:hover {
text-decoration: none;
}
#djcatalog #djc_my_items_table .djc_formbutton span {
font-size: 0.8em;
padding: 4px;
}
#djcatalog #djc_my_items_table .djc_formbutton img {
width: 16px;
height: 16px;
vertical-align: middle;
margin: 0;
padding: 0;
border: none;
float: none;
}
/*
* Uploader
*/
.djc_uploader table.djc_uploader_table {
width: auto;
}
.djc_uploader table thead th {
}
.djc_uploader table thead th.djc_uploader_img {
width: 75px;
}
.djc_uploader table thead th.djc_uploader_exclude,
.djc_uploader table thead th.djc_uploader_delete,
.djc_uploader table thead th.djc_uploader_hits {
width: 5%;
}
.djc_uploader table thead th.djc_uploader_caption {
max-width: 200px;
}
.djc_uploader table tbody tr {
cursor: move;
}
.djc_uploader table tbody td {
}
.djc_uploader table tbody tr:hover td {
background-color: #fafafa;
}
.djc_uploader table tbody td img {
max-width: 75px;
max-height: 45px;
}
.djc_uploader table tbody td input {
float: none;
}
/* NOTE(review): the original rule contained a bare "width:" with no value —
   an invalid declaration that browsers silently discard. It is removed here
   so the stylesheet parses cleanly; rendering is unchanged. If the caption
   input should fill its cell, add an explicit value (e.g. width: 100%). */
.djc_uploader table tbody td input.djc_uploader_caption {
}
.djc_uploader table.adminlist tfoot td {
text-align: left !important;
}
div.djc_addtocart {
margin: 1em 0;
}
/**
* Order
*/
#djc_orders_table td,
#djc_orders_table th {
width: 20%;
text-align: left;
}
#djc_orders_table .djc_thead_order_date,
#djc_orders_table .djc_thead_order_status,
#djc_orders_table .djc_td_order_date,
#djc_orders_table .djc_td_order_status {
text-align: center;
}
#djc_orders_table .djc_thead_order_total,
#djc_orders_table .djc_td_order_total {
text-align: right;
}
#djc_order_items_table .djc_th_title {
width: 50%;
}
#djc_order_items_table .djc_th_qty,
#djc_cart_checkout_table .djc_th_qty {
width: 5%;
text-align: center;
}
#djc_order_items_table tfoot td {
text-align: center;
font-weight: bold;
}
#djc_order_items_table .djc_ft_total_label {
text-align: right;
}
#djc_order_items_table .djc_th_price,
#djc_order_items_table .djc_td_price,
#djc_order_items_table .djc_td_qty {
text-align: center;
}
/**
* Cart
*/
.djc_cart_table {
border-collapse: collapse;
border-spacing: 0;
width: 100%;
}
.djc_cart_table form {
margin: 0;
padding: 0;
}
.djc_cart_table tbody td {
vertical-align: middle;
}
.djc_cart_table .djc_thead_order_date,
.djc_cart_table .djc_thead_order_status,
.djc_cart_table .djc_td_order_date,
.djc_cart_table .djc_td_order_status {
text-align: center;
}
.djc_cart_table .djc_thead_order_total,
.djc_cart_table .djc_td_order_total {
text-align: right;
}
.djc_cart_table .djc_th_title {
}
.djc_cart_table .djc_th_qty {
text-align: center;
width: 20%;
}
.djc_cart_table .djc_td_update_qty {
border-right: none;
text-align: center;
padding-right: 0;
}
.djc_cart_table .djc_td_cart_remove {
border-left: none;
padding-left: 0;
}
.djc_cart_table form input {
}
.djc_cart_table form input.btn {
border: none;
padding: 0;
margin: 0;
background: none;
font-size: 0;
cursor: pointer;
width: 32px;
height: 32px;
}
.djc_cart_table form input.djc_update_qty_btn {
background: url(../images/cart_update.png) center center no-repeat;
}
.djc_cart_table form input.djc_cart_remove_btn {
background: url(../images/cart_remove.png) center center no-repeat;
}
.djc_cart_table form input.djc_qty_input {
width: 20px;
margin-right: 10px;
}
.djc_cart_table .djc_td_title {
vertical-align: top;
}
.djc_cart_table .djc_td_title .djc_image {
float: left;
margin: 0 1em 0 0;
}
.djc_cart_table .djc_td_title .djc_image img {
max-width: 60px;
}
.djc_cart_table .djc_td_title strong a {
float: none;
display: inline-block;
}
.djc_cart_table tfoot td {
text-align: center;
font-weight: bold;
}
.djc_cart_table .djc_ft_total_label {
text-align: right;
}
.djc_cart_table .djc_th_price {
width: 1%;
}
.djc_cart_table .djc_th_price,
.djc_cart_table .djc_td_price,
.djc_cart_table .djc_td_qty {
text-align: center;
white-space: nowrap;
}
/**
* Cart popup
*/
.djc_cart_popup {
position: relative;
height: 100%;
}
.djc_cart_popup p {
display: block;
font-size: 1.2em;
font-weight: normal;
margin: 0;
padding: 0;
position: absolute;
text-align: center;
line-height: 2em;
top: 33%;
width: 100%;
}
.djc_cart_popup strong {
white-space: nowrap;
}
.djc_cart_popup a {
font-weight: bold;
}
#djc_cart_popup_loader {
display: none;
height: 40px;
left: 0;
top: 0;
width: 100%;
height: 100%;
position: fixed;
z-index: 65000;
opacity: 0.7;
}
#djc_cart_popup_loader span {
background: #000000 url(../images/ajax-loader.gif) center center no-repeat;
width: 32px;
height: 32px;
padding: 8px;
display: block;
position: absolute;
left: 50%;
top: 50%;
margin: -20px 0 0 -20px;
border-radius: 8px 8px 8px 8px;
}
#sbox-overlay.djc_cart_modal {
width: 100%;
height: 100%;
opacity: 0.5 !important;
}
/**
* Printable version
*/
#djcatalog.djc_printable a[href]:after {
content: "";
}
#djcatalog.djc_printable .djc_printable_version,
#djcatalog.djc_printable .djc_product_top_nav,
#djcatalog.djc_printable .djc_product_bottom_nav,
#djcatalog.djc_printable .djc_producer_items_link,
#djcatalog.djc_printable .djc_contact_form_toggler,
#djcatalog.djc_printable .djc_addtocart,
#djcatalog.djc_printable .djc_contact_form_wrapper {
display: none !important;
}
@media print {
.djc_back_button,
.djc_print_button {
display: none !important;
}
}
| gpl-2.0 |
fabianoleittes/discourse | script/import_scripts/kunena.rb | 4377 | require "mysql2"
require File.expand_path(File.dirname(__FILE__) + "/base.rb")
# Imports users, categories, topics and posts from a Kunena (Joomla forum
# extension) MySQL database into Discourse.
#
# Reads the jos_users / jos_kunena_* tables directly via mysql2 and feeds
# the rows to the ImportScripts::Base helpers (create_users,
# create_categories, create_posts).
class ImportScripts::Kunena < ImportScripts::Base
  # Name of the MySQL database holding the Joomla/Kunena tables.
  KUNENA_DB = "kunena"

  def initialize
    super
    # Keyed by Joomla user id; values are attribute hashes merged from
    # jos_users and jos_kunena_users (see parse_users).
    @users = {}
    # NOTE(review): connection settings are hard-coded — adjust host,
    # username and password for the local environment before running.
    @client = Mysql2::Client.new(
      host: "localhost",
      username: "root",
      #password: "password",
      database: KUNENA_DB
    )
  end

  # Entry point invoked by ImportScripts::Base#perform: users first, then
  # the category tree, then topics/posts, and finally a default admin.
  def execute
    parse_users
    puts "creating users"
    create_users(@users) do |id, user|
      # Kunena has no distinct suspension dates, so a banned user is
      # suspended from "now" effectively forever (100 years).
      { id: id,
        email: user[:email],
        username: user[:username],
        created_at: user[:created_at],
        bio_raw: user[:bio],
        moderator: user[:moderator] ? true : false,
        admin: user[:admin] ? true : false,
        suspended_at: user[:suspended] ? Time.zone.now : nil,
        suspended_till: user[:suspended] ? 100.years.from_now : nil }
    end
    # Free the (potentially large) user map once the users are imported.
    @users = nil
    # Categories are ordered parent-first so a child's parent has already
    # been imported when category_id_from_imported_category_id is called.
    create_categories(@client.query("SELECT id, parent, name, description, ordering FROM jos_kunena_categories ORDER BY parent, id;")) do |c|
      h = { id: c['id'], name: c['name'], description: c['description'], position: c['ordering'].to_i }
      if c['parent'].to_i > 0
        h[:parent_category_id] = category_id_from_imported_category_id(c['parent'])
      end
      h
    end
    import_posts
    begin
      create_admin(email: '[email protected]', username: UserNameSuggester.suggest('neil'))
    rescue => e
      # A pre-existing admin (or validation failure) should not abort the
      # whole import; report and continue.
      puts '', "Failed to create admin user"
      puts e.message
    end
  end

  # Builds @users by merging the core Joomla account table (id, username,
  # email, registration date) with the Kunena profile table (signature,
  # moderator flag, ban state).
  def parse_users
    # Need to merge data from joomla with kunena
    puts "fetching Joomla users data from mysql"
    results = @client.query("SELECT id, username, email, registerDate FROM jos_users;", cache_rows: false)
    results.each do |u|
      # Skip rows without a usable id, username and email.
      next unless u['id'].to_i > (0) && u['username'].present? && u['email'].present?
      # Normalise to Discourse's allowed character set and maximum length.
      username = u['username'].gsub(' ', '_').gsub(/[^A-Za-z0-9_]/, '')[0, User.username_length.end]
      if username.length < User.username_length.first
        # Repeat the name until it reaches the minimum length.
        # NOTE(review): this can overshoot the maximum length — presumably
        # later validation/suggestion trims it; verify against create_users.
        username = username * User.username_length.first
      end
      @users[u['id'].to_i] = { id: u['id'].to_i, username: username, email: u['email'], created_at: u['registerDate'] }
    end
    puts "fetching Kunena user data from mysql"
    results = @client.query("SELECT userid, signature, moderator, banned FROM jos_kunena_users;", cache_rows: false)
    results.each do |u|
      next unless u['userid'].to_i > 0
      user = @users[u['userid'].to_i]
      # Only enrich users that passed the jos_users filter above.
      if user
        user[:bio] = u['signature']
        user[:moderator] = (u['moderator'].to_i == 1)
        user[:suspended] = u['banned'].present?
      end
    end
  end

  # Imports all Kunena messages in id order, in batches. A message whose id
  # equals its thread id is a topic starter; everything else is a reply
  # attached to its parent's topic.
  def import_posts
    puts '', "creating topics and posts"
    total_count = @client.query("SELECT COUNT(*) count FROM jos_kunena_messages m;").first['count']
    batch_size = 1000
    batches(batch_size) do |offset|
      # Message metadata and body live in separate tables joined on mesid.
      results = @client.query("
        SELECT m.id id,
               m.thread thread,
               m.parent parent,
               m.catid catid,
               m.userid userid,
               m.subject subject,
               m.time time,
               t.message message
        FROM jos_kunena_messages m,
             jos_kunena_messages_text t
        WHERE m.id = t.mesid
        ORDER BY m.id
        LIMIT #{batch_size}
        OFFSET #{offset};
      ", cache_rows: false)
      break if results.size < 1
      # Fast path for resumed imports: skip batches already fully imported.
      next if all_records_exist? :posts, results.map { |p| p['id'].to_i }
      create_posts(results, total: total_count, offset: offset) do |m|
        skip = false
        mapped = {}
        mapped[:id] = m['id']
        # Unknown authors are attributed to the system user (-1).
        mapped[:user_id] = user_id_from_imported_user_id(m['userid']) || -1
        mapped[:raw] = m["message"]
        mapped[:created_at] = Time.zone.at(m['time'])
        if m['id'] == m['thread']
          # First post of a thread: becomes a new topic.
          mapped[:category] = category_id_from_imported_category_id(m['catid'])
          mapped[:title] = m['subject']
        else
          # Reply: attach to the already-imported parent's topic.
          parent = topic_lookup_from_imported_post_id(m['parent'])
          if parent
            mapped[:topic_id] = parent[:topic_id]
            mapped[:reply_to_post_number] = parent[:post_number] if parent[:post_number] > 1
          else
            puts "Parent post #{m['parent']} doesn't exist. Skipping #{m["id"]}: #{m["subject"][0..40]}"
            skip = true
          end
        end
        # Returning nil tells create_posts to skip this record.
        skip ? nil : mapped
      end
    end
  end
end
ImportScripts::Kunena.new.perform
| gpl-2.0 |
nobooya/e975-kk-kernel | arch/arm/mach-msm/lge/awifi/board-awifi-display.c | 60377 | /* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
* Copyright (c) 2012, LGE Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/bootmem.h>
#include <linux/msm_ion.h>
#include <asm/mach-types.h>
#include <mach/msm_memtypes.h>
#include <mach/board.h>
#include <mach/gpio.h>
#include <mach/gpiomux.h>
#include <mach/ion.h>
#include <mach/msm_bus_board.h>
#include <mach/socinfo.h>
#include "devices.h"
#include "board-awifi.h"
#include "../../../../drivers/video/msm/msm_fb.h"
#include "../../../../drivers/video/msm/msm_fb_def.h"
#include "../../../../drivers/video/msm/mipi_dsi.h"
#include <mach/board_lge.h>
#if defined(CONFIG_BACKLIGHT_I2C_BL)
#include <linux/i2c_bl.h>
#endif
#include <linux/i2c.h>
#include <linux/kernel.h>
#ifndef LGE_DSDR_SUPPORT
#define LGE_DSDR_SUPPORT
#endif
#ifdef CONFIG_LGE_KCAL
#ifdef CONFIG_LGE_QC_LCDC_LUT
extern int set_qlut_kcal_values(int kcal_r, int kcal_g, int kcal_b);
extern int refresh_qlut_display(void);
#else
#error only kcal by Qucalcomm LUT is supported now!!!
#endif
#endif
#ifdef CONFIG_FB_MSM_TRIPLE_BUFFER
/* prim = 1366 x 768 x 3(bpp) x 3(pages) */
#if defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_WXGA_PT)
#define MSM_FB_PRIM_BUF_SIZE roundup(768 * 1280 * 4 * 3, 0x10000)
#elif defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_FHD_PT) ||\
defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_FHD_INVERSE_PT)
#define MSM_FB_PRIM_BUF_SIZE roundup(1088 * 1920 * 4 * 3, 0x10000)
#elif defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_WUXGA_PT) ||\
defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_WUXGA_INVERSE_PT)
#define MSM_FB_PRIM_BUF_SIZE roundup(1208 * 1920 * 4 * 3, 0x10000)
#else
#define MSM_FB_PRIM_BUF_SIZE roundup(1920 * 1088 * 4 * 3, 0x10000)
#endif
#else
/* prim = 1366 x 768 x 3(bpp) x 2(pages) */
#if defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_WXGA_PT)
#define MSM_FB_PRIM_BUF_SIZE roundup(768 * 1280 * 4 * 2, 0x10000)
#elif defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_FHD_PT) ||\
defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_FHD_INVERSE_PT)
#define MSM_FB_PRIM_BUF_SIZE roundup(1088 * 1920 * 4 * 2, 0x10000)
#elif defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_WUXGA_PT) ||\
defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_WUXGA_INVERSE_PT)
#define MSM_FB_PRIM_BUF_SIZE roundup(1208 * 1920 * 4 * 2, 0x10000)
#else
#define MSM_FB_PRIM_BUF_SIZE roundup(1920 * 1088 * 4 * 2, 0x10000)
#endif
#endif /*CONFIG_FB_MSM_TRIPLE_BUFFER */
#ifdef LGE_DSDR_SUPPORT
#define MSM_FB_EXT_BUF_SIZE \
(roundup((1920 * 1088 * 4), 4096) * 3) /* 4 bpp x 3 page */
#else /* */
#ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL
#define MSM_FB_EXT_BUF_SIZE \
(roundup((1920 * 1088 * 2), 4096) * 1) /* 2 bpp x 1 page */
#elif defined(CONFIG_FB_MSM_TVOUT)
#define MSM_FB_EXT_BUF_SIZE \
(roundup((720 * 576 * 2), 4096) * 2) /* 2 bpp x 2 pages */
#else
#define MSM_FB_EXT_BUF_SIZE 0
#endif /* CONFIG_FB_MSM_HDMI_MSM_PANEL */
#endif /* */
#ifdef CONFIG_FB_MSM_WRITEBACK_MSM_PANEL
#if defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_FHD_PT) ||\
defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_FHD_INVERSE_PT)
#define MSM_FB_WFD_BUF_SIZE \
(roundup((1920 * 1088 * 2), 4096) * 3) /* 2 bpp x 3 page */
#elif defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_WUXGA_PT) ||\
defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_WUXGA_INVERSE_PT)
#define MSM_FB_WFD_BUF_SIZE \
(roundup((1920 * 1208 * 2), 4096) * 3) /* 2 bpp x 3 page */
#else
#define MSM_FB_WFD_BUF_SIZE \
(roundup((1280 * 736 * 2), 4096) * 3) /* 2 bpp x 3 page */
#endif
#else
#define MSM_FB_WFD_BUF_SIZE 0
#endif
#define MSM_FB_SIZE \
roundup(MSM_FB_PRIM_BUF_SIZE + \
MSM_FB_EXT_BUF_SIZE + MSM_FB_WFD_BUF_SIZE, 4096)
#ifdef CONFIG_FB_MSM_OVERLAY0_WRITEBACK
#if defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_WXGA_PT)
#define MSM_FB_OVERLAY0_WRITEBACK_SIZE roundup((768 * 1280 * 3 * 2), 4096)
#elif defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_FHD_PT) ||\
defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_FHD_INVERSE_PT)
#define MSM_FB_OVERLAY0_WRITEBACK_SIZE roundup((1088 * 1920 * 3 * 2), 4096)
#elif defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_WUXGA_PT) ||\
defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_WUXGA_INVERSE_PT)
#define MSM_FB_OVERLAY0_WRITEBACK_SIZE roundup((1208 * 1920 * 3 * 2), 4096)
#else
#define MSM_FB_OVERLAY0_WRITEBACK_SIZE (0)
#endif
#else
#define MSM_FB_OVERLAY0_WRITEBACK_SIZE (0)
#endif /* CONFIG_FB_MSM_OVERLAY0_WRITEBACK */
#ifdef CONFIG_FB_MSM_OVERLAY1_WRITEBACK
#define MSM_FB_OVERLAY1_WRITEBACK_SIZE roundup((1920 * 1088 * 3 * 2), 4096)
#else
#define MSM_FB_OVERLAY1_WRITEBACK_SIZE (0)
#endif /* CONFIG_FB_MSM_OVERLAY1_WRITEBACK */
static struct resource msm_fb_resources[] = {
{
.flags = IORESOURCE_DMA,
}
};
#define LVDS_CHIMEI_PANEL_NAME "lvds_chimei_wxga"
#define LVDS_FRC_PANEL_NAME "lvds_frc_fhd"
#define MIPI_VIDEO_TOSHIBA_WSVGA_PANEL_NAME "mipi_video_toshiba_wsvga"
#define MIPI_VIDEO_CHIMEI_WXGA_PANEL_NAME "mipi_video_chimei_wxga"
#define HDMI_PANEL_NAME "hdmi_msm"
#define MHL_PANEL_NAME "hdmi_msm,mhl_8334"
#define TVOUT_PANEL_NAME "tvout_msm"
#define LVDS_PIXEL_MAP_PATTERN_1 1
#define LVDS_PIXEL_MAP_PATTERN_2 2
#ifndef CONFIG_MACH_LGE
#ifdef CONFIG_FB_MSM_HDMI_AS_PRIMARY
static unsigned char hdmi_is_primary = 1;
#else
static unsigned char hdmi_is_primary;
#endif /* CONFIG_FB_MSM_HDMI_AS_PRIMARY */
static unsigned char mhl_display_enabled;
unsigned char apq8064_hdmi_as_primary_selected(void)
{
return hdmi_is_primary;
}
unsigned char apq8064_mhl_display_enabled(void)
{
return mhl_display_enabled;
}
static void set_mdp_clocks_for_wuxga(void);
#endif /* */
/*
 * msm_fb_detect_panel - tell the framebuffer core whether @name matches a
 * panel present on this board.
 *
 * On LGE builds (CONFIG_MACH_LGE) every requested panel is accepted
 * (returns 0). On reference boards, the accepted panel depends on the
 * detected machine (liquid/mtp/cdp/dtv) and, for liquid, on the platform
 * version; WUXGA-class panels additionally bump MDP clocks via
 * set_mdp_clocks_for_wuxga(). Returns 0 on match, -ENODEV otherwise.
 */
static int msm_fb_detect_panel(const char *name)
{
#ifndef CONFIG_MACH_LGE
	u32 version;
	if (machine_is_apq8064_liquid()) {
		version = socinfo_get_platform_version();
		/* Liquid v1.1 uses the MIPI WXGA panel; other revisions LVDS. */
		if ((SOCINFO_VERSION_MAJOR(version) == 1) &&
			(SOCINFO_VERSION_MINOR(version) == 1)) {
			if (!strncmp(name, MIPI_VIDEO_CHIMEI_WXGA_PANEL_NAME,
				strnlen(MIPI_VIDEO_CHIMEI_WXGA_PANEL_NAME,
					PANEL_NAME_MAX_LEN)))
				return 0;
		} else {
			if (!strncmp(name, LVDS_CHIMEI_PANEL_NAME,
				strnlen(LVDS_CHIMEI_PANEL_NAME,
					PANEL_NAME_MAX_LEN)))
				return 0;
		}
	} else if (machine_is_apq8064_mtp()) {
		if (!strncmp(name, MIPI_VIDEO_TOSHIBA_WSVGA_PANEL_NAME,
			strnlen(MIPI_VIDEO_TOSHIBA_WSVGA_PANEL_NAME,
				PANEL_NAME_MAX_LEN)))
			return 0;
	} else if (machine_is_apq8064_cdp()) {
		if (!strncmp(name, LVDS_CHIMEI_PANEL_NAME,
			strnlen(LVDS_CHIMEI_PANEL_NAME,
				PANEL_NAME_MAX_LEN)))
			return 0;
	} else if (machine_is_mpq8064_dtv()) {
		if (!strncmp(name, LVDS_FRC_PANEL_NAME,
			strnlen(LVDS_FRC_PANEL_NAME,
				PANEL_NAME_MAX_LEN))) {
			/* FHD FRC panel needs the higher (WUXGA) MDP clocks. */
			set_mdp_clocks_for_wuxga();
			return 0;
		}
	}

	/* HDMI is always available; raise clocks if it is the primary panel. */
	if (!strncmp(name, HDMI_PANEL_NAME,
		strnlen(HDMI_PANEL_NAME,
			PANEL_NAME_MAX_LEN))) {
		if (apq8064_hdmi_as_primary_selected())
			set_mdp_clocks_for_wuxga();
		return 0;
	}

	return -ENODEV;
#else
	/* LGE boards: accept any panel name the framebuffer core proposes. */
	return 0;
#endif /* */
}
static struct msm_fb_platform_data msm_fb_pdata = {
.detect_client = msm_fb_detect_panel,
};
static struct platform_device msm_fb_device = {
.name = "msm_fb",
.id = 0,
.num_resources = ARRAY_SIZE(msm_fb_resources),
.resource = msm_fb_resources,
.dev.platform_data = &msm_fb_pdata,
};
/*
 * apq8064_allocate_fb_region - carve out the framebuffer memory at boot.
 *
 * Allocates MSM_FB_SIZE bytes from bootmem with 4 KiB alignment and
 * publishes the physical range through msm_fb_resources[0] so the msm_fb
 * platform device picks it up. Must run at __init time, before the
 * framebuffer driver probes.
 */
void __init apq8064_allocate_fb_region(void)
{
	void *addr;
	unsigned long size;

	size = MSM_FB_SIZE;
	addr = alloc_bootmem_align(size, 0x1000);
	/* Resource describes the physical range [start, end] inclusive. */
	msm_fb_resources[0].start = __pa(addr);
	msm_fb_resources[0].end = msm_fb_resources[0].start + size - 1;
	pr_info("allocating %lu bytes at %p (%lx physical) for fb\n",
			size, addr, __pa(addr));
}
#define MDP_VSYNC_GPIO 0
static struct msm_bus_vectors mdp_init_vectors[] = {
{
.src = MSM_BUS_MASTER_MDP_PORT0,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 0,
.ib = 0,
},
};
static struct msm_bus_vectors mdp_ui_vectors[] = {
{
.src = MSM_BUS_MASTER_MDP_PORT0,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 577474560 * 2,//.ab = 2000000000, // 602603520 * 2,
.ib = 866211840 * 2,//.ib = 2000000000, // 753254400 * 2,
},
};
static struct msm_bus_vectors mdp_vga_vectors[] = {
/* VGA and less video */
{
.src = MSM_BUS_MASTER_MDP_PORT0,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 605122560 * 2,//.ab = 2000000000, // 602603520 * 2,
.ib = 756403200 * 2,//.ib = 2000000000, // 753254400 * 2,
},
};
static struct msm_bus_vectors mdp_720p_vectors[] = {
/* 720p and less video */
{
.src = MSM_BUS_MASTER_MDP_PORT0,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 660418560 * 2,//.ab = 2000000000, // 602603520 * 2,
.ib = 825523200 * 2,//.ib = 2000000000, // 753254400 * 2,
},
};
static struct msm_bus_vectors mdp_1080p_vectors[] = {
/* 1080p and less video */
{
.src = MSM_BUS_MASTER_MDP_PORT0,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 764098560 * 2,//.ab = 2000000000, // 602603520 * 2,
.ib = 955123200 * 2,//.ib = 2000000000, // 753254400 * 2,
},
};
static struct msm_bus_paths mdp_bus_scale_usecases[] = {
{
ARRAY_SIZE(mdp_init_vectors),
mdp_init_vectors,
},
{
ARRAY_SIZE(mdp_ui_vectors),
mdp_ui_vectors,
},
{
ARRAY_SIZE(mdp_ui_vectors),
mdp_ui_vectors,
},
{
ARRAY_SIZE(mdp_vga_vectors),
mdp_vga_vectors,
},
{
ARRAY_SIZE(mdp_720p_vectors),
mdp_720p_vectors,
},
{
ARRAY_SIZE(mdp_1080p_vectors),
mdp_1080p_vectors,
},
};
static struct msm_bus_scale_pdata mdp_bus_scale_pdata = {
mdp_bus_scale_usecases,
ARRAY_SIZE(mdp_bus_scale_usecases),
.name = "mdp",
};
static struct msm_panel_common_pdata mdp_pdata = {
.gpio = MDP_VSYNC_GPIO,
.mdp_max_clk = 266667000,
.mdp_max_bw = 3080000000UL,
.mdp_bw_ab_factor = 160,
.mdp_bw_ib_factor = 180,
.mdp_bus_scale_table = &mdp_bus_scale_pdata,
.mdp_rev = MDP_REV_44,
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
.mem_hid = BIT(ION_CP_MM_HEAP_ID),
#else
.mem_hid = MEMTYPE_EBI1,
#endif
/* for early backlight on for APQ8064 */
.cont_splash_enabled = 0x01,
.mdp_iommu_split_domain = 1,
};
/*
 * apq8064_mdp_writeback - record MDP overlay writeback buffer sizes.
 *
 * Stores the overlay0/overlay1 writeback sizes in mdp_pdata and, on
 * PMEM-based (non-ION) builds, grows the matching memtype reservation so
 * the memory is set aside at boot.
 */
void __init apq8064_mdp_writeback(struct memtype_reserve* reserve_table)
{
	mdp_pdata.ov0_wb_size = MSM_FB_OVERLAY0_WRITEBACK_SIZE;
	mdp_pdata.ov1_wb_size = MSM_FB_OVERLAY1_WRITEBACK_SIZE;
#if defined(CONFIG_ANDROID_PMEM) && !defined(CONFIG_MSM_MULTIMEDIA_USE_ION)
	/* With ION, the heap defined by mdp_pdata.mem_hid covers this
	 * memory instead, so no extra reservation is needed. */
	reserve_table[mdp_pdata.mem_hid].size +=
		mdp_pdata.ov0_wb_size;
	reserve_table[mdp_pdata.mem_hid].size +=
		mdp_pdata.ov1_wb_size;
#endif
}
#ifdef CONFIG_LGE_KCAL
extern int set_kcal_values(int kcal_r, int kcal_g, int kcal_b);
extern int refresh_kcal_display(void);
extern int get_kcal_values(int *kcal_r, int *kcal_g, int *kcal_b);
static struct kcal_platform_data kcal_pdata = {
.set_values = set_kcal_values,
.get_values = get_kcal_values,
.refresh_display = refresh_kcal_display
};
static struct platform_device kcal_platrom_device = {
.name = "kcal_ctrl",
.dev = {
.platform_data = &kcal_pdata,
}
};
#endif /* */
static struct resource hdmi_msm_resources[] = {
{
.name = "hdmi_msm_qfprom_addr",
.start = 0x00700000,
.end = 0x007060FF,
.flags = IORESOURCE_MEM,
},
{
.name = "hdmi_msm_hdmi_addr",
.start = 0x04A00000,
.end = 0x04A00FFF,
.flags = IORESOURCE_MEM,
},
{
.name = "hdmi_msm_irq",
.start = HDMI_IRQ,
.end = HDMI_IRQ,
.flags = IORESOURCE_IRQ,
},
};
static int hdmi_enable_5v(int on);
static int hdmi_core_power(int on, int show);
static int hdmi_cec_power(int on);
static int hdmi_gpio_config(int on);
static int hdmi_panel_power(int on);
static struct msm_hdmi_platform_data hdmi_msm_data = {
.irq = HDMI_IRQ,
.enable_5v = hdmi_enable_5v,
.core_power = hdmi_core_power,
.cec_power = hdmi_cec_power,
.panel_power = hdmi_panel_power,
.gpio_config = hdmi_gpio_config,
};
static struct platform_device hdmi_msm_device = {
.name = "hdmi_msm",
.id = 0,
.num_resources = ARRAY_SIZE(hdmi_msm_resources),
.resource = hdmi_msm_resources,
.dev.platform_data = &hdmi_msm_data,
};
/* Callback for the WFD (wireless display) driver: reports whether MDP is
 * configured with a split IOMMU domain (see mdp_pdata above). */
static char wfd_check_mdp_iommu_split_domain(void)
{
	return mdp_pdata.mdp_iommu_split_domain;
}
#ifdef CONFIG_FB_MSM_WRITEBACK_MSM_PANEL
static struct msm_wfd_platform_data wfd_pdata = {
.wfd_check_mdp_iommu_split = wfd_check_mdp_iommu_split_domain,
};
static struct platform_device wfd_panel_device = {
.name = "wfd_panel",
.id = 0,
.dev.platform_data = NULL,
};
static struct platform_device wfd_device = {
.name = "msm_wfd",
.id = -1,
.dev.platform_data = &wfd_pdata,
};
#endif
/* HDMI related GPIOs */
#define HDMI_CEC_VAR_GPIO 69
#define HDMI_DDC_CLK_GPIO 70
#define HDMI_DDC_DATA_GPIO 71
#define HDMI_HPD_GPIO 72
/*
 * Inter-step delays (milliseconds, per the instance initialisers below)
 * for the LCD panel power-on/off sequence. Field names read as
 * "<from-rail>_<to-rail>": the wait between switching the first rail and
 * the second.
 */
struct lcd_delay {
	unsigned lcdvdd_lcdvdd;	/* min gap between LCD_VDD off and next on */
	unsigned lcdvdd_iovcc;	/* LCD_VDD on -> IOVCC on */
	unsigned iovcc_vdda;	/* IOVCC on -> VDDA on */
	unsigned vdda;		/* settle time after VDDA on */
	unsigned vdda_iovcc;	/* VDDA off -> IOVCC off (power-down) */
	unsigned iovcc_lcdvdd;	/* IOVCC off -> LCD_VDD off (power-down) */
};
#ifdef CONFIG_SUPPORT_EVB2_BD
/* Define to delay for power sequence */
/* Power-sequence delays for the LD089WU1 panel (EVB2 boards). */
static struct lcd_delay lcd_power_sequence_delay_LD089WU1 = {
    .lcdvdd_lcdvdd = 400, /* 400 ms */
    .lcdvdd_iovcc = 120, /* 120 ms */
    .iovcc_vdda = 0, /* no delay */
    .vdda = 0, /* no delay */
    .vdda_iovcc = 1, /* 1 ms */
    .iovcc_lcdvdd = 10, /* 10ms */
};
#endif

/* Power-sequence delays for the default LD083WU1 panel. */
static struct lcd_delay lcd_power_sequence_delay_LD083WU1 = {
    .lcdvdd_lcdvdd = 400, /* 400 ms */
    .lcdvdd_iovcc = 0, /* no delay */
    .iovcc_vdda = 0, /* no delay */
    .vdda = 180, /* 180 ms */
    .vdda_iovcc = 1, /* 1 ms */
    .iovcc_lcdvdd = 10, /* 10ms */
};

/* Active delay table; switched to the LD089WU1 set in apq8064_init_fb()
 * when the board revision is EVB2. */
static struct lcd_delay *lcd_power_sequence_delay = &lcd_power_sequence_delay_LD083WU1;

/* Set once the DSI regulators/GPIO have been acquired. */
static bool dsi_power_on = false;
/*
 * mipi_dsi_panel_power - sequence the MIPI DSI panel supply rails.
 * @on: non-zero to power up, zero to power down.
 *
 * Rails involved: LCD_VDD 3.3V (LDO gated by PM8921 GPIO 26), IOVCC 1.8V
 * (LVS6) and VDDA 1.2V (L2).  The inter-rail delays come from the active
 * *lcd_power_sequence_delay table.  On the very first call the regulators
 * and the GPIO are acquired and L2 is programmed to 1.2 V; later calls
 * only toggle the rails.
 *
 * Returns 0 on success, negative errno on failure.
 *
 * NOTE(review): on first-call failure paths the regulators acquired so
 * far are not released with regulator_put() -- presumably acceptable
 * since panel bring-up has failed anyway; confirm.
 */
static int mipi_dsi_panel_power(int on)
{
    static struct regulator *reg_l2, *reg_lvs6;
    static int gpio26; /* LCD_VDD_EN (PM8921_GPIO_26) */
    static u64 p_down = 0;           /* jiffies_64 stamp of last power-down */
    static bool p_down_first = true; /* true until the first power-down */
    int rc;

    pr_debug("%s: state : %d\n", __func__, on);

    if (!dsi_power_on) /* LCD initial start (power side) */
    {
        pr_info("%s: initial start\n", __func__);

        reg_lvs6 = regulator_get(&msm_mipi_dsi1_device.dev, "dsi_iovcc");
        if (IS_ERR(reg_lvs6)) {
            pr_err("could not get 8921_lvs6, rc = %ld\n",
                PTR_ERR(reg_lvs6));
            return -ENODEV;
        }

        reg_l2 = regulator_get(&msm_mipi_dsi1_device.dev, "dsi_vdda");
        if (IS_ERR(reg_l2)) {
            pr_err("could not get 8921_l2, rc = %ld\n",
                PTR_ERR(reg_l2));
            return -ENODEV;
        }

        rc = regulator_set_voltage(reg_l2, 1200000, 1200000);
        if (rc) {
            pr_err("set_voltage l2 failed, rc=%d\n", rc);
            return -EINVAL;
        }

        gpio26 = PM8921_GPIO_PM_TO_SYS(26);
        rc = gpio_request(gpio26, "lcd_vdd_en");
        if (rc) {
            pr_err("request gpio 26 failed, rc=%d\n", rc);
            return -ENODEV;
        }

        dsi_power_on = true;
    }

    pr_info("%s: onoff = %d\n", __func__, on);

    if (on) /* LCD on start (power side) */
    {
        /*
         * Enforce the minimum LCD_VDD-off -> LCD_VDD-on gap.
         *
         * Fix: elapsed time is computed as a plain unsigned 64-bit
         * subtraction, which is wrap-safe by modular arithmetic.  The
         * previous wrap branch computed p_down - (~cur_jiffies) + 1,
         * which is NOT the elapsed jiffies count.
         */
        if ((p_down_first == false) && (lcd_power_sequence_delay->lcdvdd_lcdvdd > 0)) {
            u64 dur_jiffies = jiffies_64 - p_down;
#if HZ!=1000
            /* Convert jiffies to milliseconds when HZ != 1000. */
            dur_jiffies = div_u64(dur_jiffies * 1000, HZ);
#endif
            if (dur_jiffies < lcd_power_sequence_delay->lcdvdd_lcdvdd) {
                mdelay(lcd_power_sequence_delay->lcdvdd_lcdvdd - dur_jiffies);
            }
        }

        /* Enable LDO for LCD_VDD 3.3V */
        gpio_direction_output(gpio26, 1);

        /* Delay between LCDVCC to IOVCC. */
        if (lcd_power_sequence_delay->lcdvdd_iovcc)
            mdelay(lcd_power_sequence_delay->lcdvdd_iovcc);

        /* Set DSI VDDA current to 100mA */
        rc = regulator_set_optimum_mode(reg_l2, 100000);
        if (rc < 0) {
            pr_err("set_optimum_mode l2 failed, rc=%d\n", rc);
            return -EINVAL;
        }

        /* Enable DSI IOVCC 1.8V */
        rc = regulator_enable(reg_lvs6);
        if (rc) {
            pr_err("enable lvs6 failed, rc=%d\n", rc);
            return -ENODEV;
        }

        /* Delay between IOVCC to VDDA. */
        if (lcd_power_sequence_delay->iovcc_vdda)
            mdelay(lcd_power_sequence_delay->iovcc_vdda);

        /* Enable DSI VDDA 1.2V */
        rc = regulator_enable(reg_l2);
        if (rc) {
            pr_err("enable l2 failed, rc=%d\n", rc);
            return -ENODEV;
        }

        /* Delay for VDDA. */
        if (lcd_power_sequence_delay->vdda)
            mdelay(lcd_power_sequence_delay->vdda);
    }
    else /* LCD off start (power side) */
    {
        /* Disable DSI VDDA */
        rc = regulator_disable(reg_l2);
        if (rc) {
            pr_err("disable reg_l2 failed, rc=%d\n", rc);
            return -ENODEV;
        }

        /* Delay between VDDA to IOVCC. */
        if (lcd_power_sequence_delay->vdda_iovcc)
            mdelay(lcd_power_sequence_delay->vdda_iovcc);

        /* Disable DSI IOVCC */
        rc = regulator_disable(reg_lvs6);
        if (rc) {
            pr_err("disable lvs6 failed, rc=%d\n", rc);
            return -ENODEV;
        }

        /* Set DSI VDDA current to 100uA for power consumption */
        rc = regulator_set_optimum_mode(reg_l2, 100);
        if (rc < 0) {
            pr_err("set_optimum_mode l2 failed, rc=%d\n", rc);
            return -EINVAL;
        }

        /* Delay between IOVCC to LCD_VDDA. */
        if (lcd_power_sequence_delay->iovcc_lcdvdd)
            mdelay(lcd_power_sequence_delay->iovcc_lcdvdd);

        /* Disable LDO for LCD_VDD */
        gpio_direction_output(gpio26, 0);

        /* Remember when the panel went down so the next power-up can
         * honour the lcdvdd_lcdvdd minimum off-time. */
        if (lcd_power_sequence_delay->lcdvdd_lcdvdd > 0) {
            p_down = jiffies_64;
            p_down_first = false;
        }
    }
    return 0;
}
/* Report whether the continuous-splash screen is active (set in mdp_pdata). */
static char mipi_dsi_splash_is_enabled(void)
{
    return mdp_pdata.cont_splash_enabled;
}

/* Platform data handed to the MIPI DSI host driver. */
static struct mipi_dsi_platform_data mipi_dsi_pdata = {
    .dsi_power_save = mipi_dsi_panel_power,
    .splash_is_enabled = mipi_dsi_splash_is_enabled,
};
#if !defined(CONFIG_MACH_LGE)
/* Set once the LVDS regulators/GPIOs have been acquired. */
static bool lvds_power_on;

/*
 * lvds_panel_power - power sequencing for the (non-LGE) LVDS panel.
 * @on: non-zero to power up, zero to power down.
 *
 * First call acquires LVS7 (vdda), L2 (pll vdda, set to 1.2 V), the
 * external 3.3 V supply, PM8921 GPIO 26/36 and MPP 3; later calls only
 * toggle the rails and control GPIOs.  Returns 0 or negative errno.
 *
 * NOTE(review): as with mipi_dsi_panel_power(), first-call failure paths
 * do not release already-acquired regulators/GPIOs -- confirm this is
 * acceptable.
 */
static int lvds_panel_power(int on)
{
    static struct regulator *reg_lvs7, *reg_l2, *reg_ext_3p3v;
    static int gpio36, gpio26, mpp3;
    int rc;

    pr_debug("%s: on=%d\n", __func__, on);

    if (!lvds_power_on) {
        /* One-time acquisition of supplies and control lines. */
        reg_lvs7 = regulator_get(&msm_lvds_device.dev,
                "lvds_vdda");
        if (IS_ERR_OR_NULL(reg_lvs7)) {
            pr_err("could not get 8921_lvs7, rc = %ld\n",
                PTR_ERR(reg_lvs7));
            return -ENODEV;
        }

        reg_l2 = regulator_get(&msm_lvds_device.dev,
                "lvds_pll_vdda");
        if (IS_ERR_OR_NULL(reg_l2)) {
            pr_err("could not get 8921_l2, rc = %ld\n",
                PTR_ERR(reg_l2));
            return -ENODEV;
        }

        rc = regulator_set_voltage(reg_l2, 1200000, 1200000);
        if (rc) {
            pr_err("set_voltage l2 failed, rc=%d\n", rc);
            return -EINVAL;
        }

        reg_ext_3p3v = regulator_get(&msm_lvds_device.dev,
            "lvds_vccs_3p3v");
        if (IS_ERR_OR_NULL(reg_ext_3p3v)) {
            pr_err("could not get reg_ext_3p3v, rc = %ld\n",
                PTR_ERR(reg_ext_3p3v));
            return -ENODEV;
        }

        gpio26 = PM8921_GPIO_PM_TO_SYS(26);
        rc = gpio_request(gpio26, "pwm_backlight_ctrl");
        if (rc) {
            pr_err("request gpio 26 failed, rc=%d\n", rc);
            return -ENODEV;
        }

        gpio36 = PM8921_GPIO_PM_TO_SYS(36); /* lcd1_pwr_en_n */
        rc = gpio_request(gpio36, "lcd1_pwr_en_n");
        if (rc) {
            pr_err("request gpio 36 failed, rc=%d\n", rc);
            return -ENODEV;
        }

        mpp3 = PM8921_MPP_PM_TO_SYS(3);
        rc = gpio_request(mpp3, "backlight_en");
        if (rc) {
            pr_err("request mpp3 failed, rc=%d\n", rc);
            return -ENODEV;
        }

        lvds_power_on = true;
    }

    if (on) {
        rc = regulator_enable(reg_lvs7);
        if (rc) {
            pr_err("enable lvs7 failed, rc=%d\n", rc);
            return -ENODEV;
        }

        rc = regulator_set_optimum_mode(reg_l2, 100000);
        if (rc < 0) {
            pr_err("set_optimum_mode l2 failed, rc=%d\n", rc);
            return -EINVAL;
        }
        rc = regulator_enable(reg_l2);
        if (rc) {
            pr_err("enable l2 failed, rc=%d\n", rc);
            return -ENODEV;
        }

        rc = regulator_enable(reg_ext_3p3v);
        if (rc) {
            pr_err("enable reg_ext_3p3v failed, rc=%d\n", rc);
            return -ENODEV;
        }

        /* lcd1_pwr_en_n is active-low; backlight_en is active-high. */
        gpio_set_value_cansleep(gpio36, 0);
        gpio_set_value_cansleep(mpp3, 1);
        if (socinfo_get_pmic_model() == PMIC_MODEL_PM8917)
            gpio_set_value_cansleep(gpio26, 1);
    } else {
        if (socinfo_get_pmic_model() == PMIC_MODEL_PM8917)
            gpio_set_value_cansleep(gpio26, 0);
        gpio_set_value_cansleep(mpp3, 0);
        gpio_set_value_cansleep(gpio36, 1);

        rc = regulator_disable(reg_lvs7);
        if (rc) {
            pr_err("disable reg_lvs7 failed, rc=%d\n", rc);
            return -ENODEV;
        }
        rc = regulator_disable(reg_l2);
        if (rc) {
            pr_err("disable reg_l2 failed, rc=%d\n", rc);
            return -ENODEV;
        }
        rc = regulator_disable(reg_ext_3p3v);
        if (rc) {
            pr_err("disable reg_ext_3p3v failed, rc=%d\n", rc);
            return -ENODEV;
        }
    }

    return 0;
}
static int lvds_pixel_remap(void)
{
u32 ver = socinfo_get_version();
if (machine_is_apq8064_cdp() ||
machine_is_apq8064_liquid()) {
if ((SOCINFO_VERSION_MAJOR(ver) == 1) &&
(SOCINFO_VERSION_MINOR(ver) == 0))
return LVDS_PIXEL_MAP_PATTERN_1;
} else if (machine_is_mpq8064_dtv()) {
if ((SOCINFO_VERSION_MAJOR(ver) == 1) &&
(SOCINFO_VERSION_MINOR(ver) == 0))
return LVDS_PIXEL_MAP_PATTERN_2;
}
return 0;
}
/* Platform data for the LVDS interface driver. */
static struct lcdc_platform_data lvds_pdata = {
    .lcdc_power_save = lvds_panel_power,
    .lvds_pixel_remap = lvds_pixel_remap
};

/* Backlight PWM LPM channel number shared by several panels below. */
#define LPM_CHANNEL 2

/* Chimei WXGA LVDS panel. */
static int lvds_chimei_gpio[] = {LPM_CHANNEL};
static struct lvds_panel_platform_data lvds_chimei_pdata = {
    .gpio = lvds_chimei_gpio,
};
static struct platform_device lvds_chimei_panel_device = {
    .name = "lvds_chimei_wxga",
    .id = 0,
    .dev = {
        .platform_data = &lvds_chimei_pdata,
    }
};

/* FRC (frame-rate converter) FHD LVDS panel, driven via SX150x expander. */
#define FRC_GPIO_UPDATE	(SX150X_EXP4_GPIO_BASE + 8)
#define FRC_GPIO_RESET	(SX150X_EXP4_GPIO_BASE + 9)
#define FRC_GPIO_PWR	(SX150X_EXP4_GPIO_BASE + 10)

static int lvds_frc_gpio[] = {FRC_GPIO_UPDATE, FRC_GPIO_RESET, FRC_GPIO_PWR};
static struct lvds_panel_platform_data lvds_frc_pdata = {
    .gpio = lvds_frc_gpio,
};
static struct platform_device lvds_frc_panel_device = {
    .name = "lvds_frc_fhd",
    .id = 0,
    .dev = {
        .platform_data = &lvds_frc_pdata,
    }
};

/* Toshiba TC358764 DSI-to-LVDS bridge. */
static int dsi2lvds_gpio[2] = {
    LPM_CHANNEL,/* Backlight PWM-ID=0 for PMIC-GPIO#24 */
    0x1F08 /* DSI2LVDS Bridge GPIO Output, mask=0x1f, out=0x08 */
    };
static struct msm_panel_common_pdata mipi_dsi2lvds_pdata = {
    .gpio_num = dsi2lvds_gpio,
};
static struct platform_device mipi_dsi2lvds_bridge_device = {
    .name = "mipi_tc358764",
    .id = 0,
    .dev.platform_data = &mipi_dsi2lvds_pdata,
};

/* Toshiba MIPI DSI panel (MTP boards). */
static int toshiba_gpio[] = {LPM_CHANNEL};
static struct mipi_dsi_panel_platform_data toshiba_pdata = {
    .gpio = toshiba_gpio,
};
static struct platform_device mipi_dsi_toshiba_panel_device = {
    .name = "mipi_toshiba",
    .id = 0,
    .dev = {
        .platform_data = &toshiba_pdata,
    }
};
#endif /* */
/* DTV (HDMI) bus-scaling vectors: idle ... */
static struct msm_bus_vectors dtv_bus_init_vectors[] = {
    {
        .src = MSM_BUS_MASTER_MDP_PORT0,
        .dst = MSM_BUS_SLAVE_EBI_CH0,
        .ab = 0,
        .ib = 0,
    },
};

/* ... and active bandwidth.  WUXGA LGIT panels need the 2 Gbps ceiling. */
static struct msm_bus_vectors dtv_bus_def_vectors[] = {
    {
        .src = MSM_BUS_MASTER_MDP_PORT0,
        .dst = MSM_BUS_SLAVE_EBI_CH0,
#if defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_WUXGA_PT) ||\
    defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_WUXGA_INVERSE_PT)
        .ab = 2000000000,
        .ib = 2000000000,
#else
        .ab = 566092800 * 2,
        .ib = 707616000 * 2,
#endif
    },
};

/* Use-case table: index 0 = idle, index 1 = display active. */
static struct msm_bus_paths dtv_bus_scale_usecases[] = {
    {
        ARRAY_SIZE(dtv_bus_init_vectors),
        dtv_bus_init_vectors,
    },
    {
        ARRAY_SIZE(dtv_bus_def_vectors),
        dtv_bus_def_vectors,
    },
};

/* Positional initializers: .usecase, .num_usecases. */
static struct msm_bus_scale_pdata dtv_bus_scale_pdata = {
    dtv_bus_scale_usecases,
    ARRAY_SIZE(dtv_bus_scale_usecases),
    .name = "dtv",
};

/* Platform data registered for the "dtv" framebuffer device. */
static struct lcdc_platform_data dtv_pdata = {
    .bus_scale_table = &dtv_bus_scale_pdata,
    .lcdc_power_save = hdmi_panel_power,
};
/*
 * hdmi_panel_power - power callback for the "dtv" (HDMI) panel.
 * @on: non-zero to power up, zero to power down.
 *
 * NOTE(review): hdmi_cec_power() is only called when hdmi_core_power()
 * returns non-zero (i.e. FAILED), and its result replaces rc.  This
 * matches the stock MSM board code, but the condition looks inverted --
 * confirm whether `if (!rc)` was intended before changing it.
 */
static int hdmi_panel_power(int on)
{
    int rc;

    pr_debug("%s: HDMI Core: %s\n", __func__, (on ? "ON" : "OFF"));
    rc = hdmi_core_power(on, 1);
    if (rc)
        rc = hdmi_cec_power(on);
    pr_debug("%s: HDMI Core: %s Success\n", __func__, (on ? "ON" : "OFF"));
    return rc;
}
/*
 * hdmi_enable_5v - switch the HDMI 5V rail (MVS) on or off.
 * @on: non-zero to enable, zero to disable.
 *
 * No-op unless CONFIG_HDMI_MVS is set.  The regulator handle is cached
 * in a function-local static and the last state is remembered so
 * repeated calls with the same value are ignored.
 */
static int hdmi_enable_5v(int on)
{
#ifdef CONFIG_HDMI_MVS
    /* TBD: PM8921 regulator instead of 8901 */
    static struct regulator *reg_8921_hdmi_mvs;	/* HDMI_5V */
    static int prev_on;
    int rc;

    if (on == prev_on)
        return 0;

    if (!reg_8921_hdmi_mvs) {
        reg_8921_hdmi_mvs = regulator_get(&hdmi_msm_device.dev,
            "hdmi_mvs");
        if (IS_ERR(reg_8921_hdmi_mvs)) {
            pr_err("could not get reg_8921_hdmi_mvs, rc = %ld\n",
                PTR_ERR(reg_8921_hdmi_mvs));
            reg_8921_hdmi_mvs = NULL;
            return -ENODEV;
        }
    }

    if (on) {
        rc = regulator_enable(reg_8921_hdmi_mvs);
        if (rc) {
            pr_err("'%s' regulator enable failed, rc=%d\n",
                "8921_hdmi_mvs", rc);
            return rc;
        }
        pr_debug("%s(on): success\n", __func__);
    } else {
        /* Disable failure is only warned about, not returned. */
        rc = regulator_disable(reg_8921_hdmi_mvs);
        if (rc)
            pr_warning("'%s' regulator disable failed, rc=%d\n",
                "8921_hdmi_mvs", rc);
        pr_debug("%s(off): success\n", __func__);
    }

    prev_on = on;
#endif
    return 0;
}
/*
 * hdmi_core_power - enable/disable the HDMI core supplies.
 * @on:   non-zero to power up, zero to power down.
 * @show: unused in this implementation (kept for the callback signature).
 *
 * Supplies: LVS7 (hdmi_vdda) always; plus the external 3.3 V mux supply
 * and S4 (1.8 V level translator) when CONFIG_HDMI_MVS is set.  Handles
 * are cached in function-local statics; repeated calls with the same
 * state are ignored.
 *
 * NOTE(review): in the MVS power-off path LVS7 is never disabled (only
 * ext_3p3v and s4 are), and the non-MVS disable error message names
 * "reg_8921_l23" although the regulator is LVS7 -- confirm both against
 * the board's power tree before relying on the logs.
 */
static int hdmi_core_power(int on, int show)
{
#ifdef CONFIG_HDMI_MVS
    static struct regulator *reg_8921_lvs7, *reg_8921_s4, *reg_ext_3p3v;
#else
    static struct regulator *reg_8921_lvs7;
#endif
    static int prev_on;
    int rc;

    if (on == prev_on)
        return 0;

#ifdef CONFIG_HDMI_MVS
    /* TBD: PM8921 regulator instead of 8901 */
    if (!reg_ext_3p3v) {
        reg_ext_3p3v = regulator_get(&hdmi_msm_device.dev,
            "hdmi_mux_vdd");
        if (IS_ERR_OR_NULL(reg_ext_3p3v)) {
            pr_err("could not get reg_ext_3p3v, rc = %ld\n",
                PTR_ERR(reg_ext_3p3v));
            reg_ext_3p3v = NULL;
            return -ENODEV;
        }
    }
#endif

    if (!reg_8921_lvs7) {
        reg_8921_lvs7 = regulator_get(&hdmi_msm_device.dev,
            "hdmi_vdda");
        if (IS_ERR(reg_8921_lvs7)) {
            pr_err("could not get reg_8921_lvs7, rc = %ld\n",
                PTR_ERR(reg_8921_lvs7));
            reg_8921_lvs7 = NULL;
            return -ENODEV;
        }
    }

#ifdef CONFIG_HDMI_MVS
    if (!reg_8921_s4) {
        reg_8921_s4 = regulator_get(&hdmi_msm_device.dev,
            "hdmi_lvl_tsl");
        if (IS_ERR(reg_8921_s4)) {
            pr_err("could not get reg_8921_s4, rc = %ld\n",
                PTR_ERR(reg_8921_s4));
            reg_8921_s4 = NULL;
            return -ENODEV;
        }
        rc = regulator_set_voltage(reg_8921_s4, 1800000, 1800000);
        if (rc) {
            pr_err("set_voltage failed for 8921_s4, rc=%d\n", rc);
            return -EINVAL;
        }
    }
#endif

    if (on) {
        /*
         * Configure 3P3V_BOOST_EN as GPIO, 8mA drive strength,
         * pull none, out-high
         */
#ifdef CONFIG_HDMI_MVS
        rc = regulator_set_optimum_mode(reg_ext_3p3v, 290000);
        if (rc < 0) {
            pr_err("set_optimum_mode ext_3p3v failed, rc=%d\n", rc);
            return -EINVAL;
        }
        rc = regulator_enable(reg_ext_3p3v);
        if (rc) {
            pr_err("enable reg_ext_3p3v failed, rc=%d\n", rc);
            return rc;
        }
#endif
        rc = regulator_enable(reg_8921_lvs7);
        if (rc) {
            pr_err("'%s' regulator enable failed, rc=%d\n",
                "hdmi_vdda", rc);
            goto error1;
        }
#ifdef CONFIG_HDMI_MVS
        rc = regulator_enable(reg_8921_s4);
        if (rc) {
            pr_err("'%s' regulator enable failed, rc=%d\n",
                "hdmi_lvl_tsl", rc);
            goto error2;
        }
        pr_debug("%s(on): success\n", __func__);
#endif
    } else {
#ifdef CONFIG_HDMI_MVS
        rc = regulator_disable(reg_ext_3p3v);
        if (rc) {
            pr_err("disable reg_ext_3p3v failed, rc=%d\n", rc);
            return -ENODEV;
        }
#else
        rc = regulator_disable(reg_8921_lvs7);
        if (rc) {
            pr_err("disable reg_8921_l23 failed, rc=%d\n", rc);
            return -ENODEV;
        }
#endif
#ifdef CONFIG_HDMI_MVS
        rc = regulator_disable(reg_8921_s4);
        if (rc) {
            pr_err("disable reg_8921_s4 failed, rc=%d\n", rc);
            return -ENODEV;
        }
#endif
        pr_debug("%s(off): success\n", __func__);
    }

    prev_on = on;

    return 0;

    /* Unwind partially-enabled supplies on the power-up path. */
#ifdef CONFIG_HDMI_MVS
error2:
    regulator_disable(reg_8921_lvs7);
error1:
    regulator_disable(reg_ext_3p3v);
    return rc;
#else
error1:
    return rc;
#endif
}
/*
 * hdmi_gpio_config - claim or release the HDMI DDC/HPD GPIOs.
 * @on: non-zero to request, zero to free.
 *
 * On MVS/Liquid boards additionally drives PMIC GPIO 14 to select the
 * HDMI mux (0 = HDMI on request, 1 = deselect on release).  Partially
 * requested GPIOs are freed in reverse order on failure.
 */
static int hdmi_gpio_config(int on)
{
    int rc = 0;
    static int prev_on;
#ifdef CONFIG_HDMI_MVS
    int pmic_gpio14 = PM8921_GPIO_PM_TO_SYS(14);
#endif

    if (on == prev_on)
        return 0;

    if (on) {
        rc = gpio_request(HDMI_DDC_CLK_GPIO, "HDMI_DDC_CLK");
        if (rc) {
            pr_err("'%s'(%d) gpio_request failed, rc=%d\n",
                "HDMI_DDC_CLK", HDMI_DDC_CLK_GPIO, rc);
            goto error1;
        }
        rc = gpio_request(HDMI_DDC_DATA_GPIO, "HDMI_DDC_DATA");
        if (rc) {
            pr_err("'%s'(%d) gpio_request failed, rc=%d\n",
                "HDMI_DDC_DATA", HDMI_DDC_DATA_GPIO, rc);
            goto error2;
        }
        rc = gpio_request(HDMI_HPD_GPIO, "HDMI_HPD");
        if (rc) {
            pr_err("'%s'(%d) gpio_request failed, rc=%d\n",
                "HDMI_HPD", HDMI_HPD_GPIO, rc);
            goto error3;
        }
#ifdef CONFIG_HDMI_MVS
        if (machine_is_apq8064_liquid()) {
            rc = gpio_request(pmic_gpio14, "PMIC_HDMI_MUX_SEL");
            if (rc) {
                pr_err("'%s'(%d) gpio_request failed, rc=%d\n",
                    "PMIC_HDMI_MUX_SEL", 14, rc);
                goto error4;
            }
            gpio_set_value_cansleep(pmic_gpio14, 0);
        }
        pr_debug("%s(on): success\n", __func__);
#endif
    } else {
        gpio_free(HDMI_DDC_CLK_GPIO);
        gpio_free(HDMI_DDC_DATA_GPIO);
        gpio_free(HDMI_HPD_GPIO);
#ifdef CONFIG_HDMI_MVS
        if (machine_is_apq8064_liquid()) {
            gpio_set_value_cansleep(pmic_gpio14, 1);
            gpio_free(pmic_gpio14);
        }
#endif
        pr_debug("%s(off): success\n", __func__);
    }

    prev_on = on;

    return 0;

    /* Release in reverse acquisition order on failure. */
#ifdef CONFIG_HDMI_MVS
error4:
    gpio_free(HDMI_HPD_GPIO);
#endif
error3:
    gpio_free(HDMI_DDC_DATA_GPIO);
error2:
    gpio_free(HDMI_DDC_CLK_GPIO);
error1:
    return rc;
}
/*
 * hdmi_cec_power - claim or release the HDMI CEC GPIO.
 * @on: non-zero to request the line, zero to free it.
 *
 * The last applied state is cached so repeated calls with the same
 * value are ignored.  Returns 0 on success or the gpio_request() error.
 */
static int hdmi_cec_power(int on)
{
    static int last_state;
    int ret;

    if (on == last_state)
        return 0;

    if (!on) {
        gpio_free(HDMI_CEC_VAR_GPIO);
        pr_debug("%s(off): success\n", __func__);
        last_state = on;
        return 0;
    }

    ret = gpio_request(HDMI_CEC_VAR_GPIO, "HDMI_CEC_VAR");
    if (ret) {
        pr_err("'%s'(%d) gpio_request failed, rc=%d\n",
            "HDMI_CEC_VAR", HDMI_CEC_VAR_GPIO, ret);
        return ret;
    }
    pr_debug("%s(on): success\n", __func__);

    last_state = on;
    return 0;
}
/* Backlight-chip setter exported by whichever backlight driver is built. */
#if defined (CONFIG_BACKLIGHT_LM3530)
extern void lm3530_lcd_backlight_set_level( int level);
#elif defined (CONFIG_BACKLIGHT_LM3533)
extern void lm3533_lcd_backlight_set_level( int level);
#elif defined (CONFIG_BACKLIGHT_LM3630)
extern void lm3630_lcd_backlight_set_level( int level);
#elif defined (CONFIG_BACKLIGHT_LM3532)
extern void lm3532_lcd_backlight_set_level( int level);
#elif defined (CONFIG_BACKLIGHT_I2C_BL)
extern void i2c_bl_lcd_backlight_set_level_export( int level);
#endif /* CONFIG_BACKLIGHT_LMXXXX */

/*
 * mipi_lgit_backlight_level - panel-driver backlight callback.
 * @level: requested brightness level.
 * @max, @min: limits supplied by the caller; unused here (the backlight
 *             chip driver applies its own limits).  Always returns 0.
 */
static int mipi_lgit_backlight_level(int level, int max, int min)
{
#if defined (CONFIG_BACKLIGHT_LM3530)
    lm3530_lcd_backlight_set_level(level);
#elif defined (CONFIG_BACKLIGHT_LM3533)
    lm3533_lcd_backlight_set_level(level);
#elif defined (CONFIG_BACKLIGHT_LM3630)
    lm3630_lcd_backlight_set_level(level);
#elif defined (CONFIG_BACKLIGHT_LM3532)
    lm3532_lcd_backlight_set_level(level);
#elif defined (CONFIG_BACKLIGHT_I2C_BL)
    i2c_bl_lcd_backlight_set_level_export(level);
#endif
    return 0;
}
/* */
/* Standard MIPI DCS command payloads: {command, parameter}. */
static char exit_sleep_mode [2] = {0x11,0x00};
static char display_on [2] = {0x29,0x00};
static char display_off [2] = {0x28,0x00};
static char enter_sleep_mode [2] = {0x10,0x00};
static char set_address_mode [2] = {0x36,0x40};

/* DCS 0x3A pixel-format parameter values. */
#define PF_16BIT 0x50
#define PF_18BIT 0x60
#define PF_24BIT 0x70
static char pixel_format [2] = {0x3A, PF_24BIT};

#if defined(CONFIG_LGE_BACKLIGHT_CABC)
/* LD083WU1 CABC (content-adaptive backlight) control commands. */
/* Enable CABC block */
static char cabc_enable_LD083WU1 [2] = {0xb9, 0x01};
/* Disable CABC block */
static char cabc_disable_LD083WU1 [2] = {0xb9, 0x00};
/* Set PWM duty */
static char set_pwm_duty_LD083WU1 [2] = {0xbb, 0xff};
/* Enable CE algorithm */
//static char ce_enable_LD083WU1 [2] = {0xb8, 0x01};
/* Disable CE algorithm */
//static char ce_disable_LD083WU1 [2] = {0xb8, 0x00};
#endif
#ifdef CONFIG_SUPPORT_EVB2_BD
#if defined(CONFIG_LGE_BACKLIGHT_CABC)
/*
 * LD089WU1 (EVB2) CABC register programming.  Each entry is a two-byte
 * generic write {register, value}; bit-field meanings are spelled out in
 * the per-register comments below.
 */
/* Disable bypass PWMI, Enable PWMO, Pol=High, 3 frame mask, Enable internal freq. gen. */
//static char cabc_reg0_LD089WU1 [2] = {0xe0, (1<<7)|(0<<6)|(1<<0)};
/* Disable bypass PWMI, Enable PWMO, Pol=High, 2 frame mask, Enable internal freq. gen. */
static char cabc_reg0_LD089WU1 [2] = {0xe0, (0<<7)|(0<<6)|(1<<5)|(2<<1)|(0<<0)};
/* PWMO freq. equal to internal gen. freq., PWM freq = 1.2KHz */
static char cabc_reg1_LD089WU1 [2] = {0xe1, (1<<7)|(1<<6)|(0<<4)|((20>>8)<<0)};
/* prd_divider[7:0]:20*prd_sel:1.2KHz */
static char cabc_reg2_LD089WU1 [2] = {0xe2, (20<<0)};
/* cabc_dither_enable:1: Enable CABC dither (for smooth), Modify_rgb:1:Modify RGB value according to PWM value */
static char cabc_reg4_LD089WU1 [2] = {0xe4, (1<<7)|(1<<6)};
/* Abrupt_threshold: 0:disable this function */
static char cabc_reg5_LD089WU1 [2] = {0xe5, 0};
/* Adjust_frame_rate:7, Adjust_step:7 */
static char cabc_reg6_LD089WU1 [2] = {0xe6, (7<<4)|(7<<0)};
/* Pwm_min:0x88 */
static char cabc_reg7_LD089WU1 [2] = {0xe7, 0x88};
/* Pwm_ref:0: by Pwm_set, en_inv_pwmo:0:PWMO */
static char cabc_reg8_LD089WU1 [2] = {0xe8, (0<<7)|(0<<4)};
/* Pwm_Set:45%, Maximum duty of PWMO when pwm_ref =0 */
static char cabc_reg9_LD089WU1 [2] = {0xe9, (unsigned char)(45*255/100)};
/* Average_ratio:0 : maximum Luminance, Allow_distort:255-(15)*2 */
static char cabc_rega_LD089WU1 [2] = {0xea, (0<<4)|(15<<0)};
/* Bypass_pwmi:0:disable, En_bfrm_maxpwm:0:disable */
static char cabc_regb_LD089WU1 [2] = {0xeb, (0<<6)|(0<<5)};
/* CABC_En:1:Enable */
static char cabc_reg0e_LD089WU1 [2] = {0x0e, (1<<3)};
/* Register-page selection commands (0xf3). */
/* Enter Page 0 access mode */
static char change_page0_mode_LD089WU1 [2] = {0xf3, 0xa0};
/* Enter Page 6 access mode */
static char change_page6_mode_LD089WU1 [2] = {0xf3, 0xa6};
/* Enter Page 7 access mode */
static char change_page7_mode_LD089WU1 [2] = {0xf3, 0xa7};
#if 0
static char cabc_lut1_LD089WU1 [65] = {0x00,
    0x00, 0x04, 0x08, 0x0C, 0x10, 0x14, 0x18, 0x1C,
    0x20, 0x24, 0x28, 0x2C, 0x30, 0x34, 0x38, 0x3C,
    0x40, 0x44, 0x48, 0x4C, 0x50, 0x55, 0x59, 0x5D,
    0x61, 0x65, 0x69, 0x6D, 0x71, 0x75, 0x79, 0x7D,
    0x81, 0x85, 0x89, 0x8D, 0x91, 0x95, 0x99, 0x9D,
    0xA1, 0xA5, 0xAA, 0xAE, 0xB2, 0xB6, 0xBA, 0xBE,
    0xC2, 0xC6, 0xCA, 0xCE, 0xD2, 0xD6, 0xDA, 0xDE,
    0xE2, 0xE6, 0xEA, 0xEE, 0xF2, 0xF6, 0xFA, 0xFF};
#endif
/* CABC dimming LUT (leading 0x00 is the register address byte).
 * Currently a flat all-0xFF table, i.e. no LUT-based dimming; the
 * linear ramp variant above is kept under #if 0 for reference. */
static char cabc_lut1_LD089WU1 [] = {0x00,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
/* Enter DSI command mode */
static char change_dsi_cmd_mode_LD089WU1 [2] = {0x00, 0x00};
#endif
/* initialize device */
/* LD089WU1 (EVB2) init sequence, stage 1: address mode + pixel format.
 * dsi_cmd_desc fields: {dtype, last, vc, ack, wait(ms), dlen, payload}. */
static struct dsi_cmd_desc lgit_power_on_set_1_LD089WU1[] = {
    /* Display Initial Set */
    {DTYPE_DCS_WRITE1, 1, 0, 0, 20, sizeof(set_address_mode),set_address_mode},
    {DTYPE_DCS_WRITE1, 1, 0, 0, 20, sizeof(pixel_format),pixel_format},
};

/* Stage 2: exit sleep then display on. */
static struct dsi_cmd_desc lgit_power_on_set_2_LD089WU1[] = {
    {DTYPE_DCS_WRITE, 1, 0, 0, 0, sizeof(exit_sleep_mode), exit_sleep_mode},
    {DTYPE_DCS_WRITE, 1, 0, 0, 0, sizeof(display_on), display_on},
};

/* Normal power-off: display off, then enter sleep (with settle waits). */
static struct dsi_cmd_desc lgit_power_off_set_LD089WU1[] = {
    {DTYPE_DCS_WRITE, 1, 0, 0, 20, sizeof(display_off), display_off},
    {DTYPE_DCS_WRITE, 1, 0, 0, 100, sizeof(enter_sleep_mode), enter_sleep_mode},
};

/* Abbreviated shutdown: display off only. */
static struct dsi_cmd_desc lgit_shutdown_set_LD089WU1[] = {
    {DTYPE_DCS_WRITE, 1, 0, 0, 0, sizeof(display_off), display_off},
};

#if defined(CONFIG_LGE_BACKLIGHT_CABC)
/* Stage 3 (CABC): program the CABC registers on page 0, load the LUT on
 * pages 6 and 7, then return to DSI command mode.
 * NOTE(review): the final entry passes dlen 0 (not sizeof) for
 * change_dsi_cmd_mode_LD089WU1 -- confirm whether a zero-length generic
 * write is intentional here. */
static struct dsi_cmd_desc lgit_power_on_set_3_LD089WU1[] = {
    {DTYPE_GEN_WRITE2, 1, 0, 0, 20, sizeof(change_page0_mode_LD089WU1), change_page0_mode_LD089WU1},
    {DTYPE_GEN_WRITE2, 1, 0, 0, 0, sizeof(cabc_reg0_LD089WU1), cabc_reg0_LD089WU1},
    {DTYPE_GEN_WRITE2, 1, 0, 0, 0, sizeof(cabc_reg1_LD089WU1), cabc_reg1_LD089WU1},
    {DTYPE_GEN_WRITE2, 1, 0, 0, 0, sizeof(cabc_reg2_LD089WU1), cabc_reg2_LD089WU1},
    {DTYPE_GEN_WRITE2, 1, 0, 0, 0, sizeof(cabc_reg4_LD089WU1), cabc_reg4_LD089WU1},
    {DTYPE_GEN_WRITE2, 1, 0, 0, 0, sizeof(cabc_reg5_LD089WU1), cabc_reg5_LD089WU1},
    {DTYPE_GEN_WRITE2, 1, 0, 0, 0, sizeof(cabc_reg6_LD089WU1), cabc_reg6_LD089WU1},
    {DTYPE_GEN_WRITE2, 1, 0, 0, 0, sizeof(cabc_reg7_LD089WU1), cabc_reg7_LD089WU1},
    {DTYPE_GEN_WRITE2, 1, 0, 0, 0, sizeof(cabc_reg8_LD089WU1), cabc_reg8_LD089WU1},
    {DTYPE_GEN_WRITE2, 1, 0, 0, 0, sizeof(cabc_reg9_LD089WU1), cabc_reg9_LD089WU1},
    {DTYPE_GEN_WRITE2, 1, 0, 0, 0, sizeof(cabc_rega_LD089WU1), cabc_rega_LD089WU1},
    {DTYPE_GEN_WRITE2, 1, 0, 0, 0, sizeof(cabc_regb_LD089WU1), cabc_regb_LD089WU1},
    {DTYPE_GEN_WRITE2, 1, 0, 0, 0, sizeof(cabc_reg0e_LD089WU1), cabc_reg0e_LD089WU1},
    {DTYPE_GEN_WRITE2, 1, 0, 0, 0, sizeof(change_page6_mode_LD089WU1), change_page6_mode_LD089WU1},
    {DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(cabc_lut1_LD089WU1), cabc_lut1_LD089WU1},
    {DTYPE_GEN_WRITE2, 1, 0, 0, 0, sizeof(change_page7_mode_LD089WU1), change_page7_mode_LD089WU1},
    {DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(cabc_lut1_LD089WU1), cabc_lut1_LD089WU1},
    {DTYPE_GEN_WRITE, 1, 0, 0, 20, 0, change_dsi_cmd_mode_LD089WU1},
};
#endif

/* Panel platform data for LD089WU1; installed in apq8064_init_fb() when
 * the board revision is EVB2. */
static struct msm_panel_common_pdata mipi_lgit_pdata_LD089WU1 = {
    .backlight_level = mipi_lgit_backlight_level,
    .power_on_set_1 = lgit_power_on_set_1_LD089WU1,
    .power_on_set_size_1 = ARRAY_SIZE(lgit_power_on_set_1_LD089WU1),
    .power_on_set_2 = lgit_power_on_set_2_LD089WU1,
    .power_on_set_size_2 = ARRAY_SIZE(lgit_power_on_set_2_LD089WU1),
#if defined(CONFIG_LGE_BACKLIGHT_CABC)
    .power_on_set_3 = lgit_power_on_set_3_LD089WU1,
    .power_on_set_size_3 = ARRAY_SIZE(lgit_power_on_set_3_LD089WU1),
#endif
    .power_off_set_1 = lgit_power_off_set_LD089WU1,
    .power_off_set_size_1 = ARRAY_SIZE(lgit_power_off_set_LD089WU1),
    .power_off_set_2 = lgit_shutdown_set_LD089WU1,
    .power_off_set_size_2 = ARRAY_SIZE(lgit_shutdown_set_LD089WU1),
};
#endif
#endif
/* LD083WU1 init sequence, stage 1: address mode + pixel format. */
static struct dsi_cmd_desc lgit_power_on_set_1_LD083WU1[] = {
    /* Display Initial Set */
    {DTYPE_DCS_WRITE1, 1, 0, 0, 0, sizeof(set_address_mode),set_address_mode},
    {DTYPE_DCS_WRITE1, 1, 0, 0, 0, sizeof(pixel_format),pixel_format},
};

/* Stage 2: exit sleep (10 ms settle) then display on. */
static struct dsi_cmd_desc lgit_power_on_set_2_LD083WU1[] = {
    {DTYPE_DCS_WRITE, 1, 0, 0, 10, sizeof(exit_sleep_mode), exit_sleep_mode},
    {DTYPE_DCS_WRITE, 1, 0, 0, 0, sizeof(display_on), display_on},
};

#if defined(CONFIG_LGE_BACKLIGHT_CABC)
/* Stage 3 (CABC enabled): program PWM duty, then switch CABC on. */
static struct dsi_cmd_desc lgit_power_on_set_3_LD083WU1[] = {
    {DTYPE_DCS_WRITE1, 1, 0, 0, 0, sizeof(set_pwm_duty_LD083WU1),set_pwm_duty_LD083WU1},
    {DTYPE_DCS_WRITE1, 1, 0, 0, 0, sizeof(cabc_enable_LD083WU1),cabc_enable_LD083WU1},
    //{DTYPE_DCS_WRITE1, 1, 0, 0, 0, sizeof(ce_enable_LD083WU1),ce_enable_LD083WU1},
};
/* Stage 3 (CABC disabled): program PWM duty, then switch CABC off.
 * Fix: the disable entry previously took sizeof(cabc_enable_LD083WU1)
 * while sending the cabc_disable_LD083WU1 payload -- harmless today
 * (both arrays are 2 bytes) but fragile; reference the matching array. */
static struct dsi_cmd_desc lgit_power_on_set_3_LD083WU1_noCABC[] = {
    {DTYPE_DCS_WRITE1, 1, 0, 0, 0, sizeof(set_pwm_duty_LD083WU1),set_pwm_duty_LD083WU1},
    {DTYPE_DCS_WRITE1, 1, 0, 0, 0, sizeof(cabc_disable_LD083WU1),cabc_disable_LD083WU1},
};
#endif
/* LD083WU1 power-off: display off, then enter sleep. */
static struct dsi_cmd_desc lgit_power_off_set_LD083WU1[] = {
    {DTYPE_DCS_WRITE, 1, 0, 0, 0, sizeof(display_off), display_off},
    {DTYPE_DCS_WRITE, 1, 0, 0, 0, sizeof(enter_sleep_mode), enter_sleep_mode},
};

/* Abbreviated shutdown: display off only. */
static struct dsi_cmd_desc lgit_shutdown_set_LD083WU1[] = {
    {DTYPE_DCS_WRITE, 1, 0, 0, 0, sizeof(display_off), display_off},
};

/* Default LD083WU1 panel platform data (CABC on when configured). */
static struct msm_panel_common_pdata mipi_lgit_pdata_LD083WU1 = {
    .backlight_level = mipi_lgit_backlight_level,
    .power_on_set_1 = lgit_power_on_set_1_LD083WU1,
    .power_on_set_size_1 = ARRAY_SIZE(lgit_power_on_set_1_LD083WU1),
    .power_on_set_2 = lgit_power_on_set_2_LD083WU1,
    .power_on_set_size_2 = ARRAY_SIZE(lgit_power_on_set_2_LD083WU1),
#if defined(CONFIG_LGE_BACKLIGHT_CABC)
    .power_on_set_3 = lgit_power_on_set_3_LD083WU1,
    .power_on_set_size_3 = ARRAY_SIZE(lgit_power_on_set_3_LD083WU1),
    .power_on_set_3_noCABC = lgit_power_on_set_3_LD083WU1_noCABC,
    .power_on_set_size_3_noCABC = ARRAY_SIZE(lgit_power_on_set_3_LD083WU1_noCABC),
#endif
    .power_off_set_1 = lgit_power_off_set_LD083WU1,
    .power_off_set_size_1 = ARRAY_SIZE(lgit_power_off_set_LD083WU1),
    .power_off_set_2 = lgit_shutdown_set_LD083WU1,
    .power_off_set_size_2 = ARRAY_SIZE(lgit_shutdown_set_LD083WU1),
};

/* Variant that forces CABC off even when configured (used for early
 * board revisions -- see apq8064_init_fb()). */
static struct msm_panel_common_pdata mipi_lgit_pdata_LD083WU1_noCABC = {
    .backlight_level = mipi_lgit_backlight_level,
    .power_on_set_1 = lgit_power_on_set_1_LD083WU1,
    .power_on_set_size_1 = ARRAY_SIZE(lgit_power_on_set_1_LD083WU1),
    .power_on_set_2 = lgit_power_on_set_2_LD083WU1,
    .power_on_set_size_2 = ARRAY_SIZE(lgit_power_on_set_2_LD083WU1),
#if defined(CONFIG_LGE_BACKLIGHT_CABC)
    .power_on_set_3 = lgit_power_on_set_3_LD083WU1_noCABC,
    .power_on_set_size_3 = ARRAY_SIZE(lgit_power_on_set_3_LD083WU1_noCABC),
    .power_on_set_3_noCABC = lgit_power_on_set_3_LD083WU1_noCABC,
    .power_on_set_size_3_noCABC = ARRAY_SIZE(lgit_power_on_set_3_LD083WU1_noCABC),
#endif
    .power_off_set_1 = lgit_power_off_set_LD083WU1,
    .power_off_set_size_1 = ARRAY_SIZE(lgit_power_off_set_LD083WU1),
    .power_off_set_2 = lgit_shutdown_set_LD083WU1,
    .power_off_set_size_2 = ARRAY_SIZE(lgit_shutdown_set_LD083WU1),
};
/* LGIT DSI panel device; platform_data may be swapped to the LD089WU1
 * pdata in apq8064_init_fb() on EVB2 boards. */
static struct platform_device mipi_dsi_lgit_panel_device = {
    .name = "mipi_lgit",
    .id = 0,
    .dev = {
        .platform_data = &mipi_lgit_pdata_LD083WU1,
    }
};

#if defined(CONFIG_LGE_BACKLIGHT_CABC)
/* Same device with the CABC-off pdata (early board revisions). */
static struct platform_device mipi_dsi_lgit_panel_device_noCABC = {
    .name = "mipi_lgit",
    .id = 0,
    .dev = {
        .platform_data = &mipi_lgit_pdata_LD083WU1_noCABC,
    }
};
#endif
/* */
/* Panel devices registered for AWIFI-class boards (CABC-on variant). */
static struct platform_device *awifi_panel_devices[] __initdata = {
#if defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_WUXGA_PT) ||\
    defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_WUXGA_INVERSE_PT)
    &mipi_dsi_lgit_panel_device,
#endif
#ifdef CONFIG_LGE_KCAL
    &kcal_platrom_device,
#endif
};

#if defined(CONFIG_LGE_BACKLIGHT_CABC)
/* CABC-off variant, used for board revisions <= HW_REV_1_0. */
static struct platform_device *awifi_panel_devices_noCABC[] __initdata = {
#if defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_WUXGA_PT) ||\
    defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_WUXGA_INVERSE_PT)
    &mipi_dsi_lgit_panel_device_noCABC,
#endif
#ifdef CONFIG_LGE_KCAL
    &kcal_platrom_device,
#endif
};
#endif
/*
 * apq8064_init_fb - register all framebuffer/display devices for this
 * board.  Selects panel platform data and power-sequence delays by
 * board revision, then registers the MDP/LVDS/DSI/DTV fb devices.
 */
void __init apq8064_init_fb(void)
{
    hw_rev_type lge_board_rev;

    platform_device_register(&msm_fb_device);

#ifndef CONFIG_MACH_LGE
    platform_device_register(&lvds_chimei_panel_device);
#endif /* */

#ifdef CONFIG_FB_MSM_WRITEBACK_MSM_PANEL
    platform_device_register(&wfd_panel_device);
    platform_device_register(&wfd_device);
#endif /* CONFIG_FB_MSM_WRITEBACK_MSM_PANEL */

    /* */
    lge_board_rev = lge_get_board_revno();

#ifdef CONFIG_SUPPORT_EVB2_BD
    /* EVB2 boards carry the LD089WU1 panel: switch delays and pdata. */
    if (lge_board_rev == HW_REV_EVB2) {
        lcd_power_sequence_delay = &lcd_power_sequence_delay_LD089WU1;
        mipi_dsi_lgit_panel_device.dev.platform_data = &mipi_lgit_pdata_LD089WU1;
    }
#endif
    /* */
#if defined(CONFIG_LGE_BACKLIGHT_CABC)
// [altev][bsp display], [email protected], 20131112 Enable CABC, regardless of hw_rev {
#if defined(CONFIG_MACH_APQ8064_ALTEV) && defined(CONFIG_MACH_APQ8064_AWIFI)
    platform_add_devices(awifi_panel_devices,ARRAY_SIZE(awifi_panel_devices));
    pr_warn("(%s) : Enable CABC.\n", __func__);
    /* The pr_debug below only exists to reference the _noCABC array and
     * silence a "defined but not used" warning in this configuration. */
    pr_debug("%d\n", ARRAY_SIZE(awifi_panel_devices_noCABC)); // compile error!!!, to avoid warning "defined but not used"
#else /* CONFIG_MACH_APQ8064_AWIFI */
    /* CABC only on revisions newer than HW_REV_1_0. */
    if (lge_board_rev > HW_REV_1_0) {
        platform_add_devices(awifi_panel_devices,ARRAY_SIZE(awifi_panel_devices));
    } else {
        platform_add_devices(awifi_panel_devices_noCABC,ARRAY_SIZE(awifi_panel_devices_noCABC));
    }
#endif // CONFIG_MACH_APQ8064_ALTEV && CONFIG_MACH_APQ8064_AWIFI
// [altev][bsp display], [email protected], 20131112 Enable CABC, regardless of hw_rev }
#else
    platform_add_devices(awifi_panel_devices,ARRAY_SIZE(awifi_panel_devices));
#endif

#ifndef CONFIG_MACH_LGE
    /* Reference-board panels, selected by machine type. */
    if (machine_is_apq8064_liquid())
        platform_device_register(&mipi_dsi2lvds_bridge_device);
    if (machine_is_apq8064_mtp())
        platform_device_register(&mipi_dsi_toshiba_panel_device);
    if (machine_is_mpq8064_dtv())
        platform_device_register(&lvds_frc_panel_device);
#endif /* */

    msm_fb_register_device("mdp", &mdp_pdata);
#ifndef CONFIG_MACH_LGE
    msm_fb_register_device("lvds", &lvds_pdata);
#endif /* */
    msm_fb_register_device("mipi_dsi", &mipi_dsi_pdata);
    platform_device_register(&hdmi_msm_device);
    msm_fb_register_device("dtv", &dtv_pdata);
}
/**
* Set MDP clocks to high frequency to avoid DSI underflow
* when using high resolution 1200x1920 WUXGA panels
*/
#ifndef CONFIG_MACH_LGE
static void set_mdp_clocks_for_wuxga(void)
{
mdp_ui_vectors[0].ab = 2000000000;
mdp_ui_vectors[0].ib = 2000000000;
mdp_vga_vectors[0].ab = 2000000000;
mdp_vga_vectors[0].ib = 2000000000;
mdp_720p_vectors[0].ab = 2000000000;
mdp_720p_vectors[0].ib = 2000000000;
mdp_1080p_vectors[0].ab = 2000000000;
mdp_1080p_vectors[0].ib = 2000000000;
if (apq8064_hdmi_as_primary_selected()) {
dtv_bus_def_vectors[0].ab = 2000000000;
dtv_bus_def_vectors[0].ib = 2000000000;
}
}
/*
 * apq8064_set_display_params - record primary/external panel names and
 * external resolution chosen via boot parameters.
 * @prim_panel: primary panel name from the command line (may be empty).
 * @ext_panel:  external panel name from the command line (may be empty).
 * @resolution: external display resolution index.
 *
 * Side effects: fills msm_fb_pdata, sets hdmi_is_primary /
 * mhl_display_enabled flags, bumps MDP clocks for WUXGA when HDMI is
 * primary, and propagates the MHL flag into hdmi_msm_data.
 */
void __init apq8064_set_display_params(char *prim_panel, char *ext_panel,
        unsigned char resolution)
{
    /*
     * For certain MPQ boards, HDMI should be set as primary display
     * by default, with the flexibility to specify any other panel
     * as a primary panel through boot parameters.
     */
    if (machine_is_mpq8064_hrd() || machine_is_mpq8064_cdp()) {
        pr_debug("HDMI is the primary display by default for MPQ\n");
        if (!strnlen(prim_panel, PANEL_NAME_MAX_LEN))
            strlcpy(msm_fb_pdata.prim_panel_name, HDMI_PANEL_NAME,
                PANEL_NAME_MAX_LEN);
    }

    if (strnlen(prim_panel, PANEL_NAME_MAX_LEN)) {
        strlcpy(msm_fb_pdata.prim_panel_name, prim_panel,
            PANEL_NAME_MAX_LEN);
        pr_debug("msm_fb_pdata.prim_panel_name %s\n",
            msm_fb_pdata.prim_panel_name);

        if (!strncmp((char *)msm_fb_pdata.prim_panel_name,
            HDMI_PANEL_NAME, strnlen(HDMI_PANEL_NAME,
                PANEL_NAME_MAX_LEN))) {
            pr_debug("HDMI is the primary display by"
                " boot parameter\n");
            hdmi_is_primary = 1;
            set_mdp_clocks_for_wuxga();
        }
    }

    if (strnlen(ext_panel, PANEL_NAME_MAX_LEN)) {
        strlcpy(msm_fb_pdata.ext_panel_name, ext_panel,
            PANEL_NAME_MAX_LEN);
        pr_debug("msm_fb_pdata.ext_panel_name %s\n",
            msm_fb_pdata.ext_panel_name);

        if (!strncmp((char *)msm_fb_pdata.ext_panel_name,
            MHL_PANEL_NAME, strnlen(MHL_PANEL_NAME,
                PANEL_NAME_MAX_LEN))) {
            pr_debug("MHL is external display by boot parameter\n");
            mhl_display_enabled = 1;
        }
    }

    msm_fb_pdata.ext_resolution = resolution;
    hdmi_msm_data.is_mhl_enabled = mhl_display_enabled;
}
#endif /* */
/* Bitmask flags naming the machine variants an I2C device list targets. */
#define I2C_SURF 1
#define I2C_FFA (1 << 1)
#define I2C_RUMI (1 << 2)
#define I2C_SIM (1 << 3)
#define I2C_LIQUID (1 << 4)

/* One registry entry: a board-info array bound to a bus and machine mask. */
struct i2c_registry {
	u8 machs;			/* I2C_* mask of supported machines */
	int bus;			/* I2C adapter (GSBI QUP) bus id */
	struct i2c_board_info *info;	/* device descriptors to register */
	int len;			/* number of entries in @info */
};

#if defined(CONFIG_LGE_BACKLIGHT_CABC)
/* Bits OR'ed into max_current when CABC (content-adaptive backlight
 * control) is enabled; exact meaning depends on the controller's
 * register map -- see the per-chip data below. */
#define PWM_SIMPLE_EN 0xA0
#define PWM_BRIGHTNESS 0x20
#endif

/* Platform data consumed by the LM35xx backlight drivers. */
struct backlight_platform_data {
	void (*platform_init)(void);
	int gpio;			/* hardware-enable GPIO (PMIC GPIO) */
	unsigned int mode;
	int max_current;		/* full-scale current register value */
	int init_on_boot;
	int min_brightness;
	int max_brightness;
	int default_brightness;
	int factory_brightness;		/* brightness used in factory mode */
};
/*
 * Exactly one backlight controller configuration is compiled in, selected
 * by the CONFIG_BACKLIGHT_* option.  All variants share PMIC GPIO 24 as
 * the hardware-enable line and a 0x17 full-scale current setting
 * (CABC builds OR in the PWM-enable bits defined above).
 */
#if defined (CONFIG_BACKLIGHT_LM3530)
/* TI LM3530 configuration. */
static struct backlight_platform_data lm3530_data = {
	.gpio = PM8921_GPIO_PM_TO_SYS(24),
#if defined(CONFIG_LGE_BACKLIGHT_CABC)
	.max_current = 0x17 | PWM_BRIGHTNESS,
#else
	.max_current = 0x17,
#endif
	.min_brightness = 0x01,
	.max_brightness = 0x71,
};
#elif defined(CONFIG_BACKLIGHT_LM3533)
/* TI LM3533 configuration. */
static struct backlight_platform_data lm3533_data = {
	.gpio = PM8921_GPIO_PM_TO_SYS(24),
#if defined(CONFIG_LGE_BACKLIGHT_CABC)
	.max_current = 0x17 | PWM_SIMPLE_EN,
#else
	.max_current = 0x17,
#endif
	.min_brightness = 0x05,
	.max_brightness = 0xFF,
	.default_brightness = 0x9C,
	.factory_brightness = 0x78,
};
#elif defined(CONFIG_BACKLIGHT_LM3630)
/* TI LM3630 configuration. */
static struct backlight_platform_data lm3630_data = {
	.gpio = PM8921_GPIO_PM_TO_SYS(24),
#if defined(CONFIG_LGE_BACKLIGHT_CABC)
	.max_current = 0x17 | PWM_SIMPLE_EN,
#else
	.max_current = 0x17,
#endif
	.min_brightness = 0x05,
	.max_brightness = 0xFF,
	.default_brightness = 0x9C,
	.factory_brightness = 0x78,
};
#elif defined(CONFIG_BACKLIGHT_LM3532)
/* TI LM3532 configuration (dedicated lm3532 driver). */
static struct backlight_platform_data lm3532_data = {
	.gpio = PM8921_GPIO_PM_TO_SYS(24),
#if defined(CONFIG_LGE_BACKLIGHT_CABC)
	.max_current = 0x17 | PWM_SIMPLE_EN,
#else
	.max_current = 0x17,
#endif
	.min_brightness = 0x05,
	.max_brightness = 0xFF,
	.default_brightness = 0x9C,
	.factory_brightness = 0x78,
};
#elif defined(CONFIG_BACKLIGHT_I2C_BL)
/*
 * Generic "i2c_bl" driver configuration for the LM3532.
 * Maps each Android brightness level (index 0-255) to an LM3532
 * zone-target register value; table is tuned per panel.
 */
static char i2c_bl_mapped_lm3532_value[256] = {
	119,119,119,119,119,119,119,119,124,124,124,124,124,124,124,
	124,124,124,124,124,124,124,128,128,128,128,128,128,128,128,
	128,128,128,128,128,131,131,131,131,131,131,131,135,135,138,
	138,141,141,143,143,146,146,148,148,151,151,151,153,153,153,
	153,155,155,155,155,157,157,159,159,159,161,161,162,162,162,
	164,164,165,165,166,166,168,168,168,170,170,171,171,172,172,
	173,173,174,174,176,176,177,178,179,180,180,181,181,182,183,
	183,184,184,185,185,186,186,187,187,188,188,189,189,189,190,
	190,190,191,191,192,193,193,194,194,195,195,196,197,197,198,
	198,199,200,201,201,202,202,203,203,204,204,205,206,206,207,
	208,209,210,211,212,213,214,215,215,215,215,216,216,216,217,
	217,218,218,219,219,220,220,220,221,221,222,222,222,223,223,
	224,224,225,225,226,226,227,227,228,228,228,229,229,230,230,
	231,232,233,233,233,234,234,235,235,236,236,236,237,237,237,
	238,238,239,239,240,240,241,241,242,242,243,243,243,244,244,
	244,245,245,245,246,246,246,247,247,248,248,248,249,249,249,
	250,250,250,251,251,251,252,252,253,253,253,254,254,254,254,
	255,
};

/* Register writes issued when the backlight is powered up. */
static struct i2c_bl_cmd i2c_bl_init_lm3532_cmd[] = {
	{0x10, 0x00, 0xff, "ILED1, ILED2, and ILED3 is controlled by Control A PWM and Control A Brightness Registers"},
	{0x1d, 0x01, 0xff, "Enable LED A"},
#if defined(CONFIG_LGE_BACKLIGHT_CABC)
	{0x13, 0x06, 0xff, "Active PWM input is enabled in Zone 0, active high polarity, PWM2 is mapped to Control Bank A"},
	{0x16, 0x01, 0xff, "Control A Zone Target 0, Exponential Mapping, I2C Current Control"},
#else
	{0x16, 0x01, 0xff, "Control A Zone Target 0, Exponential Mapping, I2C Current Control"},
#endif // CABC apply
	{0x17, 0x13, 0xff, "Full-Scale Current (20.2mA) of BANK A"},
};

/* Register writes issued when the backlight is powered down. */
static struct i2c_bl_cmd i2c_bl_deinit_lm3532_cmd[] = {
	{0x1d, 0x00, 0xff, "Disable LED A"},
};

/* Registers read back by the driver's debug register dump. */
static struct i2c_bl_cmd i2c_bl_dump_lm3532_regs[] = {
	{0x10, 0x00, 0xff, "Output Configuration Register"},
	{0x11, 0x00, 0xff, "Startup/Shutdown Ramp Rate Register"},
	{0x12, 0x00, 0xff, "Run Time Ramp Rate Register"},
	{0x13, 0x00, 0xff, "Control A PWM Register"},
	{0x14, 0x00, 0xff, "Control B PWM Register"},
	{0x15, 0x00, 0xff, "Control C PWM Register"},
	{0x16, 0x00, 0xff, "Control A Brightness Configuration Register"},
	{0x18, 0x00, 0xff, "Control B Brightness Configuration Register"},
	{0x1a, 0x00, 0xff, "Control C Brightness Configuration Register"},
	{0x17, 0x00, 0xff, "Control A Full-Scale Current Registers"},
	{0x19, 0x00, 0xff, "Control B Full-Scale Current Registers"},
	{0x1b, 0x00, 0xff, "Control C Full-Scale Current Registers"},
	{0x1c, 0x00, 0xff, "Feedback Enable Register"},
	{0x1d, 0x00, 0xff, "Control Enable Register"},
	{0x70, 0x00, 0xff, "Control A Zone Target Register 0 maps directly to Zone 0"},
	{0x75, 0x00, 0xff, "Control B Zone Target Register 0 maps directly to Zone 0"},
	{0x7a, 0x00, 0xff, "Control C Zone Target Register 0 maps directly to Zone 0"},
};

/* Brightness is both set and read through zone-target register 0x70. */
static struct i2c_bl_cmd i2c_bl_set_get_brightness_lm3532_cmds[] = {
	{0x70, 0x00, 0xff, "Set/Get brightness"},
};

/* Platform data handed to the generic i2c_bl driver for the LM3532. */
static struct i2c_bl_platform_data lm3532_i2c_bl_data = {
	.gpio = PM8921_GPIO_PM_TO_SYS(24),
	.i2c_addr = 0x38,

	.min_brightness = 0x05,
	.max_brightness = 0xFF,
	.default_brightness = 0x9C,
	.factory_brightness = 0x78,

	.init_cmds = i2c_bl_init_lm3532_cmd,
	.init_cmds_size = ARRAY_SIZE(i2c_bl_init_lm3532_cmd),

	.deinit_cmds = i2c_bl_deinit_lm3532_cmd,
	.deinit_cmds_size = ARRAY_SIZE(i2c_bl_deinit_lm3532_cmd),

	.dump_regs = i2c_bl_dump_lm3532_regs,
	.dump_regs_size = ARRAY_SIZE(i2c_bl_dump_lm3532_regs),

	.set_brightness_cmds = i2c_bl_set_get_brightness_lm3532_cmds,
	.set_brightness_cmds_size = ARRAY_SIZE(i2c_bl_set_get_brightness_lm3532_cmds),

	.get_brightness_cmds = i2c_bl_set_get_brightness_lm3532_cmds,
	.get_brightness_cmds_size = ARRAY_SIZE(i2c_bl_set_get_brightness_lm3532_cmds),

	.blmap = i2c_bl_mapped_lm3532_value,
	.blmap_size = ARRAY_SIZE(i2c_bl_mapped_lm3532_value),
};
#ifdef CONFIG_SUPPORT_EVB2_BD
/*
 * TI LP8556 configuration, used instead of the LM3532 on EVB2 boards
 * (selected at runtime in register_i2c_backlight_devices()).
 * Brightness map: Android level (0-255) -> LP8556 brightness register.
 * NOTE: several of the original end-of-row index comments were wrong
 * (199/204/219/244); corrected below -- rows hold 15 entries each.
 */
static char i2c_bl_mapped_lp8556_value[256] = {
	3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, // 14
	4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, // 29
	5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, // 44
	7, 8, 8, 8, 9, 9, 9, 9, 9, 10, 10, 10, 11, 11, 11, // 59
	12, 12, 12, 12, 13, 13, 13, 13, 14, 14, 15, 15, 15, 16, 16, // 74
	17, 17, 17, 18, 18, 18, 19, 19, 20, 21, 22, 22, 23, 24, 24, // 89
	25, 25, 26, 26, 27, 28, 28, 29, 29, 30, 30, 31, 31, 32, 32, // 104
	33, 34, 35, 35, 36, 36, 37, 38, 39, 39, 40, 41, 41, 42, 43, // 119
	44, 45, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, // 134
	58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, // 149
	73, 74, 75, 76, 76, 77, 78, 80, 81, 82, 83, 85, 86, 87, 88, // 164
	89, 90, 91, 93, 95, 96, 97, 99,100,102,103,104,106,107,108, // 179
	109,110,112,114,115,117,119,121,123,125,127,128,129,130,132, // 194
	133,135,136,138,139,140,142,144,146,148,150,151,153,154,156, // 209
	157,158,159,161,163,164,165,167,168,170,173,175,177,180,184, // 224
	186,188,191,194,197,199,201,203,205,207,209,211,213,215,217, // 239
	219,221,223,225,227,228,230,232,235,238,240,243,246,249,252, // 254
	255
};

/* Register writes issued when the backlight is powered up. */
static struct i2c_bl_cmd i2c_bl_init_lp8556_cmd[] = {
#if defined(CONFIG_LGE_BACKLIGHT_CABC) && 0 /* Always disable CABC for 8.9" panel */
	{0x01, 0x01|(0x01<<1), 0xff, "Device enable, PWM input and Brightness register (combined before shaper block)"},
#else
	{0x01, 0x01|(0x02<<1), 0xff,"Device enable, Brightness register only"},
#endif
	{0x16, 0x1f, 0xff, "LED string enable: 0 to 4"},
	{0xa0, (((int)(17.5/20*4095)))&0xff, 0xff, "Current = 17.499"},
	{0xa1, (0x0<<7)|(0x03<<4)|(((int)(17.5/20*4095))>>8), 0xff, "Current_Max = 20mA"},
	{0xa2, 0x20, 0xff, "ISET_EN=0, BOOST_FSET_EN=0, UVLO_EN=1"},
	{0x9e, 0x20, 0x20, "VBOOST_RANGE=0"},
	{0xa5, (2<<4)|(0x08<<0), 0x7f,"PS_MODE[2:0] = 1,5 Phase, 5 drivers, PWM_FREQ=0x0F(38,464Hz), 0x08(15,626Hz)"},
};

/* Register writes issued when the backlight is powered down. */
static struct i2c_bl_cmd i2c_bl_deinit_lp8556_cmd[] = {
	{0x01, 0x00, 0xff, "Device enable, Brightness register only"},
};

/* Registers read back by the driver's debug register dump. */
static struct i2c_bl_cmd i2c_bl_dump_lp8556_regs[] = {
	{0x00, 0x00, 0xff, "Brightness Control register"},
	{0x01, 0x00, 0xff, "Device Control register"},
	{0x02, 0x00, 0xff, "Fault register"},
	{0x03, 0x00, 0xff, "Identification register"},
	{0x04, 0x00, 0xff, "Direct Control register"},
	{0x16, 0x00, 0xff, "Temp LSB register"},
	{0x98, 0x00, 0xff, "CFG98 register"},
	{0x9e, 0x00, 0xff, "CFG9E register"},
	{0xa0, 0x00, 0xff, "CFG0 register"},
	{0xa1, 0x00, 0xff, "CFG1 register"},
	{0xa2, 0x00, 0xff, "CFG2 register"},
	{0xa3, 0x00, 0xff, "CFG3 register"},
	{0xa4, 0x00, 0xff, "CFG4 register"},
	{0xa5, 0x00, 0xff, "CFG5 register"},
	{0xa6, 0x00, 0xff, "CFG6 register"},
	{0xa7, 0x00, 0xff, "CFG7 register"},
	{0xa9, 0x00, 0xff, "CFG9 register"},
	{0xaa, 0x00, 0xff, "CFGA register"},
	{0xae, 0x00, 0xff, "CFGE register"},
	{0xaf, 0x00, 0xff, "CFGF register"},
};

/* Brightness is both set and read through register 0x00. */
static struct i2c_bl_cmd i2c_bl_set_get_brightness_lp8556_cmds[] = {
	{0x00, 0x00, 0xff, "Brightness Control register"},
};

/* Platform data handed to the generic i2c_bl driver for the LP8556. */
static struct i2c_bl_platform_data lp8556_i2c_bl_data = {
	.gpio = PM8921_GPIO_PM_TO_SYS(24),
	.i2c_addr = 0x2c,

	.min_brightness = 0x05,
	.max_brightness = 0xFF,
	.default_brightness = 0x9C,
	.factory_brightness = 0x78,

	.init_cmds = i2c_bl_init_lp8556_cmd,
	.init_cmds_size = ARRAY_SIZE(i2c_bl_init_lp8556_cmd),

	.deinit_cmds = i2c_bl_deinit_lp8556_cmd,
	.deinit_cmds_size = ARRAY_SIZE(i2c_bl_deinit_lp8556_cmd),

	.dump_regs = i2c_bl_dump_lp8556_regs,
	.dump_regs_size = ARRAY_SIZE(i2c_bl_dump_lp8556_regs),

	.set_brightness_cmds = i2c_bl_set_get_brightness_lp8556_cmds,
	.set_brightness_cmds_size = ARRAY_SIZE(i2c_bl_set_get_brightness_lp8556_cmds),

	.get_brightness_cmds = i2c_bl_set_get_brightness_lp8556_cmds,
	.get_brightness_cmds_size = ARRAY_SIZE(i2c_bl_set_get_brightness_lp8556_cmds),

	.blmap = i2c_bl_mapped_lp8556_value,
	.blmap_size = ARRAY_SIZE(i2c_bl_mapped_lp8556_value),
};
#endif /* CONFIG_SUPPORT_EVB2_BD */
#endif /* backlight controller selection */
/*
 * Board-info entry for the compiled-in backlight controller.  All chips
 * respond at I2C address 0x38; the EVB2/LP8556 case is patched in at
 * runtime by register_i2c_backlight_devices().
 */
static struct i2c_board_info msm_i2c_backlight_info[] = {
#if defined(CONFIG_BACKLIGHT_LM3530)
	{ I2C_BOARD_INFO("lm3530", 0x38), .platform_data = &lm3530_data, },
#elif defined(CONFIG_BACKLIGHT_LM3533)
	{ I2C_BOARD_INFO("lm3533", 0x38), .platform_data = &lm3533_data, },
#elif defined(CONFIG_BACKLIGHT_LM3630)
	{ I2C_BOARD_INFO("lm3630", 0x38), .platform_data = &lm3630_data, },
#elif defined(CONFIG_BACKLIGHT_LM3532)
	{ I2C_BOARD_INFO("lm3532", 0x38), .platform_data = &lm3532_data, },
#elif defined(CONFIG_BACKLIGHT_I2C_BL)
	{ I2C_BOARD_INFO("i2c_bl", 0x38), .platform_data = &lm3532_i2c_bl_data, },
#endif
};

/* The backlight lives on GSBI1 and is present on every machine variant. */
static struct i2c_registry apq8064_i2c_backlight_device[] __initdata = {
	{
		I2C_SURF | I2C_FFA | I2C_RUMI | I2C_SIM | I2C_LIQUID,
		APQ_8064_GSBI1_QUP_I2C_BUS_ID,
		msm_i2c_backlight_info,
		ARRAY_SIZE(msm_i2c_backlight_info),
	},
};
/*
 * register_i2c_backlight_devices() - install the backlight I2C device(s).
 *
 * Builds the machine bitmask for the running board, optionally swaps the
 * generic "i2c_bl" entry for the LP8556 controller on EVB2 hardware, then
 * registers every matching registry entry with the I2C core.
 */
void __init register_i2c_backlight_devices(void)
{
	u8 mach_mask = 0;
	int i;

	/* Build the matching 'supported_machs' bitmask */
	if (machine_is_apq8064_cdp())
		mach_mask = I2C_SURF;
	else if (machine_is_apq8064_mtp())
		mach_mask = I2C_FFA;
	else if (machine_is_apq8064_liquid())
		mach_mask = I2C_LIQUID;
	else if (machine_is_apq8064_rumi3())
		mach_mask = I2C_RUMI;
	else if (machine_is_apq8064_sim())
		mach_mask = I2C_SIM;
	else
		pr_err("unmatched machine ID in register_i2c_devices\n");

#ifdef CONFIG_SUPPORT_EVB2_BD
#if defined(CONFIG_BACKLIGHT_I2C_BL)
	if (lge_get_board_revno() == HW_REV_EVB2) {
		struct i2c_board_info *board_info;

		/*
		 * EVB2 boards carry an LP8556 instead of the LM3532: patch
		 * the "i2c_bl" board-info entry in place.  BUGFIX: the
		 * original while loop never advanced board_info on a name
		 * mismatch and would spin forever if the first entry was
		 * not "i2c_bl"; use an explicit for loop instead.
		 */
		for (board_info = msm_i2c_backlight_info;
		     board_info < &msm_i2c_backlight_info[ARRAY_SIZE(msm_i2c_backlight_info)];
		     ++board_info) {
			if (strcmp(board_info->type, "i2c_bl") == 0) {
				board_info->addr = lp8556_i2c_bl_data.i2c_addr;
				board_info->platform_data = &lp8556_i2c_bl_data;
				break;
			}
		}
	}
#endif
#endif

	/* Run the array and install devices as appropriate */
	for (i = 0; i < ARRAY_SIZE(apq8064_i2c_backlight_device); ++i) {
		if (apq8064_i2c_backlight_device[i].machs & mach_mask)
			i2c_register_board_info(apq8064_i2c_backlight_device[i].bus,
					apq8064_i2c_backlight_device[i].info,
					apq8064_i2c_backlight_device[i].len);
	}
}
| gpl-2.0 |
p500-ics-cm9/caf-msm-kernel | arch/arm/mach-vexpress/hotplug.c | 2521 | /*
* linux/arch/arm/mach-realview/hotplug.c
*
* Copyright (C) 2002 ARM Ltd.
* All Rights Reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/smp.h>
#include <asm/cacheflush.h>
#include <asm/system.h>
extern volatile int pen_release;
/*
 * Flush caches, then disable the D-cache and leave SMP coherency so this
 * CPU can sit safely in WFI while "dead".  CP15 sequence: invalidate
 * I-cache, drain write buffer, clear ACTLR bit 6 (0x40 -- presumably the
 * SMP/coherency bit on this core; confirm against the TRM), then clear
 * SCTLR.C (CR_C).
 */
static inline void cpu_enter_lowpower(void)
{
	unsigned int v;

	flush_cache_all();
	asm volatile(
		"mcr p15, 0, %1, c7, c5, 0\n"
	"	mcr p15, 0, %1, c7, c10, 4\n"
	/*
	 * Turn off coherency
	 */
	"	mrc p15, 0, %0, c1, c0, 1\n"
	"	bic %0, %0, %3\n"
	"	mcr p15, 0, %0, c1, c0, 1\n"
	"	mrc p15, 0, %0, c1, c0, 0\n"
	"	bic %0, %0, %2\n"
	"	mcr p15, 0, %0, c1, c0, 0\n"
	  : "=&r" (v)
	  : "r" (0), "Ir" (CR_C), "Ir" (0x40)
	  : "cc");
}
/*
 * Inverse of cpu_enter_lowpower(): re-enable the D-cache (SCTLR.C) and
 * rejoin coherency (ACTLR bit 0x40) after a proper wakeup.
 */
static inline void cpu_leave_lowpower(void)
{
	unsigned int v;

	asm volatile(
		"mrc p15, 0, %0, c1, c0, 0\n"
	"	orr %0, %0, %1\n"
	"	mcr p15, 0, %0, c1, c0, 0\n"
	"	mrc p15, 0, %0, c1, c0, 1\n"
	"	orr %0, %0, %2\n"
	"	mcr p15, 0, %0, c1, c0, 1\n"
	  : "=&r" (v)
	  : "Ir" (CR_C), "Ir" (0x40)
	  : "cc");
}
/*
 * Park the CPU in WFI until the boot CPU writes our number into
 * pen_release, which signals a legitimate wakeup.  @spurious counts
 * WFI exits that were not intended for this CPU.
 */
static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
{
	/*
	 * there is no power-control hardware on this platform, so all
	 * we can do is put the core into WFI; this is safe as the calling
	 * code will have already disabled interrupts
	 */
	for (;;) {
		wfi();

		if (pen_release == cpu) {
			/*
			 * OK, proper wakeup, we're done
			 */
			break;
		}

		/*
		 * Getting here, means that we have come out of WFI without
		 * having been woken up - this shouldn't happen
		 *
		 * Just note it happening - when we're woken, we can report
		 * its occurrence.
		 */
		(*spurious)++;
	}
}
/*
 * No power-control hardware on this platform: report success
 * unconditionally; the dying CPU parks itself in platform_cpu_die().
 */
int platform_cpu_kill(unsigned int cpu)
{
	return 1;
}
/*
 * platform-specific code to shutdown a CPU
 *
 * Called with IRQs disabled
 */
void platform_cpu_die(unsigned int cpu)
{
	int spurious = 0;

	/*
	 * we're ready for shutdown now, so do it
	 */
	cpu_enter_lowpower();
	platform_do_lowpower(cpu, &spurious);

	/*
	 * bring this CPU back into the world of cache
	 * coherency, and then restore interrupts
	 */
	cpu_leave_lowpower();

	/* Report how many times we woke without being the target CPU. */
	if (spurious)
		pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
}
/*
 * Veto hot-unplug requests for CPU 0: it still handles responsibilities
 * (e.g. clock tick interrupts) that cannot migrate.  All other CPUs may
 * be taken offline.
 */
int platform_cpu_disable(unsigned int cpu)
{
	if (cpu == 0)
		return -EPERM;

	return 0;
}
| gpl-2.0 |
ffxijuggalo/darkstar | scripts/zones/RuLude_Gardens/npcs/Macchi_Gazlitah.lua | 1254 | -----------------------------------
-- Area: Ru'Lud Gardens
-- NPC: Macchi Gazlitah
-- Standard Mechant NPC
-- TODO: Add support for occasional stock.
-----------------------------------
local ID = require("scripts/zones/RuLude_Gardens/IDs")
require("scripts/globals/shop")
-----------------------------------
-- Trade handler: this NPC does not accept any traded items.
function onTrade(player,npc,trade)
end
-- Click handler: greet the player and open the standard shop menu.
function onTrigger(player,npc)
    -- Regular stock, as flat (item id, gil price) pairs.
    local stock =
    {
        5703, 100, -- Uleguerand Milk
        5684, 250, -- Chalaimbille
        17905, 100, -- Wormy Broth
        -- Occasional stock below is disabled pending support (see TODO
        -- in the file header).
--[[
        5686, 800, -- Cheese Sandwich
        5729, 3360, -- Bavarois
        5718, 1300, -- Cream Puff
        461, 5000, -- Buffalo Milk Case
        5152, 1280, -- Buffalo Meat
        4722, 31878, -- Enfire II
        4723, 30492, -- Enblizzard II
        4724, 27968, -- Enaero II
        4725, 26112, -- Enstone II
        4726, 25600, -- Enthunder II
        4727, 33000, -- Enwater II
        4850, 150000, -- Refresh II
]]--
    }

    player:showText(npc, ID.text.MACCHI_GAZLITAH_SHOP_DIALOG1)
    -- JEUNO selects the fame/pricing region for this merchant.
    dsp.shop.general(player, stock, JEUNO)
end
-- Event update handler: no mid-event logic for this NPC.
function onEventUpdate(player,csid,option)
end

-- Event finish handler: no post-event logic for this NPC.
function onEventFinish(player,csid,option)
end
| gpl-3.0 |
barakav/robomongo | src/third-party/mongodb/jstests/org.js | 395 | // SERVER-2282 $or de duping with sparse indexes
t = db.jstests_org;
t.drop();
t.ensureIndex( {a:1}, {sparse:true} );
t.ensureIndex( {b:1} );
t.remove();
t.save( {a:1,b:2} );
assert.eq( 1, t.count( {$or:[{a:1},{b:2}]} ) );
t.remove();
t.save( {a:null,b:2} );
assert.eq( 1, t.count( {$or:[{a:null},{b:2}]} ) );
t.remove();
t.save( {b:2} );
assert.eq( 1, t.count( {$or:[{a:null},{b:2}]} ) );
| gpl-3.0 |
odin1314/android | cSploit/src/org/csploit/android/tools/NetworkRadar.java | 2001 | /*
* This file is part of the cSploit.
*
* Copyleft of Massimo Dragano aka tux_mind <[email protected]>
*
* cSploit is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* cSploit is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with cSploit. If not, see <http://www.gnu.org/licenses/>.
*/
package org.csploit.android.tools;
import org.csploit.android.core.ChildManager;
import org.csploit.android.core.System;
import org.csploit.android.core.Child;
import org.csploit.android.core.Logger;
import org.csploit.android.events.Event;
import org.csploit.android.events.Host;
import org.csploit.android.events.HostLost;
import java.net.InetAddress;
public class NetworkRadar extends Tool {
public NetworkRadar() {
mHandler = "network-radar";
mCmdPrefix = null;
}
public static abstract class HostReceiver extends Child.EventReceiver {
public abstract void onHostFound(byte[] macAddress, InetAddress ipAddress, String name);
public abstract void onHostLost(InetAddress ipAddress);
public void onEvent(Event e) {
if ( e instanceof Host ) {
Host h = (Host)e;
onHostFound(h.ethAddress, h.ipAddress, h.name);
} else if ( e instanceof HostLost ) {
onHostLost(((HostLost)e).ipAddress);
} else {
Logger.error("Unknown event: " + e);
}
}
}
public Child start(HostReceiver receiver) throws ChildManager.ChildNotStartedException {
String ifName;
ifName = System.getNetwork().getInterface().getDisplayName();
return async(ifName, receiver);
}
}
| gpl-3.0 |
UnlimitedFreedom/UF-WorldEdit | worldedit-core/src/main/java/com/sk89q/worldedit/function/pattern/BlockPattern.java | 1799 | /*
* WorldEdit, a Minecraft world manipulation toolkit
* Copyright (C) sk89q <http://www.sk89q.com>
* Copyright (C) WorldEdit team and contributors
*
* This program is free software: you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as published by the
* Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
* for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.sk89q.worldedit.function.pattern;
import com.sk89q.worldedit.Vector;
import com.sk89q.worldedit.blocks.BaseBlock;
import static com.google.common.base.Preconditions.checkNotNull;
/**
* A pattern that returns the same {@link BaseBlock} each time.
*/
/**
 * A pattern that returns the same {@link BaseBlock} for every position.
 */
public class BlockPattern extends AbstractPattern {

    // The single block this pattern always produces; never null.
    private BaseBlock constantBlock;

    /**
     * Create a new pattern that always yields the given block.
     *
     * @param block the block (must not be null)
     */
    public BlockPattern(BaseBlock block) {
        setBlock(block);
    }

    /**
     * Get the block that this pattern returns.
     *
     * @return the block that is always returned
     */
    public BaseBlock getBlock() {
        return constantBlock;
    }

    /**
     * Set the block that this pattern returns.
     *
     * @param block the block (must not be null)
     */
    public void setBlock(BaseBlock block) {
        checkNotNull(block);
        this.constantBlock = block;
    }

    @Override
    public BaseBlock apply(Vector position) {
        // Position is irrelevant: the pattern is constant.
        return constantBlock;
    }
}
| gpl-3.0 |
anryko/ansible | lib/ansible/modules/cloud/google/gcp_sql_user_info.py | 6527 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_sql_user_info
description:
- Gather info for GCP User
short_description: Gather info for GCP User
version_added: '2.8'
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
instance:
description:
- The name of the Cloud SQL instance. This does not include the project ID.
- 'This field represents a link to a Instance resource in GCP. It can be specified
in two ways. First, you can place a dictionary with key ''name'' and value of
your resource''s name Alternatively, you can add `register: name-of-resource`
to a gcp_sql_instance task and then set this instance field to "{{ name-of-resource
}}"'
required: true
type: dict
project:
description:
- The Google Cloud Platform project to use.
type: str
auth_kind:
description:
- The type of credential used.
type: str
required: true
choices:
- application
- machineaccount
- serviceaccount
service_account_contents:
description:
- The contents of a Service Account JSON file, either in a dictionary or as a
JSON string that represents it.
type: jsonarg
service_account_file:
description:
- The path of a Service Account JSON file if serviceaccount is selected as type.
type: path
service_account_email:
description:
- An optional service account email address if machineaccount is selected and
the user does not wish to use the default email.
type: str
scopes:
description:
- Array of scopes to be used
type: list
env_type:
description:
- Specifies which Ansible environment you're running this module within.
- This should not be set unless you know what you're doing.
- This only alters the User Agent string for any API requests.
type: str
notes:
- for authentication, you can set service_account_file using the C(gcp_service_account_file)
env variable.
- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
env variable.
- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
env variable.
- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
- Environment variables values will only be used if the playbook values are not set.
- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
'''
EXAMPLES = '''
- name: get info on a user
gcp_sql_user_info:
instance: "{{ instance }}"
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
'''
RETURN = '''
resources:
description: List of resources
returned: always
type: complex
contains:
host:
description:
- The host name from which the user can connect. For insert operations, host
defaults to an empty string. For update operations, host is specified as part
of the request URL. The host name cannot be updated after insertion.
returned: success
type: str
name:
description:
- The name of the user in the Cloud SQL instance.
returned: success
type: str
instance:
description:
- The name of the Cloud SQL instance. This does not include the project ID.
returned: success
type: dict
password:
description:
- The password for the user.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict
import json
################################################################################
# Main
################################################################################
def main():
    """Entry point: list Cloud SQL users for the given instance and exit.

    Declares the module's single required argument (``instance``), applies
    the default sqlservice.admin OAuth scope when none was supplied, then
    reports the fetched user list via ``exit_json``.
    """
    module = GcpModule(argument_spec=dict(instance=dict(required=True, type='dict')))

    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/sqlservice.admin']

    module.exit_json(resources=fetch_list(module, collection(module)))
def collection(module):
    """Build the REST URL for the users collection of the target instance.

    Resolves the ``instance`` parameter (which may be a dict or a resource
    reference) to its name via ``replace_resource_dict``.
    """
    instance_name = replace_resource_dict(module.params['instance'], 'name')
    return (
        'https://www.googleapis.com/sql/v1beta4/projects/%s/instances/%s/users'
        % (module.params['project'], instance_name)
    )
def fetch_list(module, link):
    """GET ``link`` with an authenticated session and return the decoded
    ``items`` array of the paged response."""
    session = GcpSession(module, 'sql')
    return session.list(link, return_if_object, array_name='items')
def return_if_object(module, response):
    """Decode a JSON API response into a dict.

    Returns ``None`` for 404 (not found) and 204 (no content); fails the
    module on HTTP errors, undecodable JSON, or an embedded error payload.
    """
    # Not found / no content both mean "nothing to return".
    if response.status_code in (404, 204):
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
        module.fail_json(msg="Invalid JSON response with error: %s" % inst)

    # The API can report errors inside a 200 body.
    if navigate_hash(result, ['error', 'errors']):
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))

    return result
if __name__ == "__main__":
main()
| gpl-3.0 |
javastraat/arduino | libraries/MD_Parola/doc/html/search/defines_70.js | 482 | var searchData=
[
['print',['PRINT',['../_m_d___parola__lib_8h.html#a1696fc35fb931f8c876786fbc1078ac4',1,'MD_Parola_lib.h']]],
['print_5fstate',['PRINT_STATE',['../_m_d___parola__lib_8h.html#a3fda4e1a5122a16a21bd96ae5217402c',1,'MD_Parola_lib.h']]],
['prints',['PRINTS',['../_m_d___parola__lib_8h.html#ad68f35c3cfe67be8d09d1cea8e788e13',1,'MD_Parola_lib.h']]],
['printx',['PRINTX',['../_m_d___parola__lib_8h.html#abf55b44e8497cbc3addccdeb294138cc',1,'MD_Parola_lib.h']]]
];
| gpl-3.0 |
timborden/browser-laptop | app/common/constants/extensionConstants.js | 633 | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
const mapValuesByKeys = require('../../../js/lib/functional').mapValuesByKeys
const _ = null
const ExtensionConstants = {
BROWSER_ACTION_REGISTERED: _,
BROWSER_ACTION_UPDATED: _,
EXTENSION_INSTALLED: _,
EXTENSION_UNINSTALLED: _,
EXTENSION_ENABLED: _,
EXTENSION_DISABLED: _,
CONTEXT_MENU_CREATED: _,
CONTEXT_MENU_ALL_REMOVED: _,
CONTEXT_MENU_CLICKED: _
}
module.exports = mapValuesByKeys(ExtensionConstants)
| mpl-2.0 |
hurie/thebuggenie | modules/publish/templates/_manualsidebarlink.inc.php | 2024 | <?php
$children = $main_article->getChildArticles();
$is_parent = in_array($main_article->getID(), $parents);
$is_selected = $main_article->getID() == $article->getID() || ($main_article->isRedirect() && $main_article->getRedirectArticleName() == $article->getTitle());
$is_first = $first;
$first = false;
$project_key = (\thebuggenie\core\framework\Context::isProjectContext()) ? \thebuggenie\core\framework\Context::getCurrentProject()->getKey() . ':' : '';
// $article_name = (strpos(mb_strtolower($main_article->getTitle()), 'category:') !== false) ? substr($main_article->getTitle(), 9+mb_strlen($project_key)) : substr($main_article->getTitle(), mb_strlen($project_key));
?>
<li class="<?php echo (isset($level) && $level >= 1) ? 'child' : 'parent'; ?> <?php if ($is_parent && !$is_selected) echo 'parent'; ?> <?php if ($is_selected) echo 'selected'; ?> level_<?php echo $level; ?>" id="article_sidebar_link_<?php echo $article->getID(); ?>">
<?php if (isset($level) && $level >= 1) echo image_tag('icon_tree_child.png', array('class' => 'branch')); ?>
<?php if ($is_first && $main_article->getArticleType() == \thebuggenie\modules\publish\entities\Article::TYPE_MANUAL): ?>
<?php echo image_tag('icon-article-type-manual.small.png'); ?>
<?php else: ?>
<?php echo (!empty($children)) ? image_tag('icon_folder.png', array(), false, 'publish') : image_tag('icon_article.png', array(), false, 'publish'); ?>
<?php endif; ?>
<?php echo link_tag(make_url('publish_article', array('article_name' => $main_article->getName())), $main_article->getManualName()); ?>
<?php if ($is_parent || $is_selected): ?>
<ul>
<?php foreach ($children as $child_article): ?>
<?php include_component('publish/manualsidebarlink', array('parents' => $parents, 'first' => $first, 'article' => $article, 'main_article' => $child_article, 'level' => $level + 1)); ?>
<?php endforeach; ?>
</ul>
<?php endif; ?>
</li>
| mpl-2.0 |
genome/genome | lib/perl/Genome/VariantReporting/Framework/Component/Expert/Command.pm | 4978 | package Genome::VariantReporting::Framework::Component::Expert::Command;
use strict;
use warnings FATAL => 'all';
use Genome;
use Set::Scalar;
use JSON;
use Params::Validate qw(validate validate_pos :types);
use List::MoreUtils qw(apply);
my $_JSON_CODEC = new JSON->allow_nonref;
use Genome::VariantReporting::Framework::FileLookup qw(
is_file
calculate_lookup
);
# Abstract base class for expert commands in the variant-reporting
# framework.  Subclasses supply name()/result_class(); inputs are an
# input VCF, the variant type, the owning process, and the serialized
# master plan (JSON).  The 'is_planned' meta-attribute marks properties
# whose values come from the plan rather than the caller.
class Genome::VariantReporting::Framework::Component::Expert::Command {
    is_abstract => 1,
    is => ['Genome::Command::DelegatesToResult', 'Genome::VariantReporting::Framework::Component::WithTranslatedInputs'],
    attributes_have => {
        is_planned => {
            is => "Boolean",
            default => 0,
        },
    },
    has_structural_input => [
        input_vcf => {
            is => 'Path',
        },
        variant_type => {
            is => 'Text',
            valid_values => ['snvs', 'indels'],
            doc => "The type of variant the input_result represents",
        },
        process_id => {
            is => 'Text',
        },
        plan_json => {
            is => 'Text',
        }
    ],
    has_transient_structural_optional => [
        # The requesting process, looked up lazily from process_id.
        requestor => {
            is => 'Genome::Process',
            id_by => 'process_id',
        },
    ],
    has_optional_structural_output => [
        output_vcf => {
            is => 'Path',
        },
    ],
};
# Short name of the expert this command belongs to; must be overridden.
sub name {
    die "Abstract";
}

# Copy this expert's planned run parameters from the master plan onto the
# matching accessors of this command instance.
sub resolve_plan_attributes {
    my $self = shift;

    my $variant_reporting_plan = $self->plan;
    my $specific_plan = $variant_reporting_plan->get_plan('expert', $self->name);
    while (my ($name, $value) = each %{$specific_plan->run_params}) {
        $self->$name($value);
    }
    return;
}

# Inflate the plan_json input into a MasterPlan object.
sub plan {
    my $self = shift;

    return Genome::VariantReporting::Framework::Plan::MasterPlan->create_from_json($self->plan_json);
}
# Set of all property names flagged is_planned on this class.
sub planned_names {
    my $self = shift;
    my @properties = $self->__meta__->properties(is_planned => 1);
    return Set::Scalar->new(map {$_->property_name} @properties);
}

# Subset of planned_names that are also required (not optional).
sub planned_required_names {
    my $self = shift;
    my @properties = $self->__meta__->properties(is_planned => 1, is_optional => 0);
    return Set::Scalar->new(map {$_->property_name} @properties);
}
# Validate a hashref of planned parameters against this class: report
# both missing required parameters and parameters that are not planned.
# Returns a list of UR::Object::Tag errors (possibly empty).
sub __planned_errors__ {
    my ($self, $params) = validate_pos(@_, 1, 1);
    my $needed = $self->planned_required_names;
    return Genome::VariantReporting::Framework::Utility::get_missing_errors($self->class, $params, $needed, "Parameters", "run"),
        $self->_get_extra_errors($params);
}

# Return error tags for any supplied parameters the expert did not plan for.
sub _get_extra_errors {
    my ($self, $params) = validate_pos(@_, 1, 1);

    my $needed = $self->planned_names;
    my $have = Set::Scalar->new(keys %{$params});

    my @errors;
    unless($needed->is_equal($have)) {
        # Only surplus keys are flagged here; missing ones are handled above.
        if (my $not_needed = $have - $needed) {
            push @errors, UR::Object::Tag->create(
                type => 'error',
                properties => [$not_needed->members],
                desc => sprintf("Parameters provided but not required by expert (%s): (%s)",
                    $self->class, join(",", $not_needed->members)),
            );
        }
    }

    return @errors;
}
# Abstract: concrete subclasses must return the SoftwareResult class that
# performs the actual work for this expert command.
sub result_class {
    my $self = shift;

    die sprintf("Abstract method 'result_class' must be defined in class %s",
        $self->class);
}
# Hook invoked by Genome::Command::DelegatesToResult after the software
# result is created or fetched: publish the result's output file path on
# this command's output_vcf output.  Returns 1 (success).
sub post_get_or_create {
    my $self = shift;
    $self->output_vcf($self->output_result->output_file_path);
    return 1;
}
# All input names for the result class: the is_many names first, followed by
# the single-valued names (each with any '_lookup' suffix stripped).
sub input_names {
    my $self = shift;

    my @names = $self->is_many_input_names;
    push @names, $self->is_not_many_input_names;
    return @names;
}
# Names of the result class's is_many properties, with any trailing
# '_lookup' suffix removed.  Works on copies, leaving the originals intact.
sub is_many_input_names {
    my $self = shift;

    return map { (my $name = $_) =~ s/_lookup$//; $name }
        $self->result_class->is_many_property_names;
}
# Names of the result class's single-valued properties, with any trailing
# '_lookup' suffix removed.  Works on copies, leaving the originals intact.
sub is_not_many_input_names {
    my $self = shift;

    return map { (my $name = $_) =~ s/_lookup$//; $name }
        $self->result_class->is_not_many_property_names;
}
# Build the parameter hash used to get-or-create the SoftwareResult.
# For every input declared on the result class that this command can supply:
#   - is_many inputs are wrapped in an arrayref;
#   - hashref values are canonically JSON-encoded (stable key order);
#   - file-valued inputs additionally get a '<name>_lookup' entry holding a
#     checksum, so results are keyed by file content rather than path.
# Also mixes in the configured software_result_test_name.
sub input_hash {
    my $self = shift;
    # Populate planned attributes from the plan before reading them.
    $self->resolve_plan_attributes;

    my %hash;
    for my $input_name ($self->is_many_input_names) {
        next unless $self->can($input_name);
        my $value = [$self->$input_name];
        $hash{$input_name} = $value;
        # Assumes a homogeneous list: if the first element is a file, all
        # elements are treated as files -- TODO confirm with callers.
        if (is_file($value->[0])) {
            $hash{$input_name . '_lookup'} = [map {calculate_lookup($_)} @{$value}];
        }
    }

    for my $input_name ($self->is_not_many_input_names) {
        next unless $self->can($input_name);
        my $value = $self->$input_name;
        if (is_hashref($value)) {
            $hash{$input_name} = json_encode($value);
        } else {
            $hash{$input_name} = $value;
        }
        if (is_file($value)) {
            $hash{$input_name . '_lookup'} = calculate_lookup($self->$input_name);
        }
    }
    $hash{test_name} = Genome::Config::get('software_result_test_name');
    return %hash;
}
# True (1) when $value is a plain hash reference, false (0) otherwise.
# Note: only bare HASH refs qualify; blessed objects report their package
# from ref() and therefore return 0.
sub is_hashref {
    my $value = shift;

    return (ref $value eq 'HASH') ? 1 : 0;
}
# Canonically (sorted-key) JSON-encode $value so that equivalent hashes
# always serialize to identical strings -- important because the encoded
# text participates in software-result parameter matching.
# $_JSON_CODEC is a package-level JSON codec declared elsewhere in this file.
sub json_encode {
    my $value = shift;

    return $_JSON_CODEC->canonical->encode($value);
}
| lgpl-3.0 |
vinodkc/spark | sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisErrorSuite.scala | 34491 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.analysis
import org.scalatest.Assertions._
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.dsl.plans._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate.{AggregateExpression, Complete, Count, Max}
import org.apache.spark.sql.catalyst.parser.CatalystSqlParser
import org.apache.spark.sql.catalyst.plans.{Cross, LeftOuter, RightOuter}
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.util.{ArrayBasedMapData, GenericArrayData, MapData}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
// Payload for GroupableUDT below: wraps a single Int, which maps to a
// groupable (atomic) SQL type.
private[sql] case class GroupableData(data: Int) {
  def getData: Int = data
}
// User-defined type whose SQL representation is IntegerType, i.e. a type
// that IS legal in GROUP BY -- used by the grouping-expression tests below.
private[sql] class GroupableUDT extends UserDefinedType[GroupableData] {

  override def sqlType: DataType = IntegerType

  override def serialize(groupableData: GroupableData): Int = groupableData.data

  // Only Int input is expected; anything else triggers a MatchError.
  override def deserialize(datum: Any): GroupableData = {
    datum match {
      case data: Int => GroupableData(data)
    }
  }

  override def userClass: Class[GroupableData] = classOf[GroupableData]

  private[spark] override def asNullable: GroupableUDT = this
}
// Payload for UngroupableUDT below: wraps a Map, which maps to an
// unorderable SQL type.
private[sql] case class UngroupableData(data: Map[Int, Int]) {
  def getData: Map[Int, Int] = data
}
// User-defined type whose SQL representation is a MapType, i.e. a type that
// is NOT legal in GROUP BY -- used by the grouping-expression tests below.
private[sql] class UngroupableUDT extends UserDefinedType[UngroupableData] {

  override def sqlType: DataType = MapType(IntegerType, IntegerType)

  override def serialize(ungroupableData: UngroupableData): MapData = {
    val keyArray = new GenericArrayData(ungroupableData.data.keys.toSeq)
    val valueArray = new GenericArrayData(ungroupableData.data.values.toSeq)
    new ArrayBasedMapData(keyArray, valueArray)
  }

  // Only MapData input is expected; anything else triggers a MatchError.
  override def deserialize(datum: Any): UngroupableData = {
    datum match {
      case data: MapData =>
        val keyArray = data.keyArray().array
        val valueArray = data.valueArray().array
        assert(keyArray.length == valueArray.length)
        val mapData = keyArray.zip(valueArray).toMap.asInstanceOf[Map[Int, Int]]
        UngroupableData(mapData)
    }
  }

  override def userClass: Class[UngroupableData] = classOf[UngroupableData]

  private[spark] override def asNullable: UngroupableUDT = this
}
// Expression stub used to exercise implicit-cast type-check error messages:
// it declares the given input types but is never evaluated (Unevaluable).
case class TestFunction(
    children: Seq[Expression],
    inputTypes: Seq[AbstractDataType])
  extends Expression with ImplicitCastInputTypes with Unevaluable {

  override def nullable: Boolean = true

  override def dataType: DataType = StringType

  override protected def withNewChildrenInternal(newChildren: IndexedSeq[Expression]): Expression =
    copy(children = newChildren)
}
// Leaf plan that always reports itself as unresolved, used to verify the
// catch-all "unresolved operator" analysis error.
case class UnresolvedTestPlan() extends LeafNode {
  override lazy val resolved = false
  override def output: Seq[Attribute] = Nil
}
class AnalysisErrorSuite extends AnalysisTest {
import TestRelations._
def errorTest(
name: String,
plan: LogicalPlan,
errorMessages: Seq[String],
caseSensitive: Boolean = true): Unit = {
test(name) {
assertAnalysisError(plan, errorMessages, caseSensitive)
}
}
def errorClassTest(
name: String,
plan: LogicalPlan,
errorClass: String,
messageParameters: Array[String]): Unit = {
test(name) {
assertAnalysisErrorClass(plan, errorClass, messageParameters)
}
}
val dateLit = Literal.create(null, DateType)
errorTest(
"scalar subquery with 2 columns",
testRelation.select(
(ScalarSubquery(testRelation.select($"a", dateLit.as("b"))) + Literal(1)).as("a")),
"Scalar subquery must return only one column, but got 2" :: Nil)
errorTest(
"scalar subquery with no column",
testRelation.select(ScalarSubquery(LocalRelation()).as("a")),
"Scalar subquery must return only one column, but got 0" :: Nil)
errorTest(
"single invalid type, single arg",
testRelation.select(TestFunction(dateLit :: Nil, IntegerType :: Nil).as("a")),
"cannot resolve" :: "testfunction(CAST(NULL AS DATE))" :: "argument 1" :: "requires int type" ::
"'CAST(NULL AS DATE)' is of date type" :: Nil)
errorTest(
"single invalid type, second arg",
testRelation.select(
TestFunction(dateLit :: dateLit :: Nil, DateType :: IntegerType :: Nil).as("a")),
"cannot resolve" :: "testfunction(CAST(NULL AS DATE), CAST(NULL AS DATE))" ::
"argument 2" :: "requires int type" ::
"'CAST(NULL AS DATE)' is of date type" :: Nil)
errorTest(
"multiple invalid type",
testRelation.select(
TestFunction(dateLit :: dateLit :: Nil, IntegerType :: IntegerType :: Nil).as("a")),
"cannot resolve" :: "testfunction(CAST(NULL AS DATE), CAST(NULL AS DATE))" ::
"argument 1" :: "argument 2" :: "requires int type" ::
"'CAST(NULL AS DATE)' is of date type" :: Nil)
errorTest(
"invalid window function",
testRelation2.select(
WindowExpression(
Literal(0),
WindowSpecDefinition(
UnresolvedAttribute("a") :: Nil,
SortOrder(UnresolvedAttribute("b"), Ascending) :: Nil,
UnspecifiedFrame)).as("window")),
"not supported within a window function" :: Nil)
errorTest(
"distinct aggregate function in window",
testRelation2.select(
WindowExpression(
AggregateExpression(Count(UnresolvedAttribute("b")), Complete, isDistinct = true),
WindowSpecDefinition(
UnresolvedAttribute("a") :: Nil,
SortOrder(UnresolvedAttribute("b"), Ascending) :: Nil,
UnspecifiedFrame)).as("window")),
"Distinct window functions are not supported" :: Nil)
errorTest(
"window aggregate function with filter predicate",
testRelation2.select(
WindowExpression(
AggregateExpression(
Count(UnresolvedAttribute("b")),
Complete,
isDistinct = false,
filter = Some(UnresolvedAttribute("b") > 1)),
WindowSpecDefinition(
UnresolvedAttribute("a") :: Nil,
SortOrder(UnresolvedAttribute("b"), Ascending) :: Nil,
UnspecifiedFrame)).as("window")),
"window aggregate function with filter predicate is not supported" :: Nil
)
errorTest(
"distinct function",
CatalystSqlParser.parsePlan("SELECT hex(DISTINCT a) FROM TaBlE"),
"Function hex does not support DISTINCT" :: Nil)
errorTest(
"non aggregate function with filter predicate",
CatalystSqlParser.parsePlan("SELECT hex(a) FILTER (WHERE c = 1) FROM TaBlE2"),
"Function hex does not support FILTER clause" :: Nil)
errorTest(
"distinct window function",
CatalystSqlParser.parsePlan("SELECT percent_rank(DISTINCT a) OVER () FROM TaBlE"),
"Function percent_rank does not support DISTINCT" :: Nil)
errorTest(
"window function with filter predicate",
CatalystSqlParser.parsePlan("SELECT percent_rank(a) FILTER (WHERE c > 1) OVER () FROM TaBlE2"),
"Function percent_rank does not support FILTER clause" :: Nil)
errorTest(
"higher order function with filter predicate",
CatalystSqlParser.parsePlan("SELECT aggregate(array(1, 2, 3), 0, (acc, x) -> acc + x) " +
"FILTER (WHERE c > 1)"),
"Function aggregate does not support FILTER clause" :: Nil)
errorTest(
"non-deterministic filter predicate in aggregate functions",
CatalystSqlParser.parsePlan("SELECT count(a) FILTER (WHERE rand(int(c)) > 1) FROM TaBlE2"),
"FILTER expression is non-deterministic, it cannot be used in aggregate functions" :: Nil)
errorTest(
"function don't support ignore nulls",
CatalystSqlParser.parsePlan("SELECT hex(a) IGNORE NULLS FROM TaBlE2"),
"Function hex does not support IGNORE NULLS" :: Nil)
errorTest(
"some window function don't support ignore nulls",
CatalystSqlParser.parsePlan("SELECT percent_rank(a) IGNORE NULLS FROM TaBlE2"),
"Function percent_rank does not support IGNORE NULLS" :: Nil)
errorTest(
"aggregate function don't support ignore nulls",
CatalystSqlParser.parsePlan("SELECT count(a) IGNORE NULLS FROM TaBlE2"),
"Function count does not support IGNORE NULLS" :: Nil)
errorTest(
"higher order function don't support ignore nulls",
CatalystSqlParser.parsePlan("SELECT aggregate(array(1, 2, 3), 0, (acc, x) -> acc + x) " +
"IGNORE NULLS"), "Function aggregate does not support IGNORE NULLS" :: Nil)
errorTest(
"nested aggregate functions",
testRelation.groupBy($"a")(
AggregateExpression(
Max(AggregateExpression(Count(Literal(1)), Complete, isDistinct = false)),
Complete,
isDistinct = false)),
"not allowed to use an aggregate function in the argument of another aggregate function." :: Nil
)
errorTest(
"offset window function",
testRelation2.select(
WindowExpression(
new Lead(UnresolvedAttribute("b")),
WindowSpecDefinition(
UnresolvedAttribute("a") :: Nil,
SortOrder(UnresolvedAttribute("b"), Ascending) :: Nil,
SpecifiedWindowFrame(RangeFrame, Literal(1), Literal(2)))).as("window")),
"Cannot specify window frame for lead function" :: Nil)
errorTest(
"the offset of nth_value window function is negative or zero",
testRelation2.select(
WindowExpression(
new NthValue(AttributeReference("b", IntegerType)(), Literal(0)),
WindowSpecDefinition(
UnresolvedAttribute("a") :: Nil,
SortOrder(UnresolvedAttribute("b"), Ascending) :: Nil,
SpecifiedWindowFrame(RowFrame, Literal(0), Literal(0)))).as("window")),
"The 'offset' argument of nth_value must be greater than zero but it is 0." :: Nil)
errorTest(
"the offset of nth_value window function is not int literal",
testRelation2.select(
WindowExpression(
new NthValue(AttributeReference("b", IntegerType)(), Literal(true)),
WindowSpecDefinition(
UnresolvedAttribute("a") :: Nil,
SortOrder(UnresolvedAttribute("b"), Ascending) :: Nil,
SpecifiedWindowFrame(RowFrame, Literal(0), Literal(0)))).as("window")),
"argument 2 requires int type, however, 'true' is of boolean type." :: Nil)
errorTest(
"too many generators",
listRelation.select(Explode($"list").as("a"), Explode($"list").as("b")),
"only one generator" :: "explode" :: Nil)
errorClassTest(
"unresolved attributes",
testRelation.select($"abcd"),
"MISSING_COLUMN",
Array("abcd", "a"))
errorClassTest(
"unresolved attributes with a generated name",
testRelation2.groupBy($"a")(max($"b"))
.where(sum($"b") > 0)
.orderBy($"havingCondition".asc),
"MISSING_COLUMN",
Array("havingCondition", "max(b)"))
errorTest(
"unresolved star expansion in max",
testRelation2.groupBy($"a")(sum(UnresolvedStar(None))),
"Invalid usage of '*'" :: "in expression 'sum'" :: Nil)
errorTest(
"sorting by unsupported column types",
mapRelation.orderBy($"map".asc),
"sort" :: "type" :: "map<int,int>" :: Nil)
errorClassTest(
"sorting by attributes are not from grouping expressions",
testRelation2.groupBy($"a", $"c")($"a", $"c", count($"a").as("a3")).orderBy($"b".asc),
"MISSING_COLUMN",
Array("b", "a, c, a3"))
errorTest(
"non-boolean filters",
testRelation.where(Literal(1)),
"filter" :: "'1'" :: "not a boolean" :: Literal(1).dataType.simpleString :: Nil)
errorTest(
"non-boolean join conditions",
testRelation.join(testRelation, condition = Some(Literal(1))),
"condition" :: "'1'" :: "not a boolean" :: Literal(1).dataType.simpleString :: Nil)
errorTest(
"missing group by",
testRelation2.groupBy($"a")($"b"),
"'b'" :: "group by" :: Nil
)
errorTest(
"ambiguous field",
nestedRelation.select($"top.duplicateField"),
"Ambiguous reference to fields" :: "duplicateField" :: Nil,
caseSensitive = false)
errorTest(
"ambiguous field due to case insensitivity",
nestedRelation.select($"top.differentCase"),
"Ambiguous reference to fields" :: "differentCase" :: "differentcase" :: Nil,
caseSensitive = false)
errorTest(
"missing field",
nestedRelation2.select($"top.c"),
"No such struct field" :: "aField" :: "bField" :: "cField" :: Nil,
caseSensitive = false)
errorTest(
"catch all unresolved plan",
UnresolvedTestPlan(),
"unresolved" :: Nil)
errorTest(
"union with unequal number of columns",
testRelation.union(testRelation2),
"union" :: "number of columns" :: testRelation2.output.length.toString ::
testRelation.output.length.toString :: Nil)
errorTest(
"intersect with unequal number of columns",
testRelation.intersect(testRelation2, isAll = false),
"intersect" :: "number of columns" :: testRelation2.output.length.toString ::
testRelation.output.length.toString :: Nil)
errorTest(
"except with unequal number of columns",
testRelation.except(testRelation2, isAll = false),
"except" :: "number of columns" :: testRelation2.output.length.toString ::
testRelation.output.length.toString :: Nil)
errorTest(
"union with incompatible column types",
testRelation.union(nestedRelation),
"union" :: "the compatible column types" :: Nil)
errorTest(
"union with a incompatible column type and compatible column types",
testRelation3.union(testRelation4),
"union" :: "the compatible column types" :: "map" :: "decimal" :: Nil)
errorTest(
"intersect with incompatible column types",
testRelation.intersect(nestedRelation, isAll = false),
"intersect" :: "the compatible column types" :: Nil)
errorTest(
"intersect with a incompatible column type and compatible column types",
testRelation3.intersect(testRelation4, isAll = false),
"intersect" :: "the compatible column types" :: "map" :: "decimal" :: Nil)
errorTest(
"except with incompatible column types",
testRelation.except(nestedRelation, isAll = false),
"except" :: "the compatible column types" :: Nil)
errorTest(
"except with a incompatible column type and compatible column types",
testRelation3.except(testRelation4, isAll = false),
"except" :: "the compatible column types" :: "map" :: "decimal" :: Nil)
errorClassTest(
"SPARK-9955: correct error message for aggregate",
// When parse SQL string, we will wrap aggregate expressions with UnresolvedAlias.
testRelation2.where($"bad_column" > 1).groupBy($"a")(UnresolvedAlias(max($"b"))),
"MISSING_COLUMN",
Array("bad_column", "a, b, c, d, e"))
errorTest(
"slide duration greater than window in time window",
testRelation2.select(
TimeWindow(Literal("2016-01-01 01:01:01"), "1 second", "2 second", "0 second").as("window")),
s"The slide duration " :: " must be less than or equal to the windowDuration " :: Nil
)
errorTest(
"start time greater than slide duration in time window",
testRelation.select(
TimeWindow(Literal("2016-01-01 01:01:01"), "1 second", "1 second", "1 minute").as("window")),
"The absolute value of start time " :: " must be less than the slideDuration " :: Nil
)
errorTest(
"start time equal to slide duration in time window",
testRelation.select(
TimeWindow(Literal("2016-01-01 01:01:01"), "1 second", "1 second", "1 second").as("window")),
"The absolute value of start time " :: " must be less than the slideDuration " :: Nil
)
errorTest(
"SPARK-21590: absolute value of start time greater than slide duration in time window",
testRelation.select(
TimeWindow(Literal("2016-01-01 01:01:01"), "1 second", "1 second", "-1 minute").as("window")),
"The absolute value of start time " :: " must be less than the slideDuration " :: Nil
)
errorTest(
"SPARK-21590: absolute value of start time equal to slide duration in time window",
testRelation.select(
TimeWindow(Literal("2016-01-01 01:01:01"), "1 second", "1 second", "-1 second").as("window")),
"The absolute value of start time " :: " must be less than the slideDuration " :: Nil
)
errorTest(
"negative window duration in time window",
testRelation.select(
TimeWindow(Literal("2016-01-01 01:01:01"), "-1 second", "1 second", "0 second").as("window")),
"The window duration " :: " must be greater than 0." :: Nil
)
errorTest(
"zero window duration in time window",
testRelation.select(
TimeWindow(Literal("2016-01-01 01:01:01"), "0 second", "1 second", "0 second").as("window")),
"The window duration " :: " must be greater than 0." :: Nil
)
errorTest(
"negative slide duration in time window",
testRelation.select(
TimeWindow(Literal("2016-01-01 01:01:01"), "1 second", "-1 second", "0 second").as("window")),
"The slide duration " :: " must be greater than 0." :: Nil
)
errorTest(
"zero slide duration in time window",
testRelation.select(
TimeWindow(Literal("2016-01-01 01:01:01"), "1 second", "0 second", "0 second").as("window")),
"The slide duration" :: " must be greater than 0." :: Nil
)
errorTest(
"generator nested in expressions",
listRelation.select(Explode($"list") + 1),
"Generators are not supported when it's nested in expressions, but got: (explode(list) + 1)"
:: Nil
)
errorTest(
"SPARK-30998: unsupported nested inner generators",
{
val nestedListRelation = LocalRelation(
AttributeReference("nestedList", ArrayType(ArrayType(IntegerType)))())
nestedListRelation.select(Explode(Explode($"nestedList")))
},
"Generators are not supported when it's nested in expressions, but got: " +
"explode(explode(nestedList))" :: Nil
)
errorTest(
"SPARK-30998: unsupported nested inner generators for aggregates",
testRelation.select(Explode(Explode(
CreateArray(CreateArray(min($"a") :: max($"a") :: Nil) :: Nil)))),
"Generators are not supported when it's nested in expressions, but got: " +
"explode(explode(array(array(min(a), max(a)))))" :: Nil
)
errorTest(
"generator nested in expressions for aggregates",
testRelation.select(Explode(CreateArray(min($"a") :: max($"a") :: Nil)) + 1),
"Generators are not supported when it's nested in expressions, but got: " +
"(explode(array(min(a), max(a))) + 1)" :: Nil
)
errorTest(
"generator appears in operator which is not Project",
listRelation.sortBy(Explode($"list").asc),
"Generators are not supported outside the SELECT clause, but got: Sort" :: Nil
)
errorTest(
"an evaluated limit class must not be null",
testRelation.limit(Literal(null, IntegerType)),
"The evaluated limit expression must not be null, but got " :: Nil
)
errorTest(
"num_rows in limit clause must be equal to or greater than 0",
listRelation.limit(-1),
"The limit expression must be equal to or greater than 0, but got -1" :: Nil
)
errorTest(
"more than one generators in SELECT",
listRelation.select(Explode($"list"), Explode($"list")),
"Only one generator allowed per select clause but found 2: explode(list), explode(list)" :: Nil
)
errorTest(
"more than one generators for aggregates in SELECT",
testRelation.select(Explode(CreateArray(min($"a") :: Nil)),
Explode(CreateArray(max($"a") :: Nil))),
"Only one generator allowed per select clause but found 2: " +
"explode(array(min(a))), explode(array(max(a)))" :: Nil
)
test("SPARK-6452 regression test") {
// CheckAnalysis should throw AnalysisException when Aggregate contains missing attribute(s)
// Since we manually construct the logical plan at here and Sum only accept
// LongType, DoubleType, and DecimalType. We use LongType as the type of a.
val attrA = AttributeReference("a", LongType)(exprId = ExprId(1))
val otherA = AttributeReference("a", LongType)(exprId = ExprId(2))
val attrC = AttributeReference("c", LongType)(exprId = ExprId(3))
val aliases = Alias(sum(attrA), "b")() :: Alias(sum(attrC), "d")() :: Nil
val plan = Aggregate(
Nil,
aliases,
LocalRelation(otherA))
assert(plan.resolved)
val resolved = s"${attrA.toString},${attrC.toString}"
val errorMsg = s"Resolved attribute(s) $resolved missing from ${otherA.toString} " +
s"in operator !Aggregate [${aliases.mkString(", ")}]. " +
s"Attribute(s) with the same name appear in the operation: a. " +
"Please check if the right attribute(s) are used."
assertAnalysisError(plan, errorMsg :: Nil)
}
test("error test for self-join") {
val join = Join(testRelation, testRelation, Cross, None, JoinHint.NONE)
val error = intercept[AnalysisException] {
SimpleAnalyzer.checkAnalysis(join)
}
assert(error.message.contains("Failure when resolving conflicting references in Join"))
assert(error.message.contains("Conflicting attributes"))
}
test("check grouping expression data types") {
def checkDataType(dataType: DataType, shouldSuccess: Boolean): Unit = {
val plan =
Aggregate(
AttributeReference("a", dataType)(exprId = ExprId(2)) :: Nil,
Alias(sum(AttributeReference("b", IntegerType)(exprId = ExprId(1))), "c")() :: Nil,
LocalRelation(
AttributeReference("a", dataType)(exprId = ExprId(2)),
AttributeReference("b", IntegerType)(exprId = ExprId(1))))
if (shouldSuccess) {
assertAnalysisSuccess(plan, true)
} else {
assertAnalysisError(plan, "expression a cannot be used as a grouping expression" :: Nil)
}
}
val supportedDataTypes = Seq(
StringType, BinaryType,
NullType, BooleanType,
ByteType, ShortType, IntegerType, LongType,
FloatType, DoubleType, DecimalType(25, 5), DecimalType(6, 5),
DateType, TimestampType,
ArrayType(IntegerType),
new StructType()
.add("f1", FloatType, nullable = true)
.add("f2", StringType, nullable = true),
new StructType()
.add("f1", FloatType, nullable = true)
.add("f2", ArrayType(BooleanType, containsNull = true), nullable = true),
new GroupableUDT())
supportedDataTypes.foreach { dataType =>
checkDataType(dataType, shouldSuccess = true)
}
val unsupportedDataTypes = Seq(
MapType(StringType, LongType),
new StructType()
.add("f1", FloatType, nullable = true)
.add("f2", MapType(StringType, LongType), nullable = true),
new UngroupableUDT())
unsupportedDataTypes.foreach { dataType =>
checkDataType(dataType, shouldSuccess = false)
}
}
test("we should fail analysis when we find nested aggregate functions") {
val plan =
Aggregate(
AttributeReference("a", IntegerType)(exprId = ExprId(2)) :: Nil,
Alias(sum(sum(AttributeReference("b", IntegerType)(exprId = ExprId(1)))), "c")() :: Nil,
LocalRelation(
AttributeReference("a", IntegerType)(exprId = ExprId(2)),
AttributeReference("b", IntegerType)(exprId = ExprId(1))))
assertAnalysisError(
plan,
"It is not allowed to use an aggregate function in the argument of " +
"another aggregate function." :: Nil)
}
test("Join can work on binary types but can't work on map types") {
val left = LocalRelation(Symbol("a").binary, Symbol("b").map(StringType, StringType))
val right = LocalRelation(Symbol("c").binary, Symbol("d").map(StringType, StringType))
val plan1 = left.join(
right,
joinType = Cross,
condition = Some(Symbol("a") === Symbol("c")))
assertAnalysisSuccess(plan1)
val plan2 = left.join(
right,
joinType = Cross,
condition = Some(Symbol("b") === Symbol("d")))
assertAnalysisError(plan2, "EqualTo does not support ordering on type map" :: Nil)
}
test("PredicateSubQuery is used outside of a filter") {
val a = AttributeReference("a", IntegerType)()
val b = AttributeReference("b", IntegerType)()
val plan = Project(
Seq(a, Alias(InSubquery(Seq(a), ListQuery(LocalRelation(b))), "c")()),
LocalRelation(a))
assertAnalysisError(plan, "Predicate sub-queries can only be used" +
" in Filter" :: Nil)
}
test("PredicateSubQuery correlated predicate is nested in an illegal plan") {
val a = AttributeReference("a", IntegerType)()
val b = AttributeReference("b", IntegerType)()
val c = AttributeReference("c", IntegerType)()
val plan1 = Filter(
Exists(
Join(
LocalRelation(b),
Filter(EqualTo(UnresolvedAttribute("a"), c), LocalRelation(c)),
LeftOuter,
Option(EqualTo(b, c)),
JoinHint.NONE)),
LocalRelation(a))
assertAnalysisError(plan1, "Accessing outer query column is not allowed in" :: Nil)
val plan2 = Filter(
Exists(
Join(
Filter(EqualTo(UnresolvedAttribute("a"), c), LocalRelation(c)),
LocalRelation(b),
RightOuter,
Option(EqualTo(b, c)),
JoinHint.NONE)),
LocalRelation(a))
assertAnalysisError(plan2, "Accessing outer query column is not allowed in" :: Nil)
val plan3 = Filter(
Exists(Union(LocalRelation(b),
Filter(EqualTo(UnresolvedAttribute("a"), c), LocalRelation(c)))),
LocalRelation(a))
assertAnalysisError(plan3, "Accessing outer query column is not allowed in" :: Nil)
val plan4 = Filter(
Exists(
Limit(1,
Filter(EqualTo(UnresolvedAttribute("a"), b), LocalRelation(b)))
),
LocalRelation(a))
assertAnalysisError(plan4, "Accessing outer query column is not allowed in" :: Nil)
val plan5 = Filter(
Exists(
Sample(0.0, 0.5, false, 1L,
Filter(EqualTo(UnresolvedAttribute("a"), b), LocalRelation(b))).select("b")
),
LocalRelation(a))
assertAnalysisError(plan5,
"Accessing outer query column is not allowed in" :: Nil)
}
test("Error on filter condition containing aggregate expressions") {
val a = AttributeReference("a", IntegerType)()
val b = AttributeReference("b", IntegerType)()
val plan = Filter(Symbol("a") === UnresolvedFunction("max", Seq(b), true), LocalRelation(a, b))
assertAnalysisError(plan,
"Aggregate/Window/Generate expressions are not valid in where clause of the query" :: Nil)
}
test("SPARK-30811: CTE should not cause stack overflow when " +
"it refers to non-existent table with same name") {
val plan = UnresolvedWith(
UnresolvedRelation(TableIdentifier("t")),
Seq("t" -> SubqueryAlias("t",
Project(
Alias(Literal(1), "x")() :: Nil,
UnresolvedRelation(TableIdentifier("t", Option("nonexist")))))))
assertAnalysisError(plan, "Table or view not found:" :: Nil)
}
test("SPARK-33909: Check rand functions seed is legal at analyer side") {
Seq(Rand("a".attr), Randn("a".attr)).foreach { r =>
val plan = Project(Seq(r.as("r")), testRelation)
assertAnalysisError(plan,
s"Input argument to ${r.prettyName} must be a constant." :: Nil)
}
Seq(Rand(1.0), Rand("1"), Randn("a")).foreach { r =>
val plan = Project(Seq(r.as("r")), testRelation)
assertAnalysisError(plan,
s"data type mismatch: argument 1 requires (int or bigint) type" :: Nil)
}
}
test("SPARK-34946: correlated scalar subquery in grouping expressions only") {
val c1 = AttributeReference("c1", IntegerType)()
val c2 = AttributeReference("c2", IntegerType)()
val t = LocalRelation(c1, c2)
val plan = Aggregate(
ScalarSubquery(
Aggregate(Nil, sum($"c2").as("sum") :: Nil,
Filter($"t1.c1" === $"t2.c1",
t.as("t2")))
) :: Nil,
sum($"c2").as("sum") :: Nil, t.as("t1"))
assertAnalysisError(plan, "Correlated scalar subqueries in the group by clause must also be " +
"in the aggregate expressions" :: Nil)
}
test("SPARK-34946: correlated scalar subquery in aggregate expressions only") {
val c1 = AttributeReference("c1", IntegerType)()
val c2 = AttributeReference("c2", IntegerType)()
val t = LocalRelation(c1, c2)
val plan = Aggregate(
$"c1" :: Nil,
ScalarSubquery(
Aggregate(Nil, sum($"c2").as("sum") :: Nil,
Filter($"t1.c1" === $"t2.c1",
t.as("t2")))
).as("sub") :: Nil, t.as("t1"))
assertAnalysisError(plan, "Correlated scalar subquery 'scalarsubquery(t1.c1)' is " +
"neither present in the group by, nor in an aggregate function. Add it to group by " +
"using ordinal position or wrap it in first() (or first_value) if you don't care " +
"which value you get." :: Nil)
}
errorTest(
"SPARK-34920: error code to error message",
testRelation2.where($"bad_column" > 1).groupBy($"a")(UnresolvedAlias(max($"b"))),
"Column 'bad_column' does not exist. Did you mean one of the following? [a, b, c, d, e]"
:: Nil)
test("SPARK-35080: Unsupported correlated equality predicates in subquery") {
val a = AttributeReference("a", IntegerType)()
val b = AttributeReference("b", IntegerType)()
val c = AttributeReference("c", IntegerType)()
val d = AttributeReference("d", DoubleType)()
val t1 = LocalRelation(a, b, d)
val t2 = LocalRelation(c)
val conditions = Seq(
(abs($"a") === $"c", "abs(a) = outer(c)"),
(abs($"a") <=> $"c", "abs(a) <=> outer(c)"),
($"a" + 1 === $"c", "(a + 1) = outer(c)"),
($"a" + $"b" === $"c", "(a + b) = outer(c)"),
($"a" + $"c" === $"b", "(a + outer(c)) = b"),
(And($"a" === $"c", Cast($"d", IntegerType) === $"c"), "CAST(d AS INT) = outer(c)"))
conditions.foreach { case (cond, msg) =>
val plan = Project(
ScalarSubquery(
Aggregate(Nil, count(Literal(1)).as("cnt") :: Nil,
Filter(cond, t1))
).as("sub") :: Nil,
t2)
assertAnalysisError(plan, s"Correlated column is not allowed in predicate ($msg)" :: Nil)
}
}
test("SPARK-35673: fail if the plan still contains UnresolvedHint after analysis") {
val hintName = "some_random_hint_that_does_not_exist"
val plan = UnresolvedHint(hintName, Seq.empty,
Project(Alias(Literal(1), "x")() :: Nil, OneRowRelation())
)
assert(plan.resolved)
val error = intercept[AnalysisException] {
SimpleAnalyzer.checkAnalysis(plan)
}
assert(error.message.contains(s"Hint not found: ${hintName}"))
// UnresolvedHint be removed by batch `Remove Unresolved Hints`
assertAnalysisSuccess(plan, true)
}
test("SPARK-35618: Resolve star expressions in subqueries") {
val a = AttributeReference("a", IntegerType)()
val b = AttributeReference("b", IntegerType)()
val t0 = OneRowRelation()
val t1 = LocalRelation(a, b).as("t1")
// t1.* in the subquery should be resolved into outer(t1.a) and outer(t1.b).
assertAnalysisError(
Project(ScalarSubquery(t0.select(star("t1"))).as("sub") :: Nil, t1),
"Scalar subquery must return only one column, but got 2" :: Nil)
// t2.* cannot be resolved and the error should be the initial analysis exception.
assertAnalysisError(
Project(ScalarSubquery(t0.select(star("t2"))).as("sub") :: Nil, t1),
"cannot resolve 't2.*' given input columns ''" :: Nil
)
}
test("SPARK-35618: Invalid star usage in subqueries") {
  val a = AttributeReference("a", IntegerType)()
  val b = AttributeReference("b", IntegerType)()
  val c = AttributeReference("c", IntegerType)()
  val t1 = LocalRelation(a, b).as("t1")
  val t2 = LocalRelation(b, c).as("t2")

  // A qualified star used as an operand of a comparison inside a correlated
  // subquery filter is not a valid star position.
  // SELECT * FROM t1 WHERE a = (SELECT sum(c) FROM t2 WHERE t1.* = t2.b)
  assertAnalysisError(
    Filter(EqualTo(a, ScalarSubquery(t2.select(sum(c)).where(star("t1") === b))), t1),
    "Invalid usage of '*' in Filter" :: Nil
  )

  // Same invalid position, but inside an EXISTS subquery used as a join
  // condition.
  // SELECT * FROM t1 JOIN t2 ON (EXISTS (SELECT 1 FROM t2 WHERE t1.* = b))
  assertAnalysisError(
    t1.join(t2, condition = Some(Exists(t2.select(1).where(star("t1") === b)))),
    "Invalid usage of '*' in Filter" :: Nil
  )
}
test("SPARK-36488: Regular expression expansion should fail with a meaningful message") {
  withSQLConf(SQLConf.SUPPORT_QUOTED_REGEX_COLUMN_NAME.key -> "true") {
    // Fix: the expected-message literals below used the `s` interpolator
    // without interpolating anything; the prefix is dropped.
    // One quoted regex mixed with a regular column.
    assertAnalysisError(testRelation.select(Divide(UnresolvedRegex(".?", None, false), "a")),
      "Invalid usage of regular expression '.?' in" :: Nil)
    // Two distinct regexes are reported together, in order.
    assertAnalysisError(testRelation.select(
      Divide(UnresolvedRegex(".?", None, false), UnresolvedRegex(".*", None, false))),
      "Invalid usage of regular expressions '.?', '.*' in" :: Nil)
    // Duplicate regexes are de-duplicated in the message.
    assertAnalysisError(testRelation.select(
      Divide(UnresolvedRegex(".?", None, false), UnresolvedRegex(".?", None, false))),
      "Invalid usage of regular expression '.?' in" :: Nil)
    // Bare star operands.
    assertAnalysisError(testRelation.select(Divide(UnresolvedStar(None), "a")),
      "Invalid usage of '*' in" :: Nil)
    assertAnalysisError(testRelation.select(Divide(UnresolvedStar(None), UnresolvedStar(None))),
      "Invalid usage of '*' in" :: Nil)
    // Mixed star and regex operands.
    assertAnalysisError(testRelation.select(Divide(UnresolvedStar(None),
      UnresolvedRegex(".?", None, false))),
      "Invalid usage of '*' and regular expression '.?' in" :: Nil)
    assertAnalysisError(testRelation.select(Least(Seq(UnresolvedStar(None),
      UnresolvedRegex(".*", None, false), UnresolvedRegex(".?", None, false)))),
      "Invalid usage of '*' and regular expressions '.*', '.?' in" :: Nil)
  }
}
}
| apache-2.0 |
LambdaLord/es-operating-system | esjs/src/interface.cpp | 28073 | /*
* Copyright 2008, 2009 Google Inc.
* Copyright 2006, 2007 Nintendo Co., Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <ctype.h>
#include <string.h>
#include <strings.h>

#include <string>

#include <es/any.h>
#include <es/endian.h>
#include <es/formatter.h>
#include <es/interfaceData.h>
#include <es/object.h>
#include <es/base/IProcess.h>
#include <es/hashtable.h>
#include <es/reflect.h>
// TODO use proper name prefix or namespace
namespace es
{
Reflect::Interface& getInterface(const char* iid);
Object* getConstructor(const char* iid);
extern unsigned char* defaultInterfaceInfo[];
extern size_t defaultInterfaceCount;
} // namespace es
#ifndef VERBOSE
#define PRINTF(...) (__VA_ARGS__)
#else
#define PRINTF(...) report(__VA_ARGS__)
#endif
class ObjectValue;
#include "interface.h"
extern es::CurrentProcess* System();
namespace
{
    // Length of a textual GUID "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
    // including the terminating NUL.
    const int GuidStringLength = 37; // Including terminating zero
}

//
// invoke
//

// Shape used to index a native object's vtable: every slot is treated as a
// variadic function taking `this` first.
typedef long long (*InterfaceMethod)(void* self, ...);

// Shared scratch buffer used to receive string/sequence/array results from
// native methods. NOTE(review): file-static, so invoke() below is neither
// reentrant nor thread-safe.
static char heap[64*1024];
/**
 * Bridge from ECMAScript to a native interface method.
 *
 * Marshals the script arguments in `list` into an Any vector according to
 * the reflection metadata of method `number` of interface `iid`, calls the
 * native method through the vtable `self`, and converts the native result
 * back into an ECMAScript Value.
 *
 * Limitations visible in the code below:
 * - out/inout parameters are not supported; every parameter is input-only.
 * - string/sequence/array results are written into the file-static `heap`
 *   buffer, so this function is not reentrant.
 * - argv has 9 slots for `this` + hidden buffer arguments + parameters.
 *
 * Throws a TypeError instance when `self` is null.
 */
static Value* invoke(const char* iid, int number, InterfaceMethod** self, ListValue* list)
{
    if (!self)
    {
        throw getErrorInstance("TypeError");
    }

    Reflect::Interface interface = es::getInterface(iid);

    Reflect::Method method(interface.getMethod(number));
    PRINTF("invoke %s.%s(%p)\n", interface.getName().c_str(), method.getName().c_str(), self);

    // Set up parameters
    Any argv[9];
    Any* argp = argv;
    int ext = 0;    // extra parameter count (script args consumed by hidden params)

    // Set this
    *argp++ = Any(reinterpret_cast<intptr_t>(self));

    // In the following implementation, we assume no out nor inout attribute is
    // used for parameters.

    // Some return types require hidden leading arguments (an output buffer
    // and possibly its length) before the declared parameters.
    Reflect::Type returnType = method.getReturnType();
    switch (returnType.getType())
    {
    case Reflect::kAny:
        // Any op(void* buf, int len, ...);
        // FALL THROUGH
    case Reflect::kString:
        // const char* op(xxx* buf, int len, ...);
        *argp++ = Any(reinterpret_cast<intptr_t>(heap));
        *argp++ = Any(sizeof(heap));
        break;
    case Reflect::kSequence:
        // int op(xxx* buf, int len, ...);
        // The first script argument supplies the element count.
        *argp++ = Any(reinterpret_cast<intptr_t>(heap));
        ++ext;
        *argp++ = Any(static_cast<int32_t>(((*list)[0])->toNumber()));
        break;
    case Reflect::kArray:
        // void op(xxx[x] buf, ...);
        *argp++ = Any(reinterpret_cast<intptr_t>(heap));
        break;
    }

    // Convert each script argument to the native representation demanded by
    // the reflected parameter type.
    Reflect::Parameter param = method.listParameter();
    for (int i = ext; param.next(); ++i, ++argp)
    {
        Reflect::Type type(param.getType());
        Value* value = (*list)[i];

        switch (type.getType())
        {
        case Reflect::kAny:
            // Any variant, ...
            switch (value->getType()) {
            case Value::BoolType:
                *argp = Any(static_cast<bool>(value->toBoolean()));
                break;
            case Value::StringType:
                // NOTE(review): toString() appears to return a temporary;
                // confirm the Any copies the characters before the call.
                *argp = Any(value->toString().c_str());
                break;
            case Value::NumberType:
                *argp = Any(static_cast<double>(value->toNumber()));
                break;
            case Value::ObjectType:
                if (InterfacePointerValue* unknown = dynamic_cast<InterfacePointerValue*>(value))
                {
                    *argp = Any(unknown->getObject());
                }
                else
                {
                    // XXX expose ECMAScript object
                    *argp = Any(static_cast<Object*>(0));
                }
                break;
            default:
                *argp = Any();
                break;
            }
            argp->makeVariant();
            break;
        case Reflect::kSequence:
            // xxx* buf, int len, ...
            // XXX Assume sequence<octet> now...
            // Consumes two native slots (pointer + length) and two script args.
            *argp++ = Any(reinterpret_cast<intptr_t>(value->toString().c_str()));
            value = (*list)[++i];
            *argp = Any(static_cast<int32_t>(value->toNumber()));
            break;
        case Reflect::kString:
            *argp = Any(value->toString().c_str());
            break;
        case Reflect::kArray:
            // void op(xxx[x] buf, ...);
            // XXX expand data
            break;
        case Reflect::kObject:
            if (InterfacePointerValue* unknown = dynamic_cast<InterfacePointerValue*>(value))
            {
                *argp = Any(unknown->getObject());
            }
            else
            {
                *argp = Any(static_cast<Object*>(0));
            }
            break;
        case Reflect::kBoolean:
            *argp = Any(static_cast<bool>(value->toBoolean()));
            break;
        case Reflect::kPointer:
            *argp = Any(static_cast<intptr_t>(value->toNumber()));
            break;
        case Reflect::kShort:
            *argp = Any(static_cast<int16_t>(value->toNumber()));
            break;
        case Reflect::kLong:
            *argp = Any(static_cast<int32_t>(value->toNumber()));
            break;
        case Reflect::kOctet:
            *argp = Any(static_cast<uint8_t>(value->toNumber()));
            break;
        case Reflect::kUnsignedShort:
            *argp = Any(static_cast<uint16_t>(value->toNumber()));
            break;
        case Reflect::kUnsignedLong:
            *argp = Any(static_cast<uint32_t>(value->toNumber()));
            break;
        case Reflect::kLongLong:
            *argp = Any(static_cast<int64_t>(value->toNumber()));
            break;
        case Reflect::kUnsignedLongLong:
            *argp = Any(static_cast<uint64_t>(value->toNumber()));
            break;
        case Reflect::kFloat:
            *argp = Any(static_cast<float>(value->toNumber()));
            break;
        case Reflect::kDouble:
            *argp = Any(static_cast<double>(value->toNumber()));
            break;
        default:
            break;
        }
    }

    // Invoke method
    Register<Value> value;
    // Method numbers are relative to the interface; offset by the count of
    // inherited methods to index the full vtable.
    unsigned methodNumber = interface.getInheritedMethodCount() + number;
    int argc = argp - argv;
    switch (returnType.getType())
    {
    case Reflect::kAny:
        {
            // Result type is only known at run time: dispatch on the Any tag.
            Any result = apply(argc, argv, (Any (*)()) ((*self)[methodNumber]));
            switch (result.getType())
            {
            case Any::TypeVoid:
                value = NullValue::getInstance();
                break;
            case Any::TypeBool:
                value = BoolValue::getInstance(static_cast<bool>(result));
                break;
            case Any::TypeOctet:
                value = new NumberValue(static_cast<uint8_t>(result));
                break;
            case Any::TypeShort:
                value = new NumberValue(static_cast<int16_t>(result));
                break;
            case Any::TypeUnsignedShort:
                value = new NumberValue(static_cast<uint16_t>(result));
                break;
            case Any::TypeLong:
                value = new NumberValue(static_cast<int32_t>(result));
                break;
            case Any::TypeUnsignedLong:
                value = new NumberValue(static_cast<uint32_t>(result));
                break;
            case Any::TypeLongLong:
                value = new NumberValue(static_cast<int64_t>(result));
                break;
            case Any::TypeUnsignedLongLong:
                value = new NumberValue(static_cast<uint64_t>(result));
                break;
            case Any::TypeFloat:
                value = new NumberValue(static_cast<float>(result));
                break;
            case Any::TypeDouble:
                value = new NumberValue(static_cast<double>(result));
                break;
            case Any::TypeString:
                if (const char* string = static_cast<const char*>(result))
                {
                    value = new StringValue(string);
                }
                else
                {
                    value = NullValue::getInstance();
                }
                break;
            case Any::TypeObject:
                if (Object* unknown = static_cast<Object*>(result))
                {
                    // Wrap the native pointer; prototype comes from the
                    // global object registered under the interface name.
                    ObjectValue* instance = new InterfacePointerValue(unknown);
                    instance->setPrototype(getGlobal()->get(es::getInterface(Object::iid()).getName())->get("prototype")); // XXX Should use IID
                    value = instance;
                }
                else
                {
                    value = NullValue::getInstance();
                }
                break;
            default:
                value = NullValue::getInstance();
                break;
            }
        }
        break;
    case Reflect::kBoolean:
        value = BoolValue::getInstance(static_cast<bool>(apply(argc, argv, (bool (*)()) ((*self)[methodNumber]))));
        break;
    case Reflect::kOctet:
        value = new NumberValue(static_cast<uint8_t>(apply(argc, argv, (uint8_t (*)()) ((*self)[methodNumber]))));
        break;
    case Reflect::kShort:
        value = new NumberValue(static_cast<int16_t>(apply(argc, argv, (int16_t (*)()) ((*self)[methodNumber]))));
        break;
    case Reflect::kUnsignedShort:
        value = new NumberValue(static_cast<uint16_t>(apply(argc, argv, (uint16_t (*)()) ((*self)[methodNumber]))));
        break;
    case Reflect::kLong:
        value = new NumberValue(static_cast<int32_t>(apply(argc, argv, (int32_t (*)()) ((*self)[methodNumber]))));
        break;
    case Reflect::kUnsignedLong:
        value = new NumberValue(static_cast<uint32_t>(apply(argc, argv, (uint32_t (*)()) ((*self)[methodNumber]))));
        break;
    case Reflect::kLongLong:
        value = new NumberValue(static_cast<int64_t>(apply(argc, argv, (int64_t (*)()) ((*self)[methodNumber]))));
        break;
    case Reflect::kUnsignedLongLong:
        value = new NumberValue(static_cast<uint64_t>(apply(argc, argv, (uint64_t (*)()) ((*self)[methodNumber]))));
        break;
    case Reflect::kFloat:
        value = new NumberValue(static_cast<float>(apply(argc, argv, (float (*)()) ((*self)[methodNumber]))));
        break;
    case Reflect::kDouble:
        value = new NumberValue(apply(argc, argv, (double (*)()) ((*self)[methodNumber])));
        break;
    case Reflect::kPointer:
        value = new NumberValue(static_cast<intptr_t>(apply(argc, argv, (intptr_t (*)()) ((*self)[methodNumber]))));
        break;
    case Reflect::kString:
        {
            // The native side filled `heap`; null result maps to null value.
            heap[0] = '\0';
            Any result = apply(argc, argv, (const char* (*)()) ((*self)[methodNumber]));
            if (const char* string = static_cast<const char*>(result))
            {
                value = new StringValue(string);
            }
            else
            {
                value = NullValue::getInstance();
            }
        }
        break;
    case Reflect::kSequence:
        {
            // XXX Assume sequence<octet> now...
            // Return value is the element count written into `heap`.
            int32_t count = apply(argc, argv, (int32_t (*)()) ((*self)[methodNumber]));
            if (count < 0)
            {
                count = 0;
            }
            heap[count] = '\0';
            value = new StringValue(heap);
        }
        break;
    case Reflect::kObject:
        if (Object* unknown = apply(argc, argv, (Object* (*)()) ((*self)[methodNumber])))
        {
            ObjectValue* instance = new InterfacePointerValue(unknown);
            // TODO: check Object and others
            instance->setPrototype(getGlobal()->get(es::getInterface(returnType.getQualifiedName().c_str()).getName())->get("prototype")); // XXX Should use IID
            value = instance;
        }
        else
        {
            value = NullValue::getInstance();
        }
        break;
    case Reflect::kVoid:
        apply(argc, argv, (int32_t (*)()) ((*self)[methodNumber]));
        value = NullValue::getInstance();
        break;
    }
    return value;
}
// Convenience overload: unwraps the native vtable from an
// InterfacePointerValue, performs the call, and clears the wrapper after a
// successful Object::release() so the freed pointer cannot be reused.
static Value* invoke(const char* iid, int number, InterfacePointerValue* object, ListValue* list)
{
    InterfaceMethod** methods = reinterpret_cast<InterfaceMethod**>(object->getObject());
    if (!methods)
    {
        throw getErrorInstance("TypeError");
    }
    Value* result = invoke(iid, number, methods, list);
    // Object::release() is method #2 of the base interface.
    if (number == 2 && strcmp(iid, Object::iid()) == 0)
    {
        object->clearObject();
    }
    return result;
}
//
// AttributeValue
//
class AttributeValue : public ObjectValue
{
bool readOnly;
const char* iid;
int getter; // Method number
int setter; // Method number
public:
AttributeValue(const char* iid) :
readOnly(true),
iid(iid),
getter(0),
setter(0)
{
}
~AttributeValue()
{
}
void addSetter(int number)
{
readOnly = false;
setter = number;
}
void addGetter(int number)
{
getter = number;
}
// Getter
virtual Value* get(Value* self)
{
if (dynamic_cast<InterfacePointerValue*>(self))
{
Register<ListValue> list = new ListValue;
return invoke(iid, getter, static_cast<InterfacePointerValue*>(self), list);
}
else
{
return this;
}
}
// Setter
virtual bool put(Value* self, Value* value)
{
if (dynamic_cast<InterfacePointerValue*>(self) && !readOnly)
{
Register<ListValue> list = new ListValue;
list->push(value);
invoke(iid, setter, static_cast<InterfacePointerValue*>(self), list);
}
return true;
}
};
//
// InterfaceMethodCode
//

// Code object that exposes one reflected interface operation as an
// ECMAScript function: evaluate() forwards `this` and the `arguments`
// list to invoke().
class InterfaceMethodCode : public Code
{
    FormalParameterList* arguments;  // formal parameter list given to the function object
    ObjectValue* prototype;          // function's prototype object
    const char* iid;                 // owning interface IID
    int number;         // Method number

public:
    InterfaceMethodCode(ObjectValue* object, const char* iid, int number) :
        arguments(new FormalParameterList),
        prototype(new ObjectValue),
        iid(iid),
        number(number)
    {
        Reflect::Interface interface = es::getInterface(iid);
        Reflect::Method method(interface.getMethod(number));

#if 0
        // Add as many arguments as required.
        for (int i = 0; i < method.getParameterCount(); ++i)
        {
            Reflect::Parameter param(method.getParameter(i));
            if (param.isInput())
            {
                // Note the name "arguments" is reserved in a ECMAScript function.
                ASSERT(strcmp(param.getName(), "arguments") != 0);
                arguments->add(new Identifier(param.getName()));
            }
        }
#endif
        object->setParameterList(arguments);
        object->setScope(getGlobal());

        // Create Interface.prototype
        prototype->put("constructor", object);
        object->put("prototype", prototype);
    }

    ~InterfaceMethodCode()
    {
        // NOTE(review): `arguments` was also handed to setParameterList();
        // deleting it here assumes the function object does not outlive this
        // code object — confirm ownership before changing.
        delete arguments;
    }

    CompletionType evaluate()
    {
        // `this` must be a wrapped native interface pointer.
        InterfacePointerValue* object = dynamic_cast<InterfacePointerValue*>(getThis());
        if (!object)
        {
            throw getErrorInstance("TypeError");
        }
        // The actual argument list is bound under "arguments" in the scope.
        ListValue* list = static_cast<ListValue*>(getScopeChain()->get("arguments"));
        Register<Value> value = invoke(iid, number, object, list);
        return CompletionType(CompletionType::Return, value, "");
    }
};
//
// AttributeSetterValue
//
class AttributeSetterValue : public ObjectValue
{
const char* iid;
int number; // Method number
public:
AttributeSetterValue(const char* iid, int number) :
iid(iid),
number(number)
{
}
~AttributeSetterValue()
{
}
void put(Value* self, const std::string& name, Value* value)
{
Register<ListValue> list = new ListValue;
Register<StringValue> ident = new StringValue(name);
list->push(ident);
list->push(value);
invoke(iid, number, static_cast<InterfacePointerValue*>(self), list);
return;
}
};
//
// AttributeGetterValue
//

// Adapter for a reflected name/index getter operation: forwards
// self.op(name) to the native method and returns its result.
class AttributeGetterValue : public ObjectValue
{
    const char* iid; // interface declaring the getter
    int number;      // method number of the getter

public:
    AttributeGetterValue(const char* iid, int number) :
        iid(iid),
        number(number)
    {
    }

    ~AttributeGetterValue()
    {
    }

    Value* get(Value* self, const std::string& name)
    {
        Register<ListValue> callArgs = new ListValue;
        Register<StringValue> propertyName = new StringValue(name);
        callArgs->push(propertyName);
        return invoke(iid, number, static_cast<InterfacePointerValue*>(self), callArgs);
    }
};
//
// InterfacePrototypeValue
//
class InterfacePrototypeValue : public ObjectValue
{
enum OpObject {
IndexGetter,
IndexSetter,
NameGetter,
NameSetter,
ObjectCount
};
ObjectValue* opObjects[ObjectCount];
public:
InterfacePrototypeValue()
{
for (int i = 0; i < ObjectCount; ++i)
{
opObjects[i] = 0;
}
}
~InterfacePrototypeValue()
{
}
void setOpObject(int op, ObjectValue* object)
{
ASSERT(op >= 0 && op < ObjectCount);
opObjects[op] = object;
}
ObjectValue* getOpObject(int op)
{
ASSERT(op >= 0 && op < ObjectCount);
return opObjects[op];
}
friend class InterfaceConstructor;
friend class InterfacePointerValue;
};
//
// InterfaceConstructor
//

// Code object behind each interface's constructor function.  Construction
// walks the interface's reflected methods and populates the prototype with
// method functions and attribute accessors; evaluation either invokes the
// interface's default native constructor (new-expression) or casts an
// existing interface pointer to this interface (plain call).
class InterfaceConstructor : public Code
{
    ObjectValue* constructor;            // the ECMAScript function object this code backs
    FormalParameterList* arguments;      // single formal parameter: "object"
    InterfacePrototypeValue* prototype;  // prototype shared by all instances
    std::string iid;                     // interface ID this constructor represents

public:
    InterfaceConstructor(ObjectValue* object, std::string iid) :
        constructor(object),
        arguments(new FormalParameterList),
        prototype(new InterfacePrototypeValue),
        iid(iid)
    {
        arguments->add(new Identifier("object"));
        object->setParameterList(arguments);
        object->setScope(getGlobal());

        Reflect::Interface interface = es::getInterface(iid.c_str());
        // PRINTF("interface: %s\n", interface.getName().c_str());
        for (int i = 0; i < interface.getMethodCount(); ++i)
        {
            // Construct Method object
            Reflect::Method method(interface.getMethod(i));
            if (prototype->hasProperty(method.getName()))
            {
                if (method.isOperation())
                {
                    // XXX Currently overloaded functions are just ignored.
                }
                else
                {
                    // Second half of a getter/setter pair: extend the
                    // AttributeValue the first half created below.
                    AttributeValue* attribute = static_cast<AttributeValue*>(prototype->get(method.getName()));
                    if (method.isGetter())
                    {
                        attribute->addGetter(i);
                    }
                    else
                    {
                        attribute->addSetter(i);
                    }
                }
            }
            else
            {
                if (method.isOperation())
                {
                    // Plain operation: expose as a function on the prototype.
                    ObjectValue* function = new ObjectValue;
                    function->setCode(new InterfaceMethodCode(function, iid.c_str(), i));
                    prototype->put(method.getName(), function);
#if 0
                    if (method.isIndexGetter())
                    {
                        AttributeGetterValue* getter = new AttributeGetterValue(iid, i);
                        prototype->setOpObject(InterfacePrototypeValue::IndexGetter, getter);
                    }
                    else if (method.isIndexSetter())
                    {
                        AttributeSetterValue* setter = new AttributeSetterValue(iid, i);
                        prototype->setOpObject(InterfacePrototypeValue::IndexSetter, setter);
                    }
                    else if (method.isNameGetter())
                    {
                        AttributeGetterValue* getter = new AttributeGetterValue(iid, i);
                        prototype->setOpObject(InterfacePrototypeValue::NameGetter, getter);
                    }
                    else if (method.isNameSetter())
                    {
                        AttributeSetterValue* setter = new AttributeSetterValue(iid, i);
                        prototype->setOpObject(InterfacePrototypeValue::NameSetter, setter);
                    }
#endif
                }
                else
                {
                    // method is an attribute
                    AttributeValue* attribute = new AttributeValue(iid.c_str());
                    if (method.isGetter())
                    {
                        attribute->addGetter(i);
                    }
                    else
                    {
                        attribute->addSetter(i);
                    }
                    prototype->put(method.getName(), attribute);
                }
            }
        }

        // Chain the prototype to the superinterface's prototype, or to the
        // interface store's root prototype for top-level interfaces.
        if (interface.getQualifiedSuperName() == "")
        {
            prototype->setPrototype(getGlobal()->get("InterfaceStore")->getPrototype()->getPrototype());
        }
        else
        {
            Reflect::Interface super = es::getInterface(interface.getQualifiedSuperName().c_str());
            prototype->setPrototype(getGlobal()->get(super.getName())->get("prototype"));
        }

        // Create Interface.prototype
        prototype->put("constructor", object);
        object->put("prototype", prototype);
    }

    ~InterfaceConstructor()
    {
        // NOTE(review): `arguments` was also handed to setParameterList();
        // confirm ownership before changing this.
        delete arguments;
    }

    // Query interface for this interface.
    CompletionType evaluate()
    {
        if (constructor->hasInstance(getThis()))
        {
            // Constructor
            // NOTE(review): this local deliberately(?) shadows the
            // `constructor` member above.
            Object* constructor = es::getConstructor(iid.c_str());
            if (!constructor)
            {
                throw getErrorInstance("TypeError");
            }
            // TODO: Currently only the default constructor is supported
            std::string ciid = iid;
            ciid += "::Constructor";
            Value* value = invoke(ciid.c_str(), 0, reinterpret_cast<InterfaceMethod**>(constructor), 0);
            return CompletionType(CompletionType::Return, value, "");
        }
        else
        {
            // Cast
            InterfacePointerValue* self = dynamic_cast<InterfacePointerValue*>(getScopeChain()->get("object"));
            if (!self)
            {
                throw getErrorInstance("TypeError");
            }

            Object* object;
            object = self->getObject();
            if (!object || !(object = reinterpret_cast<Object*>(object->queryInterface(iid.c_str()))))
            {
                // We should throw an error in case called by a new expression.
                throw getErrorInstance("TypeError");
            }

            ObjectValue* value = new InterfacePointerValue(object);
            value->setPrototype(prototype);
            return CompletionType(CompletionType::Return, value, "");
        }
    }
};
//
// InterfaceStoreConstructor
//

// Code object behind the global InterfaceStore function: evaluating
// `InterfaceStore(iid)` manufactures an interface constructor object for
// the interface registered under `iid`.
class InterfaceStoreConstructor : public Code
{
    FormalParameterList* arguments;  // single formal parameter: "iid"
    ObjectValue* prototype;     // Interface.prototype

public:
    InterfaceStoreConstructor(ObjectValue* object) :
        arguments(new FormalParameterList),
        prototype(new ObjectValue)
    {
        ObjectValue* function = static_cast<ObjectValue*>(getGlobal()->get("Function"));

        arguments->add(new Identifier("iid"));
        object->setParameterList(arguments);
        object->setScope(getGlobal());

        // Create Interface.prototype
        prototype->setPrototype(function->getPrototype()->getPrototype());
        prototype->put("constructor", object);
        object->put("prototype", prototype);
        object->setPrototype(function->getPrototype());
    }

    ~InterfaceStoreConstructor()
    {
        delete arguments;
    }

    CompletionType evaluate()
    {
        Value* value = getScopeChain()->get("iid");
        if (!value->isString())
        {
            throw getErrorInstance("TypeError");
        }

        // Fix: keep the IID characters alive for the whole call.  The
        // previous code took c_str() of the temporary string returned by
        // toString(), leaving the pointer dangling before its first use.
        std::string iid = value->toString();

        // Reject IIDs unknown to the interface store.
        Reflect::Interface interface;
        try
        {
            interface = es::getInterface(iid.c_str());
        }
        catch (...)
        {
            throw getErrorInstance("TypeError");
        }

        // Construct Interface Object
        ObjectValue* object = new ObjectValue;
        object->setCode(new InterfaceConstructor(object, iid));
        object->setPrototype(prototype);
        return CompletionType(CompletionType::Return, object, "");
    }
};
// Returns true when `name` denotes a numeric index: either all decimal
// digits, or a "0x"/"0X" prefix followed by at least one hexadecimal digit.
//
// Fixes over the previous version:
// - the empty string and a bare "0x" prefix are rejected (both used to
//   return true, misrouting ordinary property access to the index getter);
// - characters are cast to unsigned char before isdigit()/isxdigit(),
//   which is undefined for negative char values.
static bool isIndexAccessor(const std::string& name)
{
    const char* ptr = name.c_str();
    bool hex = false;
    if (strncasecmp(ptr, "0x", 2) == 0)
    {
        ptr += 2;
        hex = true;
    }
    if (*ptr == '\0')
    {
        // Empty string, or "0x" with no digits after it.
        return false;
    }
    for (; *ptr; ++ptr)
    {
        unsigned char c = static_cast<unsigned char>(*ptr);
        if (hex ? !isxdigit(c) : !isdigit(c))
        {
            return false;
        }
    }
    return true;
}
// Property lookup on a wrapped interface pointer: own properties win, then
// the index getter (for numeric names), then the name getter, and finally
// the ordinary prototype chain.
Value* InterfacePointerValue::get(const std::string& name)
{
    if (hasProperty(name))
    {
        return ObjectValue::get(name);
    }
    InterfacePrototypeValue* proto = static_cast<InterfacePrototypeValue*>(prototype);
    AttributeGetterValue* getter =
        static_cast<AttributeGetterValue*>(proto->getOpObject(InterfacePrototypeValue::IndexGetter));
    if (getter && isIndexAccessor(name))
    {
        return getter->get(this, name);
    }
    getter = static_cast<AttributeGetterValue*>(proto->getOpObject(InterfacePrototypeValue::NameGetter));
    if (getter)
    {
        return getter->get(this, name);
    }
    return ObjectValue::get(name);
}
void InterfacePointerValue::put(const std::string& name, Value* value, int attributes)
{
InterfacePrototypeValue* proto = static_cast<InterfacePrototypeValue*>(prototype);
AttributeSetterValue* setter;
if (!canPut(name))
{
return;
}
if (hasProperty(name))
{
ObjectValue::put(name, value, attributes);
}
else if ((setter = static_cast<AttributeSetterValue*>(proto->getOpObject(InterfacePrototypeValue::IndexSetter)))
&& isIndexAccessor(name))
{
setter->put(this, name, value);
}
else if ((setter = static_cast<AttributeSetterValue*>(proto->getOpObject(InterfacePrototypeValue::NameSetter))))
{
setter->put(this, name, value);
}
else
{
ObjectValue::put(name, value, attributes);
}
}
// Builds the global InterfaceStore function object; calling it with an IID
// yields an interface constructor (see InterfaceStoreConstructor).
ObjectValue* constructInterfaceObject()
{
    ObjectValue* storeFunction = new ObjectValue;
    storeFunction->setCode(new InterfaceStoreConstructor(storeFunction));
    return storeFunction;
}
// Registers a default constructor object in the global scope for every
// interface known to the interface store, then wraps the current process
// as the System object and returns it.
ObjectValue* constructSystemObject(void* system)
{
    for (es::InterfaceData* data = es::interfaceData; data->iid; ++data)
    {
        // Construct the default interface object for this IID.
        Reflect::Interface interface = es::getInterface(data->iid());
        PRINTF("%s\n", interface.getName().c_str());
        ObjectValue* interfaceObject = new ObjectValue;
        interfaceObject->setCode(new InterfaceConstructor(interfaceObject, interface.getQualifiedName()));
        interfaceObject->setPrototype(getGlobal()->get("InterfaceStore")->getPrototype());
        getGlobal()->put(interface.getName(), interfaceObject);
    }
    // Hold a reference for the wrapper handed back to the script world.
    System()->addRef();
    ObjectValue* systemValue = new InterfacePointerValue(System());
    systemValue->setPrototype(getGlobal()->get("CurrentProcess")->get("prototype"));
    return systemValue;
}
| apache-2.0 |
narayan2903/cc-commercialpaper | vendor/github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example05/chaincode_example05_test.go | 4062 | /*
Copyright IBM Corp. 2016 All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"fmt"
"testing"
"github.com/hyperledger/fabric/core/chaincode/shim"
ex02 "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02"
)
// chaincode_example02's hash is used here and must be updated if the example is changed
// NOTE(review): despite the word "hash", this is the chaincode URL/path that
// is passed to MockPeerChaincode and to the cross-chaincode calls below.
var example02Url = "github.com/hyperledger/fabric/core/example/chaincode/chaincode_example02"
// chaincode_example05 looks like it wanted to return a JSON response to Query()
// it doesn't actually do this though, it just returns the sum value
func jsonResponse(name string, value string) string {
return fmt.Sprintf("jsonResponse = \"{\"Name\":\"%v\",\"Value\":\"%v\"}", name, value)
}
// checkInit runs MockInit with the given args and aborts the test on error.
func checkInit(t *testing.T, stub *shim.MockStub, args []string) {
	if _, err := stub.MockInit("1", "init", args); err != nil {
		fmt.Println("Init failed", err)
		t.FailNow()
	}
}
// checkState asserts that key `name` exists in the mock ledger state and
// holds exactly `expect`.
func checkState(t *testing.T, stub *shim.MockStub, name string, expect string) {
	stored := stub.State[name]
	switch {
	case stored == nil:
		fmt.Println("State", name, "failed to get value")
		t.FailNow()
	case string(stored) != expect:
		fmt.Println("State value", name, "was not", expect, "as expected")
		t.FailNow()
	}
}
// checkQuery runs MockQuery and asserts a non-nil result equal to `expect`,
// aborting the test otherwise.
func checkQuery(t *testing.T, stub *shim.MockStub, args []string, expect string) {
	result, err := stub.MockQuery("query", args)
	if err != nil {
		fmt.Println("Query", args, "failed", err)
		t.FailNow()
	}
	if result == nil {
		fmt.Println("Query", args, "failed to get result")
		t.FailNow()
	}
	if string(result) != expect {
		fmt.Println("Query result ", string(result), "was not", expect, "as expected")
		t.FailNow()
	}
}
// checkInvoke runs MockInvoke with the given args and aborts the test on
// error. (The function name passed is "query", matching the original.)
func checkInvoke(t *testing.T, stub *shim.MockStub, args []string) {
	if _, err := stub.MockInvoke("1", "query", args); err != nil {
		fmt.Println("Invoke", args, "failed", err)
		t.FailNow()
	}
}
// TestExample04_Init verifies that Init stores the initial sum value.
// NOTE(review): the test name says 04 but this exercises example05.
func TestExample04_Init(t *testing.T) {
	scc := new(SimpleChaincode)
	stub := shim.NewMockStub("ex05", scc)

	// Init sumStoreName=432 and confirm it was written to ledger state.
	checkInit(t, stub, []string{"sumStoreName", "432"})

	checkState(t, stub, "sumStoreName", "432")
}
// TestExample04_Query wires a mock example02 chaincode into the example05
// stub and checks that querying sums example02's two stored values.
func TestExample04_Query(t *testing.T) {
	scc := new(SimpleChaincode)
	stub := shim.NewMockStub("ex05", scc)

	ccEx2 := new(ex02.SimpleChaincode)
	stubEx2 := shim.NewMockStub("ex02", ccEx2)
	checkInit(t, stubEx2, []string{"a", "111", "b", "222"})

	// Route example05's cross-chaincode calls to the example02 stub.
	stub.MockPeerChaincode(example02Url, stubEx2)

	checkInit(t, stub, []string{"sumStoreName", "0"})

	// a + b = 111 + 222 = 333
	checkQuery(t, stub, []string{example02Url, "sumStoreName"}, "333") // example05 doesn't return JSON?
}
// TestExample04_Invoke checks that invoking example05 recomputes and stores
// the sum, and that the sum is invariant under an example02 transfer
// (a -= 10, b += 10 leaves a + b unchanged).
func TestExample04_Invoke(t *testing.T) {
	scc := new(SimpleChaincode)
	stub := shim.NewMockStub("ex05", scc)

	ccEx2 := new(ex02.SimpleChaincode)
	stubEx2 := shim.NewMockStub("ex02", ccEx2)
	checkInit(t, stubEx2, []string{"a", "222", "b", "333"})

	// Route example05's cross-chaincode calls to the example02 stub.
	stub.MockPeerChaincode(example02Url, stubEx2)

	checkInit(t, stub, []string{"sumStoreName", "0"})

	// a + b = 222 + 333 = 555
	checkInvoke(t, stub, []string{example02Url, "sumStoreName"})
	checkQuery(t, stub, []string{example02Url, "sumStoreName"}, "555") // example05 doesn't return JSON?
	checkQuery(t, stubEx2, []string{"a"}, "222")
	checkQuery(t, stubEx2, []string{"b"}, "333")

	// update A-=10 and B+=10
	checkInvoke(t, stubEx2, []string{"a", "b", "10"})

	// a + b = 212 + 343 = 555
	checkInvoke(t, stub, []string{example02Url, "sumStoreName"})
	checkQuery(t, stub, []string{example02Url, "sumStoreName"}, "555") // example05 doesn't return JSON?
	checkQuery(t, stubEx2, []string{"a"}, "212")
	checkQuery(t, stubEx2, []string{"b"}, "343")
}
| apache-2.0 |
vinodkc/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala | 100019 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.optimizer
import scala.collection.mutable
import org.apache.spark.sql.catalyst.analysis._
import org.apache.spark.sql.catalyst.catalog.{InMemoryCatalog, SessionCatalog}
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate._
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.plans.logical.{RepartitionOperation, _}
import org.apache.spark.sql.catalyst.rules._
import org.apache.spark.sql.catalyst.trees.AlwaysProcess
import org.apache.spark.sql.catalyst.trees.TreePattern._
import org.apache.spark.sql.connector.catalog.CatalogManager
import org.apache.spark.sql.errors.QueryCompilationErrors
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
import org.apache.spark.sql.util.SchemaUtils._
import org.apache.spark.util.Utils
/**
* Abstract class all optimizers should inherit of, contains the standard batches (extending
* Optimizers can override this.
*/
abstract class Optimizer(catalogManager: CatalogManager)
extends RuleExecutor[LogicalPlan] {
// Check for structural integrity of the plan in test mode.
// Currently we check after the execution of each rule if a plan:
// - is still resolved
// - only host special expressions in supported operators
// - has globally-unique attribute IDs
// - optimized plan have same schema with previous plan.
override protected def isPlanIntegral(
    previousPlan: LogicalPlan,
    currentPlan: LogicalPlan): Boolean = {
  // Short-circuits to true outside of testing so production runs skip the
  // (expensive) whole-plan traversals below.
  !Utils.isTesting || (currentPlan.resolved &&
    currentPlan.find(PlanHelper.specialExpressionsInUnsupportedOperator(_).nonEmpty).isEmpty &&
    LogicalPlanIntegrity.checkIfExprIdsAreGloballyUnique(currentPlan) &&
    DataType.equalsIgnoreNullability(previousPlan.schema, currentPlan.schema))
}
// Once-strategy batches that are intentionally exempt from the
// RuleExecutor's idempotence check for Once batches.
override protected val excludedOnceBatches: Set[String] =
  Set(
    "PartitionPruning",
    "Extract Python UDFs")
// Fixed-point strategy shared by the optimizer batches, bounded by the
// spark.sql.optimizer.maxIterations setting.
protected def fixedPoint =
  FixedPoint(
    SQLConf.get.optimizerMaxIterations,
    maxIterationsSetting = SQLConf.OPTIMIZER_MAX_ITERATIONS.key)
/**
* Defines the default rule batches in the Optimizer.
*
* Implementations of this class should override this method, and [[nonExcludableRules]] if
* necessary, instead of [[batches]]. The rule batches that eventually run in the Optimizer,
* i.e., returned by [[batches]], will be (defaultBatches - (excludedRules - nonExcludableRules)).
*/
/**
 * Defines the default rule batches in the Optimizer.
 *
 * Implementations should override this method (and [[nonExcludableRules]] if necessary)
 * instead of [[batches]]; the batches that actually run are computed by [[batches]] as
 * (defaultBatches - (excludedRules - nonExcludableRules)).
 */
def defaultBatches: Seq[Batch] = {
  // This rule set is applied twice — once before and once after filter inference (see
  // operatorOptimizationBatch below) — because inferred filters can enable further pushdown.
  val operatorOptimizationRuleSet =
    Seq(
      // Operator push down
      PushProjectionThroughUnion,
      ReorderJoin,
      EliminateOuterJoin,
      PushDownPredicates,
      PushDownLeftSemiAntiJoin,
      PushLeftSemiLeftAntiThroughJoin,
      LimitPushDown,
      LimitPushDownThroughWindow,
      ColumnPruning,
      GenerateOptimization,
      // Operator combine
      CollapseRepartition,
      CollapseProject,
      OptimizeWindowFunctions,
      CollapseWindow,
      CombineFilters,
      EliminateLimits,
      CombineUnions,
      // Constant folding and strength reduction
      OptimizeRepartition,
      TransposeWindow,
      NullPropagation,
      NullDownPropagation,
      ConstantPropagation,
      FoldablePropagation,
      OptimizeIn,
      ConstantFolding,
      EliminateAggregateFilter,
      ReorderAssociativeOperator,
      LikeSimplification,
      BooleanSimplification,
      SimplifyConditionals,
      PushFoldableIntoBranches,
      RemoveDispensableExpressions,
      SimplifyBinaryComparison,
      ReplaceNullWithFalseInPredicate,
      SimplifyConditionalsInPredicate,
      PruneFilters,
      SimplifyCasts,
      SimplifyCaseConversionExpressions,
      RewriteCorrelatedScalarSubquery,
      RewriteLateralSubquery,
      EliminateSerialization,
      RemoveRedundantAliases,
      RemoveRedundantAggregates,
      UnwrapCastInBinaryComparison,
      RemoveNoopOperators,
      OptimizeUpdateFields,
      SimplifyExtractValueOps,
      OptimizeCsvJsonExprs,
      CombineConcats) ++
      extendedOperatorOptimizationRules
  val operatorOptimizationBatch: Seq[Batch] = {
    Batch("Operator Optimization before Inferring Filters", fixedPoint,
      operatorOptimizationRuleSet: _*) ::
    Batch("Infer Filters", Once,
      InferFiltersFromGenerate,
      InferFiltersFromConstraints) ::
    Batch("Operator Optimization after Inferring Filters", fixedPoint,
      operatorOptimizationRuleSet: _*) ::
    // Set strategy to Once to avoid pushing filter every time because we do not change the
    // join condition.
    // NOTE(review): the strategy below is fixedPoint, not Once — the comment above appears to
    // predate a strategy change; confirm against rule history.
    Batch("Push extra predicate through join", fixedPoint,
      PushExtraPredicateThroughJoin,
      PushDownPredicates) :: Nil
  }
  val batches = (Batch("Eliminate Distinct", Once, EliminateDistinct) ::
    // Technically some of the rules in Finish Analysis are not optimizer rules and belong more
    // in the analyzer, because they are needed for correctness (e.g. ComputeCurrentTime).
    // However, because we also use the analyzer to canonicalized queries (for view definition),
    // we do not eliminate subqueries or compute current time in the analyzer.
    Batch("Finish Analysis", Once,
      EliminateResolvedHint,
      EliminateSubqueryAliases,
      EliminateView,
      InlineCTE,
      ReplaceExpressions,
      RewriteNonCorrelatedExists,
      PullOutGroupingExpressions,
      ComputeCurrentTime,
      ReplaceCurrentLike(catalogManager),
      SpecialDatetimeValues,
      RewriteAsOfJoin) ::
    //////////////////////////////////////////////////////////////////////////////////////////
    // Optimizer rules start here
    //////////////////////////////////////////////////////////////////////////////////////////
    // - Do the first call of CombineUnions before starting the major Optimizer rules,
    //   since it can reduce the number of iteration and the other rules could add/move
    //   extra operators between two adjacent Union operators.
    // - Call CombineUnions again in Batch("Operator Optimizations"),
    //   since the other rules might make two separate Unions operators adjacent.
    Batch("Union", Once,
      RemoveNoopOperators,
      CombineUnions,
      RemoveNoopUnion) ::
    Batch("OptimizeLimitZero", Once,
      OptimizeLimitZero) ::
    // Run this once earlier. This might simplify the plan and reduce cost of optimizer.
    // For example, a query such as Filter(LocalRelation) would go through all the heavy
    // optimizer rules that are triggered when there is a filter
    // (e.g. InferFiltersFromConstraints). If we run this batch earlier, the query becomes just
    // LocalRelation and does not trigger many rules.
    Batch("LocalRelation early", fixedPoint,
      ConvertToLocalRelation,
      PropagateEmptyRelation,
      // PropagateEmptyRelation can change the nullability of an attribute from nullable to
      // non-nullable when an empty relation child of a Union is removed
      UpdateAttributeNullability) ::
    Batch("Pullup Correlated Expressions", Once,
      OptimizeOneRowRelationSubquery,
      PullupCorrelatedPredicates) ::
    // Subquery batch applies the optimizer rules recursively. Therefore, it makes no sense
    // to enforce idempotence on it and we change this batch from Once to FixedPoint(1).
    Batch("Subquery", FixedPoint(1),
      OptimizeSubqueries) ::
    Batch("Replace Operators", fixedPoint,
      RewriteExceptAll,
      RewriteIntersectAll,
      ReplaceIntersectWithSemiJoin,
      ReplaceExceptWithFilter,
      ReplaceExceptWithAntiJoin,
      ReplaceDistinctWithAggregate,
      ReplaceDeduplicateWithAggregate) ::
    Batch("Aggregate", fixedPoint,
      RemoveLiteralFromGroupExpressions,
      RemoveRepetitionFromGroupExpressions) :: Nil ++
    operatorOptimizationBatch) :+
    // This batch rewrites plans after the operator optimization and
    // before any batches that depend on stats.
    Batch("Pre CBO Rules", Once, preCBORules: _*) :+
    // This batch pushes filters and projections into scan nodes. Before this batch, the logical
    // plan may contain nodes that do not report stats. Anything that uses stats must run after
    // this batch.
    Batch("Early Filter and Projection Push-Down", Once, earlyScanPushDownRules: _*) :+
    Batch("Update CTE Relation Stats", Once, UpdateCTERelationStats) :+
    // Since join costs in AQP can change between multiple runs, there is no reason that we have an
    // idempotence enforcement on this batch. We thus make it FixedPoint(1) instead of Once.
    Batch("Join Reorder", FixedPoint(1),
      CostBasedJoinReorder) :+
    Batch("Eliminate Sorts", Once,
      EliminateSorts) :+
    Batch("Decimal Optimizations", fixedPoint,
      DecimalAggregates) :+
    // This batch must run after "Decimal Optimizations", as that one may change the
    // aggregate distinct column
    Batch("Distinct Aggregate Rewrite", Once,
      RewriteDistinctAggregates) :+
    Batch("Object Expressions Optimization", fixedPoint,
      EliminateMapObjects,
      CombineTypedFilters,
      ObjectSerializerPruning,
      ReassignLambdaVariableID) :+
    Batch("LocalRelation", fixedPoint,
      ConvertToLocalRelation,
      PropagateEmptyRelation,
      // PropagateEmptyRelation can change the nullability of an attribute from nullable to
      // non-nullable when an empty relation child of a Union is removed
      UpdateAttributeNullability) :+
    Batch("Optimize One Row Plan", fixedPoint, OptimizeOneRowPlan) :+
    // The following batch should be executed after batch "Join Reorder" and "LocalRelation".
    Batch("Check Cartesian Products", Once,
      CheckCartesianProducts) :+
    Batch("RewriteSubquery", Once,
      RewritePredicateSubquery,
      ColumnPruning,
      CollapseProject,
      RemoveRedundantAliases,
      RemoveNoopOperators) :+
    // This batch must be executed after the `RewriteSubquery` batch, which creates joins.
    Batch("NormalizeFloatingNumbers", Once, NormalizeFloatingNumbers) :+
    Batch("ReplaceUpdateFieldsExpression", Once, ReplaceUpdateFieldsExpression)
  // remove any batches with no rules. this may happen when subclasses do not add optional rules.
  batches.filter(_.rules.nonEmpty)
}
/**
* Defines rules that cannot be excluded from the Optimizer even if they are specified in
* SQL config "excludedRules".
*
* Implementations of this class can override this method if necessary. The rule batches
* that eventually run in the Optimizer, i.e., returned by [[batches]], will be
* (defaultBatches - (excludedRules - nonExcludableRules)).
*/
def nonExcludableRules: Seq[String] = Seq(
  EliminateDistinct,
  EliminateResolvedHint,
  EliminateSubqueryAliases,
  EliminateView,
  ReplaceExpressions,
  ComputeCurrentTime,
  SpecialDatetimeValues,
  ReplaceCurrentLike(catalogManager),
  RewriteDistinctAggregates,
  ReplaceDeduplicateWithAggregate,
  ReplaceIntersectWithSemiJoin,
  ReplaceExceptWithFilter,
  ReplaceExceptWithAntiJoin,
  RewriteExceptAll,
  RewriteIntersectAll,
  ReplaceDistinctWithAggregate,
  PullupCorrelatedPredicates,
  RewriteCorrelatedScalarSubquery,
  RewritePredicateSubquery,
  NormalizeFloatingNumbers,
  ReplaceUpdateFieldsExpression,
  PullOutGroupingExpressions,
  RewriteAsOfJoin,
  RewriteLateralSubquery
  // Derive the rule names from the rule instances themselves so a rename of a rule class
  // cannot silently diverge from this list.
).map(_.ruleName)
/**
* Optimize all the subqueries inside expression.
*/
object OptimizeSubqueries extends Rule[LogicalPlan] {
  // Strips a top-level Sort (possibly below a chain of Projects) from the optimized subquery
  // plan. Only descends through Project: other nodes may rely on their child's ordering.
  private def removeTopLevelSort(plan: LogicalPlan): LogicalPlan = {
    // Fast path: nothing to do if no Sort appears anywhere in the subtree.
    if (!plan.containsPattern(SORT)) {
      return plan
    }
    plan match {
      case Sort(_, _, child) => child
      case Project(fields, child) => Project(fields, removeTopLevelSort(child))
      case other => other
    }
  }
  def apply(plan: LogicalPlan): LogicalPlan = plan.transformAllExpressionsWithPruning(
    _.containsPattern(PLAN_EXPRESSION), ruleId) {
    case s: SubqueryExpression =>
      // Recursively run the full optimizer on the subquery plan, wrapped in a Subquery node so
      // rules can tell they are inside a subquery.
      val Subquery(newPlan, _) = Optimizer.this.execute(Subquery.fromExpression(s))
      // At this point we have an optimized subquery plan that we are going to attach
      // to this subquery expression. Here we can safely remove any top level sort
      // in the plan as tuples produced by a subquery are un-ordered.
      s.withNewPlan(removeTopLevelSort(newPlan))
  }
}
/**
* Update CTE reference stats.
*/
object UpdateCTERelationStats extends Rule[LogicalPlan] {
  override def apply(plan: LogicalPlan): LogicalPlan = {
    // Skip subquery plans (handled when their parent plan is processed) and plans without CTEs.
    if (!plan.isInstanceOf[Subquery] && plan.containsPattern(CTE)) {
      val statsMap = mutable.HashMap.empty[Long, Statistics]
      updateCTEStats(plan, statsMap)
    } else {
      plan
    }
  }
  // Walks the plan top-down, recording each CTE definition's stats in `statsMap` and attaching
  // the recorded stats to every matching CTERelationRef.
  private def updateCTEStats(
      plan: LogicalPlan,
      statsMap: mutable.HashMap[Long, Statistics]): LogicalPlan = plan match {
    case WithCTE(child, cteDefs) =>
      // Process definitions first (they may themselves reference earlier CTEs) so their stats
      // are available before the main child is visited.
      val newDefs = cteDefs.map { cteDef =>
        val newDef = updateCTEStats(cteDef, statsMap)
        statsMap.put(cteDef.id, newDef.stats)
        newDef.asInstanceOf[CTERelationDef]
      }
      WithCTE(updateCTEStats(child, statsMap), newDefs)
    case c: CTERelationRef =>
      // Attach the definition's stats if we have seen it; otherwise leave the ref untouched.
      statsMap.get(c.cteId).map(s => c.withNewStats(Some(s))).getOrElse(c)
    case _ if plan.containsPattern(CTE) =>
      // Recurse into children and into any subquery expressions that still contain CTE refs.
      plan
        .withNewChildren(plan.children.map(child => updateCTEStats(child, statsMap)))
        .transformExpressionsWithPruning(_.containsAllPatterns(PLAN_EXPRESSION, CTE)) {
          case e: SubqueryExpression =>
            e.withNewPlan(updateCTEStats(e.plan, statsMap))
        }
    case _ => plan
  }
}
/**
* Override to provide additional rules for the operator optimization batch.
*/
def extendedOperatorOptimizationRules: Seq[Rule[LogicalPlan]] = Seq.empty
/**
* Override to provide additional rules for early projection and filter pushdown to scans.
*/
def earlyScanPushDownRules: Seq[Rule[LogicalPlan]] = Seq.empty
/**
* Override to provide additional rules for rewriting plans after operator optimization rules and
* before any cost-based optimization rules that depend on stats.
*/
def preCBORules: Seq[Rule[LogicalPlan]] = Seq.empty
/**
* Returns (defaultBatches - (excludedRules - nonExcludableRules)), the rule batches that
* eventually run in the Optimizer.
*
* Implementations of this class should override [[defaultBatches]], and [[nonExcludableRules]]
* if necessary, instead of this method.
*/
final override def batches: Seq[Batch] = {
  // Rule names the user asked to exclude via SQL config "excludedRules".
  val requestedExclusions =
    SQLConf.get.optimizerExcludedRules.toSeq.flatMap(Utils.stringToSeq)
  // Split the request into protected (non-excludable) rules, which we warn about and keep,
  // and the rules we are actually allowed to drop.
  val (protectedRules, excludedRules) =
    requestedExclusions.partition(nonExcludableRules.contains)
  protectedRules.foreach { ruleName =>
    logWarning(s"Optimization rule '${ruleName}' was not excluded from the optimizer " +
      s"because this rule is a non-excludable rule.")
  }
  if (excludedRules.isEmpty) {
    defaultBatches
  } else {
    // Rebuild each batch without the excluded rules; drop batches that become empty.
    defaultBatches.flatMap { batch =>
      val keptRules = batch.rules.filter { rule =>
        val dropped = excludedRules.contains(rule.ruleName)
        if (dropped) {
          logInfo(s"Optimization rule '${rule.ruleName}' is excluded from the optimizer.")
        }
        !dropped
      }
      if (keptRules.size == batch.rules.size) {
        // Nothing was removed — reuse the original batch instance.
        Some(batch)
      } else if (keptRules.nonEmpty) {
        Some(Batch(batch.name, batch.strategy, keptRules: _*))
      } else {
        logInfo(s"Optimization batch '${batch.name}' is excluded from the optimizer " +
          s"as all enclosed rules have been excluded.")
        None
      }
    }
  }
}
}
/**
* Remove useless DISTINCT for MAX and MIN.
* This rule should be applied before RewriteDistinctAggregates.
*/
object EliminateDistinct extends Rule[LogicalPlan] {
  override def apply(plan: LogicalPlan): LogicalPlan =
    plan.transformAllExpressionsWithPruning(_.containsPattern(AGGREGATE_EXPRESSION)) {
      // DISTINCT is a no-op for duplicate-agnostic functions, so clear the flag and let the
      // cheaper non-distinct execution path run.
      case agg: AggregateExpression
          if agg.isDistinct && isDuplicateAgnostic(agg.aggregateFunction) =>
        agg.copy(isDistinct = false)
    }
  // Returns true for aggregate functions whose result is unaffected by duplicate input rows.
  def isDuplicateAgnostic(af: AggregateFunction): Boolean = af match {
    case _: Max | _: Min | _: BitAndAgg | _: BitOrAgg | _: CollectSet | _: First | _: Last =>
      true
    case _ => false
  }
}
/**
* Remove useless FILTER clause for aggregate expressions.
* This rule should be applied before RewriteDistinctAggregates.
*/
object EliminateAggregateFilter extends Rule[LogicalPlan] {
  override def apply(plan: LogicalPlan): LogicalPlan = plan.transformAllExpressionsWithPruning(
    _.containsAllPatterns(AGGREGATE_EXPRESSION, TRUE_OR_FALSE_LITERAL), ruleId) {
    // FILTER (WHERE true) keeps every row — drop the filter entirely.
    case ae @ AggregateExpression(_, _, _, Some(Literal.TrueLiteral), _) =>
      ae.copy(filter = None)
    // FILTER (WHERE false) keeps no rows, so the aggregate sees empty input. For a declarative
    // aggregate, evaluate it at optimization time: initialize the buffer, run the evaluate
    // expression over it, and fold the result into a literal.
    case AggregateExpression(af: DeclarativeAggregate, _, _, Some(Literal.FalseLiteral), _) =>
      val initialProject = SafeProjection.create(af.initialValues)
      val evalProject = SafeProjection.create(af.evaluateExpression :: Nil, af.aggBufferAttributes)
      val initialBuffer = initialProject(EmptyRow)
      val internalRow = evalProject(initialBuffer)
      Literal.create(internalRow.get(0, af.dataType), af.dataType)
    // Same idea for imperative aggregates: initialize a fresh buffer and eval without updates.
    case AggregateExpression(af: ImperativeAggregate, _, _, Some(Literal.FalseLiteral), _) =>
      val buffer = new SpecificInternalRow(af.aggBufferAttributes.map(_.dataType))
      af.initialize(buffer)
      Literal.create(af.eval(buffer), af.dataType)
  }
}
/**
* An optimizer used in test code.
*
* To ensure extendability, we leave the standard rules in the abstract optimizer rules, while
* specific rules go to the subclasses
*/
// Singleton instance for tests that just need a ready-made optimizer.
object SimpleTestOptimizer extends SimpleTestOptimizer

// A self-contained optimizer wired to in-memory/empty catalogs and registries; it runs only the
// standard batches defined in the abstract Optimizer (no subclass-specific rules).
class SimpleTestOptimizer extends Optimizer(
  new CatalogManager(
    FakeV2SessionCatalog,
    new SessionCatalog(new InMemoryCatalog, EmptyFunctionRegistry, EmptyTableFunctionRegistry)))
/**
* Remove redundant aliases from a query plan. A redundant alias is an alias that does not change
* the name or metadata of a column, and does not deduplicate it.
*/
object RemoveRedundantAliases extends Rule[LogicalPlan] {
  /**
   * Create an attribute mapping from the old to the new attributes. This function will only
   * return the attribute pairs that have changed.
   */
  private def createAttributeMapping(current: LogicalPlan, next: LogicalPlan)
    : Seq[(Attribute, Attribute)] = {
    current.output.zip(next.output).filterNot {
      case (a1, a2) => a1.semanticEquals(a2)
    }
  }
  /**
   * Remove the top-level alias from an expression when it is redundant.
   */
  private def removeRedundantAlias(e: Expression, excludeList: AttributeSet): Expression = e match {
    // Alias with metadata can not be stripped, or the metadata will be lost.
    // If the alias name is different from attribute name, we can't strip it either, or we
    // may accidentally change the output schema name of the root plan.
    case a @ Alias(attr: Attribute, name)
      if (a.metadata == Metadata.empty || a.metadata == attr.metadata) &&
        name == attr.name &&
        !excludeList.contains(attr) &&
        !excludeList.contains(a) =>
      attr
    case a => a
  }
  /**
   * Remove redundant alias expression from a LogicalPlan and its subtree. A set of excludes is used
   * to prevent the removal of seemingly redundant aliases used to deduplicate the input for a
   * (self) join or to prevent the removal of top-level subquery attributes.
   */
  private def removeRedundantAliases(plan: LogicalPlan, excluded: AttributeSet): LogicalPlan = {
    // Fast path: no Alias anywhere in this subtree.
    if (!plan.containsPattern(ALIAS)) {
      return plan
    }
    plan match {
      // We want to keep the same output attributes for subqueries. This means we cannot remove
      // the aliases that produce these attributes
      case Subquery(child, correlated) =>
        Subquery(removeRedundantAliases(child, excluded ++ child.outputSet), correlated)
      // A join has to be treated differently, because the left and the right side of the join are
      // not allowed to use the same attributes. We use an exclude list to prevent us from creating
      // a situation in which this happens; the rule will only remove an alias if its child
      // attribute is not on the exclude list.
      case Join(left, right, joinType, condition, hint) =>
        // Note: the right side is excluded against the *new* left output, since the left was
        // rewritten first and may now expose different attributes.
        val newLeft = removeRedundantAliases(left, excluded ++ right.outputSet)
        val newRight = removeRedundantAliases(right, excluded ++ newLeft.outputSet)
        val mapping = AttributeMap(
          createAttributeMapping(left, newLeft) ++
          createAttributeMapping(right, newRight))
        // Rewrite the join condition in terms of the children's new output attributes.
        val newCondition = condition.map(_.transform {
          case a: Attribute => mapping.getOrElse(a, a)
        })
        Join(newLeft, newRight, joinType, newCondition, hint)
      case _ =>
        // Remove redundant aliases in the subtree(s).
        val currentNextAttrPairs = mutable.Buffer.empty[(Attribute, Attribute)]
        val newNode = plan.mapChildren { child =>
          val newChild = removeRedundantAliases(child, excluded)
          currentNextAttrPairs ++= createAttributeMapping(child, newChild)
          newChild
        }
        // Create the attribute mapping. Note that the currentNextAttrPairs can contain duplicate
        // keys in case of Union (this is caused by the PushProjectionThroughUnion rule); in this
        // case we use the first mapping (which should be provided by the first child).
        val mapping = AttributeMap(currentNextAttrPairs.toSeq)
        // Create an expression cleaning function for nodes that can actually produce redundant
        // aliases, use identity otherwise.
        val clean: Expression => Expression = plan match {
          case _: Project => removeRedundantAlias(_, excluded)
          case _: Aggregate => removeRedundantAlias(_, excluded)
          case _: Window => removeRedundantAlias(_, excluded)
          case _ => identity[Expression]
        }
        // Transform the expressions: remap child attributes (keeping this node's visible
        // names intact via withName), then strip redundant top-level aliases.
        newNode.mapExpressions { expr =>
          clean(expr.transform {
            case a: Attribute => mapping.get(a).map(_.withName(a.name)).getOrElse(a)
          })
        }
    }
  }
  def apply(plan: LogicalPlan): LogicalPlan = removeRedundantAliases(plan, AttributeSet.empty)
}
/**
* Remove no-op operators from the query plan that do not make any modifications.
*/
object RemoveNoopOperators extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan.transformUpWithPruning(
    _.containsAnyPattern(PROJECT, WINDOW), ruleId) {
    // Eliminate no-op Projects: the child already produces semantically the same output.
    case p @ Project(projectList, child) if child.sameOutput(p) =>
      // Before dropping the Project, push its output names down into the child so the
      // user-visible schema names are preserved.
      val newChild = child match {
        case p: Project =>
          p.copy(projectList = restoreOriginalOutputNames(p.projectList, projectList.map(_.name)))
        case agg: Aggregate =>
          agg.copy(aggregateExpressions =
            restoreOriginalOutputNames(agg.aggregateExpressions, projectList.map(_.name)))
        case _ =>
          child
      }
      // Only drop the Project if the renamed child really exposes the same names; otherwise
      // removing it would change the output schema.
      if (newChild.output.zip(projectList).forall { case (a1, a2) => a1.name == a2.name }) {
        newChild
      } else {
        p
      }
    // Eliminate no-op Window
    case w: Window if w.windowExpressions.isEmpty => w.child
  }
}
/**
 * Simplify the children of `Union`, or remove a no-op `Union` that does not make any
 * modification to the query.
 */
object RemoveNoopUnion extends Rule[LogicalPlan] {
  /**
   * This only removes the `Project` that has only attributes or aliased attributes
   * from its child.
   */
  private def removeAliasOnlyProject(plan: LogicalPlan): LogicalPlan = plan match {
    case p @ Project(projectList, child) =>
      // The Project is removable only if it is a 1:1, order-preserving pass-through of the
      // child's output (possibly under aliases).
      val aliasOnly = projectList.length == child.output.length &&
        projectList.zip(child.output).forall {
          case (Alias(left: Attribute, _), right) => left.semanticEquals(right)
          case (left: Attribute, right) => left.semanticEquals(right)
          case _ => false
        }
      if (aliasOnly) {
        child
      } else {
        p
      }
    case _ => plan
  }
  // Deduplicates semantically-equal Union children. This is only safe under a distinct-like
  // parent (see apply), where duplicate branches cannot change the result.
  private def simplifyUnion(u: Union): LogicalPlan = {
    val uniqueChildren = mutable.ArrayBuffer.empty[LogicalPlan]
    val uniqueChildrenKey = mutable.HashSet.empty[LogicalPlan]
    u.children.foreach { c =>
      // Compare canonicalized plans (with alias-only Projects stripped) so cosmetic
      // differences between branches do not defeat deduplication.
      val key = removeAliasOnlyProject(c).canonicalized
      if (!uniqueChildrenKey.contains(key)) {
        uniqueChildren += c
        uniqueChildrenKey += key
      }
    }
    if (uniqueChildren.size == 1) {
      u.children.head
    } else {
      u.copy(children = uniqueChildren.toSeq)
    }
  }
  def apply(plan: LogicalPlan): LogicalPlan = plan.transformUpWithPruning(
    _.containsAllPatterns(DISTINCT_LIKE, UNION)) {
    case d @ Distinct(u: Union) =>
      d.withNewChildren(Seq(simplifyUnion(u)))
    case d @ Deduplicate(_, u: Union) =>
      d.withNewChildren(Seq(simplifyUnion(u)))
  }
}
/**
* Pushes down [[LocalLimit]] beneath UNION ALL and joins.
*/
object LimitPushDown extends Rule[LogicalPlan] {
  // Unwraps a GlobalLimit so the LocalLimit we push down is not stacked on top of it.
  private def stripGlobalLimitIfPresent(plan: LogicalPlan): LogicalPlan = {
    plan match {
      case GlobalLimit(_, child) => child
      case _ => plan
    }
  }
  // Adds a LocalLimit on top of `plan` only when it can actually reduce the rows produced:
  // either the plan has no per-partition row cap, or its cap is larger than the new limit.
  private def maybePushLocalLimit(limitExp: Expression, plan: LogicalPlan): LogicalPlan = {
    (limitExp, plan.maxRowsPerPartition) match {
      case (IntegerLiteral(newLimit), Some(childMaxRows)) if newLimit < childMaxRows =>
        // If the child has a cap on max rows per partition and the cap is larger than
        // the new limit, put a new LocalLimit there.
        LocalLimit(limitExp, stripGlobalLimitIfPresent(plan))
      case (_, None) =>
        // If the child has no cap, put the new LocalLimit.
        LocalLimit(limitExp, stripGlobalLimitIfPresent(plan))
      case _ =>
        // Otherwise, don't put a new LocalLimit.
        plan
    }
  }
  // Pushes a limit into the join side(s) where it cannot change the join result; see the
  // case-by-case comments in apply for the reasoning per join type.
  private def pushLocalLimitThroughJoin(limitExpr: Expression, join: Join): Join = {
    join.joinType match {
      case RightOuter => join.copy(right = maybePushLocalLimit(limitExpr, join.right))
      case LeftOuter => join.copy(left = maybePushLocalLimit(limitExpr, join.left))
      case _: InnerLike if join.condition.isEmpty =>
        join.copy(
          left = maybePushLocalLimit(limitExpr, join.left),
          right = maybePushLocalLimit(limitExpr, join.right))
      case LeftSemi | LeftAnti if join.condition.isEmpty =>
        // With no condition, a LEFT SEMI/ANTI join only checks whether the right side is
        // non-empty, so a single right-side row is sufficient.
        join.copy(
          left = maybePushLocalLimit(limitExpr, join.left),
          right = maybePushLocalLimit(Literal(1, IntegerType), join.right))
      case _ => join
    }
  }
  def apply(plan: LogicalPlan): LogicalPlan = plan.transformWithPruning(
    _.containsPattern(LIMIT), ruleId) {
    // Adding extra Limits below UNION ALL for children which are not Limit or do not have Limit
    // descendants whose maxRow is larger. This heuristic is valid assuming there does not exist any
    // Limit push-down rule that is unable to infer the value of maxRows.
    // Note: right now Union means UNION ALL, which does not de-duplicate rows, so it is safe to
    // pushdown Limit through it. Once we add UNION DISTINCT, however, we will not be able to
    // pushdown Limit.
    case LocalLimit(exp, u: Union) =>
      LocalLimit(exp, u.copy(children = u.children.map(maybePushLocalLimit(exp, _))))
    // Add extra limits below JOIN:
    // 1. For LEFT OUTER and RIGHT OUTER JOIN, we push limits to the left and right sides,
    //    respectively.
    // 2. For INNER and CROSS JOIN, we push limits to both the left and right sides if join
    //    condition is empty.
    // 3. For LEFT SEMI and LEFT ANTI JOIN, we push limits to the left side if join condition
    //    is empty.
    // It's not safe to push limits below FULL OUTER JOIN in the general case without a more
    // invasive rewrite. We also need to ensure that this limit pushdown rule will not eventually
    // introduce limits on both sides if it is applied multiple times. Therefore:
    //   - If one side is already limited, stack another limit on top if the new limit is smaller.
    //     The redundant limit will be collapsed by the CombineLimits rule.
    case LocalLimit(exp, join: Join) =>
      LocalLimit(exp, pushLocalLimitThroughJoin(exp, join))
    // There is a Project between LocalLimit and Join if they do not have the same output.
    case LocalLimit(exp, project @ Project(_, join: Join)) =>
      LocalLimit(exp, project.copy(child = pushLocalLimitThroughJoin(exp, join)))
    // Push down limit 1 through Aggregate and turn Aggregate into Project if it is group only.
    case Limit(le @ IntegerLiteral(1), a: Aggregate) if a.groupOnly =>
      Limit(le, Project(a.aggregateExpressions, LocalLimit(le, a.child)))
    case Limit(le @ IntegerLiteral(1), p @ Project(_, a: Aggregate)) if a.groupOnly =>
      Limit(le, p.copy(child = Project(a.aggregateExpressions, LocalLimit(le, a.child))))
  }
}
/**
* Pushes Project operator to both sides of a Union operator.
* Operations that are safe to pushdown are listed as follows.
* Union:
* Right now, Union means UNION ALL, which does not de-duplicate rows. So, it is
* safe to pushdown Filters and Projections through it. Filter pushdown is handled by another
* rule PushDownPredicates. Once we add UNION DISTINCT, we will not be able to pushdown Projections.
*/
object PushProjectionThroughUnion extends Rule[LogicalPlan] with PredicateHelper {
  /**
   * Maps Attributes from the left side to the corresponding Attribute on the right side.
   */
  private def buildRewrites(left: LogicalPlan, right: LogicalPlan): AttributeMap[Attribute] = {
    assert(left.output.size == right.output.size)
    AttributeMap(left.output.zip(right.output))
  }
  /**
   * Rewrites an expression so that it can be pushed to the right side of a
   * Union or Except operator. This method relies on the fact that the output attributes
   * of a union/intersect/except are always equal to the left child's output.
   */
  private def pushToRight[A <: Expression](e: A, rewrites: AttributeMap[Attribute]) = {
    val result = e transform {
      case a: Attribute => rewrites(a)
    } match {
      // Make sure exprId is unique in each child of Union.
      case Alias(child, alias) => Alias(child, alias)()
      case other => other
    }
    // We must promise the compiler that we did not discard the names in the case of project
    // expressions. This is safe since the only transformation is from Attribute => Attribute.
    result.asInstanceOf[A]
  }
  // Creates one Project per Union child: the first child keeps the original project list,
  // every other child gets the list rewritten in terms of its own output attributes.
  def pushProjectionThroughUnion(projectList: Seq[NamedExpression], u: Union): Seq[LogicalPlan] = {
    val newFirstChild = Project(projectList, u.children.head)
    val newOtherChildren = u.children.tail.map { child =>
      val rewrites = buildRewrites(u.children.head, child)
      Project(projectList.map(pushToRight(_, rewrites)), child)
    }
    newFirstChild +: newOtherChildren
  }
  def apply(plan: LogicalPlan): LogicalPlan = plan.transformWithPruning(
    _.containsAllPatterns(UNION, PROJECT)) {
    // Push down deterministic projection through UNION ALL. Non-deterministic expressions
    // must not be duplicated into each branch, as they could evaluate differently per branch.
    case Project(projectList, u: Union)
      if projectList.forall(_.deterministic) && u.children.nonEmpty =>
      u.copy(children = pushProjectionThroughUnion(projectList, u))
  }
}
/**
* Attempts to eliminate the reading of unneeded columns from the query plan.
*
* Since adding Project before Filter conflicts with PushPredicatesThroughProject, this rule will
* remove the Project p2 in the following pattern:
*
* p1 @ Project(_, Filter(_, p2 @ Project(_, child))) if p2.outputSet.subsetOf(p2.inputSet)
*
* p2 is usually inserted by this rule and useless, p1 could prune the columns anyway.
*/
object ColumnPruning extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = removeProjectBeforeFilter(
    plan.transformWithPruning(AlwaysProcess.fn, ruleId) {
      // Prunes the unused columns from project list of Project/Aggregate/Expand
      case p @ Project(_, p2: Project) if !p2.outputSet.subsetOf(p.references) =>
        p.copy(child = p2.copy(projectList = p2.projectList.filter(p.references.contains)))
      case p @ Project(_, a: Aggregate) if !a.outputSet.subsetOf(p.references) =>
        p.copy(
          child = a.copy(aggregateExpressions = a.aggregateExpressions.filter(p.references.contains)))
      case a @ Project(_, e @ Expand(_, _, grandChild)) if !e.outputSet.subsetOf(a.references) =>
        val newOutput = e.output.filter(a.references.contains(_))
        // Drop the corresponding column from every projection row of the Expand.
        val newProjects = e.projections.map { proj =>
          proj.zip(e.output).filter { case (_, a) =>
            newOutput.contains(a)
          }.unzip._1
        }
        a.copy(child = Expand(newProjects, newOutput, grandChild))
      // Prune and drop AttachDistributedSequence if the produced attribute is not referred.
      case p @ Project(_, a @ AttachDistributedSequence(_, grandChild))
        if !p.references.contains(a.sequenceAttr) =>
        p.copy(child = prunedChild(grandChild, p.references))
      // Prunes the unused columns from child of `DeserializeToObject`
      case d @ DeserializeToObject(_, _, child) if !child.outputSet.subsetOf(d.references) =>
        d.copy(child = prunedChild(child, d.references))
      // Prunes the unused columns from child of Aggregate/Expand/Generate/ScriptTransformation
      case a @ Aggregate(_, _, child) if !child.outputSet.subsetOf(a.references) =>
        a.copy(child = prunedChild(child, a.references))
      case f @ FlatMapGroupsInPandas(_, _, _, child) if !child.outputSet.subsetOf(f.references) =>
        f.copy(child = prunedChild(child, f.references))
      case e @ Expand(_, _, child) if !child.outputSet.subsetOf(e.references) =>
        e.copy(child = prunedChild(child, e.references))
      // prune unrequired references
      case p @ Project(_, g: Generate) if p.references != g.outputSet =>
        // Columns the generator itself needs must survive even if the parent doesn't use them.
        val requiredAttrs = p.references -- g.producedAttributes ++ g.generator.references
        val newChild = prunedChild(g.child, requiredAttrs)
        // Generator inputs not consumed upstream can be dropped from Generate's output via
        // unrequiredChildIndex instead of an extra Project.
        val unrequired = g.generator.references -- p.references
        val unrequiredIndices = newChild.output.zipWithIndex.filter(t => unrequired.contains(t._1))
          .map(_._2)
        p.copy(child = g.copy(child = newChild, unrequiredChildIndex = unrequiredIndices))
      // prune unrequired nested fields from `Generate`.
      case GeneratorNestedColumnAliasing(rewrittenPlan) => rewrittenPlan
      // Eliminate unneeded attributes from right side of a Left Existence Join.
      case j @ Join(_, right, LeftExistence(_), _, _) =>
        j.copy(right = prunedChild(right, j.references))
      // all the columns will be used to compare, so we can't prune them
      case p @ Project(_, _: SetOperation) => p
      case p @ Project(_, _: Distinct) => p
      // Eliminate unneeded attributes from children of Union.
      case p @ Project(_, u: Union) =>
        if (!u.outputSet.subsetOf(p.references)) {
          val firstChild = u.children.head
          val newOutput = prunedChild(firstChild, p.references).output
          // pruning the columns of all children based on the pruned first child.
          val newChildren = u.children.map { p =>
            // Union children line up positionally, so select by the first child's kept indices.
            val selected = p.output.zipWithIndex.filter { case (a, i) =>
              newOutput.contains(firstChild.output(i))
            }.map(_._1)
            Project(selected, p)
          }
          p.copy(child = u.withNewChildren(newChildren))
        } else {
          p
        }
      // Prune unnecessary window expressions
      case p @ Project(_, w: Window) if !w.windowOutputSet.subsetOf(p.references) =>
        p.copy(child = w.copy(
          windowExpressions = w.windowExpressions.filter(p.references.contains)))
      // Prune WithCTE
      case p @ Project(_, w: WithCTE) =>
        if (!w.outputSet.subsetOf(p.references)) {
          p.copy(child = w.withNewPlan(prunedChild(w.plan, p.references)))
        } else {
          p
        }
      // Can't prune the columns on LeafNode
      case p @ Project(_, _: LeafNode) => p
      case NestedColumnAliasing(rewrittenPlan) => rewrittenPlan
      // for all other logical plans that inherits the output from it's children
      // Project over project is handled by the first case, skip it here.
      case p @ Project(_, child) if !child.isInstanceOf[Project] =>
        val required = child.references ++ p.references
        if (!child.inputSet.subsetOf(required)) {
          val newChildren = child.children.map(c => prunedChild(c, required))
          p.copy(child = child.withNewChildren(newChildren))
        } else {
          p
        }
    })
  /** Applies a projection only when the child is producing unnecessary attributes */
  private def prunedChild(c: LogicalPlan, allReferences: AttributeSet) =
    if (!c.outputSet.subsetOf(allReferences)) {
      Project(c.output.filter(allReferences.contains), c)
    } else {
      c
    }
  /**
   * The Project before Filter is not necessary but conflict with PushPredicatesThroughProject,
   * so remove it. Since the Projects have been added top-down, we need to remove in bottom-up
   * order, otherwise lower Projects can be missed.
   */
  private def removeProjectBeforeFilter(plan: LogicalPlan): LogicalPlan = plan transformUp {
    case p1 @ Project(_, f @ Filter(_, p2 @ Project(_, child)))
      if p2.outputSet.subsetOf(child.outputSet) &&
        // We only remove attribute-only project.
        p2.projectList.forall(_.isInstanceOf[AttributeReference]) =>
      p1.copy(child = f.copy(child = child))
  }
}
/**
* Combines two [[Project]] operators into one and perform alias substitution,
* merging the expressions into one single expression for the following cases.
* 1. When two [[Project]] operators are adjacent.
* 2. When two [[Project]] operators have LocalLimit/Sample/Repartition operator between them
* and the upper project consists of the same number of columns which is equal or aliasing.
* `GlobalLimit(LocalLimit)` pattern is also considered.
*/
object CollapseProject extends Rule[LogicalPlan] with AliasHelper {
def apply(plan: LogicalPlan): LogicalPlan = {
  // When set, expressions are inlined regardless of how often they are referenced.
  val alwaysInline = conf.getConf(SQLConf.COLLAPSE_PROJECT_ALWAYS_INLINE)
  plan.transformUpWithPruning(_.containsPattern(PROJECT), ruleId) {
    // Adjacent Projects: merge the upper list into the lower Project.
    case p1 @ Project(_, p2: Project)
      if canCollapseExpressions(p1.projectList, p2.projectList, alwaysInline) =>
      p2.copy(projectList = buildCleanedProjectList(p1.projectList, p2.projectList))
    // Project over Aggregate: fold the project list into the aggregate expressions.
    case p @ Project(_, agg: Aggregate)
      if canCollapseExpressions(p.projectList, agg.aggregateExpressions, alwaysInline) &&
        canCollapseAggregate(p, agg) =>
      agg.copy(aggregateExpressions = buildCleanedProjectList(
        p.projectList, agg.aggregateExpressions))
    // Project separated from a lower Project by limit/repartition/sample nodes: only allowed
    // when the upper list is a pure renaming, so it can be merged through the barrier.
    case Project(l1, g @ GlobalLimit(_, limit @ LocalLimit(_, p2 @ Project(l2, _))))
      if isRenaming(l1, l2) =>
      val newProjectList = buildCleanedProjectList(l1, l2)
      g.copy(child = limit.copy(child = p2.copy(projectList = newProjectList)))
    case Project(l1, limit @ LocalLimit(_, p2 @ Project(l2, _))) if isRenaming(l1, l2) =>
      val newProjectList = buildCleanedProjectList(l1, l2)
      limit.copy(child = p2.copy(projectList = newProjectList))
    case Project(l1, r @ Repartition(_, _, p @ Project(l2, _))) if isRenaming(l1, l2) =>
      r.copy(child = p.copy(projectList = buildCleanedProjectList(l1, p.projectList)))
    case Project(l1, s @ Sample(_, _, _, _, p2 @ Project(l2, _))) if isRenaming(l1, l2) =>
      s.copy(child = p2.copy(projectList = buildCleanedProjectList(l1, p2.projectList)))
  }
}
/**
 * Check if we can collapse expressions safely. Convenience overload that first converts the
 * producer-side named expressions into an alias map.
 */
def canCollapseExpressions(
    consumers: Seq[Expression],
    producers: Seq[NamedExpression],
    alwaysInline: Boolean): Boolean = {
  val producerAliases = getAliasMap(producers)
  canCollapseExpressions(consumers, producerAliases, alwaysInline)
}
/**
 * Check if we can collapse expressions safely.
 *
 * @param consumers expressions that consume the attributes produced by `producerMap`
 * @param producerMap maps an attribute to the expression that produces it
 * @param alwaysInline when true, producers are inlined regardless of reference count
 */
def canCollapseExpressions(
    consumers: Seq[Expression],
    producerMap: Map[Attribute, Expression],
    alwaysInline: Boolean = false): Boolean = {
  // We can only collapse expressions if all input expressions meet the following criteria:
  // - The input is deterministic.
  // - The input is only consumed once OR the underlying input expression is cheap.
  // Count references per attribute inside the forall itself rather than via the deprecated
  // (and lazy, in Scala 2.13) `mapValues(_.size)`, which returns a MapView.
  consumers.flatMap(collectReferences)
    .groupBy(identity)
    .forall { case (reference, occurrences) =>
      val producer = producerMap.getOrElse(reference, reference)
      producer.deterministic && (occurrences.size == 1 || alwaysInline || {
        // The producer may still be inlined if every consumer only extracts fields from it,
        // because such extractions are simplified away by later rules.
        val relatedConsumers = consumers.filter(_.references.contains(reference))
        val extractOnly = relatedConsumers.forall(isExtractOnly(_, reference))
        shouldInline(producer, extractOnly)
      })
    }
}
@scala.annotation.tailrec
private def isExtractOnly(expr: Expression, ref: Attribute): Boolean = expr match {
case a: Alias => isExtractOnly(a.child, ref)
case e: ExtractValue => isExtractOnly(e.children.head, ref)
case a: Attribute => a.semanticEquals(ref)
case _ => false
}
/**
* A project cannot be collapsed with an aggregate when there are correlated scalar
* subqueries in the project list, because currently we only allow correlated subqueries
* in aggregate if they are also part of the grouping expressions. Otherwise the plan
* after subquery rewrite will not be valid.
*/
private def canCollapseAggregate(p: Project, a: Aggregate): Boolean = {
p.projectList.forall(_.collect {
case s: ScalarSubquery if s.outerAttrs.nonEmpty => s
}.isEmpty)
}
def buildCleanedProjectList(
upper: Seq[NamedExpression],
lower: Seq[NamedExpression]): Seq[NamedExpression] = {
val aliases = getAliasMap(lower)
upper.map(replaceAliasButKeepName(_, aliases))
}
/**
* Check if the given expression is cheap that we can inline it.
*/
private def shouldInline(e: Expression, extractOnlyConsumer: Boolean): Boolean = e match {
case _: Attribute | _: OuterReference => true
case _ if e.foldable => true
// PythonUDF is handled by the rule ExtractPythonUDFs
case _: PythonUDF => true
// Alias and ExtractValue are very cheap.
case _: Alias | _: ExtractValue => e.children.forall(shouldInline(_, extractOnlyConsumer))
// These collection create functions are not cheap, but we have optimizer rules that can
// optimize them out if they are only consumed by ExtractValue, so we need to allow to inline
// them to avoid perf regression. As an example:
// Project(s.a, s.b, Project(create_struct(a, b, c) as s, child))
// We should collapse these two projects and eventually get Project(a, b, child)
case _: CreateNamedStruct | _: CreateArray | _: CreateMap | _: UpdateFields =>
extractOnlyConsumer
case _ => false
}
/**
* Return all the references of the given expression without deduplication, which is different
* from `Expression.references`.
*/
private def collectReferences(e: Expression): Seq[Attribute] = e.collect {
case a: Attribute => a
}
private def isRenaming(list1: Seq[NamedExpression], list2: Seq[NamedExpression]): Boolean = {
list1.length == list2.length && list1.zip(list2).forall {
case (e1, e2) if e1.semanticEquals(e2) => true
case (Alias(a: Attribute, _), b) if a.metadata == Metadata.empty && a.name == b.name => true
case _ => false
}
}
}
/**
 * Merges stacked [[RepartitionOperation]] and [[RebalancePartitions]] operators so that at
 * most one of them remains at each point in the plan.
 */
object CollapseRepartition extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan.transformUpWithPruning(
    _.containsAnyPattern(REPARTITION_OPERATION, REBALANCE_PARTITIONS), ruleId) {
    // A Repartition sitting on another repartition operation. If the parent is a coalesce
    // (shuffle disabled) while the child shuffles, the parent may only be dropped when it
    // would not reduce the child's partition count; in every other combination the parent
    // wins and adopts the child's input directly.
    case parent @ Repartition(_, _, child: RepartitionOperation) =>
      if (!parent.shuffle && child.shuffle) {
        if (parent.numPartitions >= child.numPartitions) child else parent
      } else {
        parent.copy(child = child.child)
      }
    // A RepartitionByExpression on top of a global Sort, a Repartition or another
    // RepartitionByExpression makes the child redundant: splice it out.
    case parent @ RepartitionByExpression(
        _, child @ (Sort(_, true, _) | _: RepartitionOperation), _) =>
      parent.withNewChildren(child.children)
    // A RebalancePartitions on top of any Sort (local or global) or any repartition
    // operation likewise supersedes its child.
    case parent @ RebalancePartitions(_, child @ (_: Sort | _: RepartitionOperation)) =>
      parent.withNewChildren(child.children)
    // Two stacked RebalancePartitions: keep only the outer one.
    case parent @ RebalancePartitions(_, child: RebalancePartitions) =>
      parent.withNewChildren(child.children)
  }
}
/**
 * Rewrites a [[RepartitionByExpression]] to produce a single partition when every
 * partition expression is foldable and the user did not specify a partition count.
 */
object OptimizeRepartition extends Rule[LogicalPlan] {
  override def apply(plan: LogicalPlan): LogicalPlan = plan.transformWithPruning(
    _.containsPattern(REPARTITION_OPERATION), ruleId) {
    case repartition @ RepartitionByExpression(partExprs, _, userNumPartitions)
        if userNumPartitions.isEmpty && partExprs.nonEmpty && partExprs.forall(_.foldable) =>
      // Constant partition keys place every row in the same partition, so one
      // output partition suffices.
      repartition.copy(optNumPartitions = Some(1))
  }
}
/**
 * Replaces first(col) to nth_value(col, 1) for better performance.
 */
object OptimizeWindowFunctions extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan.resolveExpressionsWithPruning(
    _.containsPattern(WINDOW_EXPRESSION), ruleId) {
    // Rewrite only when an ORDER BY is present and the ROWS frame starts at
    // UNBOUNDED PRECEDING and ends at CURRENT ROW or UNBOUNDED FOLLOWING — with such a
    // frame the first row of the frame is the first row of the partition, which is what
    // NthValue(col, 1) computes. `ignoreNulls` is carried over unchanged.
    case we @ WindowExpression(AggregateExpression(first: First, _, _, _, _),
        WindowSpecDefinition(_, orderSpec, frameSpecification: SpecifiedWindowFrame))
      if orderSpec.nonEmpty && frameSpecification.frameType == RowFrame &&
        frameSpecification.lower == UnboundedPreceding &&
        (frameSpecification.upper == UnboundedFollowing ||
          frameSpecification.upper == CurrentRow) =>
      we.copy(windowFunction = NthValue(first.child, Literal(1), first.ignoreNulls))
  }
}
/**
 * Collapse Adjacent Window Expression.
 * - If the partition specs and order specs are the same and the window expression are
 *   independent and are of the same window function type, collapse into the parent.
 */
object CollapseWindow extends Rule[LogicalPlan] {
  /**
   * Two Window operators can be merged when they share partition and order specs, neither
   * consumes the other's window output, both actually define window expressions, and both
   * hold the same window-function type.
   */
  private def windowsCompatible(w1: Window, w2: Window): Boolean = {
    w1.partitionSpec == w2.partitionSpec &&
      w1.orderSpec == w2.orderSpec &&
      w1.references.intersect(w2.windowOutputSet).isEmpty &&
      w1.windowExpressions.nonEmpty && w2.windowExpressions.nonEmpty &&
      // This assumes Window contains the same type of window expressions. This is ensured
      // by ExtractWindowFunctions.
      WindowFunctionType.functionType(w1.windowExpressions.head) ==
        WindowFunctionType.functionType(w2.windowExpressions.head)
  }
  def apply(plan: LogicalPlan): LogicalPlan = plan.transformUpWithPruning(
    _.containsPattern(WINDOW), ruleId) {
    // Directly adjacent windows: merge the child's expressions before the parent's.
    case w1 @ Window(we1, _, _, w2 @ Window(we2, _, _, grandChild))
        if windowsCompatible(w1, w2) =>
      w1.copy(windowExpressions = we2 ++ we1, child = grandChild)
    // Windows separated by a Project: merge the windows and re-add the projection on top,
    // extended with the upper window's output. Requires the upper window to only reference
    // attributes available from the grandchild.
    case w1 @ Window(we1, _, _, Project(pl, w2 @ Window(we2, _, _, grandChild)))
        if windowsCompatible(w1, w2) && w1.references.subsetOf(grandChild.outputSet) =>
      Project(
        pl ++ w1.windowOutputSet,
        w1.copy(windowExpressions = we2 ++ we1, child = grandChild))
  }
}
/**
 * Transpose Adjacent Window Expressions.
 * - If the partition spec of the parent Window expression is compatible with the partition spec
 *   of the child window expression, transpose them.
 */
object TransposeWindow extends Rule[LogicalPlan] {
  /**
   * Returns true if `ps1` is a strict prefix (up to reordering and semantic equality)
   * of `ps2` — i.e. `ps1` partitions more coarsely than `ps2`.
   */
  private def compatiblePartitions(ps1 : Seq[Expression], ps2: Seq[Expression]): Boolean = {
    ps1.length < ps2.length && ps2.take(ps1.length).permutations.exists(ps1.zip(_).forall {
      case (l, r) => l.semanticEquals(r)
    })
  }
  // Transposition is only safe when the parent does not consume the child's window output,
  // both windows are fully deterministic, and their partition specs are compatible.
  private def windowsCompatible(w1: Window, w2: Window): Boolean = {
    w1.references.intersect(w2.windowOutputSet).isEmpty &&
      w1.expressions.forall(_.deterministic) &&
      w2.expressions.forall(_.deterministic) &&
      compatiblePartitions(w1.partitionSpec, w2.partitionSpec)
  }
  def apply(plan: LogicalPlan): LogicalPlan = plan.transformUpWithPruning(
    _.containsPattern(WINDOW), ruleId) {
    // Swap the two windows; the outer Project restores the original output order.
    case w1 @ Window(_, _, _, w2 @ Window(_, _, _, grandChild))
        if windowsCompatible(w1, w2) =>
      Project(w1.output, w2.copy(child = w1.copy(child = grandChild)))
    // Same, with an intervening Project whose list is re-added on top, extended with
    // the upper window's output.
    case w1 @ Window(_, _, _, Project(pl, w2 @ Window(_, _, _, grandChild)))
        if windowsCompatible(w1, w2) && w1.references.subsetOf(grandChild.outputSet) =>
      Project(
        pl ++ w1.windowOutputSet,
        w2.copy(child = w1.copy(child = grandChild)))
  }
}
/**
 * Infers filters from [[Generate]], such that rows that would have been removed
 * by this [[Generate]] can be removed earlier - before joins and in data sources.
 */
object InferFiltersFromGenerate extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan.transformUpWithPruning(
    _.containsPattern(GENERATE)) {
    // Only non-outer generates (`false` is the `outer` flag) drop rows, so only those
    // can have filters inferred.
    case generate @ Generate(g, _, false, _, _, _) if canInferFilters(g) =>
      assert(g.children.length == 1)
      val input = g.children.head
      // Generating extra predicates here has overheads/risks:
      //   - We may evaluate expensive input expressions multiple times.
      //   - We may infer too many constraints later.
      //   - The input expression may fail to be evaluated under ANSI mode. If we reorder the
      //     predicates and evaluate the input expression first, we may fail the query unexpectedly.
      // To be safe, here we only generate extra predicates if the input is an attribute.
      // Note that, foldable input is also excluded here, to avoid constant filters like
      // 'size([1, 2, 3]) > 0'. These do not show up in child's constraints and then the
      // idempotence will break.
      if (input.isInstanceOf[Attribute]) {
        // Exclude child's constraints to guarantee idempotency
        val inferredFilters = ExpressionSet(
          Seq(GreaterThan(Size(input), Literal(0)), IsNotNull(input))
        ) -- generate.child.constraints
        if (inferredFilters.nonEmpty) {
          generate.copy(child = Filter(inferredFilters.reduce(And), generate.child))
        } else {
          generate
        }
      } else {
        generate
      }
  }
  // Only explode-style and inline generators are eligible for filter inference here.
  private def canInferFilters(g: Generator): Boolean = g match {
    case _: ExplodeBase => true
    case _: Inline => true
    case _ => false
  }
}
/**
 * Generate a list of additional filters from an operator's existing constraint but remove those
 * that are either already part of the operator's condition or are part of the operator's child
 * constraints. These filters are currently inserted to the existing conditions in the Filter
 * operators and on either side of Join operators.
 *
 * Note: While this optimization is applicable to a lot of types of join, it primarily benefits
 * Inner and LeftSemi joins.
 */
object InferFiltersFromConstraints extends Rule[LogicalPlan]
  with PredicateHelper with ConstraintHelper {
  def apply(plan: LogicalPlan): LogicalPlan = {
    // The whole rule is gated on constraint propagation being enabled.
    if (conf.constraintPropagationEnabled) {
      inferFilters(plan)
    } else {
      plan
    }
  }
  private def inferFilters(plan: LogicalPlan): LogicalPlan = plan.transformWithPruning(
    _.containsAnyPattern(FILTER, JOIN)) {
    // For a Filter: add constraints not already implied by the child or by the
    // filter's own conjuncts.
    case filter @ Filter(condition, child) =>
      val newFilters = filter.constraints --
        (child.constraints ++ splitConjunctivePredicates(condition))
      if (newFilters.nonEmpty) {
        Filter(And(newFilters.reduce(And), condition), child)
      } else {
        filter
      }
    case join @ Join(left, right, joinType, conditionOpt, _) =>
      joinType match {
        // For inner join, we can infer additional filters for both sides. LeftSemi is kind of an
        // inner join, it just drops the right side in the final output.
        case _: InnerLike | LeftSemi =>
          val allConstraints = getAllConstraints(left, right, conditionOpt)
          val newLeft = inferNewFilter(left, allConstraints)
          val newRight = inferNewFilter(right, allConstraints)
          join.copy(left = newLeft, right = newRight)
        // For right outer join, we can only infer additional filters for left side.
        case RightOuter =>
          val allConstraints = getAllConstraints(left, right, conditionOpt)
          val newLeft = inferNewFilter(left, allConstraints)
          join.copy(left = newLeft)
        // For left join, we can only infer additional filters for right side.
        case LeftOuter | LeftAnti =>
          val allConstraints = getAllConstraints(left, right, conditionOpt)
          val newRight = inferNewFilter(right, allConstraints)
          join.copy(right = newRight)
        case _ => join
      }
  }
  /**
   * Union of both children's constraints, the join condition's conjuncts, and whatever
   * additional constraints can be inferred from that combined set.
   */
  private def getAllConstraints(
      left: LogicalPlan,
      right: LogicalPlan,
      conditionOpt: Option[Expression]): ExpressionSet = {
    val baseConstraints = left.constraints.union(right.constraints)
      .union(ExpressionSet(conditionOpt.map(splitConjunctivePredicates).getOrElse(Nil)))
    baseConstraints.union(inferAdditionalConstraints(baseConstraints))
  }
  /**
   * Wraps `plan` in a Filter built from the deterministic constraints that can be
   * evaluated on `plan`'s output and are not already among `plan`'s own constraints.
   * Returns `plan` unchanged if nothing new can be inferred.
   */
  private def inferNewFilter(plan: LogicalPlan, constraints: ExpressionSet): LogicalPlan = {
    val newPredicates = constraints
      .union(constructIsNotNullConstraints(constraints, plan.output))
      .filter { c =>
        c.references.nonEmpty && c.references.subsetOf(plan.outputSet) && c.deterministic
      } -- plan.constraints
    if (newPredicates.isEmpty) {
      plan
    } else {
      Filter(newPredicates.reduce(And), plan)
    }
  }
}
/**
 * Combines all adjacent [[Union]] operators into a single [[Union]].
 */
object CombineUnions extends Rule[LogicalPlan] {
  import CollapseProject.{buildCleanedProjectList, canCollapseExpressions}
  import PushProjectionThroughUnion.pushProjectionThroughUnion
  def apply(plan: LogicalPlan): LogicalPlan = plan.transformDownWithPruning(
    _.containsAnyPattern(UNION, DISTINCT_LIKE), ruleId) {
    case u: Union => flattenUnion(u, false)
    case Distinct(u: Union) => Distinct(flattenUnion(u, true))
    // Only handle distinct-like 'Deduplicate', where the keys == output
    case Deduplicate(keys: Seq[Attribute], u: Union) if AttributeSet(keys) == u.outputSet =>
      Deduplicate(keys, flattenUnion(u, true))
  }
  /**
   * Iteratively (via an explicit stack, to avoid deep recursion) flattens nested Unions
   * into a single Union's child list. When `flattenDistinct` is set, Distinct/Deduplicate
   * wrappers around nested Unions are absorbed as well (valid because the caller keeps a
   * distinct-like operator on top). Projections over nested Unions are pushed through so
   * that the Unions beneath them can also be flattened.
   */
  private def flattenUnion(union: Union, flattenDistinct: Boolean): Union = {
    val topByName = union.byName
    val topAllowMissingCol = union.allowMissingCol
    val stack = mutable.Stack[LogicalPlan](union)
    val flattened = mutable.ArrayBuffer.empty[LogicalPlan]
    // Note that we should only flatten the unions with same byName and allowMissingCol.
    // Although we do `UnionCoercion` at analysis phase, we manually run `CombineUnions`
    // in some places like `Dataset.union`. Flattening unions with different resolution
    // rules (by position and by name) could cause incorrect results.
    while (stack.nonEmpty) {
      stack.pop() match {
        // Adjacent projects are collapsed first so the project-over-union cases below
        // can fire on the result.
        case p1 @ Project(_, p2: Project)
            if canCollapseExpressions(p1.projectList, p2.projectList, alwaysInline = false) =>
          val newProjectList = buildCleanedProjectList(p1.projectList, p2.projectList)
          stack.pushAll(Seq(p2.copy(projectList = newProjectList)))
        case Distinct(Union(children, byName, allowMissingCol))
            if flattenDistinct && byName == topByName && allowMissingCol == topAllowMissingCol =>
          // Children are pushed in reverse so popping preserves the original order.
          stack.pushAll(children.reverse)
        // Only handle distinct-like 'Deduplicate', where the keys == output
        case Deduplicate(keys: Seq[Attribute], u: Union)
            if flattenDistinct && u.byName == topByName &&
              u.allowMissingCol == topAllowMissingCol && AttributeSet(keys) == u.outputSet =>
          stack.pushAll(u.children.reverse)
        case Union(children, byName, allowMissingCol)
            if byName == topByName && allowMissingCol == topAllowMissingCol =>
          stack.pushAll(children.reverse)
        // Push down projection through Union and then push pushed plan to Stack if
        // there is a Project.
        case Project(projectList, Distinct(u @ Union(children, byName, allowMissingCol)))
            if projectList.forall(_.deterministic) && children.nonEmpty &&
              flattenDistinct && byName == topByName && allowMissingCol == topAllowMissingCol =>
          stack.pushAll(pushProjectionThroughUnion(projectList, u).reverse)
        case Project(projectList, Deduplicate(keys: Seq[Attribute], u: Union))
            if projectList.forall(_.deterministic) && flattenDistinct && u.byName == topByName &&
              u.allowMissingCol == topAllowMissingCol && AttributeSet(keys) == u.outputSet =>
          stack.pushAll(pushProjectionThroughUnion(projectList, u).reverse)
        case Project(projectList, u @ Union(children, byName, allowMissingCol))
            if projectList.forall(_.deterministic) && children.nonEmpty &&
              byName == topByName && allowMissingCol == topAllowMissingCol =>
          stack.pushAll(pushProjectionThroughUnion(projectList, u).reverse)
        // Anything else becomes a direct child of the flattened Union.
        case child =>
          flattened += child
      }
    }
    union.copy(children = flattened.toSeq)
  }
}
/**
 * Combines two adjacent [[Filter]] operators into one, merging the non-redundant conditions into
 * one conjunctive predicate.
 */
object CombineFilters extends Rule[LogicalPlan] with PredicateHelper {
  def apply(plan: LogicalPlan): LogicalPlan = plan.transformWithPruning(
    _.containsPattern(FILTER), ruleId)(applyLocally)
  // Exposed (non-private) so PushDownPredicates can chain it with the other
  // pushdown partial functions.
  val applyLocally: PartialFunction[LogicalPlan, LogicalPlan] = {
    // The query execution/optimization does not guarantee the expressions are evaluated in order.
    // We only can combine them if and only if both are deterministic.
    case Filter(fc, nf @ Filter(nc, grandChild)) if nc.deterministic =>
      // Non-deterministic conjuncts of the upper filter must stay above the merge,
      // preserving their original evaluation position.
      val (combineCandidates, nonDeterministic) =
        splitConjunctivePredicates(fc).partition(_.deterministic)
      // Subtract the lower filter's conjuncts so duplicates are not re-evaluated.
      val mergedFilter = (ExpressionSet(combineCandidates) --
        ExpressionSet(splitConjunctivePredicates(nc))).reduceOption(And) match {
        case Some(ac) =>
          Filter(And(nc, ac), grandChild)
        case None =>
          nf
      }
      nonDeterministic.reduceOption(And).map(c => Filter(c, mergedFilter)).getOrElse(mergedFilter)
  }
}
/**
 * Removes Sort operations if they don't affect the final output ordering.
 * Note that changes in the final output ordering may affect the file size (SPARK-32318).
 * This rule handles the following cases:
 * 1) if the sort order is empty or the sort order does not have any reference
 * 2) if the Sort operator is a local sort and the child is already sorted
 * 3) if there is another Sort operator separated by 0...n Project, Filter, Repartition or
 *    RepartitionByExpression, RebalancePartitions (with deterministic expressions) operators
 * 4) if the Sort operator is within Join separated by 0...n Project, Filter, Repartition or
 *    RepartitionByExpression, RebalancePartitions (with deterministic expressions) operators only
 *    and the Join condition is deterministic
 * 5) if the Sort operator is within GroupBy separated by 0...n Project, Filter, Repartition or
 *    RepartitionByExpression, RebalancePartitions (with deterministic expressions) operators only
 *    and the aggregate function is order irrelevant
 */
object EliminateSorts extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan.transformWithPruning(
    _.containsPattern(SORT))(applyLocally)
  private val applyLocally: PartialFunction[LogicalPlan, LogicalPlan] = {
    // Case 1: drop foldable sort keys; if none remain, drop the Sort entirely and
    // re-apply this rule to the child.
    case s @ Sort(orders, _, child) if orders.isEmpty || orders.exists(_.child.foldable) =>
      val newOrders = orders.filterNot(_.child.foldable)
      if (newOrders.isEmpty) {
        applyLocally.lift(child).getOrElse(child)
      } else {
        s.copy(order = newOrders)
      }
    // Case 2: a local sort (global = false) whose child already satisfies the ordering.
    case Sort(orders, false, child) if SortOrder.orderingSatisfies(child.outputOrdering, orders) =>
      applyLocally.lift(child).getOrElse(child)
    // Case 3: this Sort supersedes any sorts reachable below through order-preserving,
    // deterministic operators.
    case s @ Sort(_, _, child) => s.copy(child = recursiveRemoveSort(child))
    // Case 4: sorts below a deterministic join condition are irrelevant to the join result.
    case j @ Join(originLeft, originRight, _, cond, _) if cond.forall(_.deterministic) =>
      j.copy(left = recursiveRemoveSort(originLeft), right = recursiveRemoveSort(originRight))
    // Case 5: sorts below an aggregate whose functions are order-insensitive.
    case g @ Aggregate(_, aggs, originChild) if isOrderIrrelevantAggs(aggs) =>
      g.copy(child = recursiveRemoveSort(originChild))
  }
  /**
   * Removes every Sort reachable from `plan` through operators for which sort order is
   * irrelevant (see [[canEliminateSort]]).
   */
  private def recursiveRemoveSort(plan: LogicalPlan): LogicalPlan = {
    // Fast path: nothing to do if no Sort exists anywhere in this subtree.
    if (!plan.containsPattern(SORT)) {
      return plan
    }
    plan match {
      case Sort(_, _, child) => recursiveRemoveSort(child)
      case other if canEliminateSort(other) =>
        other.withNewChildren(other.children.map(recursiveRemoveSort))
      case _ => plan
    }
  }
  // Operators we may look through while removing sorts; all of them must be
  // deterministic so row order cannot influence their output values.
  private def canEliminateSort(plan: LogicalPlan): Boolean = plan match {
    case p: Project => p.projectList.forall(_.deterministic)
    case f: Filter => f.condition.deterministic
    case r: RepartitionByExpression => r.partitionExpressions.forall(_.deterministic)
    case r: RebalancePartitions => r.partitionExpressions.forall(_.deterministic)
    case _: Repartition => true
    case _ => false
  }
  /**
   * Returns true if every aggregate expression in `aggs` produces the same result
   * regardless of input row order.
   */
  private def isOrderIrrelevantAggs(aggs: Seq[NamedExpression]): Boolean = {
    def isOrderIrrelevantAggFunction(func: AggregateFunction): Boolean = func match {
      case _: Min | _: Max | _: Count | _: BitAggregate => true
      // Arithmetic operations for floating-point values are order-sensitive
      // (they are not associative).
      case _: Sum | _: Average | _: CentralMomentAgg =>
        !Seq(FloatType, DoubleType).exists(_.sameType(func.children.head.dataType))
      case _ => false
    }
    def checkValidAggregateExpression(expr: Expression): Boolean = expr match {
      case _: AttributeReference => true
      case ae: AggregateExpression => isOrderIrrelevantAggFunction(ae.aggregateFunction)
      // UDFs are opaque: conservatively assume they are order-sensitive.
      case _: UserDefinedExpression => false
      case e => e.children.forall(checkValidAggregateExpression)
    }
    aggs.forall(checkValidAggregateExpression)
  }
}
/**
 * Removes filters that can be evaluated trivially. This can be done through the following ways:
 * 1) by eliding the filter for cases where it will always evaluate to `true`.
 * 2) by substituting a dummy empty relation when the filter will always evaluate to `false`.
 * 3) by eliminating the always-true conditions given the constraints on the child's output.
 */
object PruneFilters extends Rule[LogicalPlan] with PredicateHelper {
  def apply(plan: LogicalPlan): LogicalPlan = plan.transformWithPruning(
    _.containsPattern(FILTER), ruleId) {
    // If the filter condition always evaluate to true, remove the filter.
    case Filter(Literal(true, BooleanType), child) => child
    // If the filter condition always evaluate to null or false,
    // replace the input with an empty relation.
    case Filter(Literal(null, _), child) =>
      LocalRelation(child.output, data = Seq.empty, isStreaming = plan.isStreaming)
    case Filter(Literal(false, BooleanType), child) =>
      LocalRelation(child.output, data = Seq.empty, isStreaming = plan.isStreaming)
    // If any deterministic condition is guaranteed to be true given the constraints on the child's
    // output, remove the condition
    case f @ Filter(fc, p: LogicalPlan) =>
      // Split the condition into conjuncts: those already implied by the child's
      // constraints can be pruned, the rest must be kept.
      val (prunedPredicates, remainingPredicates) =
        splitConjunctivePredicates(fc).partition { cond =>
          cond.deterministic && p.constraints.contains(cond)
        }
      if (prunedPredicates.isEmpty) {
        f
      } else if (remainingPredicates.isEmpty) {
        // Every conjunct was implied; the filter is redundant.
        p
      } else {
        val newCond = remainingPredicates.reduce(And)
        Filter(newCond, p)
      }
  }
}
/**
 * Unified predicate pushdown for plain operators and joins. Fusing the three local rules
 * into one traversal lets most predicates of cascading joins (e.g. Filter-Join-Join-Join)
 * be pushed down in a single pass.
 */
object PushDownPredicates extends Rule[LogicalPlan] with PredicateHelper {
  // The three local pushdown rules, tried in order at every matching node:
  // filter combining first, then pushdown through non-join operators, then joins.
  private def pushdownRules: PartialFunction[LogicalPlan, LogicalPlan] =
    CombineFilters.applyLocally
      .orElse(PushPredicateThroughNonJoin.applyLocally)
      .orElse(PushPredicateThroughJoin.applyLocally)

  def apply(plan: LogicalPlan): LogicalPlan =
    plan.transformWithPruning(_.containsAnyPattern(FILTER, JOIN))(pushdownRules)
}
/**
 * Pushes [[Filter]] operators through many operators iff:
 * 1) the operator is deterministic
 * 2) the predicate is deterministic and the operator will not change any of rows.
 *
 * This heuristic is valid assuming the expression evaluation cost is minimal.
 */
object PushPredicateThroughNonJoin extends Rule[LogicalPlan] with PredicateHelper {
  def apply(plan: LogicalPlan): LogicalPlan = plan transform applyLocally
  // Exposed (non-private) so PushDownPredicates can chain it with the other
  // pushdown partial functions.
  val applyLocally: PartialFunction[LogicalPlan, LogicalPlan] = {
    // SPARK-13473: We can't push the predicate down when the underlying projection output non-
    // deterministic field(s). Non-deterministic expressions are essentially stateful. This
    // implies that, for a given input row, the output are determined by the expression's initial
    // state and all the input rows processed before. In another word, the order of input rows
    // matters for non-deterministic expressions, while pushing down predicates changes the order.
    // This also applies to Aggregate.
    case Filter(condition, project @ Project(fields, grandChild))
      if fields.forall(_.deterministic) && canPushThroughCondition(grandChild, condition) =>
      // Rewrite the condition in terms of the grandchild's attributes before pushing.
      val aliasMap = getAliasMap(project)
      project.copy(child = Filter(replaceAlias(condition, aliasMap), grandChild))
    case filter @ Filter(condition, aggregate: Aggregate)
      if aggregate.aggregateExpressions.forall(_.deterministic)
        && aggregate.groupingExpressions.nonEmpty =>
      val aliasMap = getAliasMap(aggregate)
      // For each filter, expand the alias and check if the filter can be evaluated using
      // attributes produced by the aggregate operator's child operator.
      val (candidates, nonDeterministic) =
        splitConjunctivePredicates(condition).partition(_.deterministic)
      val (pushDown, rest) = candidates.partition { cond =>
        val replaced = replaceAlias(cond, aliasMap)
        cond.references.nonEmpty && replaced.references.subsetOf(aggregate.child.outputSet)
      }
      val stayUp = rest ++ nonDeterministic
      if (pushDown.nonEmpty) {
        val pushDownPredicate = pushDown.reduce(And)
        val replaced = replaceAlias(pushDownPredicate, aliasMap)
        val newAggregate = aggregate.copy(child = Filter(replaced, aggregate.child))
        // If there is no more filter to stay up, just eliminate the filter.
        // Otherwise, create "Filter(stayUp) <- Aggregate <- Filter(pushDownPredicate)".
        if (stayUp.isEmpty) newAggregate else Filter(stayUp.reduce(And), newAggregate)
      } else {
        filter
      }
    // Push [[Filter]] operators through [[Window]] operators. Parts of the predicate that can be
    // pushed beneath must satisfy the following conditions:
    // 1. All the expressions are part of window partitioning key. The expressions can be compound.
    // 2. Deterministic.
    // 3. Placed before any non-deterministic predicates.
    case filter @ Filter(condition, w: Window)
      if w.partitionSpec.forall(_.isInstanceOf[AttributeReference]) =>
      val partitionAttrs = AttributeSet(w.partitionSpec.flatMap(_.references))
      val (candidates, nonDeterministic) =
        splitConjunctivePredicates(condition).partition(_.deterministic)
      val (pushDown, rest) = candidates.partition { cond =>
        cond.references.subsetOf(partitionAttrs)
      }
      val stayUp = rest ++ nonDeterministic
      if (pushDown.nonEmpty) {
        val pushDownPredicate = pushDown.reduce(And)
        val newWindow = w.copy(child = Filter(pushDownPredicate, w.child))
        if (stayUp.isEmpty) newWindow else Filter(stayUp.reduce(And), newWindow)
      } else {
        filter
      }
    case filter @ Filter(condition, union: Union) =>
      // Union could change the rows, so non-deterministic predicate can't be pushed down
      val (pushDown, stayUp) = splitConjunctivePredicates(condition).partition(_.deterministic)
      if (pushDown.nonEmpty) {
        val pushDownCond = pushDown.reduceLeft(And)
        val output = union.output
        val newGrandChildren = union.children.map { grandchild =>
          // Remap each output attribute of the Union to the corresponding (positional)
          // attribute of this particular child.
          val newCond = pushDownCond transform {
            case e if output.exists(_.semanticEquals(e)) =>
              grandchild.output(output.indexWhere(_.semanticEquals(e)))
          }
          assert(newCond.references.subsetOf(grandchild.outputSet))
          Filter(newCond, grandchild)
        }
        val newUnion = union.withNewChildren(newGrandChildren)
        if (stayUp.nonEmpty) {
          Filter(stayUp.reduceLeft(And), newUnion)
        } else {
          newUnion
        }
      } else {
        filter
      }
    case filter @ Filter(condition, watermark: EventTimeWatermark) =>
      // Predicates referencing the watermark's event-time column must stay above it.
      val (pushDown, stayUp) = splitConjunctivePredicates(condition).partition { p =>
        p.deterministic && !p.references.contains(watermark.eventTime)
      }
      if (pushDown.nonEmpty) {
        val pushDownPredicate = pushDown.reduceLeft(And)
        val newWatermark = watermark.copy(child = Filter(pushDownPredicate, watermark.child))
        // If there is no more filter to stay up, just eliminate the filter.
        // Otherwise, create "Filter(stayUp) <- watermark <- Filter(pushDownPredicate)".
        if (stayUp.isEmpty) newWatermark else Filter(stayUp.reduceLeft(And), newWatermark)
      } else {
        filter
      }
    // Generic case: any whitelisted deterministic unary operator.
    case filter @ Filter(_, u: UnaryNode)
      if canPushThrough(u) && u.expressions.forall(_.deterministic) =>
      pushDownPredicate(filter, u.child) { predicate =>
        u.withNewChildren(Seq(Filter(predicate, u.child)))
      }
  }
  /** Whitelist of unary operators a filter may be pushed through by the generic case. */
  def canPushThrough(p: UnaryNode): Boolean = p match {
    // Note that some operators (e.g. project, aggregate, union) are being handled separately
    // (earlier in this rule).
    case _: AppendColumns => true
    case _: Distinct => true
    case _: Generate => true
    case _: Pivot => true
    case _: RepartitionByExpression => true
    case _: Repartition => true
    case _: RebalancePartitions => true
    case _: ScriptTransformation => true
    case _: Sort => true
    case _: BatchEvalPython => true
    case _: ArrowEvalPython => true
    case _: Expand => true
    case _ => false
  }
  /**
   * Splits the filter's conjuncts into the ones that can be evaluated on `grandchild`
   * and pushed below (via `insertFilter`), and the ones that must remain above.
   * Returns the original filter unchanged if nothing can be pushed.
   */
  private def pushDownPredicate(
      filter: Filter,
      grandchild: LogicalPlan)(insertFilter: Expression => LogicalPlan): LogicalPlan = {
    // Only push down the predicates that is deterministic and all the referenced attributes
    // come from grandchild.
    // TODO: non-deterministic predicates could be pushed through some operators that do not change
    // the rows.
    val (candidates, nonDeterministic) =
      splitConjunctivePredicates(filter.condition).partition(_.deterministic)
    val (pushDown, rest) = candidates.partition { cond =>
      cond.references.subsetOf(grandchild.outputSet)
    }
    val stayUp = rest ++ nonDeterministic
    if (pushDown.nonEmpty) {
      val newChild = insertFilter(pushDown.reduceLeft(And))
      if (stayUp.nonEmpty) {
        Filter(stayUp.reduceLeft(And), newChild)
      } else {
        newChild
      }
    } else {
      filter
    }
  }
  /**
   * Check if we can safely push a filter through a projection, by making sure that predicate
   * subqueries in the condition do not contain the same attributes as the plan they are moved
   * into. This can happen when the plan and predicate subquery have the same source.
   */
  private def canPushThroughCondition(plan: LogicalPlan, condition: Expression): Boolean = {
    val attributes = plan.outputSet
    val matched = condition.find {
      case s: SubqueryExpression => s.plan.outputSet.intersect(attributes).nonEmpty
      case _ => false
    }
    matched.isEmpty
  }
}
/**
* Pushes down [[Filter]] operators where the `condition` can be
* evaluated using only the attributes of the left or right side of a join. Other
* [[Filter]] conditions are moved into the `condition` of the [[Join]].
*
* And also pushes down the join filter, where the `condition` can be evaluated using only the
* attributes of the left or right side of sub query when applicable.
*
* Check https://cwiki.apache.org/confluence/display/Hive/OuterJoinBehavior for more details
*/
object PushPredicateThroughJoin extends Rule[LogicalPlan] with PredicateHelper {
/**
* Splits join condition expressions or filter predicates (on a given join's output) into three
* categories based on the attributes required to evaluate them. Note that we explicitly exclude
* non-deterministic (i.e., stateful) condition expressions in canEvaluateInLeft or
* canEvaluateInRight to prevent pushing these predicates on either side of the join.
*
* @return (canEvaluateInLeft, canEvaluateInRight, haveToEvaluateInBoth)
*/
private def split(condition: Seq[Expression], left: LogicalPlan, right: LogicalPlan) = {
val (pushDownCandidates, nonDeterministic) = condition.partition(_.deterministic)
val (leftEvaluateCondition, rest) =
pushDownCandidates.partition(_.references.subsetOf(left.outputSet))
val (rightEvaluateCondition, commonCondition) =
rest.partition(expr => expr.references.subsetOf(right.outputSet))
(leftEvaluateCondition, rightEvaluateCondition, commonCondition ++ nonDeterministic)
}
private def canPushThrough(joinType: JoinType): Boolean = joinType match {
case _: InnerLike | LeftSemi | RightOuter | LeftOuter | LeftAnti | ExistenceJoin(_) => true
case _ => false
}
def apply(plan: LogicalPlan): LogicalPlan = plan transform applyLocally
// The two pushdown opportunities handled by this rule:
//  1. a Filter sitting directly on top of a Join (push the filter's conjuncts down),
//  2. a Join whose own condition can be partially pushed into its children.
val applyLocally: PartialFunction[LogicalPlan, LogicalPlan] = {
  // push the where condition down into join filter
  case f @ Filter(filterCondition, Join(left, right, joinType, joinCondition, hint))
    if canPushThrough(joinType) =>
    // Classify each conjunct by which side of the join can evaluate it.
    val (leftFilterConditions, rightFilterConditions, commonFilterCondition) =
      split(splitConjunctivePredicates(filterCondition), left, right)
    joinType match {
      case _: InnerLike =>
        // push down the single side `where` condition into respective sides
        val newLeft = leftFilterConditions.
          reduceLeftOption(And).map(Filter(_, left)).getOrElse(left)
        val newRight = rightFilterConditions.
          reduceLeftOption(And).map(Filter(_, right)).getOrElse(right)
        // Predicates referencing both sides become part of the join condition when
        // they can be evaluated there; the remainder stays in a Filter above the join.
        val (newJoinConditions, others) =
          commonFilterCondition.partition(canEvaluateWithinJoin)
        val newJoinCond = (newJoinConditions ++ joinCondition).reduceLeftOption(And)
        val join = Join(newLeft, newRight, joinType, newJoinCond, hint)
        if (others.nonEmpty) {
          Filter(others.reduceLeft(And), join)
        } else {
          join
        }
      case RightOuter =>
        // push down the right side only `where` condition
        val newLeft = left
        val newRight = rightFilterConditions.
          reduceLeftOption(And).map(Filter(_, right)).getOrElse(right)
        val newJoinCond = joinCondition
        val newJoin = Join(newLeft, newRight, RightOuter, newJoinCond, hint)
        // Everything that could not be pushed remains in a Filter above the new join.
        (leftFilterConditions ++ commonFilterCondition).
          reduceLeftOption(And).map(Filter(_, newJoin)).getOrElse(newJoin)
      case LeftOuter | LeftExistence(_) =>
        // push down the left side only `where` condition
        val newLeft = leftFilterConditions.
          reduceLeftOption(And).map(Filter(_, left)).getOrElse(left)
        val newRight = right
        val newJoinCond = joinCondition
        val newJoin = Join(newLeft, newRight, joinType, newJoinCond, hint)
        (rightFilterConditions ++ commonFilterCondition).
          reduceLeftOption(And).map(Filter(_, newJoin)).getOrElse(newJoin)
      case other =>
        // canPushThrough in the guard should make this branch unreachable.
        throw new IllegalStateException(s"Unexpected join type: $other")
    }
  // push down the join filter into sub query scanning if applicable
  case j @ Join(left, right, joinType, joinCondition, hint) if canPushThrough(joinType) =>
    // Same three-way classification, now applied to the join's own condition.
    val (leftJoinConditions, rightJoinConditions, commonJoinCondition) =
      split(joinCondition.map(splitConjunctivePredicates).getOrElse(Nil), left, right)
    joinType match {
      case _: InnerLike | LeftSemi =>
        // push down the single side only join filter for both sides sub queries
        val newLeft = leftJoinConditions.
          reduceLeftOption(And).map(Filter(_, left)).getOrElse(left)
        val newRight = rightJoinConditions.
          reduceLeftOption(And).map(Filter(_, right)).getOrElse(right)
        val newJoinCond = commonJoinCondition.reduceLeftOption(And)
        Join(newLeft, newRight, joinType, newJoinCond, hint)
      case RightOuter =>
        // push down the left side only join filter for left side sub query
        val newLeft = leftJoinConditions.
          reduceLeftOption(And).map(Filter(_, left)).getOrElse(left)
        val newRight = right
        val newJoinCond = (rightJoinConditions ++ commonJoinCondition).reduceLeftOption(And)
        Join(newLeft, newRight, RightOuter, newJoinCond, hint)
      case LeftOuter | LeftAnti | ExistenceJoin(_) =>
        // push down the right side only join filter for right sub query
        val newLeft = left
        val newRight = rightJoinConditions.
          reduceLeftOption(And).map(Filter(_, right)).getOrElse(right)
        val newJoinCond = (leftJoinConditions ++ commonJoinCondition).reduceLeftOption(And)
        Join(newLeft, newRight, joinType, newJoinCond, hint)
      case other =>
        throw new IllegalStateException(s"Unexpected join type: $other")
    }
}
}
/**
* This rule is applied by both normal and AQE Optimizer, and optimizes Limit operators by:
* 1. Eliminate [[Limit]]/[[GlobalLimit]] operators if the child's max row count is <= the limit.
* 2. Combines two adjacent [[Limit]] operators into one, merging the
* expressions into one single expression.
*/
object EliminateLimits extends Rule[LogicalPlan] {
  // A limit is redundant when its value is a foldable literal and the child is
  // statically known to produce at most that many rows.
  private def canEliminate(limitExpr: Expression, child: LogicalPlan): Boolean = {
    limitExpr.foldable && child.maxRows.exists { _ <= limitExpr.eval().asInstanceOf[Int] }
  }
  def apply(plan: LogicalPlan): LogicalPlan = plan.transformDownWithPruning(
    _.containsPattern(LIMIT), ruleId) {
    // Drop limits that cannot reduce the row count.
    case Limit(l, child) if canEliminate(l, child) =>
      child
    case GlobalLimit(l, child) if canEliminate(l, child) =>
      child
    // Collapse adjacent limits of the same flavour, keeping the smaller value.
    case GlobalLimit(le, GlobalLimit(ne, grandChild)) =>
      GlobalLimit(Literal(Least(Seq(ne, le)).eval().asInstanceOf[Int]), grandChild)
    case LocalLimit(le, LocalLimit(ne, grandChild)) =>
      LocalLimit(Literal(Least(Seq(ne, le)).eval().asInstanceOf[Int]), grandChild)
    case Limit(le, Limit(ne, grandChild)) =>
      Limit(Literal(Least(Seq(ne, le)).eval().asInstanceOf[Int]), grandChild)
  }
}
/**
* Check if there are any cartesian products between joins of any type in the optimized plan tree.
* Throw an error if a cartesian product is found without an explicit cross join specified.
* This rule is effectively disabled if the CROSS_JOINS_ENABLED flag is true.
*
* This rule must be run AFTER the ReorderJoin rule since the join conditions for each join must be
* collected before checking if it is a cartesian product. If you have
* SELECT * from R, S where R.r = S.s,
* the join between R and S is not a cartesian product and therefore should be allowed.
* The predicate R.r = S.s is not recognized as a join condition until the ReorderJoin rule.
*
* This rule must be run AFTER the batch "LocalRelation", since a join with empty relation should
* not be a cartesian product.
*/
object CheckCartesianProducts extends Rule[LogicalPlan] {
  /**
   * Check if a join is a cartesian product. Returns true if
   * there are no join conditions involving references from both left and right.
   */
  def isCartesianProduct(join: Join): Boolean = {
    val conditions = join.condition.map(splitConjunctivePredicates).getOrElse(Nil)
    conditions match {
      // A literally-false (or null) condition is not treated as a cartesian product.
      case Seq(Literal.FalseLiteral) | Seq(Literal(null, BooleanType)) => false
      // Cartesian iff no single conjunct references attributes from both children.
      case _ => !conditions.map(_.references).exists(refs =>
        refs.exists(join.left.outputSet.contains) && refs.exists(join.right.outputSet.contains))
    }
  }
  def apply(plan: LogicalPlan): LogicalPlan =
    if (conf.crossJoinEnabled) {
      // Cross joins explicitly allowed by configuration: nothing to check.
      plan
    } else plan.transformWithPruning(_.containsAnyPattern(INNER_LIKE_JOIN, OUTER_JOIN)) {
      case j @ Join(left, right, Inner | LeftOuter | RightOuter | FullOuter, _, _)
        if isCartesianProduct(j) =>
        throw QueryCompilationErrors.joinConditionMissingOrTrivialError(j, left, right)
    }
}
/**
* Speeds up aggregates on fixed-precision decimals by executing them on unscaled Long values.
*
* This uses the same rules for increasing the precision and scale of the output as
* [[org.apache.spark.sql.catalyst.analysis.DecimalPrecision]].
*/
object DecimalAggregates extends Rule[LogicalPlan] {
  import Decimal.MAX_LONG_DIGITS
  /** Maximum number of decimal digits representable precisely in a Double */
  private val MAX_DOUBLE_DIGITS = 15
  def apply(plan: LogicalPlan): LogicalPlan = plan.transformWithPruning(
    _.containsAnyPattern(SUM, AVERAGE), ruleId) {
    case q: LogicalPlan => q.transformExpressionsDownWithPruning(
      _.containsAnyPattern(SUM, AVERAGE), ruleId) {
      // Window aggregates: rewrite the aggregate nested inside the window expression.
      case we @ WindowExpression(ae @ AggregateExpression(af, _, _, _, _), _) => af match {
        // Summing unscaled values is only done while prec + 10 digits still fit in a Long.
        case Sum(e @ DecimalType.Expression(prec, scale), _) if prec + 10 <= MAX_LONG_DIGITS =>
          MakeDecimal(we.copy(windowFunction = ae.copy(aggregateFunction = Sum(UnscaledValue(e)))),
            prec + 10, scale)
        case Average(e @ DecimalType.Expression(prec, scale), _) if prec + 4 <= MAX_DOUBLE_DIGITS =>
          val newAggExpr =
            we.copy(windowFunction = ae.copy(aggregateFunction = Average(UnscaledValue(e))))
          // The average of unscaled values is rescaled by dividing by 10^scale, then cast
          // back to a decimal with extra headroom (prec + 4, scale + 4).
          Cast(
            Divide(newAggExpr, Literal.create(math.pow(10.0, scale), DoubleType)),
            DecimalType(prec + 4, scale + 4), Option(conf.sessionLocalTimeZone))
        case _ => we
      }
      // Plain (non-window) aggregates: same two rewrites as above.
      case ae @ AggregateExpression(af, _, _, _, _) => af match {
        case Sum(e @ DecimalType.Expression(prec, scale), _) if prec + 10 <= MAX_LONG_DIGITS =>
          MakeDecimal(ae.copy(aggregateFunction = Sum(UnscaledValue(e))), prec + 10, scale)
        case Average(e @ DecimalType.Expression(prec, scale), _) if prec + 4 <= MAX_DOUBLE_DIGITS =>
          val newAggExpr = ae.copy(aggregateFunction = Average(UnscaledValue(e)))
          Cast(
            Divide(newAggExpr, Literal.create(math.pow(10.0, scale), DoubleType)),
            DecimalType(prec + 4, scale + 4), Option(conf.sessionLocalTimeZone))
        case _ => ae
      }
    }
  }
}
/**
* Converts local operations (i.e. ones that don't require data exchange) on `LocalRelation` to
* another `LocalRelation`.
*/
object ConvertToLocalRelation extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan.transformWithPruning(
    _.containsPattern(LOCAL_RELATION), ruleId) {
    // Evaluate the projection eagerly over the in-memory rows.
    case Project(projectList, LocalRelation(output, data, isStreaming))
      if !projectList.exists(hasUnevaluableExpr) =>
      val projection = new InterpretedMutableProjection(projectList, output)
      projection.initialize(0)
      // copy() guards against the mutable projection reusing a single output row.
      LocalRelation(projectList.map(_.toAttribute), data.map(projection(_).copy()), isStreaming)
    // A limit over local data is just a prefix of the row list.
    case Limit(IntegerLiteral(limit), LocalRelation(output, data, isStreaming)) =>
      LocalRelation(output, data.take(limit), isStreaming)
    // Evaluate the filter predicate eagerly over the in-memory rows.
    case Filter(condition, LocalRelation(output, data, isStreaming))
      if !hasUnevaluableExpr(condition) =>
      val predicate = Predicate.create(condition, output)
      predicate.initialize(0)
      LocalRelation(output, data.filter(row => predicate.eval(row)), isStreaming)
  }
  // True if the expression contains anything that cannot be evaluated locally;
  // attribute references are Unevaluable but are resolved by the projection itself.
  private def hasUnevaluableExpr(expr: Expression): Boolean = {
    expr.find(e => e.isInstanceOf[Unevaluable] && !e.isInstanceOf[AttributeReference]).isDefined
  }
}
/**
* Replaces logical [[Distinct]] operator with an [[Aggregate]] operator.
* {{{
* SELECT DISTINCT f1, f2 FROM t ==> SELECT f1, f2 FROM t GROUP BY f1, f2
* }}}
*/
object ReplaceDistinctWithAggregate extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan.transformWithPruning(
    _.containsPattern(DISTINCT_LIKE), ruleId) {
    case Distinct(child) =>
      // Grouping by every output column while emitting those same columns is
      // exactly the semantics of SELECT DISTINCT.
      val cols = child.output
      Aggregate(cols, cols, child)
  }
}
/**
* Replaces logical [[Deduplicate]] operator with an [[Aggregate]] operator.
*/
object ReplaceDeduplicateWithAggregate extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan transformUpWithNewOutput {
    // Only batch plans are rewritten; streaming deduplication is left untouched.
    case d @ Deduplicate(keys, child) if !child.isStreaming =>
      val keyExprIds = keys.map(_.exprId)
      // Key columns pass through as grouping attributes; every other column keeps
      // the First value per group, aliased back to its original name.
      val aggCols = child.output.map { attr =>
        if (keyExprIds.contains(attr.exprId)) {
          attr
        } else {
          Alias(new First(attr).toAggregateExpression(), attr.name)()
        }
      }
      // SPARK-22951: Physical aggregate operators distinguishes global aggregation and grouping
      // aggregations by checking the number of grouping keys. The key difference here is that a
      // global aggregation always returns at least one row even if there are no input rows. Here
      // we append a literal when the grouping key list is empty so that the result aggregate
      // operator is properly treated as a grouping aggregation.
      val nonemptyKeys = if (keys.isEmpty) Literal(1) :: Nil else keys
      val newAgg = Aggregate(nonemptyKeys, aggCols, child)
      // Report the old-output -> new-output attribute mapping so references above
      // this node can be rewritten by transformUpWithNewOutput.
      val attrMapping = d.output.zip(newAgg.output)
      newAgg -> attrMapping
  }
}
/**
* Replaces logical [[Intersect]] operator with a left-semi [[Join]] operator.
* {{{
* SELECT a1, a2 FROM Tab1 INTERSECT SELECT b1, b2 FROM Tab2
* ==> SELECT DISTINCT a1, a2 FROM Tab1 LEFT SEMI JOIN Tab2 ON a1<=>b1 AND a2<=>b2
* }}}
*
* Note:
* 1. This rule is only applicable to INTERSECT DISTINCT. Do not use it for INTERSECT ALL.
* 2. This rule has to be done after de-duplicating the attributes; otherwise, the generated
* join conditions will be incorrect.
*/
object ReplaceIntersectWithSemiJoin extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan.transformWithPruning(
    _.containsPattern(INTERSECT), ruleId) {
    case Intersect(left, right, false) =>
      assert(left.output.size == right.output.size)
      // Pair up the two sides' columns with null-safe equality and AND the pairs
      // together; an empty column list yields no join condition at all.
      val joinCond = left.output.zip(right.output)
        .map { case (l, r) => EqualNullSafe(l, r) }
        .reduceLeftOption(And)
      Distinct(Join(left, right, LeftSemi, joinCond, JoinHint.NONE))
  }
}
/**
* Replaces logical [[Except]] operator with a left-anti [[Join]] operator.
* {{{
* SELECT a1, a2 FROM Tab1 EXCEPT SELECT b1, b2 FROM Tab2
* ==> SELECT DISTINCT a1, a2 FROM Tab1 LEFT ANTI JOIN Tab2 ON a1<=>b1 AND a2<=>b2
* }}}
*
* Note:
* 1. This rule is only applicable to EXCEPT DISTINCT. Do not use it for EXCEPT ALL.
* 2. This rule has to be done after de-duplicating the attributes; otherwise, the generated
* join conditions will be incorrect.
*/
object ReplaceExceptWithAntiJoin extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan.transformWithPruning(
    _.containsPattern(EXCEPT), ruleId) {
    case Except(left, right, false) =>
      assert(left.output.size == right.output.size)
      // Null-safe equality per column pair, folded with AND; reduceLeftOption
      // degrades gracefully to no condition when there are no columns.
      val joinCond = left.output.zip(right.output)
        .map { case (l, r) => EqualNullSafe(l, r) }
        .reduceLeftOption(And)
      Distinct(Join(left, right, LeftAnti, joinCond, JoinHint.NONE))
  }
}
/**
* Replaces logical [[Except]] operator using a combination of Union, Aggregate
* and Generate operator.
*
* Input Query :
* {{{
* SELECT c1 FROM ut1 EXCEPT ALL SELECT c1 FROM ut2
* }}}
*
* Rewritten Query:
* {{{
* SELECT c1
* FROM (
* SELECT replicate_rows(sum_val, c1)
* FROM (
* SELECT c1, sum_val
* FROM (
* SELECT c1, sum(vcol) AS sum_val
* FROM (
* SELECT 1L as vcol, c1 FROM ut1
* UNION ALL
* SELECT -1L as vcol, c1 FROM ut2
* ) AS union_all
* GROUP BY union_all.c1
* )
* WHERE sum_val > 0
* )
* )
* }}}
*/
object RewriteExceptAll extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan.transformWithPruning(
    _.containsPattern(EXCEPT), ruleId) {
    case Except(left, right, true) =>
      assert(left.output.size == right.output.size)
      // Tag left rows with +1 and right rows with -1, then sum the tags per
      // distinct row value: a positive sum is how many times the row survives
      // EXCEPT ALL.
      val newColumnLeft = Alias(Literal(1L), "vcol")()
      val newColumnRight = Alias(Literal(-1L), "vcol")()
      val modifiedLeftPlan = Project(Seq(newColumnLeft) ++ left.output, left)
      val modifiedRightPlan = Project(Seq(newColumnRight) ++ right.output, right)
      val unionPlan = Union(modifiedLeftPlan, modifiedRightPlan)
      val aggSumCol =
        Alias(AggregateExpression(Sum(unionPlan.output.head.toAttribute), Complete, false), "sum")()
      val aggOutputColumns = left.output ++ Seq(aggSumCol)
      val aggregatePlan = Aggregate(left.output, aggOutputColumns, unionPlan)
      // Keep only row values the left side holds strictly more of.
      val filteredAggPlan = Filter(GreaterThan(aggSumCol.toAttribute, Literal(0L)), aggregatePlan)
      // Replicate each surviving row "sum" times to restore multiset semantics.
      val genRowPlan = Generate(
        ReplicateRows(Seq(aggSumCol.toAttribute) ++ left.output),
        unrequiredChildIndex = Nil,
        outer = false,
        qualifier = None,
        left.output,
        filteredAggPlan
      )
      Project(left.output, genRowPlan)
  }
}
/**
* Replaces logical [[Intersect]] operator using a combination of Union, Aggregate
* and Generate operator.
*
* Input Query :
* {{{
* SELECT c1 FROM ut1 INTERSECT ALL SELECT c1 FROM ut2
* }}}
*
* Rewritten Query:
* {{{
* SELECT c1
* FROM (
* SELECT replicate_row(min_count, c1)
* FROM (
* SELECT c1, If (vcol1_cnt > vcol2_cnt, vcol2_cnt, vcol1_cnt) AS min_count
* FROM (
* SELECT c1, count(vcol1) as vcol1_cnt, count(vcol2) as vcol2_cnt
* FROM (
* SELECT true as vcol1, null as vcol2, c1 FROM ut1
* UNION ALL
* SELECT null as vcol1, true as vcol2, c1 FROM ut2
* ) AS union_all
* GROUP BY c1
* HAVING vcol1_cnt >= 1 AND vcol2_cnt >= 1
* )
* )
* )
* }}}
*/
object RewriteIntersectAll extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan.transformWithPruning(
    _.containsPattern(INTERSECT), ruleId) {
    case Intersect(left, right, true) =>
      assert(left.output.size == right.output.size)
      // Each side is tagged with its own boolean marker column (and null for the
      // other side's marker) so that per-side row counts can be recovered with count().
      val trueVcol1 = Alias(Literal(true), "vcol1")()
      val nullVcol1 = Alias(Literal(null, BooleanType), "vcol1")()
      val trueVcol2 = Alias(Literal(true), "vcol2")()
      val nullVcol2 = Alias(Literal(null, BooleanType), "vcol2")()
      // Add a projection on the top of left and right plans to project out
      // the additional virtual columns.
      val leftPlanWithAddedVirtualCols = Project(Seq(trueVcol1, nullVcol2) ++ left.output, left)
      val rightPlanWithAddedVirtualCols = Project(Seq(nullVcol1, trueVcol2) ++ right.output, right)
      val unionPlan = Union(leftPlanWithAddedVirtualCols, rightPlanWithAddedVirtualCols)
      // Expressions to compute count and minimum of both the counts.
      val vCol1AggrExpr =
        Alias(AggregateExpression(Count(unionPlan.output(0)), Complete, false), "vcol1_count")()
      val vCol2AggrExpr =
        Alias(AggregateExpression(Count(unionPlan.output(1)), Complete, false), "vcol2_count")()
      // INTERSECT ALL emits each row min(left count, right count) times.
      val ifExpression = Alias(If(
        GreaterThan(vCol1AggrExpr.toAttribute, vCol2AggrExpr.toAttribute),
        vCol2AggrExpr.toAttribute,
        vCol1AggrExpr.toAttribute
      ), "min_count")()
      val aggregatePlan = Aggregate(left.output,
        Seq(vCol1AggrExpr, vCol2AggrExpr) ++ left.output, unionPlan)
      // Only rows present on both sides (count >= 1 on each) survive.
      val filterPlan = Filter(And(GreaterThanOrEqual(vCol1AggrExpr.toAttribute, Literal(1L)),
        GreaterThanOrEqual(vCol2AggrExpr.toAttribute, Literal(1L))), aggregatePlan)
      val projectMinPlan = Project(left.output ++ Seq(ifExpression), filterPlan)
      // Apply the replicator to replicate rows based on min_count
      val genRowPlan = Generate(
        ReplicateRows(Seq(ifExpression.toAttribute) ++ left.output),
        unrequiredChildIndex = Nil,
        outer = false,
        qualifier = None,
        left.output,
        projectMinPlan
      )
      Project(left.output, genRowPlan)
  }
}
/**
* Removes literals from group expressions in [[Aggregate]], as they have no effect on the result
* but only make the grouping key bigger.
*/
object RemoveLiteralFromGroupExpressions extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan.transformWithPruning(
    _.containsPattern(AGGREGATE), ruleId) {
    case a @ Aggregate(grouping, _, _) if grouping.nonEmpty =>
      // Foldable expressions are constant per row, so dropping them from the
      // grouping key cannot change the grouping.
      val nonFoldable = grouping.filterNot(_.foldable)
      if (nonFoldable.isEmpty) {
        // All grouping expressions are literals. We should not drop them all, because this can
        // change the return semantics when the input of the Aggregate is empty (SPARK-17114). We
        // instead replace this by single, easy to hash/sort, literal expression.
        a.copy(groupingExpressions = Seq(Literal(0, IntegerType)))
      } else {
        a.copy(groupingExpressions = nonFoldable)
      }
  }
}
/**
* Prunes unnecessary fields from a [[Generate]] if it is under a project which does not refer to
* any generated attributes, e.g., a count-like aggregation on an exploded array.
*/
object GenerateOptimization extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan.transformDownWithPruning(
    _.containsAllPatterns(PROJECT, GENERATE), ruleId) {
    // Only fires when the project references none of the generate's output and the
    // generator is an explode-style one over an array of structs.
    case p @ Project(_, g: Generate) if p.references.isEmpty
      && g.generator.isInstanceOf[ExplodeBase] =>
      g.generator.children.head.dataType match {
        case ArrayType(StructType(fields), containsNull) if fields.length > 1 =>
          // Try to pick up smallest field
          val sortedFields = fields.zipWithIndex.sortBy(f => f._1.dataType.defaultSize)
          // Explode only that one field instead of the whole struct.
          val extractor = GetArrayStructFields(g.generator.children.head, sortedFields(0)._1,
            sortedFields(0)._2, fields.length, containsNull || sortedFields(0)._1.nullable)
          val rewrittenG = g.transformExpressions {
            case e: ExplodeBase =>
              e.withNewChildren(Seq(extractor))
          }
          // As we change the child of the generator, its output data type must be updated.
          val updatedGeneratorOutput = rewrittenG.generatorOutput
            .zip(rewrittenG.generator.elementSchema.toAttributes)
            .map { case (oldAttr, newAttr) =>
              // Preserve the original exprId and name so upstream references stay valid.
              newAttr.withExprId(oldAttr.exprId).withName(oldAttr.name)
            }
          assert(updatedGeneratorOutput.length == rewrittenG.generatorOutput.length,
            "Updated generator output must have the same length " +
              "with original generator output.")
          val updatedGenerate = rewrittenG.copy(generatorOutput = updatedGeneratorOutput)
          p.withNewChildren(Seq(updatedGenerate))
        case _ => p
      }
  }
}
/**
* Removes repetition from group expressions in [[Aggregate]], as they have no effect on the result
* but only make the grouping key bigger.
*/
object RemoveRepetitionFromGroupExpressions extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan.transformWithPruning(
    _.containsPattern(AGGREGATE), ruleId) {
    case a @ Aggregate(grouping, _, _) if grouping.size > 1 =>
      // ExpressionSet collapses semantically-equal grouping expressions.
      val deduped = ExpressionSet(grouping).toSeq
      if (deduped.size < grouping.size) {
        a.copy(groupingExpressions = deduped)
      } else {
        // Nothing removed: return the node untouched to avoid a needless copy.
        a
      }
  }
}
/**
* Replaces GlobalLimit 0 and LocalLimit 0 nodes (subtree) with empty Local Relation, as they don't
* return any rows.
*/
object OptimizeLimitZero extends Rule[LogicalPlan] {
  // returns empty Local Relation corresponding to given plan, preserving its
  // output attributes and streaming flag
  private def empty(plan: LogicalPlan) =
    LocalRelation(plan.output, data = Seq.empty, isStreaming = plan.isStreaming)
  def apply(plan: LogicalPlan): LogicalPlan = plan.transformUpWithPruning(
    _.containsAllPatterns(LIMIT, LITERAL)) {
    // Nodes below GlobalLimit or LocalLimit can be pruned if the limit value is zero (0).
    // Any subtree in the logical plan that has GlobalLimit 0 or LocalLimit 0 as its root is
    // semantically equivalent to an empty relation.
    //
    // In such cases, the effects of Limit 0 can be propagated through the Logical Plan by replacing
    // the (Global/Local) Limit subtree with an empty LocalRelation, thereby pruning the subtree
    // below and triggering other optimization rules of PropagateEmptyRelation to propagate the
    // changes up the Logical Plan.
    //
    // Replace Global Limit 0 nodes with empty Local Relation
    case gl @ GlobalLimit(IntegerLiteral(0), _) =>
      empty(gl)
    // Note: For all SQL queries, if a LocalLimit 0 node exists in the Logical Plan, then a
    // GlobalLimit 0 node would also exist. Thus, the above case would be sufficient to handle
    // almost all cases. However, if a user explicitly creates a Logical Plan with LocalLimit 0 node
    // then the following rule will handle that case as well.
    //
    // Replace Local Limit 0 nodes with empty Local Relation
    case ll @ LocalLimit(IntegerLiteral(0), _) =>
      empty(ll)
  }
}
| apache-2.0 |
mcobrien/netty | buffer/src/main/java/io/netty/buffer/PooledUnsafeDirectByteBuf.java | 11458 | /*
* Copyright 2013 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer;
import io.netty.util.Recycler;
import io.netty.util.internal.PlatformDependent;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.nio.channels.ClosedChannelException;
import java.nio.channels.FileChannel;
import java.nio.channels.GatheringByteChannel;
import java.nio.channels.ScatteringByteChannel;
/**
 * A pooled, direct (off-heap) {@link ByteBuf} whose single-value reads and writes go
 * through raw native addresses (via {@link UnsafeByteBufUtil} and
 * {@link PlatformDependent}) rather than through the backing NIO {@link ByteBuffer}.
 * Instances are recycled through a {@link Recycler} to avoid allocation churn.
 */
final class PooledUnsafeDirectByteBuf extends PooledByteBuf<ByteBuffer> {
    // Object pool handing out recycled instances; newObject is only invoked when
    // the pool has none available.
    private static final Recycler<PooledUnsafeDirectByteBuf> RECYCLER = new Recycler<PooledUnsafeDirectByteBuf>() {
        @Override
        protected PooledUnsafeDirectByteBuf newObject(Handle<PooledUnsafeDirectByteBuf> handle) {
            return new PooledUnsafeDirectByteBuf(handle, 0);
        }
    };

    /** Fetches a (possibly recycled) instance and re-arms it for the given max capacity. */
    static PooledUnsafeDirectByteBuf newInstance(int maxCapacity) {
        PooledUnsafeDirectByteBuf buf = RECYCLER.get();
        buf.reuse(maxCapacity);
        return buf;
    }

    // Native address of this buffer's index 0 (direct buffer base address + pool offset).
    private long memoryAddress;

    private PooledUnsafeDirectByteBuf(Recycler.Handle<PooledUnsafeDirectByteBuf> recyclerHandle, int maxCapacity) {
        super(recyclerHandle, maxCapacity);
    }

    @Override
    void init(PoolChunk<ByteBuffer> chunk, long handle, int offset, int length, int maxLength,
            PoolThreadCache cache) {
        super.init(chunk, handle, offset, length, maxLength, cache);
        // The backing chunk (and thus the base address) may have changed.
        initMemoryAddress();
    }

    @Override
    void initUnpooled(PoolChunk<ByteBuffer> chunk, int length) {
        super.initUnpooled(chunk, length);
        initMemoryAddress();
    }

    // Caches the absolute native address so per-access arithmetic is a single add.
    private void initMemoryAddress() {
        memoryAddress = PlatformDependent.directBufferAddress(memory) + offset;
    }

    @Override
    protected ByteBuffer newInternalNioBuffer(ByteBuffer memory) {
        return memory.duplicate();
    }

    @Override
    public boolean isDirect() {
        return true;
    }

    // ---- Single-value getters: delegate to UnsafeByteBufUtil at the raw address. ----

    @Override
    protected byte _getByte(int index) {
        return UnsafeByteBufUtil.getByte(addr(index));
    }

    @Override
    protected short _getShort(int index) {
        return UnsafeByteBufUtil.getShort(addr(index));
    }

    @Override
    protected short _getShortLE(int index) {
        return UnsafeByteBufUtil.getShortLE(addr(index));
    }

    @Override
    protected int _getUnsignedMedium(int index) {
        return UnsafeByteBufUtil.getUnsignedMedium(addr(index));
    }

    @Override
    protected int _getUnsignedMediumLE(int index) {
        return UnsafeByteBufUtil.getUnsignedMediumLE(addr(index));
    }

    @Override
    protected int _getInt(int index) {
        return UnsafeByteBufUtil.getInt(addr(index));
    }

    @Override
    protected int _getIntLE(int index) {
        return UnsafeByteBufUtil.getIntLE(addr(index));
    }

    @Override
    protected long _getLong(int index) {
        return UnsafeByteBufUtil.getLong(addr(index));
    }

    @Override
    protected long _getLongLE(int index) {
        return UnsafeByteBufUtil.getLongLE(addr(index));
    }

    @Override
    public ByteBuf getBytes(int index, ByteBuf dst, int dstIndex, int length) {
        UnsafeByteBufUtil.getBytes(this, addr(index), index, dst, dstIndex, length);
        return this;
    }

    @Override
    public ByteBuf getBytes(int index, byte[] dst, int dstIndex, int length) {
        UnsafeByteBufUtil.getBytes(this, addr(index), index, dst, dstIndex, length);
        return this;
    }

    @Override
    public ByteBuf getBytes(int index, ByteBuffer dst) {
        UnsafeByteBufUtil.getBytes(this, addr(index), index, dst);
        return this;
    }

    @Override
    public ByteBuf readBytes(ByteBuffer dst) {
        int length = dst.remaining();
        checkReadableBytes(length);
        getBytes(readerIndex, dst);
        readerIndex += length;
        return this;
    }

    @Override
    public ByteBuf getBytes(int index, OutputStream out, int length) throws IOException {
        UnsafeByteBufUtil.getBytes(this, addr(index), index, out, length);
        return this;
    }

    @Override
    public int getBytes(int index, GatheringByteChannel out, int length) throws IOException {
        return getBytes(index, out, length, false);
    }

    // internal == true reuses the shared internal NIO buffer; otherwise a fresh
    // duplicate is used so concurrent callers don't clash on position/limit.
    private int getBytes(int index, GatheringByteChannel out, int length, boolean internal) throws IOException {
        checkIndex(index, length);
        if (length == 0) {
            return 0;
        }
        ByteBuffer tmpBuf;
        if (internal) {
            tmpBuf = internalNioBuffer();
        } else {
            tmpBuf = memory.duplicate();
        }
        index = idx(index);
        tmpBuf.clear().position(index).limit(index + length);
        return out.write(tmpBuf);
    }

    @Override
    public int getBytes(int index, FileChannel out, long position, int length) throws IOException {
        return getBytes(index, out, position, length, false);
    }

    private int getBytes(int index, FileChannel out, long position, int length, boolean internal) throws IOException {
        checkIndex(index, length);
        if (length == 0) {
            return 0;
        }
        ByteBuffer tmpBuf = internal ? internalNioBuffer() : memory.duplicate();
        index = idx(index);
        tmpBuf.clear().position(index).limit(index + length);
        // Positional write: does not touch the channel's own position.
        return out.write(tmpBuf, position);
    }

    @Override
    public int readBytes(GatheringByteChannel out, int length)
            throws IOException {
        checkReadableBytes(length);
        int readBytes = getBytes(readerIndex, out, length, true);
        readerIndex += readBytes;
        return readBytes;
    }

    @Override
    public int readBytes(FileChannel out, long position, int length)
            throws IOException {
        checkReadableBytes(length);
        int readBytes = getBytes(readerIndex, out, position, length, true);
        readerIndex += readBytes;
        return readBytes;
    }

    // ---- Single-value setters: delegate to UnsafeByteBufUtil at the raw address. ----

    @Override
    protected void _setByte(int index, int value) {
        UnsafeByteBufUtil.setByte(addr(index), (byte) value);
    }

    @Override
    protected void _setShort(int index, int value) {
        UnsafeByteBufUtil.setShort(addr(index), value);
    }

    @Override
    protected void _setShortLE(int index, int value) {
        UnsafeByteBufUtil.setShortLE(addr(index), value);
    }

    @Override
    protected void _setMedium(int index, int value) {
        UnsafeByteBufUtil.setMedium(addr(index), value);
    }

    @Override
    protected void _setMediumLE(int index, int value) {
        UnsafeByteBufUtil.setMediumLE(addr(index), value);
    }

    @Override
    protected void _setInt(int index, int value) {
        UnsafeByteBufUtil.setInt(addr(index), value);
    }

    @Override
    protected void _setIntLE(int index, int value) {
        UnsafeByteBufUtil.setIntLE(addr(index), value);
    }

    @Override
    protected void _setLong(int index, long value) {
        UnsafeByteBufUtil.setLong(addr(index), value);
    }

    @Override
    protected void _setLongLE(int index, long value) {
        UnsafeByteBufUtil.setLongLE(addr(index), value);
    }

    @Override
    public ByteBuf setBytes(int index, ByteBuf src, int srcIndex, int length) {
        UnsafeByteBufUtil.setBytes(this, addr(index), index, src, srcIndex, length);
        return this;
    }

    @Override
    public ByteBuf setBytes(int index, byte[] src, int srcIndex, int length) {
        UnsafeByteBufUtil.setBytes(this, addr(index), index, src, srcIndex, length);
        return this;
    }

    @Override
    public ByteBuf setBytes(int index, ByteBuffer src) {
        UnsafeByteBufUtil.setBytes(this, addr(index), index, src);
        return this;
    }

    @Override
    public int setBytes(int index, InputStream in, int length) throws IOException {
        return UnsafeByteBufUtil.setBytes(this, addr(index), index, in, length);
    }

    @Override
    public int setBytes(int index, ScatteringByteChannel in, int length) throws IOException {
        checkIndex(index, length);
        ByteBuffer tmpBuf = internalNioBuffer();
        index = idx(index);
        tmpBuf.clear().position(index).limit(index + length);
        try {
            return in.read(tmpBuf);
        } catch (ClosedChannelException ignored) {
            // A closed channel is reported as end-of-stream rather than an error.
            return -1;
        }
    }

    @Override
    public int setBytes(int index, FileChannel in, long position, int length) throws IOException {
        checkIndex(index, length);
        ByteBuffer tmpBuf = internalNioBuffer();
        index = idx(index);
        tmpBuf.clear().position(index).limit(index + length);
        try {
            return in.read(tmpBuf, position);
        } catch (ClosedChannelException ignored) {
            return -1;
        }
    }

    @Override
    public ByteBuf copy(int index, int length) {
        return UnsafeByteBufUtil.copy(this, addr(index), index, length);
    }

    @Override
    public int nioBufferCount() {
        return 1;
    }

    @Override
    public ByteBuffer[] nioBuffers(int index, int length) {
        return new ByteBuffer[] { nioBuffer(index, length) };
    }

    @Override
    public ByteBuffer nioBuffer(int index, int length) {
        checkIndex(index, length);
        index = idx(index);
        // Independent slice over the requested range; safe to hand to callers.
        return ((ByteBuffer) memory.duplicate().position(index).limit(index + length)).slice();
    }

    @Override
    public ByteBuffer internalNioBuffer(int index, int length) {
        checkIndex(index, length);
        index = idx(index);
        // Shared buffer: callers must not retain it across other operations.
        return (ByteBuffer) internalNioBuffer().clear().position(index).limit(index + length);
    }

    @Override
    public boolean hasArray() {
        return false;
    }

    @Override
    public byte[] array() {
        throw new UnsupportedOperationException("direct buffer");
    }

    @Override
    public int arrayOffset() {
        throw new UnsupportedOperationException("direct buffer");
    }

    @Override
    public boolean hasMemoryAddress() {
        return true;
    }

    @Override
    public long memoryAddress() {
        ensureAccessible();
        return memoryAddress;
    }

    // Absolute native address of the given reader/writer index.
    private long addr(int index) {
        return memoryAddress + index;
    }

    @Override
    protected SwappedByteBuf newSwappedByteBuf() {
        if (PlatformDependent.isUnaligned()) {
            // Only use if unaligned access is supported otherwise there is no gain.
            return new UnsafeDirectSwappedByteBuf(this);
        }
        return super.newSwappedByteBuf();
    }

    @Override
    public ByteBuf setZero(int index, int length) {
        UnsafeByteBufUtil.setZero(this, addr(index), index, length);
        return this;
    }

    @Override
    public ByteBuf writeZero(int length) {
        ensureWritable(length);
        int wIndex = writerIndex;
        setZero(wIndex, length);
        writerIndex = wIndex + length;
        return this;
    }
}
| apache-2.0 |
Kast0rTr0y/jackrabbit-oak | oak-run/run_writeacl.sh | 2200 | #!/bin/sh
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ---- Benchmark configuration -------------------------------------------------
TITLE=ConcurrentWriteACLTest
BENCH="ConcurrentWriteACLTest"
ADMIN="true"
RUNTIME=10
RANDOM_USER="true"
FIXS="Oak-Mongo" # Jackrabbit"
THREADS="1,2,4,8,10,15,20,50"
PROFILE=false
NUM_ITEMS=10
# One timestamped CSV per run, e.g. ConcurrentWriteACLTest_20240101_120000.csv
LOG=$TITLE"_$(date +'%Y%m%d_%H%M%S').csv"
# Record the configuration at the top of the CSV so results stay reproducible.
echo "Benchmarks: $BENCH" > $LOG
echo "Fixtures: $FIXS" >> $LOG
echo "Admin User: $ADMIN" >> $LOG
echo "Runtime: $RUNTIME" >> $LOG
echo "Num Items: $NUM_ITEMS" >> $LOG
echo "Concurrency: $THREADS" >> $LOG
echo "Random User: $RANDOM_USER" >> $LOG
echo "Profiling: $PROFILE" >> $LOG
echo "--------------------------------------" >> $LOG
# Run every benchmark x admin-flag x fixture combination.
for bm in $BENCH
do
    for user in $ADMIN
    do
        # we start new VMs for each fixture to minimize memory impacts between them
        for fix in $FIXS
        do
            echo "Executing benchmarks as admin: $user on $fix" | tee -a $LOG
            echo "-----------------------------------------------------------" | tee -a $LOG
            # Clear out any state left behind by a previous fixture run.
            rm -rf target/Jackrabbit-* target/Oak-Tar-*
            cmd="java -Xmx2048m -Dprofile=$PROFILE -Druntime=$RUNTIME -Dwarmup=5 -jar target/oak-run-*-SNAPSHOT.jar benchmark --itemsToRead $NUM_ITEMS --csvFile $LOG --concurrency $THREADS --runAsAdmin $user --report false --randomUser $RANDOM_USER $bm $fix"
            echo $cmd
            # Intentionally unquoted so the shell word-splits the assembled command line.
            $cmd
        done
    done
done
echo "-----------------------------------------"
echo "Benchmark completed. see $LOG for details:"
cat $LOG
| apache-2.0 |
keycloak/keycloak | testsuite/integration-arquillian/tests/base/src/test/java/org/keycloak/testsuite/broker/KcSamlBrokerTest.java | 21270 | package org.keycloak.testsuite.broker;
import org.keycloak.admin.client.resource.IdentityProviderResource;
import org.keycloak.admin.client.resource.RealmResource;
import org.keycloak.admin.client.resource.UserResource;
import com.google.common.collect.ImmutableMap;
import org.keycloak.broker.saml.mappers.AttributeToRoleMapper;
import org.keycloak.broker.saml.mappers.UserAttributeMapper;
import org.keycloak.dom.saml.v2.assertion.AssertionType;
import org.keycloak.dom.saml.v2.assertion.AttributeStatementType;
import org.keycloak.dom.saml.v2.assertion.AttributeType;
import org.keycloak.dom.saml.v2.assertion.StatementAbstractType;
import org.keycloak.dom.saml.v2.protocol.AuthnRequestType;
import org.keycloak.dom.saml.v2.protocol.ResponseType;
import org.keycloak.models.IdentityProviderMapperModel;
import org.keycloak.models.IdentityProviderMapperSyncMode;
import org.keycloak.representations.idm.IdentityProviderMapperRepresentation;
import org.keycloak.representations.idm.RoleRepresentation;
import org.keycloak.representations.idm.UserRepresentation;
import org.keycloak.saml.common.constants.JBossSAMLURIConstants;
import org.keycloak.saml.common.exceptions.ConfigurationException;
import org.keycloak.saml.common.exceptions.ParsingException;
import org.keycloak.saml.common.exceptions.ProcessingException;
import org.keycloak.saml.processing.api.saml.v2.request.SAML2Request;
import org.keycloak.saml.processing.core.parsers.saml.protocol.SAMLProtocolQNames;
import org.keycloak.saml.processing.core.saml.v2.common.SAMLDocumentHolder;
import org.keycloak.testsuite.saml.AbstractSamlTest;
import org.keycloak.testsuite.util.SamlClient;
import org.keycloak.testsuite.util.SamlClient.Binding;
import org.keycloak.testsuite.util.SamlClientBuilder;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import javax.ws.rs.core.Response;
import org.hamcrest.Matchers;
import org.junit.Assert;
import org.junit.Test;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import static org.hamcrest.Matchers.hasItems;
import static org.hamcrest.Matchers.not;
import static org.junit.Assert.assertThat;
import static org.keycloak.testsuite.saml.RoleMapperTest.ROLE_ATTRIBUTE_NAME;
import static org.keycloak.testsuite.util.Matchers.isSamlResponse;
import static org.keycloak.testsuite.util.Matchers.statusCodeIsHC;
import static org.keycloak.testsuite.util.SamlStreams.assertionsUnencrypted;
import static org.keycloak.testsuite.util.SamlStreams.attributeStatements;
import static org.keycloak.testsuite.util.SamlStreams.attributesUnecrypted;
import static org.keycloak.testsuite.broker.BrokerTestTools.getConsumerRoot;
import static org.keycloak.testsuite.broker.BrokerTestTools.getProviderRoot;
/**
* Final class as it's not intended to be overriden. Feel free to remove "final" if you really know what you are doing.
*/
public final class KcSamlBrokerTest extends AbstractAdvancedBrokerTest {
// Uses the shared SAML broker setup: a "provider" realm acting as SAML IdP and
// a "consumer" realm that brokers logins to it.
@Override
protected BrokerConfiguration getBrokerConfiguration() {
return KcSamlBrokerConfiguration.INSTANCE;
}
// SAML attribute name that is deliberately sent with an empty value, used to
// exercise an attribute-to-role mapper whose expected value is the empty string.
private static final String EMPTY_ATTRIBUTE_NAME = "empty.attribute.name";
/**
 * Creates the identity-provider mappers installed on the consumer realm's IdP:
 * three attribute-to-role mappers keyed on the "Role" attribute value
 * (manager, user, user-with-dots), one keyed on the attribute friendly name,
 * and one matching an empty attribute value. All mappers use the given sync mode.
 */
@Override
protected Iterable<IdentityProviderMapperRepresentation> createIdentityProviderMappers(IdentityProviderMapperSyncMode syncMode) {
// "Role" attribute value ROLE_MANAGER -> consumer realm role ROLE_MANAGER
IdentityProviderMapperRepresentation attrMapper1 = new IdentityProviderMapperRepresentation();
attrMapper1.setName("manager-role-mapper");
attrMapper1.setIdentityProviderMapper(AttributeToRoleMapper.PROVIDER_ID);
attrMapper1.setConfig(ImmutableMap.<String,String>builder()
.put(IdentityProviderMapperModel.SYNC_MODE, syncMode.toString())
.put(UserAttributeMapper.ATTRIBUTE_NAME, "Role")
.put(ATTRIBUTE_VALUE, ROLE_MANAGER)
.put("role", ROLE_MANAGER)
.build());
// "Role" attribute value ROLE_USER -> consumer realm role ROLE_USER
IdentityProviderMapperRepresentation attrMapper2 = new IdentityProviderMapperRepresentation();
attrMapper2.setName("user-role-mapper");
attrMapper2.setIdentityProviderMapper(AttributeToRoleMapper.PROVIDER_ID);
attrMapper2.setConfig(ImmutableMap.<String,String>builder()
.put(IdentityProviderMapperModel.SYNC_MODE, syncMode.toString())
.put(UserAttributeMapper.ATTRIBUTE_NAME, "Role")
.put(ATTRIBUTE_VALUE, ROLE_USER)
.put("role", ROLE_USER)
.build());
// Matches on the attribute *friendly name* rather than the attribute name.
IdentityProviderMapperRepresentation attrMapper3 = new IdentityProviderMapperRepresentation();
attrMapper3.setName("friendly-mapper");
attrMapper3.setIdentityProviderMapper(AttributeToRoleMapper.PROVIDER_ID);
attrMapper3.setConfig(ImmutableMap.<String,String>builder()
.put(IdentityProviderMapperModel.SYNC_MODE, syncMode.toString())
.put(UserAttributeMapper.ATTRIBUTE_FRIENDLY_NAME, KcSamlBrokerConfiguration.ATTRIBUTE_TO_MAP_FRIENDLY_NAME)
.put(ATTRIBUTE_VALUE, ROLE_FRIENDLY_MANAGER)
.put("role", ROLE_FRIENDLY_MANAGER)
.build());
// Role whose name contains dots - exercised by roleWithDots().
IdentityProviderMapperRepresentation attrMapper4 = new IdentityProviderMapperRepresentation();
attrMapper4.setName("user-role-dot-guide-mapper");
attrMapper4.setIdentityProviderMapper(AttributeToRoleMapper.PROVIDER_ID);
attrMapper4.setConfig(ImmutableMap.<String,String>builder()
.put(IdentityProviderMapperModel.SYNC_MODE, syncMode.toString())
.put(UserAttributeMapper.ATTRIBUTE_NAME, "Role")
.put(ATTRIBUTE_VALUE, ROLE_USER_DOT_GUIDE)
.put("role", ROLE_USER_DOT_GUIDE)
.build());
// Grants EMPTY_ATTRIBUTE_ROLE when the attribute is present with an empty value -
// exercised by emptyAttributeToRoleMapperTest().
IdentityProviderMapperRepresentation attrMapper5 = new IdentityProviderMapperRepresentation();
attrMapper5.setName("empty-attribute-to-role-mapper");
attrMapper5.setIdentityProviderMapper(AttributeToRoleMapper.PROVIDER_ID);
attrMapper5.setConfig(ImmutableMap.<String,String>builder()
.put(IdentityProviderMapperModel.SYNC_MODE, syncMode.toString())
.put(UserAttributeMapper.ATTRIBUTE_NAME, EMPTY_ATTRIBUTE_NAME)
.put(ATTRIBUTE_VALUE, "")
.put("role", EMPTY_ATTRIBUTE_ROLE)
.build());
return Arrays.asList(attrMapper1, attrMapper2, attrMapper3, attrMapper4, attrMapper5 );
}
/**
 * Registers one extra attribute-to-role mapper (Role attribute value
 * ROLE_FRIENDLY_MANAGER) on the consumer realm's IdP with its own sync mode,
 * independent of the mappers created by createIdentityProviderMappers().
 */
protected void createAdditionalMapperWithCustomSyncMode(IdentityProviderMapperSyncMode syncMode) {
IdentityProviderMapperRepresentation friendlyManagerMapper = new IdentityProviderMapperRepresentation();
friendlyManagerMapper.setName("friendly-manager-role-mapper");
friendlyManagerMapper.setIdentityProviderMapper(AttributeToRoleMapper.PROVIDER_ID);
friendlyManagerMapper.setConfig(ImmutableMap.<String,String>builder()
.put(IdentityProviderMapperModel.SYNC_MODE, syncMode.toString())
.put(UserAttributeMapper.ATTRIBUTE_NAME, "Role")
.put(ATTRIBUTE_VALUE, ROLE_FRIENDLY_MANAGER)
.put("role", ROLE_FRIENDLY_MANAGER)
.build());
friendlyManagerMapper.setIdentityProviderAlias(bc.getIDPAlias());
RealmResource realm = adminClient.realm(bc.consumerRealmName());
IdentityProviderResource idpResource = realm.identityProviders().get(bc.getIDPAlias());
idpResource.addMapper(friendlyManagerMapper).close();
}
/**
 * With FORCE sync mode the role mappers must be re-evaluated on every login:
 * roles added to or removed from the user on the provider realm are reflected
 * in the user's realm roles after each subsequent broker login.
 */
@Test
public void mapperUpdatesRolesOnEveryLogInForLegacyMode() {
createRolesForRealm(bc.providerRealmName());
createRolesForRealm(bc.consumerRealmName());
createRoleMappersForConsumerRealm(IdentityProviderMapperSyncMode.FORCE);
RoleRepresentation managerRole = adminClient.realm(bc.providerRealmName()).roles().get(ROLE_MANAGER).toRepresentation();
RoleRepresentation friendlyManagerRole = adminClient.realm(bc.providerRealmName()).roles().get(ROLE_FRIENDLY_MANAGER).toRepresentation();
RoleRepresentation userRole = adminClient.realm(bc.providerRealmName()).roles().get(ROLE_USER).toRepresentation();
UserResource userResource = adminClient.realm(bc.providerRealmName()).users().get(userId);
// First login: user only has the manager role on the provider side.
userResource.roles().realmLevel().add(Collections.singletonList(managerRole));
logInAsUserInIDPForFirstTime();
Set<String> currentRoles = userResource.roles().realmLevel().listAll().stream()
.map(RoleRepresentation::getName)
.collect(Collectors.toSet());
assertThat(currentRoles, hasItems(ROLE_MANAGER));
assertThat(currentRoles, not(hasItems(ROLE_USER, ROLE_FRIENDLY_MANAGER)));
logoutFromRealm(getConsumerRoot(), bc.consumerRealmName());
// Second login: two more roles granted on the provider must appear.
userResource.roles().realmLevel().add(Collections.singletonList(userRole));
userResource.roles().realmLevel().add(Collections.singletonList(friendlyManagerRole));
logInAsUserInIDP();
currentRoles = userResource.roles().realmLevel().listAll().stream()
.map(RoleRepresentation::getName)
.collect(Collectors.toSet());
assertThat(currentRoles, hasItems(ROLE_MANAGER, ROLE_USER, ROLE_FRIENDLY_MANAGER));
logoutFromRealm(getConsumerRoot(), bc.consumerRealmName());
// Third login: a role removed on the provider must be removed again.
userResource.roles().realmLevel().remove(Collections.singletonList(friendlyManagerRole));
logInAsUserInIDP();
currentRoles = userResource.roles().realmLevel().listAll().stream()
.map(RoleRepresentation::getName)
.collect(Collectors.toSet());
assertThat(currentRoles, hasItems(ROLE_MANAGER, ROLE_USER));
assertThat(currentRoles, not(hasItems(ROLE_FRIENDLY_MANAGER)));
logoutFromRealm(getProviderRoot(), bc.providerRealmName());
logoutFromRealm(getConsumerRoot(), bc.consumerRealmName());
}
/**
 * Verifies that a role whose name contains dots (ROLE_USER_DOT_GUIDE) is
 * mapped correctly onto the brokered user in the consumer realm, and that a
 * role mapped via the attribute friendly name is added and removed as the
 * provider-side user attribute changes.
 */
@Test
public void roleWithDots() {
createRolesForRealm(bc.providerRealmName());
createRolesForRealm(bc.consumerRealmName());
createRoleMappersForConsumerRealm();
RoleRepresentation managerRole = adminClient.realm(bc.providerRealmName()).roles().get(ROLE_MANAGER).toRepresentation();
RoleRepresentation userRole = adminClient.realm(bc.providerRealmName()).roles().get(ROLE_USER).toRepresentation();
RoleRepresentation userRoleDotGuide = adminClient.realm(bc.providerRealmName()).roles().get(ROLE_USER_DOT_GUIDE).toRepresentation();
UserResource userResourceProv = adminClient.realm(bc.providerRealmName()).users().get(userId);
userResourceProv.roles().realmLevel().add(Collections.singletonList(managerRole));
logInAsUserInIDPForFirstTime();
// The brokered user is created in the consumer realm on first login; look it up there.
String consUserId = adminClient.realm(bc.consumerRealmName()).users().search(bc.getUserLogin()).iterator().next().getId();
UserResource userResourceCons = adminClient.realm(bc.consumerRealmName()).users().get(consUserId);
Set<String> currentRoles = userResourceCons.roles().realmLevel().listAll().stream()
.map(RoleRepresentation::getName)
.collect(Collectors.toSet());
assertThat(currentRoles, hasItems(ROLE_MANAGER));
assertThat(currentRoles, not(hasItems(ROLE_USER, ROLE_FRIENDLY_MANAGER, ROLE_USER_DOT_GUIDE)));
logoutFromRealm(getConsumerRoot(), bc.consumerRealmName());
// Set the friendly-name attribute and grant the dotted role on the provider side.
UserRepresentation urp = userResourceProv.toRepresentation();
urp.setAttributes(new HashMap<>());
urp.getAttributes().put(KcSamlBrokerConfiguration.ATTRIBUTE_TO_MAP_FRIENDLY_NAME, Collections.singletonList(ROLE_FRIENDLY_MANAGER));
userResourceProv.update(urp);
userResourceProv.roles().realmLevel().add(Collections.singletonList(userRole));
userResourceProv.roles().realmLevel().add(Collections.singletonList(userRoleDotGuide));
logInAsUserInIDP();
currentRoles = userResourceCons.roles().realmLevel().listAll().stream()
.map(RoleRepresentation::getName)
.collect(Collectors.toSet());
assertThat(currentRoles, hasItems(ROLE_MANAGER, ROLE_USER, ROLE_USER_DOT_GUIDE, ROLE_FRIENDLY_MANAGER));
logoutFromRealm(getConsumerRoot(), bc.consumerRealmName());
// Clearing the attribute must remove the friendly-name-mapped role on next login.
urp = userResourceProv.toRepresentation();
urp.setAttributes(new HashMap<>());
userResourceProv.update(urp);
logInAsUserInIDP();
currentRoles = userResourceCons.roles().realmLevel().listAll().stream()
.map(RoleRepresentation::getName)
.collect(Collectors.toSet());
assertThat(currentRoles, hasItems(ROLE_MANAGER, ROLE_USER, ROLE_USER_DOT_GUIDE));
assertThat(currentRoles, not(hasItems(ROLE_FRIENDLY_MANAGER)));
logoutFromRealm(getProviderRoot(), bc.providerRealmName());
logoutFromRealm(getConsumerRoot(), bc.consumerRealmName());
}
// KEYCLOAK-6106
/**
 * Regression test: a brokered login initiated by a client whose client id
 * contains dots and a slash must complete successfully end to end.
 */
@Test
public void loginClientWithDotsInName() throws Exception {
AuthnRequestType loginRep = SamlClient.createLoginRequestDocument(AbstractSamlTest.SAML_CLIENT_ID_SALES_POST + ".dot/ted", getConsumerRoot() + "/sales-post/saml", null);
Document doc = SAML2Request.convert(loginRep);
SAMLDocumentHolder samlResponse = new SamlClientBuilder()
.authnRequest(getConsumerSamlEndpoint(bc.consumerRealmName()), doc, Binding.POST).build() // Request to consumer IdP
.login().idp(bc.getIDPAlias()).build()
.processSamlResponse(Binding.POST) // AuthnRequest to producer IdP
.targetAttributeSamlRequest()
.build()
.login().user(bc.getUserLogin(), bc.getUserPassword()).build()
.processSamlResponse(Binding.POST) // Response from producer IdP
.build()
// first-broker flow
.updateProfile().firstName("a").lastName("b").email(bc.getUserEmail()).username(bc.getUserLogin()).build()
.followOneRedirect()
.getSamlResponse(Binding.POST); // Response from consumer IdP
Assert.assertThat(samlResponse, Matchers.notNullValue());
Assert.assertThat(samlResponse.getSamlObject(), isSamlResponse(JBossSAMLURIConstants.STATUS_SUCCESS));
}
/**
 * Injects an attribute with a null value into the producer IdP's SAML
 * response and verifies that the empty-attribute-to-role mapper grants
 * EMPTY_ATTRIBUTE_ROLE, which must then show up in the role attribute of the
 * consumer IdP's final SAML response.
 */
@Test
public void emptyAttributeToRoleMapperTest() throws ParsingException, ConfigurationException, ProcessingException {
createRolesForRealm(bc.consumerRealmName());
createRoleMappersForConsumerRealm();
AuthnRequestType loginRep = SamlClient.createLoginRequestDocument(AbstractSamlTest.SAML_CLIENT_ID_SALES_POST + ".dot/ted", getConsumerRoot() + "/sales-post/saml", null);
Document doc = SAML2Request.convert(loginRep);
SAMLDocumentHolder samlResponse = new SamlClientBuilder()
.authnRequest(getConsumerSamlEndpoint(bc.consumerRealmName()), doc, Binding.POST).build() // Request to consumer IdP
.login().idp(bc.getIDPAlias()).build()
.processSamlResponse(Binding.POST) // AuthnRequest to producer IdP
.targetAttributeSamlRequest()
.build()
.login().user(bc.getUserLogin(), bc.getUserPassword()).build()
.processSamlResponse(Binding.POST) // Response from producer IdP
.transformObject(ob -> {
assertThat(ob, org.keycloak.testsuite.util.Matchers.isSamlResponse(JBossSAMLURIConstants.STATUS_SUCCESS));
ResponseType resp = (ResponseType) ob;
// Append an attribute with a null (i.e. empty) value to the first assertion.
Set<StatementAbstractType> statements = resp.getAssertions().get(0).getAssertion().getStatements();
AttributeStatementType attributeType = (AttributeStatementType) statements.stream()
.filter(statement -> statement instanceof AttributeStatementType)
.findFirst().orElse(new AttributeStatementType());
AttributeType attr = new AttributeType(EMPTY_ATTRIBUTE_NAME);
attr.addAttributeValue(null);
attributeType.addAttribute(new AttributeStatementType.ASTChoiceType(attr));
resp.getAssertions().get(0).getAssertion().addStatement(attributeType);
return ob;
})
.build()
// first-broker flow
.updateProfile().firstName("a").lastName("b").email(bc.getUserEmail()).username(bc.getUserLogin()).build()
.followOneRedirect()
.getSamlResponse(Binding.POST); // Response from consumer IdP
Assert.assertThat(samlResponse, Matchers.notNullValue());
Assert.assertThat(samlResponse.getSamlObject(), isSamlResponse(JBossSAMLURIConstants.STATUS_SUCCESS));
// The mapped role must be present among the role attribute values of the final response.
Stream<AssertionType> assertionTypeStream = assertionsUnencrypted(samlResponse.getSamlObject());
Stream<AttributeType> attributeStatementTypeStream = attributesUnecrypted(attributeStatements(assertionTypeStream));
Set<String> attributeValues = attributeStatementTypeStream
.filter(a -> a.getName().equals(ROLE_ATTRIBUTE_NAME))
.flatMap(a -> a.getAttributeValue().stream())
.map(Object::toString)
.collect(Collectors.toSet());
assertThat(attributeValues, hasItems(EMPTY_ATTRIBUTE_ROLE));
}
// KEYCLOAK-17935
/**
 * A producer response whose InResponseTo does not match the broker's
 * outstanding AuthnRequest must be rejected with HTTP 400.
 */
@Test
public void loginInResponseToMismatch() throws Exception {
AuthnRequestType loginRep = SamlClient.createLoginRequestDocument(AbstractSamlTest.SAML_CLIENT_ID_SALES_POST + ".dot/ted", getConsumerRoot() + "/sales-post/saml", null);
Document doc = SAML2Request.convert(loginRep);
new SamlClientBuilder()
.authnRequest(getConsumerSamlEndpoint(bc.consumerRealmName()), doc, Binding.POST).build() // Request to consumer IdP
.login().idp(bc.getIDPAlias()).build()
.processSamlResponse(Binding.POST) // AuthnRequest to producer IdP
.targetAttributeSamlRequest()
.build()
.login().user(bc.getUserLogin(), bc.getUserPassword()).build()
.processSamlResponse(Binding.POST) // Response from producer IdP
.transformDocument(this::tamperInResponseTo)
.build()
.execute(hr -> assertThat(hr, statusCodeIsHC(Response.Status.BAD_REQUEST))); // Response from consumer IdP
}
// KEYCLOAK-17935
/**
 * A producer response with the InResponseTo attribute removed entirely must
 * be rejected with HTTP 400.
 */
@Test
public void loginInResponseToMissing() throws Exception {
AuthnRequestType loginRep = SamlClient.createLoginRequestDocument(AbstractSamlTest.SAML_CLIENT_ID_SALES_POST + ".dot/ted", getConsumerRoot() + "/sales-post/saml", null);
Document doc = SAML2Request.convert(loginRep);
new SamlClientBuilder()
.authnRequest(getConsumerSamlEndpoint(bc.consumerRealmName()), doc, Binding.POST).build() // Request to consumer IdP
.login().idp(bc.getIDPAlias()).build()
.processSamlResponse(Binding.POST) // AuthnRequest to producer IdP
.targetAttributeSamlRequest()
.build()
.login().user(bc.getUserLogin(), bc.getUserPassword()).build()
.processSamlResponse(Binding.POST) // Response from producer IdP
.transformDocument(this::removeInResponseTo)
.build()
.execute(hr -> assertThat(hr, statusCodeIsHC(Response.Status.BAD_REQUEST))); // Response from consumer IdP
}
// KEYCLOAK-17935
/**
 * A producer response whose InResponseTo attribute is present but empty must
 * be rejected with HTTP 400.
 */
@Test
public void loginInResponseToEmpty() throws Exception {
AuthnRequestType loginRep = SamlClient.createLoginRequestDocument(AbstractSamlTest.SAML_CLIENT_ID_SALES_POST + ".dot/ted", getConsumerRoot() + "/sales-post/saml", null);
Document doc = SAML2Request.convert(loginRep);
new SamlClientBuilder()
.authnRequest(getConsumerSamlEndpoint(bc.consumerRealmName()), doc, Binding.POST).build() // Request to consumer IdP
.login().idp(bc.getIDPAlias()).build()
.processSamlResponse(Binding.POST) // AuthnRequest to producer IdP
.targetAttributeSamlRequest()
.build()
.login().user(bc.getUserLogin(), bc.getUserPassword()).build()
.processSamlResponse(Binding.POST) // Response from producer IdP
.transformDocument(this::clearInResponseTo)
.build()
.execute(hr -> assertThat(hr, statusCodeIsHC(Response.Status.BAD_REQUEST))); // Response from consumer IdP
}
// Prefixes the InResponseTo value with "TAMPERED_" so it no longer matches.
private Document tamperInResponseTo(Document orig) {
Element rootElement = orig.getDocumentElement();
rootElement.setAttribute(SAMLProtocolQNames.ATTR_IN_RESPONSE_TO.getQName().getLocalPart(), "TAMPERED_" + rootElement.getAttribute("InResponseTo"));
return orig;
}
// Removes the InResponseTo attribute from the response document.
private Document removeInResponseTo(Document orig) {
Element rootElement = orig.getDocumentElement();
rootElement.removeAttribute(SAMLProtocolQNames.ATTR_IN_RESPONSE_TO.getQName().getLocalPart());
return orig;
}
// Sets the InResponseTo attribute to the empty string.
private Document clearInResponseTo(Document orig) {
Element rootElement = orig.getDocumentElement();
rootElement.setAttribute(SAMLProtocolQNames.ATTR_IN_RESPONSE_TO.getQName().getLocalPart(), "");
return orig;
}
}
| apache-2.0 |
JoeChien23/hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java | 38058 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ha;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.util.ZKUtil.ZKAuthInfo;
import org.apache.hadoop.util.StringUtils;
import org.apache.zookeeper.data.ACL;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher.Event;
import org.apache.zookeeper.ZKUtil;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.AsyncCallback.*;
import org.apache.zookeeper.data.Stat;
import org.apache.zookeeper.KeeperException.Code;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
/**
*
* This class implements a simple library to perform leader election on top of
* Apache Zookeeper. Using Zookeeper as a coordination service, leader election
* can be performed by atomically creating an ephemeral lock file (znode) on
* Zookeeper. The service instance that successfully creates the znode becomes
* active and the rest become standbys. <br/>
* This election mechanism is only efficient for small number of election
* candidates (order of 10's) because contention on single znode by a large
* number of candidates can result in Zookeeper overload. <br/>
* The elector does not guarantee fencing (protection of shared resources) among
* service instances. After it has notified an instance about becoming a leader,
* then that instance must ensure that it meets the service consistency
* requirements. If it cannot do so, then it is recommended to quit the
* election. The application implements the {@link ActiveStandbyElectorCallback}
* to interact with the elector
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class ActiveStandbyElector implements StatCallback, StringCallback {
/**
* Callback interface to interact with the ActiveStandbyElector object. <br/>
* The application will be notified with a callback only on state changes
* (i.e. there will never be successive calls to becomeActive without an
* intermediate call to enterNeutralMode). <br/>
* The callbacks will be running on Zookeeper client library threads. The
* application should return from these callbacks quickly so as not to impede
* Zookeeper client library performance and notifications. The app will
* typically remember the state change and return from the callback. It will
* then proceed with implementing actions around that state change. It is
* possible to be called back again while these actions are in flight and the
* app should handle this scenario.
*/
public interface ActiveStandbyElectorCallback {
/**
 * This method is called when the app becomes the active leader.
 * If the service fails to become active, it should throw
 * ServiceFailedException. This will cause the elector to
 * sleep for a short period, then re-join the election.
 *
 * Callback implementations are expected to manage their own
 * timeouts (e.g. when making an RPC to a remote node).
 */
// On ServiceFailedException the elector re-joins after a short pause
// (see SLEEP_AFTER_FAILURE_TO_BECOME_ACTIVE in the enclosing class).
void becomeActive() throws ServiceFailedException;
/**
 * This method is called when the app becomes a standby
 */
void becomeStandby();
/**
 * If the elector gets disconnected from Zookeeper and does not know about
 * the lock state, then it will notify the service via the enterNeutralMode
 * interface. The service may choose to ignore this or stop doing state
 * changing operations. Upon reconnection, the elector verifies the leader
 * status and calls back on the becomeActive and becomeStandby app
 * interfaces. <br/>
 * Zookeeper disconnects can happen due to network issues or loss of
 * Zookeeper quorum. Thus enterNeutralMode can be used to guard against
 * split-brain issues. In such situations it might be prudent to call
 * becomeStandby too. However, such state change operations might be
 * expensive and enterNeutralMode can help guard against doing that for
 * transient issues.
 */
void enterNeutralMode();
/**
 * If there is any fatal error (e.g. wrong ACL's, unexpected Zookeeper
 * errors or Zookeeper persistent unavailability) then notifyFatalError is
 * called to notify the app about it.
 */
// NOTE(review): callbacks run on ZooKeeper client threads; implementations
// should return quickly (see class javadoc above this interface).
void notifyFatalError(String errorMessage);
/**
 * If an old active has failed, rather than exited gracefully, then
 * the new active may need to take some fencing actions against it
 * before proceeding with failover.
 *
 * @param oldActiveData the application data provided by the prior active
 */
void fenceOldActive(byte[] oldActiveData);
}
/**
 * Name of the lock znode used by the library. Protected for access in test
 * classes
 */
@VisibleForTesting
protected static final String LOCK_FILENAME = "ActiveStandbyElectorLock";
// Persistent znode recording the last active's data, used for fencing.
@VisibleForTesting
protected static final String BREADCRUMB_FILENAME = "ActiveBreadCrumb";
public static final Log LOG = LogFactory.getLog(ActiveStandbyElector.class);
// Milliseconds to pause before re-joining the election after the app's
// becomeActive() callback fails.
private static final int SLEEP_AFTER_FAILURE_TO_BECOME_ACTIVE = 1000;
// State of the underlying ZooKeeper connection as seen by this elector.
private static enum ConnectionState {
DISCONNECTED, CONNECTED, TERMINATED
};
// Election state of this elector instance.
static enum State {
INIT, ACTIVE, STANDBY, NEUTRAL
};
private State state = State.INIT;
// Retry counters for the asynchronous create/stat operations.
private int createRetryCount = 0;
private int statRetryCount = 0;
private ZooKeeper zkClient;
// Watcher bound to the current ZooKeeper client instance.
private WatcherWithClientRef watcher;
private ConnectionState zkConnectionState = ConnectionState.TERMINATED;
// Application callback and ZooKeeper connection/configuration parameters,
// all fixed at construction time.
private final ActiveStandbyElectorCallback appClient;
private final String zkHostPort;
private final int zkSessionTimeout;
private final List<ACL> zkAcl;
private final List<ZKAuthInfo> zkAuthInfo;
// Application data written to the lock znode when joining the election.
private byte[] appData;
private final String zkLockFilePath;
private final String zkBreadCrumbPath;
private final String znodeWorkingDir;
private final int maxRetryNum;
// Allows tests to serialize session re-establishment.
private Lock sessionReestablishLockForTests = new ReentrantLock();
// True while this instance is participating in the election.
private boolean wantToBeInElection;
/**
* Create a new ActiveStandbyElector object <br/>
* The elector is created by providing to it the Zookeeper configuration, the
* parent znode under which to create the znode and a reference to the
* callback interface. <br/>
* The parent znode name must be the same for all service instances and
* different across services. <br/>
* After the leader has been lost, a new leader will be elected after the
* session timeout expires. Hence, the app must set this parameter based on
* its needs for failure response time. The session timeout must be greater
* than the Zookeeper disconnect timeout and is recommended to be 3X that
* value to enable Zookeeper to retry transient disconnections. Setting a very
* short session timeout may result in frequent transitions between active and
* standby states during issues like network outages/GS pauses.
*
* @param zookeeperHostPorts
* ZooKeeper hostPort for all ZooKeeper servers
* @param zookeeperSessionTimeout
* ZooKeeper session timeout
* @param parentZnodeName
* znode under which to create the lock
* @param acl
* ZooKeeper ACL's
* @param authInfo a list of authentication credentials to add to the
* ZK connection
* @param app
* reference to callback interface object
* @throws IOException
* @throws HadoopIllegalArgumentException
*/
public ActiveStandbyElector(String zookeeperHostPorts,
    int zookeeperSessionTimeout, String parentZnodeName, List<ACL> acl,
    List<ZKAuthInfo> authInfo,
    ActiveStandbyElectorCallback app, int maxRetryNum) throws IOException,
    HadoopIllegalArgumentException, KeeperException {
  // Reject any configuration that is obviously unusable before touching state.
  boolean argsValid = app != null && acl != null && parentZnodeName != null
      && zookeeperHostPorts != null && zookeeperSessionTimeout > 0;
  if (!argsValid) {
    throw new HadoopIllegalArgumentException("Invalid argument");
  }
  this.appClient = app;
  this.zkHostPort = zookeeperHostPorts;
  this.zkSessionTimeout = zookeeperSessionTimeout;
  this.zkAcl = acl;
  this.zkAuthInfo = authInfo;
  this.znodeWorkingDir = parentZnodeName;
  // Both well-known znodes live directly under the working directory.
  this.zkLockFilePath = parentZnodeName + "/" + LOCK_FILENAME;
  this.zkBreadCrumbPath = parentZnodeName + "/" + BREADCRUMB_FILENAME;
  this.maxRetryNum = maxRetryNum;
  // Establish the ZooKeeper connection now so later API calls can use it.
  createConnection();
}
/**
* To participate in election, the app will call joinElection. The result will
* be notified by a callback on either the becomeActive or becomeStandby app
* interfaces. <br/>
* After this the elector will automatically monitor the leader status and
* perform re-election if necessary<br/>
* The app could potentially start off in standby mode and ignore the
* becomeStandby call.
*
* @param data
* to be set by the app. non-null data must be set.
* @throws HadoopIllegalArgumentException
* if valid data is not supplied
*/
public synchronized void joinElection(byte[] data)
    throws HadoopIllegalArgumentException {
  if (data == null) {
    throw new HadoopIllegalArgumentException("data cannot be null");
  }
  if (wantToBeInElection) {
    LOG.info("Already in election. Not re-connecting.");
    return;
  }
  // Keep a private copy so later mutation of the caller's array cannot
  // affect what we write to the lock znode.
  appData = data.clone();
  LOG.debug("Attempting active election for " + this);
  joinElectionInternal();
}
/**
* @return true if the configured parent znode exists
*/
public synchronized boolean parentZNodeExists()
    throws IOException, InterruptedException {
  Preconditions.checkState(zkClient != null);
  try {
    // exists() returns a Stat for the node, or null if it is absent.
    Stat nodeStat = zkClient.exists(znodeWorkingDir, false);
    return nodeStat != null;
  } catch (KeeperException e) {
    throw new IOException("Couldn't determine existence of znode '" +
        znodeWorkingDir + "'", e);
  }
}
/**
* Utility function to ensure that the configured base znode exists.
* This recursively creates the znode as well as all of its parents.
*/
public synchronized void ensureParentZNode()
    throws IOException, InterruptedException {
  Preconditions.checkState(!wantToBeInElection,
      "ensureParentZNode() may not be called while in the election");
  String[] segments = znodeWorkingDir.split("/");
  // An absolute path splits into a leading empty segment; anything else is invalid.
  Preconditions.checkArgument(segments.length >= 1 &&
      segments[0].isEmpty(),
      "Invalid path: %s", znodeWorkingDir);
  // Create each ancestor in turn, from the root downward.
  String prefixPath = "";
  for (int idx = 1; idx < segments.length; idx++) {
    prefixPath = prefixPath + "/" + segments[idx];
    LOG.debug("Ensuring existence of " + prefixPath);
    try {
      createWithRetries(prefixPath, new byte[]{}, zkAcl, CreateMode.PERSISTENT);
    } catch (KeeperException e) {
      if (!isNodeExists(e.code())) {
        throw new IOException("Couldn't create " + prefixPath, e);
      }
      // Node already present - that is exactly what we want.
    }
  }
  LOG.info("Successfully created " + znodeWorkingDir + " in ZK.");
}
/**
* Clear all of the state held within the parent ZNode.
* This recursively deletes everything within the znode as well as the
* parent znode itself. It should only be used when it's certain that
* no electors are currently participating in the election.
*/
public synchronized void clearParentZNode()
    throws IOException, InterruptedException {
  Preconditions.checkState(!wantToBeInElection,
      "clearParentZNode() may not be called while in the election");
  // Recursive delete of the working directory, retried like other ZK calls.
  ZKAction<Void> deleteAction = new ZKAction<Void>() {
    @Override
    public Void run() throws KeeperException, InterruptedException {
      ZKUtil.deleteRecursive(zkClient, znodeWorkingDir);
      return null;
    }
  };
  try {
    LOG.info("Recursively deleting " + znodeWorkingDir + " from ZK...");
    zkDoWithRetries(deleteAction);
  } catch (KeeperException e) {
    throw new IOException("Couldn't clear parent znode " + znodeWorkingDir,
        e);
  }
  LOG.info("Successfully deleted " + znodeWorkingDir + " from ZK.");
}
/**
* Any service instance can drop out of the election by calling quitElection.
* <br/>
* This will lose any leader status, if held, and stop monitoring of the lock
* node. <br/>
* If the instance wants to participate in election again, then it needs to
* call joinElection(). <br/>
* This allows service instances to take themselves out of rotation for known
* impending unavailable states (e.g. long GC pause or software upgrade).
*
* @param needFence true if the underlying daemon may need to be fenced
* if a failover occurs due to dropping out of the election.
*/
public synchronized void quitElection(boolean needFence) {
  LOG.info("Yielding from election");
  // An active instance leaving gracefully (no fencing required) removes its
  // breadcrumb so the next active does not try to fence us.
  boolean gracefulActiveExit = (state == State.ACTIVE) && !needFence;
  if (gracefulActiveExit) {
    // If active is gracefully going back to standby mode, remove
    // our permanent znode so no one fences us.
    tryDeleteOwnBreadCrumbNode();
  }
  reset();
  wantToBeInElection = false;
}
/**
* Exception thrown when there is no active leader
*/
public static class ActiveNotFoundException extends Exception {
// Fixed serial version UID so serialized instances remain compatible
// across recompilations of this class.
private static final long serialVersionUID = 3505396722342846462L;
}
/**
 * get data set by the active leader
 *
 * @return data set by the active instance
 * @throws ActiveNotFoundException
 * when there is no active leader
 * @throws KeeperException
 * other zookeeper operation errors
 * @throws InterruptedException
 * @throws IOException
 * when ZooKeeper connection could not be established
 */
public synchronized byte[] getActiveData() throws ActiveNotFoundException,
KeeperException, InterruptedException, IOException {
try {
// Lazily connect if we have no live client yet.
if (zkClient == null) {
createConnection();
}
Stat stat = new Stat();
// Read the ephemeral lock znode; its data identifies the active leader.
return getDataWithRetries(zkLockFilePath, false, stat);
} catch(KeeperException e) {
Code code = e.code();
if (isNodeDoesNotExist(code)) {
// handle the commonly expected cases that make sense for us
throw new ActiveNotFoundException();
} else {
throw e;
}
}
}
/**
 * interface implementation of Zookeeper callback for create
 */
@Override
public synchronized void processResult(int rc, String path, Object ctx,
String name) {
// Ignore callbacks from a previous ZK client instance.
if (isStaleClient(ctx)) return;
LOG.debug("CreateNode result: " + rc + " for path: " + path
+ " connectionState: " + zkConnectionState +
" for " + this);
Code code = Code.get(rc);
if (isSuccess(code)) {
// we successfully created the znode. we are the leader. start monitoring
if (becomeActive()) {
monitorActiveStatus();
} else {
reJoinElectionAfterFailureToBecomeActive();
}
return;
}
if (isNodeExists(code)) {
if (createRetryCount == 0) {
// znode exists and we did not retry the operation. so a different
// instance has created it. become standby and monitor lock.
becomeStandby();
}
// if we had retried then the znode could have been created by our first
// attempt to the server (that we lost) and this node exists response is
// for the second attempt. verify this case via ephemeral node owner. this
// will happen on the callback for monitoring the lock.
monitorActiveStatus();
return;
}
String errorMessage = "Received create error from Zookeeper. code:"
+ code.toString() + " for path " + path;
LOG.debug(errorMessage);
if (shouldRetry(code)) {
// Transient connection error: retry up to maxRetryNum times.
if (createRetryCount < maxRetryNum) {
LOG.debug("Retrying createNode createRetryCount: " + createRetryCount);
++createRetryCount;
createLockNodeAsync();
return;
}
errorMessage = errorMessage
+ ". Not retrying further znode create connection errors.";
} else if (isSessionExpired(code)) {
// This isn't fatal - the client Watcher will re-join the election
LOG.warn("Lock acquisition failed because session was lost");
return;
}
fatalError(errorMessage);
}
/**
 * interface implementation of Zookeeper callback for monitor (exists)
 */
@Override
public synchronized void processResult(int rc, String path, Object ctx,
Stat stat) {
// Ignore callbacks from a previous ZK client instance.
if (isStaleClient(ctx)) return;
assert wantToBeInElection :
"Got a StatNode result after quitting election";
LOG.debug("StatNode result: " + rc + " for path: " + path
+ " connectionState: " + zkConnectionState + " for " + this);
Code code = Code.get(rc);
if (isSuccess(code)) {
// the following owner check completes verification in case the lock znode
// creation was retried
if (stat.getEphemeralOwner() == zkClient.getSessionId()) {
// we own the lock znode. so we are the leader
if (!becomeActive()) {
reJoinElectionAfterFailureToBecomeActive();
}
} else {
// we dont own the lock znode. so we are a standby.
becomeStandby();
}
// the watch set by us will notify about changes
return;
}
if (isNodeDoesNotExist(code)) {
// the lock znode disappeared before we started monitoring it
enterNeutralMode();
joinElectionInternal();
return;
}
String errorMessage = "Received stat error from Zookeeper. code:"
+ code.toString();
LOG.debug(errorMessage);
if (shouldRetry(code)) {
// Transient connection error: retry the exists() call a bounded number
// of times before treating it as fatal.
if (statRetryCount < maxRetryNum) {
++statRetryCount;
monitorLockNodeAsync();
return;
}
errorMessage = errorMessage
+ ". Not retrying further znode monitoring connection errors.";
} else if (isSessionExpired(code)) {
// This isn't fatal - the client Watcher will re-join the election
LOG.warn("Lock monitoring failed because session was lost");
return;
}
fatalError(errorMessage);
}
/**
 * We failed to become active. Re-join the election, but
 * sleep for a few seconds after terminating our existing
 * session, so that other nodes have a chance to become active.
 * The failure to become active is already logged inside
 * becomeActive().
 */
private void reJoinElectionAfterFailureToBecomeActive() {
// Delegates to reJoinElection with a class-level back-off constant.
reJoinElection(SLEEP_AFTER_FAILURE_TO_BECOME_ACTIVE);
}
/**
 * interface implementation of Zookeeper watch events (connection and node),
 * proxied by {@link WatcherWithClientRef}.
 *
 * Handles two distinct event families: connection-state changes
 * (EventType.None) and watches that fired on the lock znode.
 */
synchronized void processWatchEvent(ZooKeeper zk, WatchedEvent event) {
Event.EventType eventType = event.getType();
// Drop events from clients we have already replaced.
if (isStaleClient(zk)) return;
LOG.debug("Watcher event type: " + eventType + " with state:"
+ event.getState() + " for path:" + event.getPath()
+ " connectionState: " + zkConnectionState
+ " for " + this);
if (eventType == Event.EventType.None) {
// the connection state has changed
switch (event.getState()) {
case SyncConnected:
LOG.info("Session connected.");
// if the listener was asked to move to safe state then it needs to
// be undone
ConnectionState prevConnectionState = zkConnectionState;
zkConnectionState = ConnectionState.CONNECTED;
if (prevConnectionState == ConnectionState.DISCONNECTED &&
wantToBeInElection) {
monitorActiveStatus();
}
break;
case Disconnected:
LOG.info("Session disconnected. Entering neutral mode...");
// ask the app to move to safe state because zookeeper connection
// is not active and we dont know our state
zkConnectionState = ConnectionState.DISCONNECTED;
enterNeutralMode();
break;
case Expired:
// the connection got terminated because of session timeout
// call listener to reconnect
LOG.info("Session expired. Entering neutral mode and rejoining...");
enterNeutralMode();
reJoinElection(0);
break;
case SaslAuthenticated:
LOG.info("Successfully authenticated to ZooKeeper using SASL.");
break;
default:
fatalError("Unexpected Zookeeper watch event state: "
+ event.getState());
break;
}
return;
}
// a watch on lock path in zookeeper has fired. so something has changed on
// the lock. ideally we should check that the path is the same as the lock
// path but trusting zookeeper for now
String path = event.getPath();
if (path != null) {
switch (eventType) {
case NodeDeleted:
// Lock holder went away; try to take the lock ourselves.
if (state == State.ACTIVE) {
enterNeutralMode();
}
joinElectionInternal();
break;
case NodeDataChanged:
monitorActiveStatus();
break;
default:
LOG.debug("Unexpected node event: " + eventType + " for path: " + path);
monitorActiveStatus();
}
return;
}
// some unexpected error has occurred
fatalError("Unexpected watch error from Zookeeper");
}
/**
 * Get a new zookeeper client instance. protected so that test class can
 * inherit and pass in a mock object for zookeeper
 *
 * @return new zookeeper client instance
 * @throws IOException
 * @throws KeeperException zookeeper connectionloss exception
 */
protected synchronized ZooKeeper getNewZooKeeper() throws IOException,
KeeperException {
// Unfortunately, the ZooKeeper constructor connects to ZooKeeper and
// may trigger the Connected event immediately. So, if we register the
// watcher after constructing ZooKeeper, we may miss that event. Instead,
// we construct the watcher first, and have it block any events it receives
// before we can set its ZooKeeper reference.
watcher = new WatcherWithClientRef();
ZooKeeper zk = new ZooKeeper(zkHostPort, zkSessionTimeout, watcher);
watcher.setZooKeeperRef(zk);
// Wait for the asynchronous success/failure. This may throw an exception
// if we don't connect within the session timeout.
watcher.waitForZKConnectionEvent(zkSessionTimeout);
// Register SASL/digest credentials only after the connection succeeded.
for (ZKAuthInfo auth : zkAuthInfo) {
zk.addAuthInfo(auth.getScheme(), auth.getAuth());
}
return zk;
}
/**
 * Log a fatal error, drop out of the election, and notify the
 * application client that an unrecoverable failure occurred.
 */
private void fatalError(String errorMessage) {
LOG.fatal(errorMessage);
reset();
appClient.notifyFatalError(errorMessage);
}
/**
 * Start (or restart) asynchronous monitoring of the lock znode,
 * resetting the stat retry counter first.
 */
private void monitorActiveStatus() {
assert wantToBeInElection;
LOG.debug("Monitoring active leader for " + this);
statRetryCount = 0;
monitorLockNodeAsync();
}
/**
 * Enter the election by asynchronously trying to create the lock znode.
 * Re-establishes the ZK session first if no client is currently connected.
 */
private void joinElectionInternal() {
Preconditions.checkState(appData != null,
"trying to join election without any app data");
if (zkClient == null) {
if (!reEstablishSession()) {
fatalError("Failed to reEstablish connection with ZooKeeper");
return;
}
}
createRetryCount = 0;
wantToBeInElection = true;
createLockNodeAsync();
}
/**
 * Tear down the current ZK session, optionally sleep, and re-enter the
 * election (only if the service has reported app data, i.e. is healthy).
 *
 * @param sleepTime milliseconds to pause between disconnect and rejoin
 */
private void reJoinElection(int sleepTime) {
LOG.info("Trying to re-establish ZK session");
// Some of the test cases rely on expiring the ZK sessions and
// ensuring that the other node takes over. But, there's a race
// where the original lease holder could reconnect faster than the other
// thread manages to take the lock itself. This lock allows the
// tests to block the reconnection. It's a shame that this leaked
// into non-test code, but the lock is only acquired here so will never
// be contended.
sessionReestablishLockForTests.lock();
try {
terminateConnection();
sleepFor(sleepTime);
// Should not join election even before the SERVICE is reported
// as HEALTHY from ZKFC monitoring.
if (appData != null) {
joinElectionInternal();
} else {
LOG.info("Not joining election since service has not yet been " +
"reported as healthy.");
}
} finally {
sessionReestablishLockForTests.unlock();
}
}
/**
 * Sleep for the given number of milliseconds.
 * This is non-static, and separated out, so that unit tests
 * can override the behavior not to sleep.
 *
 * @param sleepMs milliseconds to sleep; values &lt;= 0 are a no-op
 */
@VisibleForTesting
protected void sleepFor(int sleepMs) {
  // Guard clause: nothing to do for non-positive durations.
  if (sleepMs <= 0) {
    return;
  }
  try {
    Thread.sleep(sleepMs);
  } catch (InterruptedException e) {
    // Preserve the interrupt status for callers further up the stack.
    Thread.currentThread().interrupt();
  }
}
// Test hook: hold the reconnection lock so reJoinElection() blocks.
@VisibleForTesting
void preventSessionReestablishmentForTests() {
sessionReestablishLockForTests.lock();
}
// Test hook: release the reconnection lock taken by the method above it
// in the source (preventSessionReestablishmentForTests).
@VisibleForTesting
void allowSessionReestablishmentForTests() {
sessionReestablishLockForTests.unlock();
}
/**
 * Test hook exposing the current ZooKeeper session id.
 *
 * @return the session id, or -1 when no client is connected
 */
@VisibleForTesting
synchronized long getZKSessionIdForTests() {
  return (zkClient == null) ? -1 : zkClient.getSessionId();
}
// Test hook exposing the elector's current internal state.
@VisibleForTesting
synchronized State getStateForTests() {
return state;
}
/**
 * Attempt to (re)create the ZooKeeper connection, retrying up to
 * maxRetryNum times with a 5 second pause after each failure.
 *
 * @return true if a connection was established, false otherwise
 */
private boolean reEstablishSession() {
  boolean connected = false;
  for (int attempt = 0; !connected && attempt < maxRetryNum; ++attempt) {
    LOG.debug("Establishing zookeeper connection for " + this);
    try {
      createConnection();
      connected = true;
    } catch (IOException e) {
      LOG.warn(e);
      sleepFor(5000);
    } catch (KeeperException e) {
      LOG.warn(e);
      sleepFor(5000);
    }
  }
  return connected;
}
/**
 * Replace any existing ZooKeeper client with a freshly created one.
 * The old client is closed first so we never hold two live sessions.
 *
 * @throws IOException if closing is interrupted or connecting fails
 * @throws KeeperException if the new client cannot connect
 */
private void createConnection() throws IOException, KeeperException {
if (zkClient != null) {
try {
zkClient.close();
} catch (InterruptedException e) {
throw new IOException("Interrupted while closing ZK",
e);
}
zkClient = null;
watcher = null;
}
// getNewZooKeeper() also assigns the watcher field as a side effect.
zkClient = getNewZooKeeper();
LOG.debug("Created new connection for " + this);
}
/**
 * Close the current ZooKeeper connection (if any) and mark this elector
 * as no longer participating in the election. Safe to call repeatedly.
 */
@InterfaceAudience.Private
public synchronized void terminateConnection() {
if (zkClient == null) {
return;
}
LOG.debug("Terminating ZK connection for " + this);
// Null the fields before close() so stale callbacks are ignored.
ZooKeeper tempZk = zkClient;
zkClient = null;
watcher = null;
try {
tempZk.close();
} catch(InterruptedException e) {
LOG.warn(e);
}
zkConnectionState = ConnectionState.TERMINATED;
wantToBeInElection = false;
}
// Return to the initial state and drop the ZK connection.
private void reset() {
state = State.INIT;
terminateConnection();
}
/**
 * Transition to the ACTIVE state: fence the previous active (if any),
 * write our breadcrumb znode, then notify the application client.
 *
 * @return true if we are (or already were) active; false if any step
 *         failed, in which case the caller rejoins the election
 */
private boolean becomeActive() {
assert wantToBeInElection;
if (state == State.ACTIVE) {
// already active
return true;
}
try {
Stat oldBreadcrumbStat = fenceOldActive();
writeBreadCrumbNode(oldBreadcrumbStat);
LOG.debug("Becoming active for " + this);
appClient.becomeActive();
state = State.ACTIVE;
return true;
} catch (Exception e) {
LOG.warn("Exception handling the winning of election", e);
// Caller will handle quitting and rejoining the election.
return false;
}
}
/**
 * Write the "ActiveBreadCrumb" node, indicating that this node may need
 * to be fenced on failover.
 *
 * @param oldBreadcrumbStat Stat of the previous breadcrumb, or null if
 *        none existed; determines create vs versioned update
 * @throws KeeperException on ZooKeeper errors after retries are exhausted
 * @throws InterruptedException if interrupted while talking to ZooKeeper
 */
private void writeBreadCrumbNode(Stat oldBreadcrumbStat)
throws KeeperException, InterruptedException {
Preconditions.checkState(appData != null, "no appdata");
LOG.info("Writing znode " + zkBreadCrumbPath +
" to indicate that the local node is the most recent active...");
if (oldBreadcrumbStat == null) {
// No previous active, just create the node
createWithRetries(zkBreadCrumbPath, appData, zkAcl,
CreateMode.PERSISTENT);
} else {
// There was a previous active, update the node
// (versioned setData guards against concurrent writers).
setDataWithRetries(zkBreadCrumbPath, appData, oldBreadcrumbStat.getVersion());
}
}
/**
 * Try to delete the "ActiveBreadCrumb" node when gracefully giving up
 * active status.
 * If this fails, it will simply warn, since the graceful release behavior
 * is only an optimization.
 */
private void tryDeleteOwnBreadCrumbNode() {
assert state == State.ACTIVE;
LOG.info("Deleting bread-crumb of active node...");
// Sanity check the data. This shouldn't be strictly necessary,
// but better to play it safe.
Stat stat = new Stat();
byte[] data = null;
try {
data = zkClient.getData(zkBreadCrumbPath, false, stat);
if (!Arrays.equals(data, appData)) {
// Some other node wrote the breadcrumb; deleting it would be wrong.
throw new IllegalStateException(
"We thought we were active, but in fact " +
"the active znode had the wrong data: " +
StringUtils.byteToHexString(data) + " (stat=" + stat + ")");
}
// Versioned delete: fails if the node changed since we read it.
deleteWithRetries(zkBreadCrumbPath, stat.getVersion());
} catch (Exception e) {
// Best-effort: a failure here just means the next active fences us.
LOG.warn("Unable to delete our own bread-crumb of being active at " +
zkBreadCrumbPath + ": " + e.getLocalizedMessage() + ". " +
"Expecting to be fenced by the next active.");
}
}
/**
 * If there is a breadcrumb node indicating that another node may need
 * fencing, try to fence that node.
 * @return the Stat of the breadcrumb node that was read, or null
 * if no breadcrumb node existed
 * @throws InterruptedException if interrupted while talking to ZooKeeper
 * @throws KeeperException for ZK errors other than a missing breadcrumb
 */
private Stat fenceOldActive() throws InterruptedException, KeeperException {
final Stat stat = new Stat();
byte[] data;
LOG.info("Checking for any old active which needs to be fenced...");
try {
data = zkDoWithRetries(new ZKAction<byte[]>() {
@Override
public byte[] run() throws KeeperException, InterruptedException {
return zkClient.getData(zkBreadCrumbPath, false, stat);
}
});
} catch (KeeperException ke) {
if (isNodeDoesNotExist(ke.code())) {
LOG.info("No old node to fence");
return null;
}
// If we failed to read for any other reason, then likely we lost
// our session, or we don't have permissions, etc. In any case,
// we probably shouldn't become active, and failing the whole
// thing is the best bet.
throw ke;
}
LOG.info("Old node exists: " + StringUtils.byteToHexString(data));
if (Arrays.equals(data, appData)) {
LOG.info("But old node has our own data, so don't need to fence it.");
} else {
// Delegate actual fencing of the previous active to the application.
appClient.fenceOldActive(data);
}
return stat;
}
// Transition to STANDBY and notify the app client; no-op if already there.
private void becomeStandby() {
if (state != State.STANDBY) {
LOG.debug("Becoming standby for " + this);
state = State.STANDBY;
appClient.becomeStandby();
}
}
// Transition to NEUTRAL and notify the app client; no-op if already there.
private void enterNeutralMode() {
if (state != State.NEUTRAL) {
LOG.debug("Entering neutral mode for " + this);
state = State.NEUTRAL;
appClient.enterNeutralMode();
}
}
// Asynchronously try to create the ephemeral lock znode; the result
// arrives via processResult(int, String, Object, String). The client
// itself is passed as the context so stale callbacks can be detected.
private void createLockNodeAsync() {
zkClient.create(zkLockFilePath, appData, zkAcl, CreateMode.EPHEMERAL,
this, zkClient);
}
// Asynchronously set a watch on the lock znode; the result arrives via
// processResult(int, String, Object, Stat). The client is the context.
private void monitorLockNodeAsync() {
zkClient.exists(zkLockFilePath,
watcher, this,
zkClient);
}
// Synchronous znode create, retried on transient connection errors.
private String createWithRetries(final String path, final byte[] data,
final List<ACL> acl, final CreateMode mode)
throws InterruptedException, KeeperException {
return zkDoWithRetries(new ZKAction<String>() {
@Override
public String run() throws KeeperException, InterruptedException {
return zkClient.create(path, data, acl, mode);
}
});
}
// Synchronous znode read, retried on transient connection errors.
private byte[] getDataWithRetries(final String path, final boolean watch,
final Stat stat) throws InterruptedException, KeeperException {
return zkDoWithRetries(new ZKAction<byte[]>() {
@Override
public byte[] run() throws KeeperException, InterruptedException {
return zkClient.getData(path, watch, stat);
}
});
}
// Synchronous versioned znode update, retried on transient errors.
private Stat setDataWithRetries(final String path, final byte[] data,
final int version) throws InterruptedException, KeeperException {
return zkDoWithRetries(new ZKAction<Stat>() {
@Override
public Stat run() throws KeeperException, InterruptedException {
return zkClient.setData(path, data, version);
}
});
}
// Synchronous versioned znode delete, retried on transient errors.
private void deleteWithRetries(final String path, final int version)
throws KeeperException, InterruptedException {
zkDoWithRetries(new ZKAction<Void>() {
@Override
public Void run() throws KeeperException, InterruptedException {
zkClient.delete(path, version);
return null;
}
});
}
/**
 * Run a ZooKeeper action, retrying up to maxRetryNum attempts when the
 * failure code is transient (per shouldRetry); otherwise rethrow.
 *
 * @param action the ZooKeeper operation to execute
 * @return the action's result
 * @throws KeeperException when retries are exhausted or non-retriable
 * @throws InterruptedException if the action is interrupted
 */
private <T> T zkDoWithRetries(ZKAction<T> action) throws KeeperException,
    InterruptedException {
  for (int attempt = 0; ; ++attempt) {
    try {
      return action.run();
    } catch (KeeperException ke) {
      boolean retriable = shouldRetry(ke.code());
      // attempt + 1 failures have occurred so far.
      if (!retriable || attempt + 1 >= maxRetryNum) {
        throw ke;
      }
    }
  }
}
// A single ZooKeeper operation, executed (and possibly retried) by
// zkDoWithRetries.
private interface ZKAction<T> {
T run() throws KeeperException, InterruptedException;
}
/**
 * The callbacks and watchers pass a reference to the ZK client
 * which made the original call. We don't want to take action
 * based on any callbacks from prior clients after we quit
 * the election.
 * @param ctx the ZK client passed into the watcher
 * @return true if it matches the current client
 */
private synchronized boolean isStaleClient(Object ctx) {
Preconditions.checkNotNull(ctx);
// Reference equality: a new connection means a new client object.
if (zkClient != (ZooKeeper)ctx) {
LOG.warn("Ignoring stale result from old client with sessionId " +
String.format("0x%08x", ((ZooKeeper)ctx).getSessionId()));
return true;
}
return false;
}
/**
 * Watcher implementation which keeps a reference around to the
 * original ZK connection, and passes it back along with any
 * events.
 *
 * Two latches coordinate startup ordering: events are held until the
 * ZooKeeper reference is set, and the creator waits for the first event
 * to confirm the connection.
 */
private final class WatcherWithClientRef implements Watcher {
private ZooKeeper zk;
/**
 * Latch fired whenever any event arrives. This is used in order
 * to wait for the Connected event when the client is first created.
 */
private CountDownLatch hasReceivedEvent = new CountDownLatch(1);
/**
 * Latch used to wait until the reference to ZooKeeper is set.
 */
private CountDownLatch hasSetZooKeeper = new CountDownLatch(1);
/**
 * Waits for the next event from ZooKeeper to arrive.
 *
 * @param connectionTimeoutMs zookeeper connection timeout in milliseconds
 * @throws KeeperException if the connection attempt times out. This will
 * be a ZooKeeper ConnectionLoss exception code.
 * @throws IOException if interrupted while connecting to ZooKeeper
 */
private void waitForZKConnectionEvent(int connectionTimeoutMs)
throws KeeperException, IOException {
try {
if (!hasReceivedEvent.await(connectionTimeoutMs, TimeUnit.MILLISECONDS)) {
LOG.error("Connection timed out: couldn't connect to ZooKeeper in "
+ connectionTimeoutMs + " milliseconds");
// Close the half-open client before reporting connection loss.
zk.close();
throw KeeperException.create(Code.CONNECTIONLOSS);
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new IOException(
"Interrupted when connecting to zookeeper server", e);
}
}
// Must be called exactly once, immediately after constructing the client.
private void setZooKeeperRef(ZooKeeper zk) {
Preconditions.checkState(this.zk == null,
"zk already set -- must be set exactly once");
this.zk = zk;
hasSetZooKeeper.countDown();
}
@Override
public void process(WatchedEvent event) {
hasReceivedEvent.countDown();
try {
// Block until setZooKeeperRef has run, so the elector sees a
// non-null client reference; log (debug) if that wait times out.
if (!hasSetZooKeeper.await(zkSessionTimeout, TimeUnit.MILLISECONDS)) {
LOG.debug("Event received with stale zk");
}
ActiveStandbyElector.this.processWatchEvent(
zk, event);
} catch (Throwable t) {
// Any escape from event handling is treated as fatal for the elector.
fatalError(
"Failed to process watcher event " + event + ": " +
StringUtils.stringifyException(t));
}
}
}
// True when the ZooKeeper result code signals success.
private static boolean isSuccess(Code code) {
  return Code.OK == code;
}
// True when the result code says the znode already exists.
private static boolean isNodeExists(Code code) {
  return Code.NODEEXISTS == code;
}
// True when the result code says the znode is missing.
private static boolean isNodeDoesNotExist(Code code) {
  return Code.NONODE == code;
}
// True when the result code indicates the ZK session expired.
private static boolean isSessionExpired(Code code) {
  return Code.SESSIONEXPIRED == code;
}
// Only transient connectivity failures are worth retrying.
private static boolean shouldRetry(Code code) {
  switch (code) {
    case CONNECTIONLOSS:
    case OPERATIONTIMEOUT:
      return true;
    default:
      return false;
  }
}
/**
 * Human-readable identity for log messages: identity hash, app data
 * (hex, or "null"), and the application callback.
 */
@Override
public String toString() {
  StringBuilder sb = new StringBuilder("elector id=");
  sb.append(System.identityHashCode(this));
  sb.append(" appData=");
  sb.append((appData == null) ? "null" : StringUtils.byteToHexString(appData));
  sb.append(" cb=").append(appClient);
  return sb.toString();
}
}
| apache-2.0 |
humblec/external-storage | vendor/k8s.io/kubernetes/pkg/kubectl/cmd/replace.go | 9290 | /*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"github.com/spf13/cobra"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/kubectl"
"k8s.io/kubernetes/pkg/kubectl/cmd/templates"
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"k8s.io/kubernetes/pkg/kubectl/resource"
"k8s.io/kubernetes/pkg/kubectl/util/i18n"
)
// Long help and examples for the `kubectl replace` command, rendered
// through the templates/i18n helpers.
var (
replaceLong = templates.LongDesc(i18n.T(`
Replace a resource by filename or stdin.
JSON and YAML formats are accepted. If replacing an existing resource, the
complete resource spec must be provided. This can be obtained by
$ kubectl get TYPE NAME -o yaml
Please refer to the models in https://htmlpreview.github.io/?https://github.com/kubernetes/kubernetes/blob/HEAD/docs/api-reference/v1/definitions.html to find if a field is mutable.`))
replaceExample = templates.Examples(i18n.T(`
# Replace a pod using the data in pod.json.
kubectl replace -f ./pod.json
# Replace a pod based on the JSON passed into stdin.
cat pod.json | kubectl replace -f -
# Update a single-container pod's image version (tag) to v4
kubectl get pod mypod -o yaml | sed 's/\(image: myimage\):.*$/\1:v4/' | kubectl replace -f -
# Force replace, delete and then re-create the resource
kubectl replace --force -f ./pod.json`))
)
// NewCmdReplace builds the cobra command for `kubectl replace`, wiring up
// its flags (filename, force, cascade, grace-period, timeout, validation,
// output, annotation and record options) and its Run handler.
func NewCmdReplace(f cmdutil.Factory, out io.Writer) *cobra.Command {
options := &resource.FilenameOptions{}
cmd := &cobra.Command{
Use: "replace -f FILENAME",
DisableFlagsInUseLine: true,
Short: i18n.T("Replace a resource by filename or stdin"),
Long: replaceLong,
Example: replaceExample,
Run: func(cmd *cobra.Command, args []string) {
cmdutil.CheckErr(cmdutil.ValidateOutputArgs(cmd))
err := RunReplace(f, out, cmd, args, options)
cmdutil.CheckErr(err)
},
}
usage := "to use to replace the resource."
cmdutil.AddFilenameOptionFlags(cmd, options, usage)
// --filename is mandatory: replace always needs a resource spec.
cmd.MarkFlagRequired("filename")
cmd.Flags().Bool("force", false, "Delete and re-create the specified resource")
cmd.Flags().Bool("cascade", false, "Only relevant during a force replace. If true, cascade the deletion of the resources managed by this resource (e.g. Pods created by a ReplicationController).")
cmd.Flags().Int("grace-period", -1, "Only relevant during a force replace. Period of time in seconds given to the old resource to terminate gracefully. Ignored if negative.")
cmd.Flags().Duration("timeout", 0, "Only relevant during a force replace. The length of time to wait before giving up on a delete of the old resource, zero means determine a timeout from the size of the object. Any other values should contain a corresponding time unit (e.g. 1s, 2m, 3h).")
cmdutil.AddValidateFlags(cmd)
cmdutil.AddOutputFlagsForMutation(cmd)
cmdutil.AddApplyAnnotationFlags(cmd)
cmdutil.AddRecordFlag(cmd)
cmdutil.AddInclude3rdPartyFlags(cmd)
return cmd
}
// RunReplace implements `kubectl replace`: it validates flags, builds the
// resources from the given filenames, and replaces each one on the server.
// When --force is set it delegates to forceReplace (delete + re-create).
func RunReplace(f cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string, options *resource.FilenameOptions) error {
schema, err := f.Validator(cmdutil.GetFlagBool(cmd, "validate"))
if err != nil {
return err
}
cmdNamespace, enforceNamespace, err := f.DefaultNamespace()
if err != nil {
return err
}
force := cmdutil.GetFlagBool(cmd, "force")
if cmdutil.IsFilenameSliceEmpty(options.Filenames) {
return cmdutil.UsageErrorf(cmd, "Must specify --filename to replace")
}
shortOutput := cmdutil.GetFlagString(cmd, "output") == "name"
if force {
return forceReplace(f, out, cmd, args, shortOutput, options)
}
// --grace-period and --timeout only make sense for a force replace.
if cmdutil.GetFlagInt(cmd, "grace-period") >= 0 {
return fmt.Errorf("--grace-period must have --force specified")
}
if cmdutil.GetFlagDuration(cmd, "timeout") != 0 {
return fmt.Errorf("--timeout must have --force specified")
}
r := f.NewBuilder().
Unstructured().
Schema(schema).
ContinueOnError().
NamespaceParam(cmdNamespace).DefaultNamespace().
FilenameParam(enforceNamespace, options).
Flatten().
Do()
if err := r.Err(); err != nil {
return err
}
return r.Visit(func(info *resource.Info, err error) error {
if err != nil {
return err
}
// Apply the last-applied-configuration annotation if requested.
if err := kubectl.CreateOrUpdateAnnotation(cmdutil.GetFlagBool(cmd, cmdutil.ApplyAnnotationsFlag), info, f.JSONEncoder()); err != nil {
return cmdutil.AddSourceToErr("replacing", info.Source, err)
}
if cmdutil.ShouldRecord(cmd, info) {
if err := cmdutil.RecordChangeCause(info.Object, f.Command(cmd, false)); err != nil {
return cmdutil.AddSourceToErr("replacing", info.Source, err)
}
}
// Serialize the object with the annotation applied.
obj, err := resource.NewHelper(info.Client, info.Mapping).Replace(info.Namespace, info.Name, true, info.Object)
if err != nil {
return cmdutil.AddSourceToErr("replacing", info.Source, err)
}
info.Refresh(obj, true)
f.PrintObjectSpecificMessage(obj, out)
f.PrintSuccess(shortOutput, out, info.Mapping.Resource, info.Name, false, "replaced")
return nil
})
}
// forceReplace implements `kubectl replace --force`: it deletes the
// existing resources (optionally cascading, honoring --grace-period),
// waits for the deletions to complete, then re-creates the resources
// from the provided files. Stdin input is spooled to a temp file first
// so it can be read twice (once for delete, once for create).
func forceReplace(f cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string, shortOutput bool, options *resource.FilenameOptions) error {
schema, err := f.Validator(cmdutil.GetFlagBool(cmd, "validate"))
if err != nil {
return err
}
cmdNamespace, enforceNamespace, err := f.DefaultNamespace()
if err != nil {
return err
}
// Spool "-" (stdin) to a temp file: the builder is run twice below.
for i, filename := range options.Filenames {
if filename == "-" {
tempDir, err := ioutil.TempDir("", "kubectl_replace_")
if err != nil {
return err
}
defer os.RemoveAll(tempDir)
tempFilename := filepath.Join(tempDir, "resource.stdin")
err = cmdutil.DumpReaderToFile(os.Stdin, tempFilename)
if err != nil {
return err
}
options.Filenames[i] = tempFilename
}
}
r := f.NewBuilder().
Unstructured().
ContinueOnError().
NamespaceParam(cmdNamespace).DefaultNamespace().
FilenameParam(enforceNamespace, options).
ResourceTypeOrNameArgs(false, args...).RequireObject(false).
Flatten().
Do()
if err := r.Err(); err != nil {
return err
}
mapper := r.Mapper().RESTMapper
//Replace will create a resource if it doesn't exist already, so ignore not found error
ignoreNotFound := true
timeout := cmdutil.GetFlagDuration(cmd, "timeout")
gracePeriod := cmdutil.GetFlagInt(cmd, "grace-period")
waitForDeletion := false
if gracePeriod == 0 {
// To preserve backwards compatibility, but prevent accidental data loss, we convert --grace-period=0
// into --grace-period=1 and wait until the object is successfully deleted.
gracePeriod = 1
waitForDeletion = true
}
// By default use a reaper to delete all related resources.
if cmdutil.GetFlagBool(cmd, "cascade") {
glog.Warningf("\"cascade\" is set, kubectl will delete and re-create all resources managed by this resource (e.g. Pods created by a ReplicationController). Consider using \"kubectl rolling-update\" if you want to update a ReplicationController together with its Pods.")
err = ReapResult(r, f, out, cmdutil.GetFlagBool(cmd, "cascade"), ignoreNotFound, timeout, gracePeriod, waitForDeletion, shortOutput, mapper, false)
} else {
err = DeleteResult(r, f, out, ignoreNotFound, gracePeriod, shortOutput, mapper)
}
if err != nil {
return err
}
if timeout == 0 {
timeout = kubectl.Timeout
}
// Poll until each old object is actually gone before re-creating.
err = r.Visit(func(info *resource.Info, err error) error {
if err != nil {
return err
}
return wait.PollImmediate(kubectl.Interval, timeout, func() (bool, error) {
if err := info.Get(); !errors.IsNotFound(err) {
return false, err
}
return true, nil
})
})
if err != nil {
return err
}
// Second builder pass (with schema validation) to create the resources.
r = f.NewBuilder().
Unstructured().
Schema(schema).
ContinueOnError().
NamespaceParam(cmdNamespace).DefaultNamespace().
FilenameParam(enforceNamespace, options).
Flatten().
Do()
err = r.Err()
if err != nil {
return err
}
count := 0
err = r.Visit(func(info *resource.Info, err error) error {
if err != nil {
return err
}
if err := kubectl.CreateOrUpdateAnnotation(cmdutil.GetFlagBool(cmd, cmdutil.ApplyAnnotationsFlag), info, f.JSONEncoder()); err != nil {
return err
}
if cmdutil.ShouldRecord(cmd, info) {
if err := cmdutil.RecordChangeCause(info.Object, f.Command(cmd, false)); err != nil {
return cmdutil.AddSourceToErr("replacing", info.Source, err)
}
}
obj, err := resource.NewHelper(info.Client, info.Mapping).Create(info.Namespace, true, info.Object)
if err != nil {
return err
}
count++
info.Refresh(obj, true)
f.PrintObjectSpecificMessage(obj, out)
f.PrintSuccess(shortOutput, out, info.Mapping.Resource, info.Name, false, "replaced")
return nil
})
if err != nil {
return err
}
if count == 0 {
return fmt.Errorf("no objects passed to replace")
}
return nil
}
| apache-2.0 |
lavalamp/test-infra | velodrome/transform/plugins/type_filter_wrapper.go | 2546 | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package plugins
import (
"fmt"
"k8s.io/test-infra/velodrome/sql"
"github.com/spf13/cobra"
)
// TypeFilterWrapperPlugin allows ignoring either PR or issues from processing
type TypeFilterWrapperPlugin struct {
pullRequests bool
issues bool
plugin Plugin
// Set of issue IDs that passed the filter; events and comments are
// only forwarded to the wrapped plugin for issues recorded here.
pass map[string]bool
}
var _ Plugin = &TypeFilterWrapperPlugin{}
// NewTypeFilterWrapperPlugin is the constructor of TypeFilterWrapperPlugin
func NewTypeFilterWrapperPlugin(plugin Plugin) *TypeFilterWrapperPlugin {
	wrapper := &TypeFilterWrapperPlugin{}
	wrapper.plugin = plugin
	wrapper.pass = map[string]bool{}
	return wrapper
}
// AddFlags adds "no-pull-requests" and "no-issues" to the command help
func (t *TypeFilterWrapperPlugin) AddFlags(cmd *cobra.Command) {
// Both flags default to false, i.e. process everything.
cmd.Flags().BoolVar(&t.pullRequests, "no-pull-requests", false, "Ignore pull-requests")
cmd.Flags().BoolVar(&t.issues, "no-issues", false, "Ignore issues")
}
// CheckFlags makes sure not both PR and issues are ignored
func (t *TypeFilterWrapperPlugin) CheckFlags() error {
	// Ignoring both kinds would filter out everything, which is an error.
	if !(t.pullRequests && t.issues) {
		return nil
	}
	return fmt.Errorf(
		"you can't ignore both pull-requests and issues")
}
// ReceiveIssue calls plugin.ReceiveIssue() if issues are not ignored
func (t *TypeFilterWrapperPlugin) ReceiveIssue(issue sql.Issue) []Point {
	// Filter out the issue when its kind (PR or plain issue) is ignored.
	filteredOut := (issue.IsPR && t.pullRequests) || (!issue.IsPR && t.issues)
	if filteredOut {
		return nil
	}
	// Remember that this issue passed, so its events/comments flow through.
	t.pass[issue.ID] = true
	return t.plugin.ReceiveIssue(issue)
}
// ReceiveIssueEvent calls plugin.ReceiveIssueEvent() if issues are not ignored
func (t *TypeFilterWrapperPlugin) ReceiveIssueEvent(event sql.IssueEvent) []Point {
// Only forward events for issues that previously passed the filter.
if !t.pass[event.IssueID] {
return nil
}
return t.plugin.ReceiveIssueEvent(event)
}
// ReceiveComment calls plugin.ReceiveComment() if issues are not ignored
func (t *TypeFilterWrapperPlugin) ReceiveComment(comment sql.Comment) []Point {
	// Only forward comments for issues that previously passed the filter.
	if t.pass[comment.IssueID] {
		return t.plugin.ReceiveComment(comment)
	}
	return nil
}
| apache-2.0 |
macs524/mybatis_learn | src/test/java/org/apache/ibatis/domain/misc/generics/GenericSubclass.java | 819 | /**
* Copyright 2009-2015 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ibatis.domain.misc.generics;
/**
 * Test fixture: a subclass that binds the type parameter of
 * {@code GenericAbstract} to {@link Long}, used to exercise generic-type
 * resolution on inherited abstract methods.
 */
public abstract class GenericSubclass extends GenericAbstract<Long> {
  /** Re-declares the inherited accessor with the resolved {@link Long} return type. */
  @Override
  public abstract Long getId();
}
| apache-2.0 |
DamianPilot382/Rubiks-Cube-Solver | opencv/sources/modules/cudafeatures2d/src/brute_force_matcher.cpp | 43804 | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
using namespace cv;
using namespace cv::cuda;
#if !defined (HAVE_CUDA) || defined (CUDA_DISABLER)
Ptr<cv::cuda::DescriptorMatcher> cv::cuda::DescriptorMatcher::createBFMatcher(int) { throw_no_cuda(); return Ptr<cv::cuda::DescriptorMatcher>(); }
#else /* !defined (HAVE_CUDA) */
// Forward declarations of the CUDA dispatch functions implemented in the
// project's .cu files. Each family is instantiated per descriptor element
// type T; the host-side methods below build tables of these, indexed by
// Mat depth, and pick one at runtime.
namespace cv { namespace cuda { namespace device
{
    // Best-match (single nearest neighbour) kernels.
    namespace bf_match
    {
        // Single train set: write one trainIdx/distance per query row.
        template <typename T> void matchL1_gpu(const PtrStepSzb& query, const PtrStepSzb& train, const PtrStepSzb& mask,
            const PtrStepSzi& trainIdx, const PtrStepSzf& distance,
            cudaStream_t stream);
        template <typename T> void matchL2_gpu(const PtrStepSzb& query, const PtrStepSzb& train, const PtrStepSzb& mask,
            const PtrStepSzi& trainIdx, const PtrStepSzf& distance,
            cudaStream_t stream);
        template <typename T> void matchHamming_gpu(const PtrStepSzb& query, const PtrStepSzb& train, const PtrStepSzb& mask,
            const PtrStepSzi& trainIdx, const PtrStepSzf& distance,
            cudaStream_t stream);

        // Train collection: additionally write the image index of the winner.
        template <typename T> void matchL1_gpu(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks,
            const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance,
            cudaStream_t stream);
        template <typename T> void matchL2_gpu(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks,
            const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance,
            cudaStream_t stream);
        template <typename T> void matchHamming_gpu(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks,
            const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance,
            cudaStream_t stream);
    }

    // k-nearest-neighbour kernels.
    namespace bf_knnmatch
    {
        // Single train set, arbitrary k; allDist is per-query scratch space.
        template <typename T> void matchL1_gpu(const PtrStepSzb& query, const PtrStepSzb& train, int k, const PtrStepSzb& mask,
            const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist,
            cudaStream_t stream);
        template <typename T> void matchL2_gpu(const PtrStepSzb& query, const PtrStepSzb& train, int k, const PtrStepSzb& mask,
            const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist,
            cudaStream_t stream);
        template <typename T> void matchHamming_gpu(const PtrStepSzb& query, const PtrStepSzb& train, int k, const PtrStepSzb& mask,
            const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist,
            cudaStream_t stream);

        // Train collection, fixed k == 2 ("match2" variants).
        template <typename T> void match2L1_gpu(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks,
            const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance,
            cudaStream_t stream);
        template <typename T> void match2L2_gpu(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks,
            const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance,
            cudaStream_t stream);
        template <typename T> void match2Hamming_gpu(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks,
            const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance,
            cudaStream_t stream);
    }

    // Radius-search kernels: collect every train row within maxDistance;
    // nMatches receives the per-query hit count.
    namespace bf_radius_match
    {
        template <typename T> void matchL1_gpu(const PtrStepSzb& query, const PtrStepSzb& train, float maxDistance, const PtrStepSzb& mask,
            const PtrStepSzi& trainIdx, const PtrStepSzf& distance, const PtrStepSz<unsigned int>& nMatches,
            cudaStream_t stream);
        template <typename T> void matchL2_gpu(const PtrStepSzb& query, const PtrStepSzb& train, float maxDistance, const PtrStepSzb& mask,
            const PtrStepSzi& trainIdx, const PtrStepSzf& distance, const PtrStepSz<unsigned int>& nMatches,
            cudaStream_t stream);
        template <typename T> void matchHamming_gpu(const PtrStepSzb& query, const PtrStepSzb& train, float maxDistance, const PtrStepSzb& mask,
            const PtrStepSzi& trainIdx, const PtrStepSzf& distance, const PtrStepSz<unsigned int>& nMatches,
            cudaStream_t stream);

        // Collection variants take raw arrays of train/mask headers.
        template <typename T> void matchL1_gpu(const PtrStepSzb& query, const PtrStepSzb* trains, int n, float maxDistance, const PtrStepSzb* masks,
            const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, const PtrStepSz<unsigned int>& nMatches,
            cudaStream_t stream);
        template <typename T> void matchL2_gpu(const PtrStepSzb& query, const PtrStepSzb* trains, int n, float maxDistance, const PtrStepSzb* masks,
            const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, const PtrStepSz<unsigned int>& nMatches,
            cudaStream_t stream);
        template <typename T> void matchHamming_gpu(const PtrStepSzb& query, const PtrStepSzb* trains, int n, float maxDistance, const PtrStepSzb* masks,
            const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, const PtrStepSz<unsigned int>& nMatches,
            cudaStream_t stream);
    }
}}}
namespace
{
// Pack the host-side vectors of train descriptors (and optional masks) into
// two single-row GpuMats of device headers (PtrStepSzb / PtrStepb), which is
// the form the collection kernels consume.
// The CV_8UC(sizeof(...)) element type is a trick: each "pixel" is the raw
// bytes of one header struct, so uploading the row transfers the whole array.
static void makeGpuCollection(const std::vector<GpuMat>& trainDescCollection,
                              const std::vector<GpuMat>& masks,
                              GpuMat& trainCollection,
                              GpuMat& maskCollection)
{
    if (trainDescCollection.empty())
        return;

    if (masks.empty())
    {
        // No masks: build only the train-header row and clear the mask output.
        Mat trainCollectionCPU(1, static_cast<int>(trainDescCollection.size()), CV_8UC(sizeof(PtrStepSzb)));

        PtrStepSzb* trainCollectionCPU_ptr = trainCollectionCPU.ptr<PtrStepSzb>();

        // GpuMat implicitly converts to a PtrStepSzb device header.
        for (size_t i = 0, size = trainDescCollection.size(); i < size; ++i, ++trainCollectionCPU_ptr)
            *trainCollectionCPU_ptr = trainDescCollection[i];

        trainCollection.upload(trainCollectionCPU);
        maskCollection.release();
    }
    else
    {
        // Masks supplied: one mask per train image, validated pairwise.
        CV_Assert( masks.size() == trainDescCollection.size() );

        Mat trainCollectionCPU(1, static_cast<int>(trainDescCollection.size()), CV_8UC(sizeof(PtrStepSzb)));
        Mat maskCollectionCPU(1, static_cast<int>(trainDescCollection.size()), CV_8UC(sizeof(PtrStepb)));

        PtrStepSzb* trainCollectionCPU_ptr = trainCollectionCPU.ptr<PtrStepSzb>();
        PtrStepb* maskCollectionCPU_ptr = maskCollectionCPU.ptr<PtrStepb>();

        for (size_t i = 0, size = trainDescCollection.size(); i < size; ++i, ++trainCollectionCPU_ptr, ++maskCollectionCPU_ptr)
        {
            const GpuMat& train = trainDescCollection[i];
            const GpuMat& mask = masks[i];

            // Each mask row-masks the corresponding train set (one column
            // per train descriptor row), or may be empty to disable masking.
            CV_Assert( mask.empty() || (mask.type() == CV_8UC1 && mask.cols == train.rows) );

            *trainCollectionCPU_ptr = train;
            *maskCollectionCPU_ptr = mask;
        }

        trainCollection.upload(trainCollectionCPU);
        maskCollection.upload(maskCollectionCPU);
    }
}
// GPU brute-force descriptor matcher. Each *Async method launches CUDA
// kernels and packs results into a single GpuMat; the matching *Convert
// method decodes that buffer into std::vector<DMatch> on the host. The
// blocking match/knnMatch/radiusMatch overloads are thin wrappers that
// chain Async + Convert on the default stream.
class BFMatcher_Impl : public cv::cuda::DescriptorMatcher
{
public:
    // norm selects the distance kernel family; only L1, L2 and Hamming
    // are implemented on the GPU.
    explicit BFMatcher_Impl(int norm) : norm_(norm)
    {
        CV_Assert( norm == NORM_L1 || norm == NORM_L2 || norm == NORM_HAMMING );
    }

    virtual bool isMaskSupported() const { return true; }

    // Append descriptors to the stored train collection (used by the
    // collection overloads below).
    virtual void add(const std::vector<GpuMat>& descriptors)
    {
        trainDescCollection_.insert(trainDescCollection_.end(), descriptors.begin(), descriptors.end());
    }

    virtual const std::vector<GpuMat>& getTrainDescriptors() const
    {
        return trainDescCollection_;
    }

    virtual void clear()
    {
        trainDescCollection_.clear();
    }

    virtual bool empty() const
    {
        return trainDescCollection_.empty();
    }

    // Brute force needs no training phase; kept for interface compatibility.
    virtual void train()
    {
    }

    //
    // 1-to-1 matching (best train match per query).
    //
    virtual void match(InputArray queryDescriptors, InputArray trainDescriptors,
                       std::vector<DMatch>& matches,
                       InputArray mask = noArray());
    virtual void match(InputArray queryDescriptors,
                       std::vector<DMatch>& matches,
                       const std::vector<GpuMat>& masks = std::vector<GpuMat>());

    virtual void matchAsync(InputArray queryDescriptors, InputArray trainDescriptors,
                            OutputArray matches,
                            InputArray mask = noArray(),
                            Stream& stream = Stream::Null());
    virtual void matchAsync(InputArray queryDescriptors,
                            OutputArray matches,
                            const std::vector<GpuMat>& masks = std::vector<GpuMat>(),
                            Stream& stream = Stream::Null());

    virtual void matchConvert(InputArray gpu_matches,
                              std::vector<DMatch>& matches);

    //
    // k-nearest-neighbour matching.
    //
    virtual void knnMatch(InputArray queryDescriptors, InputArray trainDescriptors,
                          std::vector<std::vector<DMatch> >& matches,
                          int k,
                          InputArray mask = noArray(),
                          bool compactResult = false);
    virtual void knnMatch(InputArray queryDescriptors,
                          std::vector<std::vector<DMatch> >& matches,
                          int k,
                          const std::vector<GpuMat>& masks = std::vector<GpuMat>(),
                          bool compactResult = false);

    virtual void knnMatchAsync(InputArray queryDescriptors, InputArray trainDescriptors,
                               OutputArray matches,
                               int k,
                               InputArray mask = noArray(),
                               Stream& stream = Stream::Null());
    virtual void knnMatchAsync(InputArray queryDescriptors,
                               OutputArray matches,
                               int k,
                               const std::vector<GpuMat>& masks = std::vector<GpuMat>(),
                               Stream& stream = Stream::Null());

    virtual void knnMatchConvert(InputArray gpu_matches,
                                 std::vector< std::vector<DMatch> >& matches,
                                 bool compactResult = false);

    //
    // radius matching (all train matches within maxDistance).
    //
    virtual void radiusMatch(InputArray queryDescriptors, InputArray trainDescriptors,
                             std::vector<std::vector<DMatch> >& matches,
                             float maxDistance,
                             InputArray mask = noArray(),
                             bool compactResult = false);
    virtual void radiusMatch(InputArray queryDescriptors,
                             std::vector<std::vector<DMatch> >& matches,
                             float maxDistance,
                             const std::vector<GpuMat>& masks = std::vector<GpuMat>(),
                             bool compactResult = false);

    virtual void radiusMatchAsync(InputArray queryDescriptors, InputArray trainDescriptors,
                                  OutputArray matches,
                                  float maxDistance,
                                  InputArray mask = noArray(),
                                  Stream& stream = Stream::Null());
    virtual void radiusMatchAsync(InputArray queryDescriptors,
                                  OutputArray matches,
                                  float maxDistance,
                                  const std::vector<GpuMat>& masks = std::vector<GpuMat>(),
                                  Stream& stream = Stream::Null());

    virtual void radiusMatchConvert(InputArray gpu_matches,
                                    std::vector< std::vector<DMatch> >& matches,
                                    bool compactResult = false);

private:
    int norm_;                                // distance type: NORM_L1 / NORM_L2 / NORM_HAMMING
    std::vector<GpuMat> trainDescCollection_; // train sets accumulated via add()
};
//
// 1 to 1 match
//
// Blocking 1-to-1 match against an explicit train set: launch the async
// kernel on the default stream, then decode the packed GPU result.
void BFMatcher_Impl::match(InputArray _queryDescriptors, InputArray _trainDescriptors,
                           std::vector<DMatch>& matches,
                           InputArray _mask)
{
    GpuMat gpuMatches;
    matchAsync(_queryDescriptors, _trainDescriptors, gpuMatches, _mask);
    matchConvert(gpuMatches, matches);
}
// Blocking 1-to-1 match against the stored train collection (see add()):
// launch the async kernel on the default stream, then decode the result.
void BFMatcher_Impl::match(InputArray _queryDescriptors,
                           std::vector<DMatch>& matches,
                           const std::vector<GpuMat>& masks)
{
    GpuMat gpuMatches;
    matchAsync(_queryDescriptors, gpuMatches, masks);
    matchConvert(gpuMatches, matches);
}
// Asynchronously find the single best train match for every query row.
//
// Output layout (_matches, CV_32SC1, 2 x nQuery):
//   row 0: trainIdx per query (int, -1 = no match)
//   row 1: distance per query (float bits stored in the int matrix)
// matchConvert() decodes this buffer.
void BFMatcher_Impl::matchAsync(InputArray _queryDescriptors, InputArray _trainDescriptors,
                                OutputArray _matches,
                                InputArray _mask,
                                Stream& stream)
{
    using namespace cv::cuda::device::bf_match;

    const GpuMat query = _queryDescriptors.getGpuMat();
    const GpuMat train = _trainDescriptors.getGpuMat();
    const GpuMat mask = _mask.getGpuMat();

    // Nothing to match: release the output so the caller sees an empty result.
    if (query.empty() || train.empty())
    {
        _matches.release();
        return;
    }

    CV_Assert( query.channels() == 1 && query.depth() < CV_64F );
    CV_Assert( train.cols == query.cols && train.type() == query.type() );
    CV_Assert( mask.empty() || (mask.type() == CV_8UC1 && mask.rows == query.rows && mask.cols == train.rows) );

    // Dispatch tables indexed by Mat depth (CV_8U .. CV_32F); a 0 entry
    // marks a depth/norm combination with no GPU kernel.
    typedef void (*caller_t)(const PtrStepSzb& query, const PtrStepSzb& train, const PtrStepSzb& mask,
                             const PtrStepSzi& trainIdx, const PtrStepSzf& distance,
                             cudaStream_t stream);

    static const caller_t callersL1[] =
    {
        matchL1_gpu<unsigned char>, 0/*matchL1_gpu<signed char>*/,
        matchL1_gpu<unsigned short>, matchL1_gpu<short>,
        matchL1_gpu<int>, matchL1_gpu<float>
    };
    static const caller_t callersL2[] =
    {
        0/*matchL2_gpu<unsigned char>*/, 0/*matchL2_gpu<signed char>*/,
        0/*matchL2_gpu<unsigned short>*/, 0/*matchL2_gpu<short>*/,
        0/*matchL2_gpu<int>*/, matchL2_gpu<float>
    };
    static const caller_t callersHamming[] =
    {
        matchHamming_gpu<unsigned char>, 0/*matchHamming_gpu<signed char>*/,
        matchHamming_gpu<unsigned short>, 0/*matchHamming_gpu<short>*/,
        matchHamming_gpu<int>, 0/*matchHamming_gpu<float>*/
    };

    const caller_t* callers = norm_ == NORM_L1 ? callersL1 : norm_ == NORM_L2 ? callersL2 : callersHamming;

    const caller_t func = callers[query.depth()];
    if (func == 0)
    {
        CV_Error(Error::StsUnsupportedFormat, "unsupported combination of query.depth() and norm");
    }

    const int nQuery = query.rows;

    // Allocate the packed output and view its two rows as separate headers.
    _matches.create(2, nQuery, CV_32SC1);
    GpuMat matches = _matches.getGpuMat();

    GpuMat trainIdx(1, nQuery, CV_32SC1, matches.ptr(0));
    GpuMat distance(1, nQuery, CV_32FC1, matches.ptr(1));

    func(query, train, mask, trainIdx, distance, StreamAccessor::getStream(stream));
}
// Asynchronously find the single best match across the stored train
// collection (see add()).
//
// Output layout (_matches, CV_32SC1, 3 x nQuery):
//   row 0: trainIdx, row 1: imgIdx (which train image won),
//   row 2: distance (float bits). matchConvert() decodes this buffer.
void BFMatcher_Impl::matchAsync(InputArray _queryDescriptors,
                                OutputArray _matches,
                                const std::vector<GpuMat>& masks,
                                Stream& stream)
{
    using namespace cv::cuda::device::bf_match;

    const GpuMat query = _queryDescriptors.getGpuMat();

    if (query.empty() || trainDescCollection_.empty())
    {
        _matches.release();
        return;
    }

    CV_Assert( query.channels() == 1 && query.depth() < CV_64F );

    // Pack the per-image train/mask headers into device arrays.
    GpuMat trainCollection, maskCollection;
    makeGpuCollection(trainDescCollection_, masks, trainCollection, maskCollection);

    // Dispatch tables indexed by Mat depth; 0 = unsupported combination.
    typedef void (*caller_t)(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks,
                             const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance,
                             cudaStream_t stream);

    static const caller_t callersL1[] =
    {
        matchL1_gpu<unsigned char>, 0/*matchL1_gpu<signed char>*/,
        matchL1_gpu<unsigned short>, matchL1_gpu<short>,
        matchL1_gpu<int>, matchL1_gpu<float>
    };
    static const caller_t callersL2[] =
    {
        0/*matchL2_gpu<unsigned char>*/, 0/*matchL2_gpu<signed char>*/,
        0/*matchL2_gpu<unsigned short>*/, 0/*matchL2_gpu<short>*/,
        0/*matchL2_gpu<int>*/, matchL2_gpu<float>
    };
    static const caller_t callersHamming[] =
    {
        matchHamming_gpu<unsigned char>, 0/*matchHamming_gpu<signed char>*/,
        matchHamming_gpu<unsigned short>, 0/*matchHamming_gpu<short>*/,
        matchHamming_gpu<int>, 0/*matchHamming_gpu<float>*/
    };

    const caller_t* callers = norm_ == NORM_L1 ? callersL1 : norm_ == NORM_L2 ? callersL2 : callersHamming;

    const caller_t func = callers[query.depth()];
    if (func == 0)
    {
        CV_Error(Error::StsUnsupportedFormat, "unsupported combination of query.depth() and norm");
    }

    const int nQuery = query.rows;

    // Pack trainIdx / imgIdx / distance as three rows of one allocation.
    _matches.create(3, nQuery, CV_32SC1);
    GpuMat matches = _matches.getGpuMat();

    GpuMat trainIdx(1, nQuery, CV_32SC1, matches.ptr(0));
    GpuMat imgIdx(1, nQuery, CV_32SC1, matches.ptr(1));
    GpuMat distance(1, nQuery, CV_32FC1, matches.ptr(2));

    func(query, trainCollection, maskCollection, trainIdx, imgIdx, distance, StreamAccessor::getStream(stream));
}
// Decode the packed result of matchAsync() into a flat DMatch vector.
// Accepts either a GpuMat (downloaded here) or a host Mat. Two layouts:
// 2 rows = trainIdx/distance (single train set), 3 rows = trainIdx/
// imgIdx/distance (train collection). Entries with trainIdx == -1
// (masked out / no match) are skipped.
void BFMatcher_Impl::matchConvert(InputArray _gpu_matches,
                                  std::vector<DMatch>& matches)
{
    Mat h_matches;
    if (_gpu_matches.kind() == _InputArray::CUDA_GPU_MAT)
        _gpu_matches.getGpuMat().download(h_matches);
    else
        h_matches = _gpu_matches.getMat();

    matches.clear();

    if (h_matches.empty())
        return;

    CV_Assert( (h_matches.type() == CV_32SC1) && (h_matches.rows == 2 || h_matches.rows == 3) );

    const bool hasImgIdx = (h_matches.rows == 3);
    const int nQuery = h_matches.cols;
    matches.reserve(nQuery);

    // Row layout: trainIdx first, optional imgIdx in the middle,
    // distance (float bits in the int matrix) last.
    const int* trainIdxPtr = h_matches.ptr<int>(0);
    const int* imgIdxPtr = hasImgIdx ? h_matches.ptr<int>(1) : NULL;
    const float* distancePtr = h_matches.ptr<float>(hasImgIdx ? 2 : 1);

    for (int queryIdx = 0; queryIdx < nQuery; ++queryIdx)
    {
        const int trainIdx = trainIdxPtr[queryIdx];
        if (trainIdx == -1)
            continue;

        const int imgIdx = imgIdxPtr ? imgIdxPtr[queryIdx] : 0;
        matches.push_back(DMatch(queryIdx, trainIdx, imgIdx, distancePtr[queryIdx]));
    }
}
//
// knn match
//
// Blocking k-NN match against an explicit train set: launch the async
// kernel on the default stream, then decode the packed GPU result.
void BFMatcher_Impl::knnMatch(InputArray _queryDescriptors, InputArray _trainDescriptors,
                              std::vector<std::vector<DMatch> >& matches,
                              int k,
                              InputArray _mask,
                              bool compactResult)
{
    GpuMat gpuMatches;
    knnMatchAsync(_queryDescriptors, _trainDescriptors, gpuMatches, k, _mask);
    knnMatchConvert(gpuMatches, matches, compactResult);
}
// k-NN match against the stored train collection.
// k == 2 uses the fused "match2" GPU path over the whole collection;
// for any other k the collection is processed image by image and the
// per-image results are merged (kept sorted, truncated to k) on the CPU.
void BFMatcher_Impl::knnMatch(InputArray _queryDescriptors,
                              std::vector<std::vector<DMatch> >& matches,
                              int k,
                              const std::vector<GpuMat>& masks,
                              bool compactResult)
{
    if (k == 2)
    {
        GpuMat d_matches;
        knnMatchAsync(_queryDescriptors, d_matches, k, masks);
        knnMatchConvert(d_matches, matches, compactResult);
    }
    else
    {
        const GpuMat query = _queryDescriptors.getGpuMat();

        if (query.empty() || trainDescCollection_.empty())
        {
            matches.clear();
            return;
        }

        CV_Assert( query.channels() == 1 && query.depth() < CV_64F );

        std::vector< std::vector<DMatch> > curMatches;
        std::vector<DMatch> temp;
        temp.reserve(2 * k);   // merge buffer: up to k old + k new candidates

        matches.resize(query.rows);
        for (size_t i = 0; i < matches.size(); ++i)
            matches[i].reserve(k);

        for (size_t imgIdx = 0; imgIdx < trainDescCollection_.size(); ++imgIdx)
        {
            // Single-train-set k-NN for this image (blocking overload above).
            knnMatch(query, trainDescCollection_[imgIdx], curMatches, k, masks.empty() ? GpuMat() : masks[imgIdx]);

            for (int queryIdx = 0; queryIdx < query.rows; ++queryIdx)
            {
                std::vector<DMatch>& localMatch = curMatches[queryIdx];
                std::vector<DMatch>& globalMatch = matches[queryIdx];

                // Tag the per-image results with the image they came from.
                for (size_t i = 0; i < localMatch.size(); ++i)
                    localMatch[i].imgIdx = imgIdx;

                // Both lists are sorted by distance (DMatch::operator<),
                // so a merge keeps order; then keep only the best k.
                temp.clear();
                std::merge(globalMatch.begin(), globalMatch.end(), localMatch.begin(), localMatch.end(), std::back_inserter(temp));

                globalMatch.clear();
                const size_t count = std::min(static_cast<size_t>(k), temp.size());
                std::copy(temp.begin(), temp.begin() + count, std::back_inserter(globalMatch));
            }
        }

        if (compactResult)
        {
            // Drop queries that matched nothing in any image.
            std::vector< std::vector<DMatch> >::iterator new_end = std::remove_if(matches.begin(), matches.end(), std::mem_fun_ref(&std::vector<DMatch>::empty));
            matches.erase(new_end, matches.end());
        }
    }
}
// Asynchronously find the k best train matches for every query row.
//
// Output layout, decoded by knnMatchConvert():
//   k == 2 : _matches is CV_32SC2, 2 x nQuery; row 0 packs the two
//            trainIdx values per query, row 1 the two distances.
//   k != 2 : _matches is CV_32SC1, (2*nQuery) x k; the top nQuery rows
//            hold trainIdx, the bottom nQuery rows hold distances, plus
//            an nQuery x nTrain scratch buffer (allDist) for the kernel.
void BFMatcher_Impl::knnMatchAsync(InputArray _queryDescriptors, InputArray _trainDescriptors,
                                   OutputArray _matches,
                                   int k,
                                   InputArray _mask,
                                   Stream& stream)
{
    using namespace cv::cuda::device::bf_knnmatch;

    const GpuMat query = _queryDescriptors.getGpuMat();
    const GpuMat train = _trainDescriptors.getGpuMat();
    const GpuMat mask = _mask.getGpuMat();

    if (query.empty() || train.empty())
    {
        _matches.release();
        return;
    }

    CV_Assert( query.channels() == 1 && query.depth() < CV_64F );
    CV_Assert( train.cols == query.cols && train.type() == query.type() );
    CV_Assert( mask.empty() || (mask.type() == CV_8UC1 && mask.rows == query.rows && mask.cols == train.rows) );

    // Dispatch tables indexed by Mat depth; 0 = unsupported combination.
    typedef void (*caller_t)(const PtrStepSzb& query, const PtrStepSzb& train, int k, const PtrStepSzb& mask,
                             const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist,
                             cudaStream_t stream);

    static const caller_t callersL1[] =
    {
        matchL1_gpu<unsigned char>, 0/*matchL1_gpu<signed char>*/,
        matchL1_gpu<unsigned short>, matchL1_gpu<short>,
        matchL1_gpu<int>, matchL1_gpu<float>
    };
    static const caller_t callersL2[] =
    {
        0/*matchL2_gpu<unsigned char>*/, 0/*matchL2_gpu<signed char>*/,
        0/*matchL2_gpu<unsigned short>*/, 0/*matchL2_gpu<short>*/,
        0/*matchL2_gpu<int>*/, matchL2_gpu<float>
    };
    static const caller_t callersHamming[] =
    {
        matchHamming_gpu<unsigned char>, 0/*matchHamming_gpu<signed char>*/,
        matchHamming_gpu<unsigned short>, 0/*matchHamming_gpu<short>*/,
        matchHamming_gpu<int>, 0/*matchHamming_gpu<float>*/
    };

    const caller_t* callers = norm_ == NORM_L1 ? callersL1 : norm_ == NORM_L2 ? callersL2 : callersHamming;

    const caller_t func = callers[query.depth()];
    if (func == 0)
    {
        CV_Error(Error::StsUnsupportedFormat, "unsupported combination of query.depth() and norm");
    }

    const int nQuery = query.rows;
    const int nTrain = train.rows;

    GpuMat trainIdx, distance, allDist;
    if (k == 2)
    {
        // Two candidates per query fit in one 2-channel element.
        _matches.create(2, nQuery, CV_32SC2);
        GpuMat matches = _matches.getGpuMat();

        trainIdx = GpuMat(1, nQuery, CV_32SC2, matches.ptr(0));
        distance = GpuMat(1, nQuery, CV_32FC2, matches.ptr(1));
    }
    else
    {
        // General k: stacked index/distance matrices sharing one allocation,
        // plus temporary per-train distances from the stream's buffer pool.
        _matches.create(2 * nQuery, k, CV_32SC1);
        GpuMat matches = _matches.getGpuMat();

        trainIdx = GpuMat(nQuery, k, CV_32SC1, matches.ptr(0), matches.step);
        distance = GpuMat(nQuery, k, CV_32FC1, matches.ptr(nQuery), matches.step);

        BufferPool pool(stream);
        allDist = pool.getBuffer(nQuery, nTrain, CV_32FC1);
    }

    // -1 marks "no match" slots for knnMatchConvert().
    trainIdx.setTo(Scalar::all(-1), stream);

    func(query, train, k, mask, trainIdx, distance, allDist, StreamAccessor::getStream(stream));
}
// Asynchronous k-NN over the stored train collection. Only k == 2 is
// implemented on the GPU (the "match2" kernels); other k values are
// handled on the CPU side by the blocking knnMatch overload.
//
// Output layout (_matches, CV_32SC2, 3 x nQuery): row 0 = trainIdx pairs,
// row 1 = imgIdx pairs, row 2 = distance pairs; see knnMatchConvert().
void BFMatcher_Impl::knnMatchAsync(InputArray _queryDescriptors,
                                   OutputArray _matches,
                                   int k,
                                   const std::vector<GpuMat>& masks,
                                   Stream& stream)
{
    using namespace cv::cuda::device::bf_knnmatch;

    if (k != 2)
    {
        CV_Error(Error::StsNotImplemented, "only k=2 mode is supported for now");
    }

    const GpuMat query = _queryDescriptors.getGpuMat();

    if (query.empty() || trainDescCollection_.empty())
    {
        _matches.release();
        return;
    }

    CV_Assert( query.channels() == 1 && query.depth() < CV_64F );

    // Pack the per-image train/mask headers into device arrays.
    GpuMat trainCollection, maskCollection;
    makeGpuCollection(trainDescCollection_, masks, trainCollection, maskCollection);

    // Dispatch tables indexed by Mat depth; 0 = unsupported combination.
    typedef void (*caller_t)(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks,
                             const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance,
                             cudaStream_t stream);

    static const caller_t callersL1[] =
    {
        match2L1_gpu<unsigned char>, 0/*match2L1_gpu<signed char>*/,
        match2L1_gpu<unsigned short>, match2L1_gpu<short>,
        match2L1_gpu<int>, match2L1_gpu<float>
    };
    static const caller_t callersL2[] =
    {
        0/*match2L2_gpu<unsigned char>*/, 0/*match2L2_gpu<signed char>*/,
        0/*match2L2_gpu<unsigned short>*/, 0/*match2L2_gpu<short>*/,
        0/*match2L2_gpu<int>*/, match2L2_gpu<float>
    };
    static const caller_t callersHamming[] =
    {
        match2Hamming_gpu<unsigned char>, 0/*match2Hamming_gpu<signed char>*/,
        match2Hamming_gpu<unsigned short>, 0/*match2Hamming_gpu<short>*/,
        match2Hamming_gpu<int>, 0/*match2Hamming_gpu<float>*/
    };

    const caller_t* callers = norm_ == NORM_L1 ? callersL1 : norm_ == NORM_L2 ? callersL2 : callersHamming;

    const caller_t func = callers[query.depth()];
    if (func == 0)
    {
        CV_Error(Error::StsUnsupportedFormat, "unsupported combination of query.depth() and norm");
    }

    const int nQuery = query.rows;

    _matches.create(3, nQuery, CV_32SC2);
    GpuMat matches = _matches.getGpuMat();

    GpuMat trainIdx(1, nQuery, CV_32SC2, matches.ptr(0));
    GpuMat imgIdx(1, nQuery, CV_32SC2, matches.ptr(1));
    GpuMat distance(1, nQuery, CV_32FC2, matches.ptr(2));

    // -1 marks "no match" slots for knnMatchConvert().
    trainIdx.setTo(Scalar::all(-1), stream);

    func(query, trainCollection, maskCollection, trainIdx, imgIdx, distance, StreamAccessor::getStream(stream));
}
// Decode the packed result of knnMatchAsync() into per-query DMatch lists.
//
// Accepts either a GpuMat (downloaded here) or a host Mat. Two layouts,
// mirroring knnMatchAsync():
//   * CV_32SC2, 2 or 3 rows : k == 2 mode. Row 0 = trainIdx pairs,
//     optional middle row = imgIdx pairs, last row = distance pairs.
//   * CV_32SC1 : general k. Top nQuery rows = trainIdx, bottom nQuery
//     rows = distance, k columns each.
// Slots with trainIdx == -1 (masked out / fewer than k candidates) are
// skipped. With compactResult, queries with no matches are dropped
// entirely instead of yielding an empty vector.
void BFMatcher_Impl::knnMatchConvert(InputArray _gpu_matches,
                                     std::vector< std::vector<DMatch> >& matches,
                                     bool compactResult)
{
    Mat gpu_matches;
    if (_gpu_matches.kind() == _InputArray::CUDA_GPU_MAT)
    {
        _gpu_matches.getGpuMat().download(gpu_matches);
    }
    else
    {
        gpu_matches = _gpu_matches.getMat();
    }

    if (gpu_matches.empty())
    {
        matches.clear();
        return;
    }

    CV_Assert( ((gpu_matches.type() == CV_32SC2) && (gpu_matches.rows == 2 || gpu_matches.rows == 3)) ||
               (gpu_matches.type() == CV_32SC1) );

    int nQuery = -1, k = -1;

    const int* trainIdxPtr = NULL;
    const int* imgIdxPtr = NULL;
    const float* distancePtr = NULL;

    if (gpu_matches.type() == CV_32SC2)
    {
        nQuery = gpu_matches.cols;
        k = 2;

        if (gpu_matches.rows == 2)
        {
            trainIdxPtr = gpu_matches.ptr<int>(0);
            distancePtr = gpu_matches.ptr<float>(1);
        }
        else
        {
            trainIdxPtr = gpu_matches.ptr<int>(0);
            imgIdxPtr   = gpu_matches.ptr<int>(1);
            distancePtr = gpu_matches.ptr<float>(2);
        }
    }
    else
    {
        nQuery = gpu_matches.rows / 2;
        k = gpu_matches.cols;

        trainIdxPtr = gpu_matches.ptr<int>(0);
        distancePtr = gpu_matches.ptr<float>(nQuery);
    }

    matches.clear();
    matches.reserve(nQuery);

    // NOTE(review): the cursor walk below assumes the downloaded Mat stores
    // exactly k entries per query back to back (continuous rows), as the
    // original implementation did.
    for (int queryIdx = 0; queryIdx < nQuery; ++queryIdx)
    {
        matches.push_back(std::vector<DMatch>());
        std::vector<DMatch>& curMatches = matches.back();
        curMatches.reserve(k);

        for (int i = 0; i < k; ++i)
        {
            // Read the candidate, then advance ALL cursors unconditionally.
            // The previous code advanced them only for valid entries, so the
            // first trainIdx == -1 slot stalled the cursors for the rest of
            // the row and misaligned every subsequent query's results.
            const int trainIdx = *trainIdxPtr;
            const int imgIdx = imgIdxPtr ? *imgIdxPtr : 0;
            const float distance = *distancePtr;

            ++trainIdxPtr;
            ++distancePtr;
            if (imgIdxPtr)
                ++imgIdxPtr;

            if (trainIdx == -1)
                continue;

            curMatches.push_back(DMatch(queryIdx, trainIdx, imgIdx, distance));
        }

        if (compactResult && curMatches.empty())
        {
            matches.pop_back();
        }
    }
}
//
// radius match
//
// Blocking radius match against an explicit train set: launch the async
// kernel on the default stream, then decode the packed GPU result.
void BFMatcher_Impl::radiusMatch(InputArray _queryDescriptors, InputArray _trainDescriptors,
                                 std::vector<std::vector<DMatch> >& matches,
                                 float maxDistance,
                                 InputArray _mask,
                                 bool compactResult)
{
    GpuMat gpuMatches;
    radiusMatchAsync(_queryDescriptors, _trainDescriptors, gpuMatches, maxDistance, _mask);
    radiusMatchConvert(gpuMatches, matches, compactResult);
}
// Blocking radius match against the stored train collection (see add()):
// launch the async kernel on the default stream, then decode the result.
void BFMatcher_Impl::radiusMatch(InputArray _queryDescriptors,
                                 std::vector<std::vector<DMatch> >& matches,
                                 float maxDistance,
                                 const std::vector<GpuMat>& masks,
                                 bool compactResult)
{
    GpuMat gpuMatches;
    radiusMatchAsync(_queryDescriptors, gpuMatches, maxDistance, masks);
    radiusMatchConvert(gpuMatches, matches, compactResult);
}
// Asynchronously collect all train matches within maxDistance per query.
//
// Output layout (_matches, CV_32SC1, (2*nQuery + 1) x cols):
//   rows [0, nQuery)        : trainIdx, up to `cols` hits per query
//   rows [nQuery, 2*nQuery) : distances (float bits)
//   row  2*nQuery           : per-query hit counts (nMatches)
// radiusMatchConvert() decodes this buffer.
void BFMatcher_Impl::radiusMatchAsync(InputArray _queryDescriptors, InputArray _trainDescriptors,
                                      OutputArray _matches,
                                      float maxDistance,
                                      InputArray _mask,
                                      Stream& stream)
{
    using namespace cv::cuda::device::bf_radius_match;

    const GpuMat query = _queryDescriptors.getGpuMat();
    const GpuMat train = _trainDescriptors.getGpuMat();
    const GpuMat mask = _mask.getGpuMat();

    if (query.empty() || train.empty())
    {
        _matches.release();
        return;
    }

    CV_Assert( query.channels() == 1 && query.depth() < CV_64F );
    CV_Assert( train.cols == query.cols && train.type() == query.type() );
    CV_Assert( mask.empty() || (mask.type() == CV_8UC1 && mask.rows == query.rows && mask.cols == train.rows) );

    // Dispatch tables indexed by Mat depth; 0 = unsupported combination.
    typedef void (*caller_t)(const PtrStepSzb& query, const PtrStepSzb& train, float maxDistance, const PtrStepSzb& mask,
                             const PtrStepSzi& trainIdx, const PtrStepSzf& distance, const PtrStepSz<unsigned int>& nMatches,
                             cudaStream_t stream);

    static const caller_t callersL1[] =
    {
        matchL1_gpu<unsigned char>, 0/*matchL1_gpu<signed char>*/,
        matchL1_gpu<unsigned short>, matchL1_gpu<short>,
        matchL1_gpu<int>, matchL1_gpu<float>
    };
    static const caller_t callersL2[] =
    {
        0/*matchL2_gpu<unsigned char>*/, 0/*matchL2_gpu<signed char>*/,
        0/*matchL2_gpu<unsigned short>*/, 0/*matchL2_gpu<short>*/,
        0/*matchL2_gpu<int>*/, matchL2_gpu<float>
    };
    static const caller_t callersHamming[] =
    {
        matchHamming_gpu<unsigned char>, 0/*matchHamming_gpu<signed char>*/,
        matchHamming_gpu<unsigned short>, 0/*matchHamming_gpu<short>*/,
        matchHamming_gpu<int>, 0/*matchHamming_gpu<float>*/
    };

    const caller_t* callers = norm_ == NORM_L1 ? callersL1 : norm_ == NORM_L2 ? callersL2 : callersHamming;

    const caller_t func = callers[query.depth()];
    if (func == 0)
    {
        CV_Error(Error::StsUnsupportedFormat, "unsupported combination of query.depth() and norm");
    }

    const int nQuery = query.rows;
    const int nTrain = train.rows;

    // Capacity per query; presumably a heuristic bound on expected hits
    // (1% of the train set, but at least nQuery) — excess hits beyond
    // `cols` are clamped during conversion. TODO confirm intent.
    const int cols = std::max((nTrain / 100), nQuery);

    _matches.create(2 * nQuery + 1, cols, CV_32SC1);
    GpuMat matches = _matches.getGpuMat();

    GpuMat trainIdx(nQuery, cols, CV_32SC1, matches.ptr(0), matches.step);
    GpuMat distance(nQuery, cols, CV_32FC1, matches.ptr(nQuery), matches.step);
    GpuMat nMatches(1, nQuery, CV_32SC1, matches.ptr(2 * nQuery));

    // Hit counters start at zero; the kernel increments them atomically.
    nMatches.setTo(Scalar::all(0), stream);

    func(query, train, maxDistance, mask, trainIdx, distance, nMatches, StreamAccessor::getStream(stream));
}
// Asynchronous radius match over the stored train collection.
//
// Output layout (_matches, (3*nQuery + 1) x nQuery):
//   rows [0, nQuery)          : trainIdx
//   rows [nQuery, 2*nQuery)   : imgIdx
//   rows [2*nQuery, 3*nQuery) : distances
//   row  3*nQuery             : per-query hit counts
// The buffer is created as CV_32FC1 (vs CV_32SC1 in the single-train
// overload); radiusMatchConvert() uses that type difference to tell the
// two layouts apart.
void BFMatcher_Impl::radiusMatchAsync(InputArray _queryDescriptors,
                                      OutputArray _matches,
                                      float maxDistance,
                                      const std::vector<GpuMat>& masks,
                                      Stream& stream)
{
    using namespace cv::cuda::device::bf_radius_match;

    const GpuMat query = _queryDescriptors.getGpuMat();

    if (query.empty() || trainDescCollection_.empty())
    {
        _matches.release();
        return;
    }

    CV_Assert( query.channels() == 1 && query.depth() < CV_64F );

    GpuMat trainCollection, maskCollection;
    makeGpuCollection(trainDescCollection_, masks, trainCollection, maskCollection);

    // Dispatch tables indexed by Mat depth; 0 = unsupported combination.
    typedef void (*caller_t)(const PtrStepSzb& query, const PtrStepSzb* trains, int n, float maxDistance, const PtrStepSzb* masks,
                             const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, const PtrStepSz<unsigned int>& nMatches,
                             cudaStream_t stream);

    static const caller_t callersL1[] =
    {
        matchL1_gpu<unsigned char>, 0/*matchL1_gpu<signed char>*/,
        matchL1_gpu<unsigned short>, matchL1_gpu<short>,
        matchL1_gpu<int>, matchL1_gpu<float>
    };
    static const caller_t callersL2[] =
    {
        0/*matchL2_gpu<unsigned char>*/, 0/*matchL2_gpu<signed char>*/,
        0/*matchL2_gpu<unsigned short>*/, 0/*matchL2_gpu<short>*/,
        0/*matchL2_gpu<int>*/, matchL2_gpu<float>
    };
    static const caller_t callersHamming[] =
    {
        matchHamming_gpu<unsigned char>, 0/*matchHamming_gpu<signed char>*/,
        matchHamming_gpu<unsigned short>, 0/*matchHamming_gpu<short>*/,
        matchHamming_gpu<int>, 0/*matchHamming_gpu<float>*/
    };

    const caller_t* callers = norm_ == NORM_L1 ? callersL1 : norm_ == NORM_L2 ? callersL2 : callersHamming;

    const caller_t func = callers[query.depth()];
    if (func == 0)
    {
        CV_Error(Error::StsUnsupportedFormat, "unsupported combination of query.depth() and norm");
    }

    const int nQuery = query.rows;

    // Capacity is nQuery hits per query here (cols == nQuery).
    _matches.create(3 * nQuery + 1, nQuery, CV_32FC1);
    GpuMat matches = _matches.getGpuMat();

    GpuMat trainIdx(nQuery, nQuery, CV_32SC1, matches.ptr(0), matches.step);
    GpuMat imgIdx(nQuery, nQuery, CV_32SC1, matches.ptr(nQuery), matches.step);
    GpuMat distance(nQuery, nQuery, CV_32FC1, matches.ptr(2 * nQuery), matches.step);
    GpuMat nMatches(1, nQuery, CV_32SC1, matches.ptr(3 * nQuery));

    // Hit counters start at zero; the kernel increments them atomically.
    nMatches.setTo(Scalar::all(0), stream);

    // The collection radius kernels take raw arrays of device headers.
    std::vector<PtrStepSzb> trains_(trainDescCollection_.begin(), trainDescCollection_.end());
    std::vector<PtrStepSzb> masks_(masks.begin(), masks.end());

    func(query, &trains_[0], static_cast<int>(trains_.size()), maxDistance, masks_.size() == 0 ? 0 : &masks_[0],
         trainIdx, imgIdx, distance, nMatches, StreamAccessor::getStream(stream));
}
// Decode the packed result of radiusMatchAsync() into per-query DMatch
// lists, sorted by distance.
//
// The buffer's element type identifies the layout (see radiusMatchAsync):
//   CV_32SC1 : single-train layout, 2*nQuery + 1 rows (trainIdx rows,
//              distance rows, hit-count row).
//   CV_32FC1 : collection layout, 3*nQuery + 1 rows (adds imgIdx rows).
// Per-query hit counts are clamped to the buffer width, since the kernel
// may have found more matches than there was room to store.
void BFMatcher_Impl::radiusMatchConvert(InputArray _gpu_matches,
                                        std::vector< std::vector<DMatch> >& matches,
                                        bool compactResult)
{
    Mat gpu_matches;
    if (_gpu_matches.kind() == _InputArray::CUDA_GPU_MAT)
    {
        _gpu_matches.getGpuMat().download(gpu_matches);
    }
    else
    {
        gpu_matches = _gpu_matches.getMat();
    }

    if (gpu_matches.empty())
    {
        matches.clear();
        return;
    }

    CV_Assert( gpu_matches.type() == CV_32SC1 || gpu_matches.type() == CV_32FC1 );

    int nQuery = -1;

    const int* trainIdxPtr = NULL;
    const int* imgIdxPtr = NULL;
    const float* distancePtr = NULL;
    const int* nMatchesPtr = NULL;

    if (gpu_matches.type() == CV_32SC1)
    {
        nQuery = (gpu_matches.rows - 1) / 2;

        trainIdxPtr = gpu_matches.ptr<int>(0);
        distancePtr = gpu_matches.ptr<float>(nQuery);
        nMatchesPtr = gpu_matches.ptr<int>(2 * nQuery);
    }
    else
    {
        nQuery = (gpu_matches.rows - 1) / 3;

        trainIdxPtr = gpu_matches.ptr<int>(0);
        imgIdxPtr = gpu_matches.ptr<int>(nQuery);
        distancePtr = gpu_matches.ptr<float>(2 * nQuery);
        nMatchesPtr = gpu_matches.ptr<int>(3 * nQuery);
    }

    matches.clear();
    matches.reserve(nQuery);

    for (int queryIdx = 0; queryIdx < nQuery; ++queryIdx)
    {
        // Only the first nMatched slots of this query's row are valid.
        const int nMatched = std::min(nMatchesPtr[queryIdx], gpu_matches.cols);

        if (nMatched == 0)
        {
            // With compactResult, drop empty queries entirely.
            if (!compactResult)
            {
                matches.push_back(std::vector<DMatch>());
            }
        }
        else
        {
            matches.push_back(std::vector<DMatch>(nMatched));
            std::vector<DMatch>& curMatches = matches.back();

            for (int i = 0; i < nMatched; ++i)
            {
                const int trainIdx = trainIdxPtr[i];

                const int imgIdx = imgIdxPtr ? imgIdxPtr[i] : 0;
                const float distance = distancePtr[i];

                DMatch m(queryIdx, trainIdx, imgIdx, distance);

                curMatches[i] = m;
            }

            // Radius hits are unordered on the GPU; sort by distance here.
            std::sort(curMatches.begin(), curMatches.end());
        }

        // Advance one row per query. NOTE(review): stepping by cols assumes
        // the downloaded Mat is continuous (step == cols * elemSize).
        trainIdxPtr += gpu_matches.cols;
        distancePtr += gpu_matches.cols;
        if (imgIdxPtr)
            imgIdxPtr += gpu_matches.cols;
    }
}
}
// Factory: builds a CUDA brute-force descriptor matcher for the given norm type.
Ptr<cv::cuda::DescriptorMatcher> cv::cuda::DescriptorMatcher::createBFMatcher(int norm)
{
    Ptr<cv::cuda::DescriptorMatcher> matcher = makePtr<BFMatcher_Impl>(norm);
    return matcher;
}
#endif /* !defined (HAVE_CUDA) */
| apache-2.0 |
erikdubbelboer/druid | integration-tests/run_cluster.sh | 2942 | # cleanup
# Clean up containers left over from a previous run; stop/rm errors for
# containers that do not exist are harmless.
for node in druid-historical druid-coordinator druid-overlord druid-router druid-broker druid-middlemanager druid-zookeeper-kafka druid-metadata-storage;
do
docker stop $node
docker rm $node
done

# Environment variables: DIR is the directory containing this script.
DIR=$(cd $(dirname $0) && pwd)
DOCKERDIR=$DIR/docker
SHARED_DIR=${HOME}/shared
SUPERVISORDIR=/usr/lib/druid/conf
RESOURCEDIR=$DIR/src/test/resources

# so docker IP addr will be known during docker build
echo $DOCKER_IP > $DOCKERDIR/docker_ip

# Make directories if they don't exist
mkdir -p $SHARED_DIR/logs
mkdir -p $SHARED_DIR/tasklogs

# Install druid jars into the shared docker build context
rm -rf $SHARED_DIR/docker
cp -R docker $SHARED_DIR/docker
mvn -B dependency:copy-dependencies -DoutputDirectory=$SHARED_DIR/docker/lib

# Build Druid Cluster Image
docker build -t druid/cluster $SHARED_DIR/docker

# Start ZooKeeper and Kafka (shared container)
docker run -d --name druid-zookeeper-kafka -p 2181:2181 -p 9092:9092 -v $SHARED_DIR:/shared -v $DOCKERDIR/zookeeper.conf:$SUPERVISORDIR/zookeeper.conf -v $DOCKERDIR/kafka.conf:$SUPERVISORDIR/kafka.conf druid/cluster

# Start MySQL metadata storage
docker run -d --name druid-metadata-storage -v $SHARED_DIR:/shared -v $DOCKERDIR/metadata-storage.conf:$SUPERVISORDIR/metadata-storage.conf druid/cluster

# Start Overlord
docker run -d --name druid-overlord -p 8090:8090 -v $SHARED_DIR:/shared -v $DOCKERDIR/overlord.conf:$SUPERVISORDIR/overlord.conf --link druid-metadata-storage:druid-metadata-storage --link druid-zookeeper-kafka:druid-zookeeper-kafka druid/cluster

# Start Coordinator
docker run -d --name druid-coordinator -p 8081:8081 -v $SHARED_DIR:/shared -v $DOCKERDIR/coordinator.conf:$SUPERVISORDIR/coordinator.conf --link druid-overlord:druid-overlord --link druid-metadata-storage:druid-metadata-storage --link druid-zookeeper-kafka:druid-zookeeper-kafka druid/cluster

# Start Historical
docker run -d --name druid-historical -v $SHARED_DIR:/shared -v $DOCKERDIR/historical.conf:$SUPERVISORDIR/historical.conf --link druid-zookeeper-kafka:druid-zookeeper-kafka druid/cluster

# Start MiddleManager (task ports 8100-8105 exposed for peons)
docker run -d --name druid-middlemanager -p 8100:8100 -p 8101:8101 -p 8102:8102 -p 8103:8103 -p 8104:8104 -p 8105:8105 -v $RESOURCEDIR:/resources -v $SHARED_DIR:/shared -v $DOCKERDIR/middlemanager.conf:$SUPERVISORDIR/middlemanager.conf --link druid-zookeeper-kafka:druid-zookeeper-kafka --link druid-overlord:druid-overlord druid/cluster

# Start Broker
docker run -d --name druid-broker -p 8082:8082 -v $SHARED_DIR:/shared -v $DOCKERDIR/broker.conf:$SUPERVISORDIR/broker.conf --link druid-zookeeper-kafka:druid-zookeeper-kafka --link druid-middlemanager:druid-middlemanager --link druid-historical:druid-historical druid/cluster

# Start Router
docker run -d --name druid-router -p 8888:8888 -v $SHARED_DIR:/shared -v $DOCKERDIR/router.conf:$SUPERVISORDIR/router.conf --link druid-zookeeper-kafka:druid-zookeeper-kafka --link druid-coordinator:druid-coordinator --link druid-broker:druid-broker druid/cluster
| apache-2.0 |
marsorp/blog | presto166/presto-main/src/main/java/com/facebook/presto/operator/aggregation/MultimapAggregationFunction.java | 8045 | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.presto.operator.aggregation;
import com.facebook.presto.ExceededMemoryLimitException;
import com.facebook.presto.bytecode.DynamicClassLoader;
import com.facebook.presto.metadata.BoundVariables;
import com.facebook.presto.metadata.FunctionRegistry;
import com.facebook.presto.metadata.SqlAggregationFunction;
import com.facebook.presto.operator.aggregation.state.KeyValuePairStateSerializer;
import com.facebook.presto.operator.aggregation.state.KeyValuePairsState;
import com.facebook.presto.operator.aggregation.state.KeyValuePairsStateFactory;
import com.facebook.presto.spi.PrestoException;
import com.facebook.presto.spi.block.Block;
import com.facebook.presto.spi.block.BlockBuilder;
import com.facebook.presto.spi.type.Type;
import com.facebook.presto.spi.type.TypeManager;
import com.facebook.presto.type.ArrayType;
import com.facebook.presto.type.MapType;
import com.google.common.collect.ImmutableList;
import java.lang.invoke.MethodHandle;
import java.util.List;
import static com.facebook.presto.metadata.Signature.comparableTypeParameter;
import static com.facebook.presto.metadata.Signature.typeVariable;
import static com.facebook.presto.operator.aggregation.AggregationMetadata.ParameterMetadata;
import static com.facebook.presto.operator.aggregation.AggregationMetadata.ParameterMetadata.ParameterType.BLOCK_INDEX;
import static com.facebook.presto.operator.aggregation.AggregationMetadata.ParameterMetadata.ParameterType.BLOCK_INPUT_CHANNEL;
import static com.facebook.presto.operator.aggregation.AggregationMetadata.ParameterMetadata.ParameterType.NULLABLE_BLOCK_INPUT_CHANNEL;
import static com.facebook.presto.operator.aggregation.AggregationMetadata.ParameterMetadata.ParameterType.STATE;
import static com.facebook.presto.operator.aggregation.AggregationUtils.generateAggregationName;
import static com.facebook.presto.spi.StandardErrorCode.INVALID_FUNCTION_ARGUMENT;
import static com.facebook.presto.spi.type.TypeSignature.parseTypeSignature;
import static com.facebook.presto.util.ImmutableCollectors.toImmutableList;
import static com.facebook.presto.util.Reflection.methodHandle;
import static java.lang.String.format;
/**
 * Implementation of the {@code multimap_agg} aggregation: collects every
 * (key, value) input row into a single {@code map(K, array(V))}, grouping all
 * values observed for a key into that key's array.
 */
public class MultimapAggregationFunction
        extends SqlAggregationFunction
{
    public static final MultimapAggregationFunction MULTIMAP_AGG = new MultimapAggregationFunction();
    public static final String NAME = "multimap_agg";

    // Handles bound to the static accumulator transition methods below.
    private static final MethodHandle OUTPUT_FUNCTION = methodHandle(MultimapAggregationFunction.class, "output", KeyValuePairsState.class, BlockBuilder.class);
    private static final MethodHandle INPUT_FUNCTION = methodHandle(MultimapAggregationFunction.class, "input", KeyValuePairsState.class, Block.class, Block.class, int.class);
    private static final MethodHandle COMBINE_FUNCTION = methodHandle(MultimapAggregationFunction.class, "combine", KeyValuePairsState.class, KeyValuePairsState.class);

    public MultimapAggregationFunction()
    {
        // Signature: multimap_agg(K, V) -> map(K, array(V)); K must be
        // comparable since it is used as a map key.
        super(NAME,
                ImmutableList.of(comparableTypeParameter("K"), typeVariable("V")),
                ImmutableList.of(),
                parseTypeSignature("map(K,array(V))"),
                ImmutableList.of(parseTypeSignature("K"), parseTypeSignature("V")));
    }

    @Override
    public String getDescription()
    {
        return "Aggregates all the rows (key/value pairs) into a single multimap";
    }

    @Override
    public InternalAggregationFunction specialize(BoundVariables boundVariables, int arity, TypeManager typeManager, FunctionRegistry functionRegistry)
    {
        Type keyType = boundVariables.getTypeVariable("K");
        Type valueType = boundVariables.getTypeVariable("V");
        return generateAggregation(keyType, valueType);
    }

    /**
     * Generates the bytecode-backed accumulator implementation for the
     * concrete key/value types bound at specialization time.
     */
    private static InternalAggregationFunction generateAggregation(Type keyType, Type valueType)
    {
        DynamicClassLoader classLoader = new DynamicClassLoader(MultimapAggregationFunction.class.getClassLoader());
        List<Type> inputTypes = ImmutableList.of(keyType, valueType);
        Type outputType = new MapType(keyType, new ArrayType(valueType));
        KeyValuePairStateSerializer stateSerializer = new KeyValuePairStateSerializer(keyType, valueType, true);
        Type intermediateType = stateSerializer.getSerializedType();
        AggregationMetadata metadata = new AggregationMetadata(
                generateAggregationName(NAME, outputType.getTypeSignature(), inputTypes.stream().map(Type::getTypeSignature).collect(toImmutableList())),
                createInputParameterMetadata(keyType, valueType),
                INPUT_FUNCTION,
                COMBINE_FUNCTION,
                OUTPUT_FUNCTION,
                KeyValuePairsState.class,
                stateSerializer,
                new KeyValuePairsStateFactory(keyType, valueType),
                outputType);
        GenericAccumulatorFactoryBinder factory = AccumulatorCompiler.generateAccumulatorFactoryBinder(metadata, classLoader);
        return new InternalAggregationFunction(NAME, inputTypes, intermediateType, outputType, true, factory);
    }

    private static List<ParameterMetadata> createInputParameterMetadata(Type keyType, Type valueType)
    {
        // The value channel is declared nullable; the key channel is not.
        return ImmutableList.of(new ParameterMetadata(STATE),
                new ParameterMetadata(BLOCK_INPUT_CHANNEL, keyType),
                new ParameterMetadata(NULLABLE_BLOCK_INPUT_CHANNEL, valueType),
                new ParameterMetadata(BLOCK_INDEX));
    }

    /**
     * Accumulates one (key, value) row into the state, lazily creating the
     * pair container on first use and reporting memory growth to the state.
     *
     * @throws PrestoException if the accumulated pairs exceed the memory limit
     */
    public static void input(KeyValuePairsState state, Block key, Block value, int position)
    {
        KeyValuePairs pairs = state.get();
        if (pairs == null) {
            pairs = new KeyValuePairs(state.getKeyType(), state.getValueType(), true);
            state.set(pairs);
        }
        long startSize = pairs.estimatedInMemorySize();
        try {
            pairs.add(key, value, position, position);
        }
        catch (ExceededMemoryLimitException e) {
            // Fixed: message previously referred to "map_agg" (copy-paste);
            // this function is multimap_agg.
            throw new PrestoException(INVALID_FUNCTION_ARGUMENT, format("The result of multimap_agg may not exceed %s", e.getMaxMemory()));
        }
        state.addMemoryUsage(pairs.estimatedInMemorySize() - startSize);
    }

    /**
     * Merges {@code otherState} into {@code state} by re-adding every pair of
     * the other state; adopts the other state wholesale when this one is empty.
     */
    public static void combine(KeyValuePairsState state, KeyValuePairsState otherState)
    {
        if (state.get() != null && otherState.get() != null) {
            Block keys = otherState.get().getKeys();
            Block values = otherState.get().getValues();
            KeyValuePairs pairs = state.get();
            long startSize = pairs.estimatedInMemorySize();
            for (int i = 0; i < keys.getPositionCount(); i++) {
                try {
                    pairs.add(keys, values, i, i);
                }
                catch (ExceededMemoryLimitException e) {
                    // Fixed: message previously referred to "map_agg".
                    throw new PrestoException(INVALID_FUNCTION_ARGUMENT, format("The result of multimap_agg may not exceed %s", e.getMaxMemory()));
                }
            }
            state.addMemoryUsage(pairs.estimatedInMemorySize() - startSize);
        }
        else if (state.get() == null) {
            state.set(otherState.get());
        }
    }

    /**
     * Writes the final multimap (native map-of-arrays encoding) to the output
     * block builder; emits SQL NULL when no rows were aggregated.
     */
    public static void output(KeyValuePairsState state, BlockBuilder out)
    {
        KeyValuePairs pairs = state.get();
        if (pairs == null) {
            out.appendNull();
        }
        else {
            Block block = pairs.toMultimapNativeEncoding();
            out.writeObject(block);
            out.closeEntry();
        }
    }
}
| apache-2.0 |
evandor/skysail-webconsole | webconsole.client/client/dist/lib/@angular/platform-browser-dynamic/esm/platform_browser_private.js | 161 | import { __platform_browser_private__ as r } from '@angular/platform-browser';
// Re-export getDOM from Angular's private browser-platform API surface
// (generated build artifact; do not edit logic by hand).
export var getDOM = r.getDOM;
//# sourceMappingURL=platform_browser_private.js.map
bhutchinson/rice | rice-framework/krad-development-tools/src/main/java/org/kuali/rice/krad/devtools/datadictionary/URLMonitor.java | 3664 | /**
* Copyright 2005-2015 The Kuali Foundation
*
* Licensed under the Educational Community License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.opensource.org/licenses/ecl2.php
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kuali.rice.krad.devtools.datadictionary;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import java.io.IOException;
import java.net.URL;
import java.util.LinkedList;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.zip.CRC32;
import java.util.zip.CheckedInputStream;
/**
* This is a description of what this class does - gilesp don't forget to fill this in.
*
* @author Kuali Rice Team ([email protected])
*
*/
public class URLMonitor {
private static final Log LOG = LogFactory.getLog(URLMonitor.class);
private final LinkedList<URLContentChangedListener> listeners = new LinkedList<URLContentChangedListener>();
private final Map<URL, Long> resourceMap = new ConcurrentHashMap();
private final int reloadIntervalMilliseconds;
private final ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);
public URLMonitor(int reloadIntervalMilliseconds) {
this.reloadIntervalMilliseconds = reloadIntervalMilliseconds;
}
public void shutdownScheduler() {
scheduler.shutdown();
}
public synchronized void addListener(URLContentChangedListener listener) {
listeners.add(listener);
if (listeners.size() == 1) {
scheduler.scheduleAtFixedRate(urlPoller,
reloadIntervalMilliseconds, reloadIntervalMilliseconds, TimeUnit.MILLISECONDS);
}
}
public void addURI(URL zipUrl) {
resourceMap.put(zipUrl, getCRC(zipUrl));
}
private Long getCRC(URL zipUrl) {
Long result = -1l;
try {
CRC32 crc = new CRC32();
CheckedInputStream cis = new CheckedInputStream(zipUrl.openStream(), crc);
byte[] buffer = new byte[1024];
int length;
//read the entry from zip file and extract it to disk
while( (length = cis.read(buffer)) > 0);
cis.close();
result = crc.getValue();
} catch (IOException e) {
LOG.warn("Unable to calculate CRC, resource doesn't exist?", e);
}
return result;
}
private final Runnable urlPoller = new Runnable() {
@Override
public void run() {
for (Map.Entry<URL, Long> entry : resourceMap.entrySet()) {
Long crc = getCRC(entry.getKey());
if (!entry.getValue().equals(crc)) {
entry.setValue(crc);
for (URLContentChangedListener listener : listeners) {
listener.urlContentChanged(entry.getKey());
}
}
}
}
};
public static interface URLContentChangedListener {
public void urlContentChanged(URL url);
}
} | apache-2.0 |
svstanev/presto | presto-accumulo/src/test/java/com/facebook/presto/accumulo/TestAccumuloClient.java | 3498 | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.presto.accumulo;
import com.facebook.presto.accumulo.conf.AccumuloConfig;
import com.facebook.presto.accumulo.conf.AccumuloTableProperties;
import com.facebook.presto.accumulo.index.ColumnCardinalityCache;
import com.facebook.presto.accumulo.index.IndexLookup;
import com.facebook.presto.accumulo.metadata.AccumuloTable;
import com.facebook.presto.accumulo.metadata.ZooKeeperMetadataManager;
import com.facebook.presto.spi.ColumnMetadata;
import com.facebook.presto.spi.ConnectorTableMetadata;
import com.facebook.presto.spi.SchemaTableName;
import com.facebook.presto.type.TypeRegistry;
import com.google.common.collect.ImmutableList;
import org.apache.accumulo.core.client.Connector;
import org.testng.annotations.Test;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static com.facebook.presto.spi.type.BigintType.BIGINT;
import static org.testng.Assert.assertNotNull;
public class TestAccumuloClient
{
    private final AccumuloClient client;
    private final ZooKeeperMetadataManager zooKeeperMetadataManager;

    public TestAccumuloClient()
            throws Exception
    {
        // Wires a real client stack against the shared test Accumulo
        // connector supplied by AccumuloQueryRunner.
        AccumuloConfig config = new AccumuloConfig()
                .setUsername("root")
                .setPassword("secret");
        Connector connector = AccumuloQueryRunner.getAccumuloConnector();
        config.setZooKeepers(connector.getInstance().getZooKeepers());
        zooKeeperMetadataManager = new ZooKeeperMetadataManager(config, new TypeRegistry());
        client = new AccumuloClient(connector, config, zooKeeperMetadataManager, new AccumuloTableManager(connector), new IndexLookup(connector, new ColumnCardinalityCache(connector, config)));
    }

    @Test
    public void testCreateTableEmptyAccumuloColumn()
            throws Exception
    {
        // The column mappings deliberately leave the family and/or qualifier
        // empty ("b::b", "c:c:", "d::") to verify create-table tolerates them.
        SchemaTableName tableName = new SchemaTableName("default", "test_create_table_empty_accumulo_column");
        try {
            List<ColumnMetadata> columns = ImmutableList.of(
                    new ColumnMetadata("id", BIGINT),
                    new ColumnMetadata("a", BIGINT),
                    new ColumnMetadata("b", BIGINT),
                    new ColumnMetadata("c", BIGINT),
                    new ColumnMetadata("d", BIGINT));
            Map<String, Object> properties = new HashMap<>();
            new AccumuloTableProperties().getTableProperties().forEach(meta -> properties.put(meta.getName(), meta.getDefaultValue()));
            properties.put("external", true);
            properties.put("column_mapping", "a:a:a,b::b,c:c:,d::");
            client.createTable(new ConnectorTableMetadata(tableName, columns, properties));
            assertNotNull(client.getTable(tableName));
        }
        finally {
            // Best-effort cleanup so test reruns start from a clean slate.
            AccumuloTable table = zooKeeperMetadataManager.getTable(tableName);
            if (table != null) {
                client.dropTable(table);
            }
        }
    }
}
| apache-2.0 |
drewsetski/koding | go/src/koding/remoteapi/client/j_credential/post_remote_api_j_credential_fetch_users_id_responses.go | 4007 | package j_credential
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"fmt"
"io"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
"github.com/go-openapi/swag"
strfmt "github.com/go-openapi/strfmt"
"koding/remoteapi/models"
)
// PostRemoteAPIJCredentialFetchUsersIDReader is a Reader for the PostRemoteAPIJCredentialFetchUsersID structure.
// (Generated by go-swagger; edit the spec rather than this file.)
type PostRemoteAPIJCredentialFetchUsersIDReader struct {
	// formats carries the strfmt registry used when validating payloads.
	formats strfmt.Registry
}
// ReadResponse reads a server response into the received o.
// Only HTTP 200 is modelled by the spec; any other status code is surfaced
// as a generic runtime.APIError.
func (o *PostRemoteAPIJCredentialFetchUsersIDReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
	switch response.Code() {
	case 200:
		result := NewPostRemoteAPIJCredentialFetchUsersIDOK()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return result, nil
	default:
		return nil, runtime.NewAPIError("unknown error", response, response.Code())
	}
}
// NewPostRemoteAPIJCredentialFetchUsersIDOK creates a PostRemoteAPIJCredentialFetchUsersIDOK with default headers values
func NewPostRemoteAPIJCredentialFetchUsersIDOK() *PostRemoteAPIJCredentialFetchUsersIDOK {
	result := new(PostRemoteAPIJCredentialFetchUsersIDOK)
	return result
}
/*PostRemoteAPIJCredentialFetchUsersIDOK handles this case with default header values.

OK
*/
type PostRemoteAPIJCredentialFetchUsersIDOK struct {
	// Payload is the decoded response body.
	Payload PostRemoteAPIJCredentialFetchUsersIDOKBody
}
// Error renders the response as a human-readable string for the error interface.
func (o *PostRemoteAPIJCredentialFetchUsersIDOK) Error() string {
	const format = "[POST /remote.api/JCredential.fetchUsers/{id}][%d] postRemoteApiJCredentialFetchUsersIdOK %+v"
	return fmt.Sprintf(format, 200, o.Payload)
}
// readResponse decodes the wire payload into o.Payload using the negotiated
// consumer. io.EOF from an empty body is tolerated; other errors propagate.
func (o *PostRemoteAPIJCredentialFetchUsersIDOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	// response payload
	if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
		return err
	}
	return nil
}
/*PostRemoteAPIJCredentialFetchUsersIDOKBody post remote API j credential fetch users ID o k body
swagger:model PostRemoteAPIJCredentialFetchUsersIDOKBody
*/
type PostRemoteAPIJCredentialFetchUsersIDOKBody struct {
	// "allOf" composition: credential fields plus the standard response
	// envelope, flattened into a single JSON object on the wire.
	models.JCredential
	models.DefaultResponse
}
// UnmarshalJSON unmarshals this object from a JSON structure.
// Each embedded model is decoded independently from the same raw bytes,
// mirroring the swagger "allOf" composition.
func (o *PostRemoteAPIJCredentialFetchUsersIDOKBody) UnmarshalJSON(raw []byte) error {
	var postRemoteAPIJCredentialFetchUsersIDOKBodyAO0 models.JCredential
	if err := swag.ReadJSON(raw, &postRemoteAPIJCredentialFetchUsersIDOKBodyAO0); err != nil {
		return err
	}
	o.JCredential = postRemoteAPIJCredentialFetchUsersIDOKBodyAO0
	var postRemoteAPIJCredentialFetchUsersIDOKBodyAO1 models.DefaultResponse
	if err := swag.ReadJSON(raw, &postRemoteAPIJCredentialFetchUsersIDOKBodyAO1); err != nil {
		return err
	}
	o.DefaultResponse = postRemoteAPIJCredentialFetchUsersIDOKBodyAO1
	return nil
}
// MarshalJSON marshals this object to a JSON structure by serializing each
// embedded model separately and concatenating the resulting JSON objects.
func (o PostRemoteAPIJCredentialFetchUsersIDOKBody) MarshalJSON() ([]byte, error) {
	var _parts [][]byte
	postRemoteAPIJCredentialFetchUsersIDOKBodyAO0, err := swag.WriteJSON(o.JCredential)
	if err != nil {
		return nil, err
	}
	_parts = append(_parts, postRemoteAPIJCredentialFetchUsersIDOKBodyAO0)
	postRemoteAPIJCredentialFetchUsersIDOKBodyAO1, err := swag.WriteJSON(o.DefaultResponse)
	if err != nil {
		return nil, err
	}
	_parts = append(_parts, postRemoteAPIJCredentialFetchUsersIDOKBodyAO1)
	return swag.ConcatJSON(_parts...), nil
}
// Validate validates this post remote API j credential fetch users ID o k body
// by validating both embedded models and aggregating any failures into a
// single composite error.
func (o *PostRemoteAPIJCredentialFetchUsersIDOKBody) Validate(formats strfmt.Registry) error {
	var res []error
	if err := o.JCredential.Validate(formats); err != nil {
		res = append(res, err)
	}
	if err := o.DefaultResponse.Validate(formats); err != nil {
		res = append(res, err)
	}
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
| apache-2.0 |
indashnet/InDashNet.Open.UN2000 | lichee/linux-3.4/arch/arm/mach-sun7i/include/mach/hardware.h | 1080 | /*
* arch/arm/mach-sun7i/include/mach/hardware.h
*
* This file contains the hardware definitions of the RealView boards.
*
* Copyright (C) 2012-2016 Allwinner Limited.
* Benn Huang ([email protected])
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __SW_HARDWARE_H
#define __SW_HARDWARE_H
/* Fixed offset mapping a peripheral physical address into the kernel's
 * static I/O virtual window. */
#define IO_ADDRESS(x) ((x) + 0xf0000000)
/* Cookie form of IO_ADDRESS for use with the __io() accessor macro. */
#define __io_address(n) __io(IO_ADDRESS(n))
#endif
| apache-2.0 |
axiubest/TaojiuhuiBuyer | 淘酒汇 XIU/Jiu/FDAlertView.h | 1504 | //
// FDAlertView.h
// FDAlertViewDemo
//
// Created by fergusding on 15/5/26.
// Copyright (c) 2015年 fergusding. All rights reserved.
//
#import <UIKit/UIKit.h>
@protocol FDAlertViewDelegate;

// Custom alert view with an optional icon, title, message and a variadic row
// of buttons; taps are reported through FDAlertViewDelegate.
@interface FDAlertView : UIView
// Arbitrary content view — exact role not visible from this header; see the
// implementation before relying on it.
@property (strong, nonatomic) UIView *contentView;
@property (strong, nonatomic) UIImage *icon;
@property (strong, nonatomic) NSString *title;
@property (strong, nonatomic) NSString *message;
@property (weak, nonatomic) id<FDAlertViewDelegate> delegate;
// Designated initializer; the button-title list must be nil-terminated.
- (instancetype)initWithTitle:(NSString *)title icon:(UIImage *)icon message:(NSString *)message delegate:(id<FDAlertViewDelegate>)delegate buttonTitles:(NSString *)buttonTitles, ... NS_REQUIRES_NIL_TERMINATION;
// Show the alert view in the current window
- (void)show;
// Hide the alert view
- (void)hide;
// Set the color and font size of the title; if color is nil, default is black. If fontSize is 0, default is 14.
- (void)setTitleColor:(UIColor *)color fontSize:(CGFloat)size;
// Set the color and font size of the message; if color is nil, default is black. If fontSize is 0, default is 12.
- (void)setMessageColor:(UIColor *)color fontSize:(CGFloat)size;
// Set the color and font size of the button at the index; if color is nil, default is black. If fontSize is 0, default is 16.
- (void)setButtonTitleColor:(UIColor *)color fontSize:(CGFloat)size atIndex:(NSInteger)index;
@end

@protocol FDAlertViewDelegate <NSObject>
- (void)alertView:(FDAlertView *)alertView clickedButtonAtIndex:(NSInteger)buttonIndex;
@end
| apache-2.0 |
apache/flink | flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/collect/CollectSinkOperatorCoordinator.java | 9405 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.api.operators.collect;
import org.apache.flink.core.memory.DataInputViewStreamWrapper;
import org.apache.flink.core.memory.DataOutputViewStreamWrapper;
import org.apache.flink.runtime.jobgraph.OperatorID;
import org.apache.flink.runtime.operators.coordination.CoordinationRequest;
import org.apache.flink.runtime.operators.coordination.CoordinationRequestHandler;
import org.apache.flink.runtime.operators.coordination.CoordinationResponse;
import org.apache.flink.runtime.operators.coordination.OperatorCoordinator;
import org.apache.flink.runtime.operators.coordination.OperatorEvent;
import org.apache.flink.util.Preconditions;
import org.apache.flink.util.concurrent.ExecutorThreadFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.annotation.Nullable;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.util.Collections;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
/**
* {@link OperatorCoordinator} for {@link CollectSinkFunction}.
*
* <p>This coordinator only forwards requests and responses from clients and sinks and it does not
* store any results in itself.
*/
public class CollectSinkOperatorCoordinator
        implements OperatorCoordinator, CoordinationRequestHandler {
    private static final Logger LOG = LoggerFactory.getLogger(CollectSinkOperatorCoordinator.class);

    // Read timeout (milliseconds) applied to the socket that talks to the sink.
    private final int socketTimeout;

    // Address of the sink subtask's socket server; null until announced via a
    // CollectSinkAddressEvent, and reset to null when the subtask fails.
    private InetSocketAddress address;

    // Lazily-established connection state, used from the single worker thread.
    private Socket socket;
    private DataInputViewStreamWrapper inStream;
    private DataOutputViewStreamWrapper outStream;
    private ExecutorService executorService;

    public CollectSinkOperatorCoordinator(int socketTimeout) {
        this.socketTimeout = socketTimeout;
    }

    @Override
    public void start() throws Exception {
        // A single-threaded executor serializes all forwarded requests, so the
        // socket fields need no additional synchronization on that path.
        this.executorService =
                Executors.newSingleThreadExecutor(
                        new ExecutorThreadFactory(
                                "collect-sink-operator-coordinator-executor-thread-pool"));
    }

    @Override
    public void close() throws Exception {
        closeConnection();
        this.executorService.shutdown();
    }

    /** Remembers the sink's socket server address announced by the operator. */
    @Override
    public void handleEventFromOperator(int subtask, OperatorEvent event) throws Exception {
        Preconditions.checkArgument(
                event instanceof CollectSinkAddressEvent,
                "Operator event must be a CollectSinkAddressEvent");
        address = ((CollectSinkAddressEvent) event).getAddress();
        LOG.info("Received sink socket server address: " + address);
    }

    /**
     * Forwards a client's collect request to the sink asynchronously; when no
     * sink address is known yet, completes immediately with an empty response.
     */
    @Override
    public CompletableFuture<CoordinationResponse> handleCoordinationRequest(
            CoordinationRequest request) {
        Preconditions.checkArgument(
                request instanceof CollectCoordinationRequest,
                "Coordination request must be a CollectCoordinationRequest");
        CollectCoordinationRequest collectRequest = (CollectCoordinationRequest) request;
        CompletableFuture<CoordinationResponse> responseFuture = new CompletableFuture<>();
        if (address == null) {
            completeWithEmptyResponse(collectRequest, responseFuture);
            return responseFuture;
        }
        executorService.submit(() -> handleRequestImpl(collectRequest, responseFuture, address));
        return responseFuture;
    }

    /**
     * Runs on the worker thread: (re)connects to the sink if necessary, sends
     * the serialized request and completes the future with the sink's reply.
     * Any failure tears down the connection and yields an empty response —
     * the coordinator itself must never fail because of a flaky sink.
     */
    private void handleRequestImpl(
            CollectCoordinationRequest request,
            CompletableFuture<CoordinationResponse> responseFuture,
            InetSocketAddress sinkAddress) {
        if (sinkAddress == null) {
            closeConnection();
            completeWithEmptyResponse(request, responseFuture);
            return;
        }
        try {
            if (socket == null) {
                socket = new Socket();
                socket.setSoTimeout(socketTimeout);
                socket.setKeepAlive(true);
                // Requests are small and latency-sensitive; disable Nagle.
                socket.setTcpNoDelay(true);
                socket.connect(sinkAddress);
                inStream = new DataInputViewStreamWrapper(socket.getInputStream());
                outStream = new DataOutputViewStreamWrapper(socket.getOutputStream());
                LOG.info("Sink connection established");
            }
            // send version and offset to sink server
            if (LOG.isDebugEnabled()) {
                LOG.debug("Forwarding request to sink socket server");
            }
            request.serialize(outStream);
            // fetch back serialized results
            if (LOG.isDebugEnabled()) {
                LOG.debug("Fetching serialized result from sink socket server");
            }
            responseFuture.complete(new CollectCoordinationResponse(inStream));
        } catch (Exception e) {
            // request failed, close current connection and send back empty results
            // we catch every exception here because socket might suddenly becomes null if the sink
            // fails
            // and we do not want the coordinator to fail
            if (LOG.isDebugEnabled()) {
                // this is normal when sink restarts or job ends, so we print a debug log
                LOG.debug("Collect sink coordinator encounters an exception", e);
            }
            closeConnection();
            completeWithEmptyResponse(request, responseFuture);
        }
    }

    /** Completes the future with a no-results response echoing the request version. */
    private void completeWithEmptyResponse(
            CollectCoordinationRequest request, CompletableFuture<CoordinationResponse> future) {
        future.complete(
                new CollectCoordinationResponse(
                        request.getVersion(),
                        // this lastCheckpointedOffset is OK
                        // because client will only expose results to the
                        // users when the checkpointed offset increases
                        -1,
                        Collections.emptyList()));
    }

    /** Closes the socket (best-effort) and forgets it so the next request reconnects. */
    private void closeConnection() {
        if (socket != null) {
            try {
                socket.close();
            } catch (IOException e) {
                LOG.warn("Failed to close sink socket server connection", e);
            }
        }
        socket = null;
    }

    @Override
    public void subtaskFailed(int subtask, @Nullable Throwable reason) {
        // subtask failed, the socket server does not exist anymore
        address = null;
    }

    @Override
    public void subtaskReset(int subtask, long checkpointId) {
        // nothing to do here, connections are re-created lazily
    }

    @Override
    public void subtaskReady(int subtask, SubtaskGateway gateway) {
        // nothing to do here, connections are re-created lazily
    }

    /** Snapshots only the known sink address — no result data lives here. */
    @Override
    public void checkpointCoordinator(long checkpointId, CompletableFuture<byte[]> result)
            throws Exception {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        ObjectOutputStream oos = new ObjectOutputStream(baos);
        oos.writeObject(address);
        result.complete(baos.toByteArray());
    }

    @Override
    public void notifyCheckpointComplete(long checkpointId) {}

    /** Restores the sink address from a checkpoint, or drops the connection on a fresh start. */
    @Override
    public void resetToCheckpoint(long checkpointId, @Nullable byte[] checkpointData)
            throws Exception {
        if (checkpointData == null) {
            // restore before any checkpoint completed
            closeConnection();
        } else {
            ByteArrayInputStream bais = new ByteArrayInputStream(checkpointData);
            ObjectInputStream ois = new ObjectInputStream(bais);
            address = (InetSocketAddress) ois.readObject();
        }
    }

    /** Provider for {@link CollectSinkOperatorCoordinator}. */
    public static class Provider implements OperatorCoordinator.Provider {
        private final OperatorID operatorId;
        private final int socketTimeout;

        public Provider(OperatorID operatorId, int socketTimeout) {
            this.operatorId = operatorId;
            this.socketTimeout = socketTimeout;
        }

        @Override
        public OperatorID getOperatorId() {
            return operatorId;
        }

        @Override
        public OperatorCoordinator create(Context context) {
            // we do not send operator event so we don't need a context
            return new CollectSinkOperatorCoordinator(socketTimeout);
        }
    }
}
| apache-2.0 |
mlperf/inference_results_v0.7 | open/Inspur/code/harness/harness_dlrm/main_dlrm.cc | 11024 | // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "glog/logging.h"
#include "NvInferPlugin.h"
#include "logger.h"
#include "test_settings.h"
#include "loadgen.h"
#include "numpy.hpp"
#include "qsl.hpp"
#include "dlrm_server.h"
#include "dlrm_qsl.hpp"
#include "utils.hpp"
#include "cuda_profiler_api.h"
#include <dlfcn.h>
DEFINE_string(gpu_engines, "", "Engine");
DEFINE_string(plugins, "", "Comma-separated list of shared objects for plugins");
DEFINE_string(scenario, "Offline", "Scenario to run for Loadgen (Offline, SingleStream, Server)");
DEFINE_string(test_mode, "PerformanceOnly", "Testing mode for Loadgen");
DEFINE_string(model, "dlrm", "Model name");
DEFINE_uint32(gpu_batch_size, 16384, "Max Batch size to use for all devices and engines");
DEFINE_bool(use_graphs, false, "Enable cudaGraphs for TensorRT engines"); // TODO: Enable support for Cuda Graphs
DEFINE_bool(verbose, false, "Use verbose logging");
DEFINE_uint32(gpu_copy_streams, 1, "[CURRENTLY NOT USED] Number of copy streams");
DEFINE_uint32(gpu_num_bundles, 2, "Number of event-buffer bundles per GPU");
DEFINE_uint32(complete_threads, 1, "Number of threads per device for sending responses");
DEFINE_uint32(gpu_inference_streams, 1, "Number of inference streams");
DEFINE_double(warmup_duration, 1.0, "Minimum duration to run warmup for");
// configuration files
DEFINE_string(mlperf_conf_path, "", "Path to mlperf.conf");
DEFINE_string(user_conf_path, "", "Path to user.conf");
DEFINE_uint64(single_stream_expected_latency_ns, 100000, "Inverse of desired target QPS");
// Loadgen logging settings
DEFINE_string(logfile_outdir, "", "Specify the existing output directory for the LoadGen logs");
DEFINE_string(logfile_prefix, "", "Specify the filename prefix for the LoadGen log files");
DEFINE_string(logfile_suffix, "", "Specify the filename suffix for the LoadGen log files");
DEFINE_bool(logfile_prefix_with_datetime, false, "Prefix filenames for LoadGen log files");
DEFINE_bool(log_copy_detail_to_stdout, false, "Copy LoadGen detailed logging to stdout");
DEFINE_bool(disable_log_copy_summary_to_stdout, false, "Disable copy LoadGen summary logging to stdout");
DEFINE_string(log_mode, "AsyncPoll", "Logging mode for Loadgen");
DEFINE_uint64(log_mode_async_poll_interval_ms, 1000, "Specify the poll interval for asynchrounous logging");
DEFINE_bool(log_enable_trace, false, "Enable trace logging");
// QSL arguments
DEFINE_string(map_path, "", "Path to map file for samples");
DEFINE_string(sample_partition_path, "", "Path to sample partition file in npy format.");
DEFINE_string(tensor_path, "", "Path to preprocessed samples in npy format (<full_image_name>.npy). Comma-separated list if there are more than one input.");
DEFINE_uint64(performance_sample_count, 0, "Number of samples to load in performance set. 0=use default");
DEFINE_bool(start_from_device, false, "Assuming that inputs start from device memory in QSL");
// Dataset arguments
DEFINE_uint32(min_sample_size, 100, "Minimum number of pairs a sample can contain.");
DEFINE_uint32(max_sample_size, 700, "Maximum number of pairs a sample can contain.");
// BatchMaker arguments
DEFINE_uint32(num_staging_threads, 8, "Number of staging threads in DLRM BatchMaker");
DEFINE_uint32(num_staging_batches, 4, "Number of staging batches in DLRM BatchMaker");
DEFINE_uint32(max_pairs_per_staging_thread, 0, "Maximum pairs to copy in one BatchMaker staging thread (0 = use default");
DEFINE_bool(check_contiguity, false, "Whether to use contiguity checking in BatchMaker (default: false, recommended: true for Offline)");
DEFINE_string(numa_config, "", "NUMA settings: cpu cores for each GPU, assuming each GPU corresponds to one NUMA node");
/* Define a map to convert scenario input string into its corresponding enum value */
std::map<std::string, mlperf::TestScenario> scenarioMap = {
{"Offline", mlperf::TestScenario::Offline},
{"SingleStream", mlperf::TestScenario::SingleStream},
{"Server", mlperf::TestScenario::Server},
};
/* Define a map to convert test mode input string into its corresponding enum value */
std::map<std::string, mlperf::TestMode> testModeMap = {
{"SubmissionRun", mlperf::TestMode::SubmissionRun},
{"AccuracyOnly", mlperf::TestMode::AccuracyOnly},
{"PerformanceOnly", mlperf::TestMode::PerformanceOnly}
};
/* Define a map to convert logging mode input string into its corresponding enum value */
std::map<std::string, mlperf::LoggingMode> logModeMap = {
{"AsyncPoll", mlperf::LoggingMode::AsyncPoll},
{"EndOfTestOnly", mlperf::LoggingMode::EndOfTestOnly},
{"Synchronous", mlperf::LoggingMode::Synchronous}
};
// Example of the format: "0-63,64-127" for 2 GPUs (one per NUMA node).
// Parses the comma-separated list of inclusive CPU ranges into one
// vector of CPU ids per NUMA node.  An empty string yields an empty config.
NumaConfig parseNumaConfig(const std::string& s)
{
    NumaConfig config;
    if (s.empty())
    {
        return config;
    }
    for (const auto& node : splitString(s, ","))
    {
        auto range = splitString(node, "-");
        CHECK(range.size() == 2) << "Invalid numa_config setting.";
        const int first = std::stoi(range[0]);
        const int last = std::stoi(range[1]);
        CHECK(first <= last) << "Invalid numa_config setting.";
        std::vector<int> cpus(last - first + 1);
        std::iota(cpus.begin(), cpus.end(), first);
        config.emplace_back(cpus);
    }
    return config;
}
// Harness entry point: parses flags, loads plugin libraries, builds the DLRM
// QSL and server, then runs LoadGen with the configured scenario and logging.
int main(int argc, char* argv[])
{
    FLAGS_alsologtostderr = 1; // Log to console
    ::google::InitGoogleLogging("TensorRT mlperf");
    ::google::ParseCommandLineFlags(&argc, &argv, true);
    const std::string gSampleName = "DLRM_HARNESS";
    auto sampleTest = gLogger.defineTest(gSampleName, argc, const_cast<const char**>(argv));
    if (FLAGS_verbose) {
        setReportableSeverity(Severity::kVERBOSE);
    }
    gLogger.reportTestStart(sampleTest);
    initLibNvInferPlugins(&gLogger.getTRTLogger(), "");
    // Load all the needed shared objects for plugins.
    std::vector<std::string> plugin_files = splitString(FLAGS_plugins, ",");
    for (auto& s : plugin_files)
    {
        // Handles are intentionally never dlclose'd: plugins must stay loaded
        // for the lifetime of the process.
        void* dlh = dlopen(s.c_str(), RTLD_LAZY);
        if (nullptr == dlh)
        {
            gLogError << "Error loading plugin library " << s << std::endl;
            return 1;
        }
    }
    // Scope to force all smart objects destruction before CUDA context resets
    {
        int num_gpu;
        cudaGetDeviceCount(&num_gpu);
        LOG(INFO) << "Found " << num_gpu << " GPUs";
        // Configure the test settings
        mlperf::TestSettings testSettings;
        testSettings.scenario = scenarioMap[FLAGS_scenario];
        testSettings.mode = testModeMap[FLAGS_test_mode];
        testSettings.FromConfig(FLAGS_mlperf_conf_path, FLAGS_model, FLAGS_scenario);
        testSettings.FromConfig(FLAGS_user_conf_path, FLAGS_model, FLAGS_scenario);
        testSettings.single_stream_expected_latency_ns = FLAGS_single_stream_expected_latency_ns;
        testSettings.server_coalesce_queries = true;
        // Configure the logging settings
        mlperf::LogSettings logSettings;
        logSettings.log_output.outdir = FLAGS_logfile_outdir;
        logSettings.log_output.prefix = FLAGS_logfile_prefix;
        logSettings.log_output.suffix = FLAGS_logfile_suffix;
        logSettings.log_output.prefix_with_datetime = FLAGS_logfile_prefix_with_datetime;
        logSettings.log_output.copy_detail_to_stdout = FLAGS_log_copy_detail_to_stdout;
        logSettings.log_output.copy_summary_to_stdout = !FLAGS_disable_log_copy_summary_to_stdout;
        logSettings.log_mode = logModeMap[FLAGS_log_mode];
        logSettings.log_mode_async_poll_interval_ms = FLAGS_log_mode_async_poll_interval_ms;
        logSettings.enable_trace = FLAGS_log_enable_trace;
        // Use every visible GPU, numbered 0..num_gpu-1.
        std::vector<int> gpus(num_gpu);
        std::iota(gpus.begin(), gpus.end(), 0);
        // Load the sample partition. We do this here to calculate the performance sample count of the underlying
        // LWIS QSL, since the super constructor must be in the constructor initialization list.
        std::vector<int> originalPartition;
        // Scope to automatically close the file
        {
            npy::NpyFile samplePartitionFile(FLAGS_sample_partition_path);
            CHECK_EQ(samplePartitionFile.getDims().size(), 1);
            // For now we do not allow numPartitions == FLAGS_performance_sample_count, since the TotalSampleCount
            // is determined at runtime in the underlying LWIS QSL.
            size_t numPartitions = samplePartitionFile.getDims()[0];
            CHECK_EQ(numPartitions > FLAGS_performance_sample_count, true);
            std::vector<char> tmp(samplePartitionFile.getTensorSize());
            samplePartitionFile.loadAll(tmp);
            originalPartition.resize(numPartitions);
            memcpy(originalPartition.data(), tmp.data(), tmp.size());
            LOG(INFO) << "Loaded " << originalPartition.size() - 1 << " sample partitions. (" << tmp.size() << ") bytes.";
        }
        // Force underlying QSL to load all samples, since we want to be able to grab any partition given the sample
        // index.
        size_t perfPairCount = originalPartition.back();
        auto qsl = std::make_shared<DLRMSampleLibrary>(
            "DLRM QSL",
            FLAGS_map_path,
            splitString(FLAGS_tensor_path, ","),
            originalPartition,
            FLAGS_performance_sample_count,
            perfPairCount,
            0,
            true,
            FLAGS_start_from_device);
        auto dlrm_server = std::make_shared<DLRMServer>(
            "DLRM SERVER",
            FLAGS_gpu_engines,
            qsl,
            gpus,
            FLAGS_gpu_batch_size,
            FLAGS_gpu_num_bundles,
            FLAGS_complete_threads,
            FLAGS_gpu_inference_streams,
            FLAGS_warmup_duration,
            FLAGS_num_staging_threads,
            FLAGS_num_staging_batches,
            FLAGS_max_pairs_per_staging_thread,
            FLAGS_check_contiguity,
            FLAGS_start_from_device,
            parseNumaConfig(FLAGS_numa_config));
        LOG(INFO) << "Starting running actual test.";
        cudaProfilerStart();
        StartTest(dlrm_server.get(), qsl.get(), testSettings, logSettings);
        cudaProfilerStop();
        LOG(INFO) << "Finished running actual test.";
    }
    return 0;
}
| apache-2.0 |
elmariofredo/homebrew-fonts | Casks/font-lohit-tamil-classical.rb | 376 | cask :v1 => 'font-lohit-tamil-classical' do
version '2.5.3'
sha256 '325ea1496bb2ae4f77552c268190251a5155717dccda64d497da4388d17c2432'
url "https://fedorahosted.org/releases/l/o/lohit/lohit-tamil-classical-ttf-#{version}.tar.gz"
homepage 'https://fedorahosted.org/lohit/'
license :unknown
font "lohit-tamil-classical-ttf-#{version}/Lohit-Tamil-Classical.ttf"
end
| bsd-2-clause |
davidbrazdil/nacl | tests/lock_manager/nacl_test_util_repl.c | 45466 | /*
* Copyright (c) 2013 The Native Client Authors. All rights reserved.
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
/*
* This file contains the Read-Eval-Print loop. It defines the test
* language embedded in the s-expressions.
*/
#include <ctype.h>
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
#include <unistd.h> /* portability.h */
#define MAX_THREADS 1024
#define MAX_FILES 1024
#define MIN_EPSILON_DELAY_NANOS 1000
#include "native_client/tests/lock_manager/nacl_test_util_repl.h"
#include "native_client/src/shared/platform/nacl_check.h"
#include "native_client/src/shared/platform/posix/nacl_file_lock.h"
#include "native_client/src/shared/platform/posix/nacl_file_lock_intern.h"
#include "native_client/tests/lock_manager/nacl_test_util_sexp.h"
/*
* To test the NaClFileLockManager object's API, we have to invoke the
* API from multiple threads -- since the NaClFileLockManagerLock
* function can block -- and ensure that locking and unlocking happens
* in an acceptable order. The output sequence -- "output" occurs
* when a lock is taken or released, i.e., at lock state transitions
* -- can be serialized, but the test will be inherently racy, e.g.,
* dropping a lock in one thread while two threads are already blocked
* attempting to get the lock has two valid/acceptable output
* sequences. Instead of handling this, we avoid the mess: the test
* driver will be multithreaded, but will (attempt to) coordinate the
* threads so that only one operation is done by one thread at any
* particular time slot -- the sequence of operations will be strictly
* sequential. Because threads must acknowledge that they are about
* to perform the requested operation before they actually do it, we
* introduce a microsleep testing quantum in the execution of the test
* machine, so that the next test step should not proceed prior to the
* thread actually having done its task, even if the task would not
* cause any events to fire.
*
* To handle the possibility that two or more threads will wake up at
* the same time -- and as a result, output "wakeup" messages -- the
* output matcher accepts a specification that is petri-net-like: the
* expected output can be a list of outputs which all must occur, but
* may occur in any order. To handle races, alternation must be
* possible. This is done using a matcher-of-matcher that returns the
* index of the matcher that succeeded, in combination with nth,
* quote, and eval to run the continuation for the match. See
* test_input.txt for one example.
*
* The lisp-like execution environment does not include any bindings
* for variables, so the static vs dynamic scoping question does not
* apply. If/when we include parametric matching, this will have to
* be addressed, in addition to deciding on how the bound variables
* are made available to the match's continuation.
*
* There is no garbage collection. Instead, we copy lists (too much)
* to avoid leaking memory.
*/
static int gVerbosity = 0;
static int gInteractive = 0; /* do not crash on error if set */
static size_t gEpsilonDelayNanos = 0;
static pthread_key_t gNaClTestTsdKey;
#define DPRINTF(args) \
do { \
if (gVerbosity > 1) { \
printf args; \
} \
} while (0)
struct ThreadState;
enum EventType {
kLocked = 0,
kUnlocked = 1,
};
struct Event {
struct Event *next;
enum EventType type;
int thread_id;
int file_id;
};
struct WorkState {
/* initialized before going threaded */
size_t num_workers;
struct ThreadState *workers;
/* protects rest */
pthread_mutex_t mu;
pthread_cond_t cv;
int actor_thread;
#define WORK_PENDING (-1)
#define WORK_ACCEPTED (-2)
#define WORK_ENDED (-3)
void (*action_functor)(void *functor_state);
void *functor_state;
struct Event *q, **qend;
struct NaClFileLockManager flm;
struct NaClFileLockTestInterface *test_if;
void (*orig_set_identity)(struct NaClFileLockEntry *, int desc);
void (*orig_take_lock)(int desc);
void (*orig_drop_lock)(int desc);
};
/*
* Error / abort handling function. We do not grab locks here, since
* this may be called in an error context where the lock is already
* held. Since this is a debugging aid, we tolerate the possibility
* of inconsistent / undefined behavior.
*/
static void NaClReplDumpWs(struct WorkState *ws) {
  struct Event *ev;

  /* Dump every queued event; intentionally lock-free (see comment above). */
  for (ev = ws->q; NULL != ev; ev = ev->next) {
    printf("event type %d, tid %d, fid %d\n",
           ev->type, ev->thread_id, ev->file_id);
  }
}
/* Fatal: report |reason| and the offending node, dump state, and abort. */
static void crash_node(struct WorkState *ws,
                       char const *reason,
                       struct NaClSexpNode *n) {
  fprintf(stderr, "Runtime error: %s\n", reason);
  NaClSexpPrintNode(stderr, n);
  putc('\n', stderr);
  NaClReplDumpWs(ws);
  exit(1);
}
/* Fatal: like crash_node, but the culprit is a cons cell / list. */
static void crash_cons(struct WorkState *ws,
                       char const *reason,
                       struct NaClSexpCons *c) {
  fprintf(stderr, "Runtime error: %s\n", reason);
  NaClSexpPrintCons(stderr, c);
  putc('\n', stderr);
  NaClReplDumpWs(ws);
  exit(1);
}
/*
 * Non-fatal in interactive mode: report the error on stdout and, for
 * batch runs (!gInteractive), exit with failure.
 */
static void error_node(struct WorkState *ws,
                       char const *reason,
                       struct NaClSexpNode *n) {
  printf("Error: %s\n", reason);
  NaClSexpPrintNode(stdout, n);
  putchar('\n');
  NaClReplDumpWs(ws);
  /*
   * Eventually, with proper garbage collection, we may distinguish
   * between batch execution and interactive execution, with errors
   * aborting batch execution but letting errors longjmp back to the
   * REPL for interactive use.
   */
  if (!gInteractive) {
    exit(1);
  }
}
/* Cons-cell variant of error_node; same interactive/batch behavior. */
static void error_cons(struct WorkState *ws,
                       char const *reason,
                       struct NaClSexpCons *c) {
  printf("Error: %s\n", reason);
  NaClSexpPrintCons(stdout, c);
  putchar('\n');
  NaClReplDumpWs(ws);
  if (!gInteractive) {
    exit(1);
  }
}
void EventOccurred(struct ThreadState *ts, enum EventType type, int file_id);
struct ThreadState {
pthread_t tid;
int this_thread;
struct WorkState *ws;
};
/*
 * Hook installed into the NaClFileLockManager under test.  Runs on a
 * worker thread; recovers that thread's ThreadState from TSD and
 * delegates to the test interface's set_identity.
 */
static void NaClFileLockTestSetFileIdentityDataWrapper(
    struct NaClFileLockEntry *entry,
    int desc) {
  struct ThreadState *ts;
  ts = (struct ThreadState *) pthread_getspecific(gNaClTestTsdKey);
  CHECK(NULL != ts);
  (*ts->ws->test_if->set_identity)(ts->ws->test_if,
                                   ts->ws->orig_set_identity,
                                   entry, desc);
}
/*
 * take_lock hook: delegates to the test interface; when the interface
 * reports that the lock transition actually happened, emits a kLocked
 * event for the output matcher.
 */
static void NaClFileLockTestTakeFileLockWrapper(int desc) {
  struct ThreadState *ts;
  ts = (struct ThreadState *) pthread_getspecific(gNaClTestTsdKey);
  CHECK(NULL != ts);
  if ((*ts->ws->test_if->take_lock)(ts->ws->test_if,
                                    ts->ws->orig_take_lock,
                                    ts->this_thread, desc)) {
    EventOccurred(ts, kLocked, desc);
  }
}
/* drop_lock hook: counterpart of the take_lock wrapper (kUnlocked event). */
static void NaClFileLockTestDropFileLockWrapper(int desc) {
  struct ThreadState *ts;
  ts = (struct ThreadState *) pthread_getspecific(gNaClTestTsdKey);
  CHECK(NULL != ts);
  if ((*ts->ws->test_if->drop_lock)(ts->ws->test_if,
                                    ts->ws->orig_drop_lock,
                                    ts->this_thread, desc)) {
    EventOccurred(ts, kUnlocked, desc);
  }
}
/*
 * Initializes the shared WorkState.  NB: the lock manager's original
 * function pointers are saved *before* being replaced with the test
 * wrappers above, so the wrappers can chain to the real implementations.
 */
void WorkStateCtor(struct WorkState *ws,
                   struct NaClFileLockTestInterface *test_if) {
  ws->num_workers = 0; /* unknown */
  ws->workers = NULL;
  pthread_mutex_init(&ws->mu, (pthread_mutexattr_t *) NULL);
  pthread_cond_init(&ws->cv, (pthread_condattr_t *) NULL);
  ws->q = NULL;
  ws->qend = &ws->q;
  ws->actor_thread = WORK_PENDING;
  NaClFileLockManagerCtor(&ws->flm);
  ws->orig_set_identity = ws->flm.set_file_identity_data;
  ws->orig_take_lock = ws->flm.take_file_lock;
  ws->orig_drop_lock = ws->flm.drop_file_lock;
  ws->test_if = test_if;
  ws->flm.set_file_identity_data = NaClFileLockTestSetFileIdentityDataWrapper;
  ws->flm.take_file_lock = NaClFileLockTestTakeFileLockWrapper;
  ws->flm.drop_file_lock = NaClFileLockTestDropFileLockWrapper;
}
/*
 * Releases WorkState resources: worker array, mutex/condvar, any
 * still-queued events, and the lock manager.  Caller must have joined
 * all worker threads first (see ReapThreads).
 */
void WorkStateDtor(struct WorkState *ws) {
  struct Event *p;
  free(ws->workers);
  pthread_mutex_destroy(&ws->mu);
  pthread_cond_destroy(&ws->cv);
  while (NULL != (p = ws->q)) {
    ws->q = p->next;
    free(p);
  }
  NaClFileLockManagerDtor(&ws->flm);
}
/* Counts queued events.  "Mu" suffix: caller should hold ws->mu. */
size_t EventQueueLengthMu(struct WorkState *ws) {
  size_t n = 0;
  struct Event *cur = ws->q;

  while (NULL != cur) {
    ++n;
    cur = cur->next;
  }
  return n;
}
/*
 * Appends |ev| to the shared event queue (taking ownership) and wakes
 * any thread blocked waiting for events.  The tail pointer ws->qend
 * makes the append O(1).
 */
void EnqueueEvent(struct WorkState *ws, struct Event *ev) {
  DPRINTF(("EnqueueEvent: %d %d %d\n", ev->type, ev->thread_id, ev->file_id));
  pthread_mutex_lock(&ws->mu);
  ev->next = NULL;
  *ws->qend = ev;
  ws->qend = &ev->next;
  pthread_cond_broadcast(&ws->cv);
  DPRINTF(("EventQueueLength -> %d\n", (int) EventQueueLengthMu(ws)));
  pthread_mutex_unlock(&ws->mu);
}
/*
 * Fills |tspec| with the absolute time |timeout_nanoseconds| from now,
 * suitable for pthread_cond_timedwait (which requires tv_nsec in
 * [0, 1e9)).  The previous single "if (> 1000000000)" normalization
 * (a) left tv_nsec == 1e9 exactly unnormalized and (b) failed whenever
 * usec*1000 + timeout exceeded 2e9; the loop handles both.
 */
void ComputeAbsTimeout(struct timespec *tspec, int timeout_nanoseconds) {
  struct timeval now;
  gettimeofday(&now, (struct timezone *) NULL);
  tspec->tv_sec = now.tv_sec;
  tspec->tv_nsec = now.tv_usec * 1000 + timeout_nanoseconds;
  while (tspec->tv_nsec >= 1000000000) {
    ++tspec->tv_sec;
    tspec->tv_nsec -= 1000000000;
  }
}
/*
 * WaitForEventMu: wait for an event with ws->mu held.  May spuriously
 * return without an event having arrived (condition-variable
 * semantics); callers must re-check the queue.  Returns 0 on timeout,
 * nonzero otherwise.  A NULL |abs_timeout| waits indefinitely.
 */
int WaitForEventMu(struct WorkState *ws, struct timespec *abs_timeout) {
  int timed_out_failure = 0;
  if (NULL != abs_timeout) {
    if (pthread_cond_timedwait(&ws->cv, &ws->mu, abs_timeout) == ETIMEDOUT) {
      timed_out_failure = 1;
    }
  } else {
    int err;
    if (0 != (err = pthread_cond_wait(&ws->cv, &ws->mu))) {
      fprintf(stderr, "WaitForEventMu: error %d\n", err);
    }
  }
  return !timed_out_failure;
}
/* Allocates and initializes a heap Event; aborts on allocation failure. */
struct Event *EventFactory(enum EventType type, int thread_id, int file_id) {
  struct Event *event;

  event = malloc(sizeof *event);
  CHECK(NULL != event);
  event->type = type;
  event->thread_id = thread_id;
  event->file_id = file_id;
  event->next = NULL;
  DPRINTF(("EventFactory(%d, %d, %d)\n", type, thread_id, file_id));
  return event;
}
/* Records a lock-state transition for this thread on the shared queue. */
void EventOccurred(struct ThreadState *ts, enum EventType type, int file_id) {
  struct Event *ev = EventFactory(type, ts->this_thread, file_id);
  EnqueueEvent(ts->ws, ev);
}
/* Frees all queued events and resets the queue; caller holds ws->mu. */
void EventQueueDiscardMu(struct WorkState *ws) {
  struct Event *p;
  struct Event *rest;
  DPRINTF(("EventQueueDiscardMu\n"));
  for (p = ws->q; p != NULL; p = rest) {
    rest = p->next;
    free(p);
  }
  ws->q = NULL;
  /* tail pointer must track the (now empty) head */
  ws->qend = &ws->q;
}
/*
 * Worker side of the single-slot work queue.  Blocks until this thread
 * is designated as the actor (or the test has ended).  On success,
 * copies out the functor to run, marks the slot WORK_ACCEPTED so the
 * driver may hand out the next operation, and returns nonzero.
 * Returns 0 when the test has ended and the worker should exit.
 */
int GetWork(struct ThreadState *ts,
            void (**functor_out)(void *functor_state),
            void **functor_state_out) {
  int actor_thread;
  int has_work;
  DPRINTF(("GetWork(%d)\n", ts->this_thread));
  pthread_mutex_lock(&ts->ws->mu);
  while (WORK_ENDED != (actor_thread = ts->ws->actor_thread) &&
         actor_thread != ts->this_thread) {
    DPRINTF(("GetWork(%d) waiting\n", ts->this_thread));
    pthread_cond_wait(&ts->ws->cv, &ts->ws->mu);
  }
  has_work = (actor_thread == ts->this_thread);
  if (has_work) {
    *functor_out = ts->ws->action_functor;
    *functor_state_out = ts->ws->functor_state;
    ts->ws->actor_thread = WORK_ACCEPTED;
  }
  pthread_mutex_unlock(&ts->ws->mu);
  DPRINTF(("GetWork(%d) returning %d\n", ts->this_thread, has_work));
  return has_work;
}
/*
 * Driver side: waits until the previous operation was accepted (or the
 * slot is empty), then assigns |action_functor| to worker
 * |actor_thread| and wakes everyone so that worker can pick it up.
 */
void PutWork(struct WorkState *ws, int actor_thread,
             void (*action_functor)(void *functor_state),
             void *functor_state) {
  DPRINTF(("PutWork(thread=%d)\n", actor_thread));
  pthread_mutex_lock(&ws->mu);
  while (ws->actor_thread != WORK_PENDING &&
         ws->actor_thread != WORK_ACCEPTED) {
    pthread_cond_wait(&ws->cv, &ws->mu);
  }
  ws->actor_thread = actor_thread;
  ws->action_functor = action_functor;
  ws->functor_state = functor_state;
  pthread_cond_broadcast(&ws->cv);
  pthread_mutex_unlock(&ws->mu);
  DPRINTF(("PutWork(thread=%d) done\n", actor_thread));
}
/* Blocks until the most recently posted operation has been accepted. */
void WaitForWorkAcceptance(struct WorkState *ws) {
  pthread_mutex_lock(&ws->mu);
  while (ws->actor_thread != WORK_ACCEPTED) {
    pthread_cond_wait(&ws->cv, &ws->mu);
  }
  pthread_mutex_unlock(&ws->mu);
}
/*
 * Signals all workers that no more operations are coming; they will
 * fall out of GetWork and exit.
 */
void AnnounceEndOfWork(struct WorkState *ws) {
  DPRINTF(("AnnounceEndOfWork\n"));
  pthread_mutex_lock(&ws->mu);
  /*
   * ensure all previous work is accepted, allowing for no-work programs.
   */
  while (ws->actor_thread != WORK_ACCEPTED &&
         ws->actor_thread != WORK_PENDING) {
    DPRINTF(("AnnounceEndOfWork: waiting for work to drain\n"));
    pthread_cond_wait(&ws->cv, &ws->mu);
  }
  ws->actor_thread = WORK_ENDED;
  DPRINTF(("AnnounceEndOfWork: broadcast!\n"));
  pthread_cond_broadcast(&ws->cv);
  pthread_mutex_unlock(&ws->mu);
}
/*
 * Worker thread main loop: publish our ThreadState via TSD (read back
 * by the lock-manager hook wrappers), then run operations until the
 * driver announces end of work.
 */
void *ThreadFunc(void *vstate) {
  struct ThreadState *ts = (struct ThreadState *) vstate;
  void (*functor)(void *functor_state);
  void *functor_state;
  /* set ts in TSD */
  pthread_setspecific(gNaClTestTsdKey, ts);
  DPRINTF(("thread %d\n", ts->this_thread));
  while (GetWork(ts, &functor, &functor_state)) {
    (*functor)(functor_state);
  }
  return NULL;
}
/*
 * Creates |num_threads| worker threads.  Caller holds ws->mu; aborts
 * the test process on allocation or thread-creation failure.
 */
void SpawnThreadsMu(struct WorkState *ws, size_t num_threads) {
  size_t ix;
  int err;
  CHECK(0 < num_threads);
  ws->num_workers = num_threads;
  ws->workers = malloc(num_threads * sizeof *ws->workers);
  CHECK(NULL != ws->workers);
  for (ix = 0; ix < num_threads; ++ix) {
    ws->workers[ix].this_thread = (int) ix;
    ws->workers[ix].ws = ws;
    if (0 != (err = pthread_create(&ws->workers[ix].tid,
                                   (pthread_attr_t *) NULL,
                                   ThreadFunc, (void *) &ws->workers[ix]))) {
      fprintf(stderr, "nacl_file_lock_test: pthread_create failed, %d\n", err);
      exit(1);
    }
  }
}
#define CHECK_PROG(ws, expr, cons) \
do { \
if (!(expr)) { \
fprintf(stderr, \
"nacl_file_lock_test: %s does not hold at program:\n", \
#expr); \
crash_cons(ws, "internal error", cons); \
} \
} while (0)
/* Verifies threads were spawned (set-threads ran); caller holds ws->mu. */
void CheckStateMu(struct NaClSexpCons *cons, struct WorkState *ws) {
  CHECK_PROG(ws, ws->num_workers != 0, cons);
}
/* Locking wrapper around CheckStateMu. */
void CheckState(struct NaClSexpCons *cons, struct WorkState *ws) {
  pthread_mutex_lock(&ws->mu);
  CheckStateMu(cons, ws);
  pthread_mutex_unlock(&ws->mu);
}
/* Joins all worker threads; safe to call if none were ever spawned. */
void ReapThreads(struct WorkState *ws) {
  size_t ix;
  if (ws->num_workers > 0) {
    for (ix = 0; ix < ws->num_workers; ++ix) {
      pthread_join(ws->workers[ix].tid, (void **) NULL);
    }
  }
}
struct NaClSexpNode *Eval(struct NaClSexpNode *n, struct WorkState *ws);
/*
 * (set-threads n) -- spawns the n worker threads used by the test
 * program.  Must be called exactly once, before any lock operations.
 * Returns n as an integer node.
 */
struct NaClSexpNode *SetThreadsImpl(struct NaClSexpCons *cons,
                                    struct WorkState *ws) {
  int num_threads;
  if (NaClSexpListLength(cons) != 2) {
    crash_cons(ws, "set-threads should have 1 argument", cons);
  }
  if (!NaClSexpIntp(cons->cdr->car)) {
    crash_cons(ws, "set-threads should have 1 numeric argument", cons);
  }
  num_threads = NaClSexpNodeToInt(cons->cdr->car);
  if (num_threads < 1) {
    crash_cons(ws, "program specifies too few threads", cons);
  }
  if (num_threads >= MAX_THREADS) {
    crash_cons(ws, "program specifies too many threads", cons);
  }
  pthread_mutex_lock(&ws->mu);
  /* spawning twice is a program bug */
  CHECK_PROG(ws, ws->num_workers == 0, cons);
  SpawnThreadsMu(ws, num_threads);
  pthread_mutex_unlock(&ws->mu);
  return NaClSexpNodeWrapInt(num_threads);
}
/*
 * (set-files n) -- tells the test interface how many test files the
 * program will use.  Returns n as an integer node.
 */
struct NaClSexpNode *SetFilesImpl(struct NaClSexpCons *cons,
                                  struct WorkState *ws) {
  int num_files;
  if (NaClSexpListLength(cons) != 2) {
    crash_cons(ws, "set-files should have 1 argument", cons);
  }
  if (!NaClSexpIntp(cons->cdr->car)) {
    crash_cons(ws, "set-files should have 1 numeric argument", cons);
  }
  num_files = NaClSexpNodeToInt(cons->cdr->car);
  if (num_files < 1) {
    crash_cons(ws, "program specifies too few files", cons);
  }
  if (num_files >= MAX_FILES) {
    crash_cons(ws, "program specifies too many files", cons);
  }
  if (!(*ws->test_if->set_num_files)(ws->test_if, num_files)) {
    crash_cons(ws, "set-files error", cons);
  }
  return NaClSexpNodeWrapInt(num_files);
}
/*
 * (quote x) -- returns a copy of the single argument, unevaluated.
 * Note: the stale UNREFERENCED_PARAMETER(ws) annotation was removed;
 * ws is in fact used (by error_cons on the error path).
 */
struct NaClSexpNode *QuoteImpl(struct NaClSexpCons *cons,
                               struct WorkState *ws) {
  if (NaClSexpListLength(cons) != 2) {
    error_cons(ws, "quote takes a single argument", cons);
    return NULL;
  }
  return NaClSexpDupNode(cons->cdr->car);
}
/*
 * (car list) -- returns a copy of the first element of the evaluated
 * argument, or NULL (with an error) if it is not a list.
 */
struct NaClSexpNode *CarImpl(struct NaClSexpCons *cons,
                             struct WorkState *ws) {
  struct NaClSexpNode *p;
  struct NaClSexpCons *arg;
  struct NaClSexpNode *result = NULL;
  if (NaClSexpListLength(cons) != 2) {
    error_cons(ws, "car takes a single argument", cons);
    return NULL;
  }
  p = Eval(cons->cdr->car, ws);
  if (NULL == p || !NaClSexpConsp(p)) {
    error_cons(ws, "car: argument must evaluate to a list", cons);
  } else {
    arg = NaClSexpNodeToCons(p);
    if (NULL != arg) {
      result = NaClSexpDupNode(arg->car);
    }
  }
  /*
   * The evaluated argument was previously leaked; since |result| is a
   * deep copy, freeing |p| is safe (matches ConsImpl/AppendImpl).
   */
  NaClSexpFreeNode(p);
  return result;
}
/*
 * (cdr list) -- returns a copy of the evaluated argument's tail, or
 * NULL (with an error) if it is not a list.
 */
struct NaClSexpNode *CdrImpl(struct NaClSexpCons *cons,
                             struct WorkState *ws) {
  struct NaClSexpNode *p;
  struct NaClSexpCons *arg;
  struct NaClSexpNode *result = NULL;
  if (NaClSexpListLength(cons) != 2) {
    error_cons(ws, "cdr takes a single argument", cons);
    return NULL;
  }
  p = Eval(cons->cdr->car, ws);
  if (NULL == p || !NaClSexpConsp(p)) {
    error_cons(ws, "cdr: argument must evaluate to a list", cons);
  } else {
    arg = NaClSexpNodeToCons(p);
    if (NULL != arg) {
      result = NaClSexpNodeWrapCons(NaClSexpDupCons(arg->cdr));
    }
  }
  /*
   * The evaluated argument was previously leaked; |result| holds a deep
   * copy of the tail, so freeing |p| is safe (matches ConsImpl).
   */
  NaClSexpFreeNode(p);
  return result;
}
/*
 * (cons x list) -- evaluates both arguments and returns a new list
 * whose head is a copy of x and whose tail is a copy of list.  The
 * evaluated intermediates are freed (copy-instead-of-share discipline).
 */
struct NaClSexpNode *ConsImpl(struct NaClSexpCons *cons,
                              struct WorkState *ws) {
  struct NaClSexpNode *first;
  struct NaClSexpNode *second;
  struct NaClSexpNode *result = NULL;
  if (NaClSexpListLength(cons) != 3) {
    error_cons(ws, "cons take two arguments", cons);
    return NULL;
  }
  first = Eval(cons->cdr->car, ws);
  second = Eval(cons->cdr->cdr->car, ws);
  if (!NaClSexpConsp(second)) {
    error_cons(ws, "cons: second argument must evaluate to a list", cons);
  } else {
    result = NaClSexpNodeWrapCons(
        NaClSexpConsCons(NaClSexpDupNode(first),
                         NaClSexpDupCons(NaClSexpNodeToCons(second))));
  }
  NaClSexpFreeNode(first);
  NaClSexpFreeNode(second);
  return result;
}
/*
 * (list a b ...) -- evaluates every argument and returns them as a
 * freshly built list, preserving order via the tail pointer |addpos|.
 */
struct NaClSexpNode *ListImpl(struct NaClSexpCons *cons,
                              struct WorkState *ws) {
  struct NaClSexpCons *result = NULL;
  struct NaClSexpCons **addpos = &result;
  struct NaClSexpCons *p;
  for (p = cons->cdr; NULL != p; p = p->cdr) {
    *addpos = NaClSexpConsWrapNode(Eval(p->car, ws));
    addpos = &((*addpos)->cdr);
  }
  return NaClSexpNodeWrapCons(result);
}
/*
 * (nth i list) -- returns a copy of the i-th (0-based) element of the
 * evaluated list, or NULL if the index is out of range.
 */
struct NaClSexpNode *NthImpl(struct NaClSexpCons *cons,
                             struct WorkState *ws) {
  struct NaClSexpCons *result = NULL;
  struct NaClSexpCons *p;
  struct NaClSexpNode *arg_selector;
  struct NaClSexpNode *arg_list;
  struct NaClSexpNode *ret;
  size_t ix = 0;
  if (NaClSexpListLength(cons) != 3) {
    crash_cons(ws, "nth takes exactly two arguments", cons);
  }
  arg_selector = Eval(cons->cdr->car, ws);
  arg_list = Eval(cons->cdr->cdr->car, ws);
  if (!NaClSexpIntp(arg_selector)) {
    error_cons(ws, "nth: first arg does not evaluate to an integer", cons);
  } else if (!NaClSexpConsp(arg_list)) {
    error_cons(ws, "nth: second arg does not evaluate to a list", cons);
  } else {
    ix = NaClSexpNodeToInt(arg_selector);
    /* walk ix cells forward; p == NULL means the list is too short */
    for (p = NaClSexpNodeToCons(arg_list);
         NULL != p && ix > 0;
         p = p->cdr, --ix) {
      continue;
    }
    if (ix == 0) {
      result = p;
    }
  }
  /*
   * Duplicate the selected element *before* freeing the evaluated
   * arguments (|result| points into |arg_list|).  Both evaluated
   * arguments were previously leaked.
   */
  ret = NaClSexpDupNode(NULL == result ? NULL : result->car);
  NaClSexpFreeNode(arg_selector);
  NaClSexpFreeNode(arg_list);
  return ret;
}
/*
 * (append list1 list2) -- evaluates both arguments and returns their
 * concatenation as a new list; the evaluated intermediates are freed.
 */
struct NaClSexpNode *AppendImpl(struct NaClSexpCons *cons,
                                struct WorkState *ws) {
  struct NaClSexpNode *first = NULL;
  struct NaClSexpNode *second = NULL;
  struct NaClSexpNode *result = NULL;
  if (NaClSexpListLength(cons) != 3) {
    crash_cons(ws, "append take exactly two arguments", cons);
  }
  first = Eval(cons->cdr->car, ws);
  second = Eval(cons->cdr->cdr->car, ws);
  if (!NaClSexpConsp(first)) {
    error_cons(ws, "append: first arg does not evaluate to a list", cons);
  } else if (!NaClSexpConsp(second)) {
    error_cons(ws, "append: second arg does not evaluate to a list", cons);
  } else {
    result = NaClSexpNodeWrapCons(NaClSexpAppend(
        NaClSexpNodeToCons(first),
        NaClSexpNodeToCons(second)));
  }
  NaClSexpFreeNode(first);
  NaClSexpFreeNode(second);
  return result;
}
/*
 * The arithmetic functions are just for fun.  And to test that the
 * basic evaluation framework makes sense.
 */

/*
 * Shared worker for the arithmetic built-ins.  Evaluates each argument
 * and folds the integer values left-to-right with |op| ('*', '/', '+',
 * '-'); the first value seeds the accumulator.  An empty argument list
 * yields 0, matching the originals.  Returns a fresh integer node, or
 * NULL on error.  This replaces four near-identical copies; it also
 * frees the evaluated node on the non-integral error path (previously
 * leaked) and reports division by zero instead of invoking undefined
 * behavior (SIGFPE).
 */
static struct NaClSexpNode *ArithFoldImpl(struct NaClSexpCons *cons,
                                          struct WorkState *ws,
                                          char op) {
  int first = 1;
  int value = 0;
  int operand;
  struct NaClSexpNode *v = NULL;
  struct NaClSexpCons *p;
  for (p = cons->cdr; NULL != p; p = p->cdr) {
    v = Eval(p->car, ws);
    if (!NaClSexpIntp(v)) {
      error_node(ws, "argument not integral", v);
      NaClSexpFreeNode(v);
      return NULL;
    }
    operand = NaClSexpNodeToInt(v);
    NaClSexpFreeNode(v);
    if (first) {
      value = operand;
      first = 0;
      continue;
    }
    switch (op) {
      case '*':
        value *= operand;
        break;
      case '/':
        if (0 == operand) {
          error_cons(ws, "division by zero", cons);
          return NULL;
        }
        value /= operand;
        break;
      case '+':
        value += operand;
        break;
      case '-':
        value -= operand;
        break;
    }
  }
  return NaClSexpNodeWrapInt(value);
}

/* (* a b ...) -- product of the evaluated integer arguments. */
struct NaClSexpNode *MultImpl(struct NaClSexpCons *cons,
                              struct WorkState *ws) {
  return ArithFoldImpl(cons, ws, '*');
}

/* (/ a b ...) -- left-to-right integer quotient. */
struct NaClSexpNode *DivImpl(struct NaClSexpCons *cons,
                             struct WorkState *ws) {
  return ArithFoldImpl(cons, ws, '/');
}

/* (+ a b ...) -- sum of the evaluated integer arguments. */
struct NaClSexpNode *AddImpl(struct NaClSexpCons *cons,
                             struct WorkState *ws) {
  return ArithFoldImpl(cons, ws, '+');
}

/* (- a b ...) -- left-to-right difference. */
struct NaClSexpNode *SubImpl(struct NaClSexpCons *cons,
                             struct WorkState *ws) {
  return ArithFoldImpl(cons, ws, '-');
}
static struct NaClSexpNode *MatcherResult(int64_t matcher_position,
uint64_t matched_bitset) {
DPRINTF(("MatcherResult(%"NACL_PRId64", 0x%"NACL_PRIx64")\n",
matcher_position, matched_bitset));
return NaClSexpNodeWrapCons(
NaClSexpConsCons(NaClSexpNodeWrapInt(matcher_position),
NaClSexpConsCons(NaClSexpNodeWrapInt(matched_bitset),
NULL)));
}
/*
 * Decode a matcher's return value.  A NULL |result| means the match
 * failed; otherwise |result| must be a two-integer list (see
 * MatcherResult) whose fields are stored through |matcher_pos| and
 * |matched_bitset| when those pointers are non-NULL.  Returns 1 on a
 * successful match, 0 on failure.
 */
static int MatchResultExtract(int64_t *matcher_pos,
                              uint64_t *matched_bitset,
                              struct NaClSexpNode *result) {
  struct NaClSexpCons *pair;

  if (NULL == result) {
    return 0;  /* failed matches are represented by NULL */
  }
  CHECK(NaClSexpConsp(result));
  pair = NaClSexpNodeToCons(result);
  CHECK(2 == NaClSexpListLength(pair));
  CHECK(NaClSexpIntp(pair->car));
  CHECK(NaClSexpIntp(pair->cdr->car));
  if (NULL != matcher_pos) {
    *matcher_pos = NaClSexpNodeToInt(pair->car);
  }
  if (NULL != matched_bitset) {
    *matched_bitset = NaClSexpNodeToInt(pair->cdr->car);
  }
  return 1;
}
/*
 * (epsilon) -- matcher that succeeds exactly when the event queue is
 * empty.  On success returns a MatcherResult with index -1 and an
 * empty bitset; on failure returns NULL.
 */
struct NaClSexpNode *EpsilonMatcherImpl(struct NaClSexpCons *cons,
                                        struct WorkState *ws) {
  if (NaClSexpListLength(cons) != 1) {
    crash_cons(ws, "epsilon built-in should have no arguments", cons);
  }
  DPRINTF(("Epsilon\n"));
  /*
   * Succeed only on an empty event queue.
   */
  if (EventQueueLengthMu(ws) != 0) {
    DPRINTF(("epsilon: fail -- non-zero length event queue\n"));
    return NULL;
  }
  DPRINTF(("epsilon: success -- empty event queue\n"));
  return MatcherResult(-1, 0);
}
/*
 * (progn e1 e2 ...) -- evaluate each form in order and return the
 * value of the last one (NULL when there are no forms).  Every form
 * must itself be a list; a non-list aborts the test via crash_node.
 */
struct NaClSexpNode *PrognImpl(struct NaClSexpCons *cons,
                               struct WorkState *ws) {
  struct NaClSexpNode *val = NULL;
  struct NaClSexpCons *p;

  for (p = cons->cdr; NULL != p; p = p->cdr) {
    struct NaClSexpNode *form = p->car;
    if (NaClSexpConsp(form)) {
      NaClSexpFreeNode(val);  /* discard all but the last value */
      val = Eval(form, ws);
    } else {
      crash_node(ws, "Not a list", form);
    }
  }
  return val;
}
/* Kind of file-lock operation a worker thread is asked to perform. */
enum WorkType {
  kTakeLock = 0,
  kDropLock = 1,
};

/*
 * One unit of work handed to a worker thread: take or drop the file
 * lock identified by |desc|, using the lock manager inside |ws|.
 * Heap-allocated by WorkItemFactory and freed by WorkOnWorkItem.
 */
struct WorkItem {
  struct WorkState *ws;  /* shared test state (holds the flm lock manager) */
  enum WorkType type;    /* operation to perform */
  int desc;              /* file id/descriptor to lock or unlock */
};
/*
 * Allocate and populate a WorkItem; aborts (CHECK) on allocation
 * failure.  Ownership passes to WorkOnWorkItem, which frees it.
 */
struct WorkItem *WorkItemFactory(struct WorkState *ws,
                                 enum WorkType type,
                                 int desc) {
  struct WorkItem *item;

  item = malloc(sizeof *item);
  CHECK(NULL != item);
  item->ws = ws;
  item->type = type;
  item->desc = desc;
  return item;
}
/*
 * Worker-thread functor: perform the lock or unlock described by the
 * WorkItem in |functor_state|, then release the item.
 */
static void WorkOnWorkItem(void *functor_state) {
  struct WorkItem *wi = (struct WorkItem *) functor_state;

  DPRINTF(("WorkOnWorkItem: entered\n"));
  if (kTakeLock == wi->type) {
    NaClFileLockManagerLock(&wi->ws->flm, wi->desc);
  } else if (kDropLock == wi->type) {
    NaClFileLockManagerUnlock(&wi->ws->flm, wi->desc);
  }
  DPRINTF(("WorkOnWorkItem: done\n"));
  free(wi);  /* item was allocated by WorkItemFactory */
}
struct NaClSexpNode *LockUnlock(struct NaClSexpCons *cons,
struct WorkState *ws,
enum WorkType op) {
struct NaClSexpNode *p;
int actor_thread;
int file_desc;
CheckState(cons, ws);
if (NaClSexpListLength(cons) != 3) {
error_cons(ws,
"lock/unlock takes 2 arguments: thread-id and file-id", cons);
return NULL;
}
p = Eval(cons->cdr->car, ws);
if (!NaClSexpIntp(p)) {
error_cons(ws,
"lock/unlock thread-id argument evaluate to an integer", cons);
return NULL;
}
actor_thread = NaClSexpNodeToInt(p);
NaClSexpFreeNode(p);
p = Eval(cons->cdr->cdr->car, ws);
if (!NaClSexpIntp(p)) {
error_cons(ws,
"lock/unlock file-id argument evaluate to an integer", cons);
return NULL;
}
file_desc = NaClSexpNodeToInt(p);
NaClSexpFreeNode(p);
PutWork(ws, actor_thread,
WorkOnWorkItem, WorkItemFactory(ws, op, file_desc));
return NULL;
}
struct NaClSexpNode *LockImpl(struct NaClSexpCons *cons, struct WorkState *ws) {
  /*
   * (lock t f) -- tell thread t to take lock f.
   *
   * Thin wrapper over LockUnlock; the work is queued asynchronously
   * for the actor thread, and NULL is always returned.
   */
  return LockUnlock(cons, ws, kTakeLock);
}

struct NaClSexpNode *UnlockImpl(struct NaClSexpCons *cons,
                                struct WorkState *ws) {
  /*
   * (unlock t f) -- tell thread t to drop lock f.
   *
   * Thin wrapper over LockUnlock; always returns NULL.
   */
  return LockUnlock(cons, ws, kDropLock);
}
/*
* Called by matchers, so ws->mu is held already.
*/
struct NaClSexpNode *EventQueueEventMatcher(struct NaClSexpCons *cons,
struct WorkState *ws,
enum EventType expected_event) {
struct NaClSexpNode *n;
int expected_thread_id;
int expected_file_id;
size_t list_pos = 0;
struct NaClSexpNode *p = NULL;
struct Event *event_entry;
if (NaClSexpListLength(cons) != 3) {
error_cons(ws,
"locked/unlocked takes exactly two arguments, thread_id and"
" lock_id", cons);
return NULL;
}
n = Eval(cons->cdr->car, ws);
if (!NaClSexpIntp(n)) {
error_cons(ws,
"locked/unlocked thread_id argument must eval to an integer",
cons);
NaClSexpFreeNode(n);
return NULL;
}
expected_thread_id = NaClSexpNodeToInt(n);
NaClSexpFreeNode(n);
n = Eval(cons->cdr->cdr->car, ws);
if (!NaClSexpIntp(n)) {
error_cons(ws,
"locked/unlocked file_id argument must eval to an integer",
cons);
NaClSexpFreeNode(n);
return NULL;
}
expected_file_id = NaClSexpNodeToInt(n);
NaClSexpFreeNode(n);
DPRINTF(("locked/unlocked matcher: %s, thread %d, file %d\n",
(expected_event == kLocked) ? "locked" : "unlocked",
expected_thread_id, expected_file_id));
for (event_entry = ws->q;
NULL != event_entry;
++list_pos, event_entry = event_entry->next) {
if (event_entry->type == expected_event &&
event_entry->thread_id == expected_thread_id &&
event_entry->file_id == expected_file_id) {
if (list_pos > 8 * sizeof(int64_t)) {
crash_cons(ws, "event queue too deep", cons);
}
p = MatcherResult(-1, 1 << list_pos);
break;
}
}
return p;
}
struct NaClSexpNode *LockedMatcherImpl(struct NaClSexpCons *cons,
                                       struct WorkState *ws) {
  /*
   * (locked t f) -- look for event (kLocked t f) in event queue.
   *
   * Matcher form; runs with ws->mu held (see EventQueueEventMatcher).
   */
  return EventQueueEventMatcher(cons, ws, kLocked);
}

struct NaClSexpNode *UnlockedMatcherImpl(struct NaClSexpCons *cons,
                                         struct WorkState *ws) {
  /*
   * (unlocked t f) -- look for event (kUnlocked t f) in event queue.
   *
   * Matcher form; runs with ws->mu held (see EventQueueEventMatcher).
   */
  return EventQueueEventMatcher(cons, ws, kUnlocked);
}
struct NaClSexpNode *AllMatcherImpl(struct NaClSexpCons *cons,
struct WorkState *ws);
struct NaClSexpNode *AnyMatcherImpl(struct NaClSexpCons *cons,
struct WorkState *ws);
struct NaClSexpNode *Eval(struct NaClSexpNode *n,
struct WorkState *ws);
/*
 * (eval e) -- evaluate the single argument, then evaluate the
 * resulting s-expression once more and return that value.
 */
struct NaClSexpNode *EvalImpl(struct NaClSexpCons *cons,
                              struct WorkState *ws) {
  struct NaClSexpNode *n;
  struct NaClSexpNode *eval_n;
  /*
   * Check that there is a single argument, then invoke Eval on it.
   */
  if (NaClSexpListLength(cons) != 2) {
    error_cons(ws, "eval takes exactly one argument", cons);
    return NULL;
  }
  n = Eval(cons->cdr->car, ws);
  /*
   * NOTE(review): if the inner Eval fails it returns NULL, which is
   * then handed straight back to Eval -- confirm Eval tolerates a
   * NULL node.
   */
  eval_n = Eval(n, ws);
  NaClSexpFreeNode(n);
  return eval_n;
}
struct NaClSexpNode *MatchMatcherImpl(struct NaClSexpCons *cons,
struct WorkState *ws);
/*
 * One entry in the built-in dispatch table: a token |name| mapped to
 * its C implementation |fn|, with |is_matcher| distinguishing plain
 * built-ins from matcher special forms.
 */
struct SymbolTable {
  char const *name;
  int is_matcher;
  /*
   * The |fn| member may be a built-in function, or a matcher as
   * specified by |is_matcher|. If !is_matcher, then it returns an
   * s-expression that is the value of the function. If it is a
   * matcher, then at the lisp level the "apparent" return value is an
   * integer. Internally, successful matchers return the following:
   *
   * ( sub-matcher-index event-pos-bitset )
   *
   * For non-composite matchers, sub-matcher-index is -1 (see
   * MatcherResult's callers). The event-pos-bitset is an integer
   * with a non-zero bit for each event that was matched, with the
   * bit position corresponding to its position in the event queue.
   * NB: it is a test specification error if any sub-matchers used in
   * the (any ...) special form matches a subset of the events
   * matched by another sub-matcher, or sub-matchers used in the
   * (all ...) special form overlap.
   *
   * A matcher that failed to match returns NULL.
   */
  struct NaClSexpNode *(*fn)(struct NaClSexpCons *cons, struct WorkState *ws);
};
/*
 * Dispatch table for all built-in functions (is_matcher == 0) and
 * matcher special forms (is_matcher == 1).  Looked up by linear
 * name search in Eval and CheckMatchers.
 */
struct SymbolTable g_symtab[] = {
  {
    "set-threads", 0, SetThreadsImpl
  }, {
    "set-files", 0, SetFilesImpl
  }, {
    "quote", 0, QuoteImpl
  }, {
    "car", 0, CarImpl
  }, {
    "cdr", 0, CdrImpl
  }, {
    "cons", 0, ConsImpl
  }, {
    "list", 0, ListImpl
  }, {
    "nth", 0, NthImpl
  }, {
    "append", 0, AppendImpl
  }, {
    "eval", 0, EvalImpl
  }, {
    "*", 0, MultImpl
  }, {
    "/", 0, DivImpl
  }, {
    "+", 0, AddImpl
  }, {
    "-", 0, SubImpl
  }, {
    "progn", 0, PrognImpl
  }, {
    "lock", 0, LockImpl
  }, {
    "unlock", 0, UnlockImpl
  }, {
    "match", 1, MatchMatcherImpl
  }, {
    "epsilon", 1, EpsilonMatcherImpl
  }, {
    "locked", 1, LockedMatcherImpl
  }, {
    "unlocked", 1, UnlockedMatcherImpl
  }, {
    "all", 1, AllMatcherImpl
  }, {
    "any", 1, AnyMatcherImpl
  }
};
/*
 * Verify that every element of |matcher_list| is a list whose car
 * names a matcher in g_symtab, recording the corresponding table
 * entries in |matcher_entries| (caller must size it to the list
 * length).  Returns 1 on success, 0 after reporting an error.
 *
 * Used by (all ...), (any ...) and (match ...); the callers add
 * their own context on failure, so the messages here no longer
 * hard-code the misleading "all:" prefix.
 */
static int CheckMatchers(struct WorkState *ws,
                         struct NaClSexpCons *matcher_list,
                         struct SymbolTable **matcher_entries) {
  size_t mx;
  size_t ix;
  struct NaClSexpCons *matchers;
  struct NaClSexpNode *cur;
  struct NaClSexpCons *submatcher;
  char const *submatcher_name;

  for (mx = 0, matchers = matcher_list;
       NULL != matchers;
       ++mx, matchers = matchers->cdr) {
    cur = matchers->car;
    if (!NaClSexpConsp(cur)) {
      error_node(ws, "submatcher not a list", cur);
      return 0;
    }
    submatcher = NaClSexpNodeToCons(cur);
    if (NULL == submatcher || !NaClSexpTokenp(submatcher->car)) {
      error_node(ws, "submatcher not a matcher", cur);
      return 0;
    }
    submatcher_name = NaClSexpNodeToToken(submatcher->car);
    for (ix = 0; ix < sizeof g_symtab/sizeof g_symtab[0]; ++ix) {
      if (!strcmp(g_symtab[ix].name, submatcher_name)) {
        /* found -- but it must be a matcher form, not a plain built-in */
        if (!g_symtab[ix].is_matcher) {
          error_node(ws, "submatcher not a matcher", cur);
          return 0;
        }
        /* save pointer for later use by the caller */
        matcher_entries[mx] = &g_symtab[ix];
        break;
      }
    }
    if (ix == sizeof g_symtab/sizeof g_symtab[0]) {
      error_node(ws, "submatcher not found", cur);
      return 0;
    }
  }
  return 1;
}
/*
 * (all m1 m2 ...) -- conjunction matcher: succeeds only when every
 * submatcher succeeds, and the sets of events they matched must be
 * disjoint.  Propagates the submatcher index from at most one
 * successful (any ...) child; returns NULL on failure or error.
 */
struct NaClSexpNode *AllMatcherImpl(struct NaClSexpCons *cons,
                                    struct WorkState *ws) {
  int64_t match_index = -1;
  uint64_t match_pos = 0;  /* union of matched event-queue positions */
  size_t num_matchers = NaClSexpListLength(cons) - 1;
  size_t mx;
  struct NaClSexpCons *matchers;
  struct NaClSexpNode *cur;
  struct SymbolTable **matcher_entries = NULL;
  struct NaClSexpNode *match_result;
  int64_t submatch_index;
  uint64_t submatch_pos;

  if (gVerbosity > 2) {
    printf("AllMatcherImpl: %"NACL_PRIdS" submatchers\n", num_matchers);
  }
  if (0 == num_matchers) {
    error_cons(ws, "all must have at least one sub-matcher", cons);
    return NULL;
  }
  matcher_entries = (struct SymbolTable **)
      malloc(num_matchers * sizeof *matcher_entries);
  CHECK(NULL != matcher_entries);  /* was unchecked; cf. WorkItemFactory */
  if (!CheckMatchers(ws, cons->cdr, matcher_entries)) {
    error_cons(ws, "all: submatcher error", cons);
    free(matcher_entries);
    return NULL;
  }
  /*
   * Invoke all submatchers. If any fail, we fail. Accumulate event
   * queue indices.
   */
  for (mx = 0, matchers = cons->cdr;
       NULL != matchers;
       ++mx, matchers = matchers->cdr) {
    cur = matchers->car;
    match_result = (*matcher_entries[mx]->fn)(NaClSexpNodeToCons(cur), ws);
    if (gVerbosity > 2) {
      printf("submatcher "); NaClSexpPrintNode(stdout,cur); printf(" --> ");
      NaClSexpPrintNode(stdout, match_result);
      printf("\n");
    }
    if (!MatchResultExtract(&submatch_index, &submatch_pos, match_result)) {
      DPRINTF(("submatcher failed\n"));
      NaClSexpFreeNode(match_result);
      free(matcher_entries);
      return NULL;
    }
    NaClSexpFreeNode(match_result);
    if ((match_pos & submatch_pos) != 0) {
      error_cons(ws, "all: overlapping matchers", cons);
      free(matcher_entries);
      return NULL;
    }
    /*
     * Allow, for example, the (all (unlocked 0 0) (any (locked 1 0)
     * (locked 2 0))) to propagate the submatch index from the (any
     * ...) form up. This does not generalize to more than one (any
     * ...) sub-sexpressions in the (all ...) form.
     */
    if (-1 == match_index && -1 != submatch_index) {
      match_index = submatch_index;
    }
    match_pos |= submatch_pos;
  }
  free(matcher_entries);
  return MatcherResult(match_index, match_pos);
}
/*
 * (any m1 m2 ...) -- disjunction matcher: succeeds as soon as any
 * submatcher succeeds, returning a MatcherResult whose index is the
 * position of that submatcher.  Returns NULL when all submatchers
 * fail or on error.
 */
struct NaClSexpNode *AnyMatcherImpl(struct NaClSexpCons *cons,
                                    struct WorkState *ws) {
  uint64_t submatch_pos = 0;
  size_t num_matchers = NaClSexpListLength(cons) - 1;
  size_t mx;
  struct NaClSexpCons *matchers;
  struct NaClSexpNode *cur;
  struct SymbolTable **matcher_entries = NULL;
  struct NaClSexpNode *match_result = NULL;

  if (gVerbosity > 2) {
    printf("AnyMatcherImpl: %"NACL_PRIdS" submatchers\n", num_matchers);
  }
  if (0 == num_matchers) {
    error_cons(ws, "any must have at least one sub-matcher", cons);
    return NULL;
  }
  matcher_entries = (struct SymbolTable **)
      malloc(num_matchers * sizeof *matcher_entries);
  CHECK(NULL != matcher_entries);  /* was unchecked; cf. WorkItemFactory */
  if (!CheckMatchers(ws, cons->cdr, matcher_entries)) {
    error_cons(ws, "any: submatcher error", cons);
    free(matcher_entries);
    return NULL;
  }
  /*
   * Invoke all submatchers. If any succeed, we succeed.
   */
  for (mx = 0, matchers = cons->cdr;
       NULL != matchers;
       ++mx, matchers = matchers->cdr) {
    cur = matchers->car;
    match_result = (*matcher_entries[mx]->fn)(NaClSexpNodeToCons(cur), ws);
    if (gVerbosity > 2) {
      printf("submatcher "); NaClSexpPrintNode(stdout,cur); printf(" --> ");
      NaClSexpPrintNode(stdout, match_result);
      printf("\n");
    }
    if (MatchResultExtract((int64_t *) NULL, &submatch_pos, match_result)) {
      DPRINTF(("submatcher succeeded\n"));
      NaClSexpFreeNode(match_result);
      match_result = MatcherResult(mx, submatch_pos);
      break;
    }
    /*
     * Extraction only fails for a NULL result, so the free is a
     * no-op; reset the pointer anyway so a failed final iteration
     * can never return a stale value.
     */
    NaClSexpFreeNode(match_result);
    match_result = NULL;
  }
  free(matcher_entries);
  return match_result;
}
/*
 * Single arg must be a matcher. Evaluate it. If successful, check
 * that all events in the event queue were matched; if so, discard all
 * events in event queue, and return the match index. If not
 * successful or not all events were matched, leave the event queue
 * alone and return NULL (the match-failure convention). Typically
 * the returned value is used by the (nth ...) form.
 */
struct NaClSexpNode *MatchMatcherImpl(struct NaClSexpCons *cons,
                                      struct WorkState *ws) {
  struct SymbolTable *matcher[1];
  struct NaClSexpNode *result;
  int64_t which_match;  /* submatcher index reported by the matcher */
  uint64_t match_set;   /* bitset of matched event-queue positions */
  DPRINTF(("MatchMatcherImpl\n"));
  if (NaClSexpListLength(cons) != 2) {
    error_cons(ws, "match takes a single matcher as argument", cons);
    return NULL;
  }
  if (!CheckMatchers(ws, cons->cdr, matcher)) {
    error_cons(ws, "match argument should be a matcher", cons);
    return NULL;
  }
  /*
   * NOTE(review): CheckMatchers already rejects non-list
   * submatchers, so this branch looks unreachable; kept as
   * belt-and-suspenders.
   */
  if (!NaClSexpConsp(cons->cdr->car)) {
    error_cons(ws, "match argument not a list", cons);
    return NULL;
  }
  result = (*matcher[0]->fn)(NaClSexpNodeToCons(cons->cdr->car), ws);
  if (!MatchResultExtract(&which_match, &match_set, result)) {
    DPRINTF(("did not match\n"));
    NaClSexpFreeNode(result);
    result = NULL;
  /*
   * Compare against a full bitmask over the whole queue.
   * NOTE(review): a queue length of exactly 64 would make this shift
   * undefined -- assumes queues stay shorter; confirm.
   */
  } else if (match_set != ((((uint64_t) 1) << EventQueueLengthMu(ws)) - 1)) {
    /* did not match all entries */
    DPRINTF(("match set incomplete\n"));
    NaClSexpFreeNode(result);
    result = NULL;
  } else {
    DPRINTF(("matched all events\n"));
    EventQueueDiscardMu(ws);
    NaClSexpFreeNode(result);
    result = NaClSexpNodeWrapInt(which_match);
  }
  return result;
}
/*
 * Evaluate s-expression |n|.  Non-lists and the empty list evaluate
 * to themselves (duplicated); otherwise the car must be a token
 * naming an entry in g_symtab.  Plain built-ins are applied
 * directly; matcher forms are retried against the event queue,
 * waiting for new events, until they match or the epsilon timeout
 * expires (see the long comment below).  Returns a freshly
 * allocated node owned by the caller, or NULL on error/failed match.
 */
struct NaClSexpNode *Eval(struct NaClSexpNode *n, struct WorkState *ws) {
  struct NaClSexpCons *cons;
  size_t ix;
  struct NaClSexpNode *car;
  char const *token;
  struct NaClSexpNode *val = NULL;
  struct timespec timeout_time;
  int wait_for_more;  /* set once the matcher has failed on the current queue */
  int last_match;     /* timeout hit: one final matcher attempt, then give up */
  if (!NaClSexpConsp(n)) {
    /* for now; symbol table lookup when there are symbols with values */
    return NaClSexpDupNode(n);
  }
  cons = NaClSexpNodeToCons(n);
  if (NULL == cons) {
    /* the empty list evaluates to itself */
    return NaClSexpDupNode(n);
  }
  car = cons->car;
  if (!NaClSexpTokenp(car)) {
    crash_cons(ws,
               "nacl_file_lock_test: car of list should be a built-in function",
               cons);
  }
  token = NaClSexpNodeToToken(car);
  DPRINTF(("function %s\n", token));
  /* linear lookup in the dispatch table */
  for (ix = 0; ix < sizeof g_symtab / sizeof g_symtab[0]; ++ix) {
    if (!strcmp(token, g_symtab[ix].name)) {
      if (!g_symtab[ix].is_matcher) {
        val = (*g_symtab[ix].fn)(cons, ws);
      } else {
        /*
         * A "matcher" special form is a bit weird/tricky. These are
         * not blocking functions, since matchers can be used in
         * conjunction with other matchers in (all ...) or (any ...)
         * forms. What we do is the following: we wait for events,
         * and as events arrive in the event queue, we run the
         * matcher, which must immediately return whether a match
         * against the contents of the event queue occurred. If there
         * is no match, we wait for another event, until the timeout
         * occurs; if timeout occurs and the matcher did not match,
         * then it is an error and we abort the test program. For the
         * (all ...) form, the AllMatcherImpl (essentially AND) will
         * check that all arguments are themselves matchers and run
         * them to see if all succeeds, and AllMatcherImpl only
         * succeeds if all succeeded. The AnyMatcherImpl (OR) will
         * succeed if any of the argument matchers succeed. Matchers
         * return the position in the event queue where the match
         * occurred, so the composite matchers can remove the events
         * appropriately. Matching and removal is done with the event
         * list locked, so worker threads that wake up etc cannot add
         * new events. Position is relative to the head, so even if
         * this were not the case, we would not get confused about
         * which event was matched.
         *
         * Later we may consider matchers that introduce a variable to
         * be bound to an event parameter, to be used later by
         * subsequent matchers. This can be used to cut down on the
         * combinatoric explosion that occurs with possible futures
         * when, for example, several threads might wake up when a
         * lock becomes available. We need to think about scoping in
         * that case.
         */
        DPRINTF(("Matcher found\n"));
        pthread_mutex_lock(&ws->mu);
        last_match = 0;
        ComputeAbsTimeout(&timeout_time, gEpsilonDelayNanos);
        wait_for_more = 0;
        while (!last_match) {
          DPRINTF(("Checking EventQueueLengthMu -> %d\n",
                   (int) EventQueueLengthMu(ws)));
          if (EventQueueLengthMu(ws) == 0 || wait_for_more) {
            DPRINTF(("Waiting for event\n"));
            if (!WaitForEventMu(ws, &timeout_time)) {
              DPRINTF(("Event timed out\n"));
              /* run the matcher one last time below, then bail out */
              last_match = 1;
            }
          }
          /*
           * Run matcher on event queue; matchers are invoked while
           * holding the lock.
           */
          val = (*g_symtab[ix].fn)(cons, ws);
          if (gVerbosity > 1) {
            printf("matcher returned: ");
            NaClSexpPrintNode(stdout,val);
            putchar('\n');
          }
          if (NULL != val) {
            /* successful match! */
            break;
          }
          if (last_match) {
            error_cons(ws, "match failed; timed out", cons);
            val = NULL;
            break;
          }
          DPRINTF(("match failed, waiting for more events\n"));
          wait_for_more = 1;
        }
        pthread_mutex_unlock(&ws->mu);
      }
      return val;
    }
  }
  fprintf(stderr, "No such function: %s\n", token);
  return NULL;
}
/*
 * Top-level driver: read s-expressions from |input|, Eval each one,
 * and print the result, until end of input.  Worker threads created
 * during evaluation are told to finish and reaped before the shared
 * WorkState is destroyed.
 */
void ReadEvalPrintLoop(struct NaClSexpIo *input,
                       int interactive,
                       int verbosity,
                       size_t epsilon_delay_nanos,
                       struct NaClFileLockTestInterface *test_if) {
  struct WorkState ws;
  struct NaClSexpNode *n;
  struct NaClSexpNode *val;
  int err;
  gInteractive = interactive;
  gVerbosity = verbosity;
  /* clamp the matcher timeout to a sane minimum */
  if (epsilon_delay_nanos < MIN_EPSILON_DELAY_NANOS) {
    epsilon_delay_nanos = MIN_EPSILON_DELAY_NANOS;
  }
  gEpsilonDelayNanos = epsilon_delay_nanos;
  err = pthread_key_create(&gNaClTestTsdKey, (void (*)(void *)) NULL);
  CHECK(0 == err);
  WorkStateCtor(&ws, test_if);
  while (NULL != (n = NaClSexpReadSexp(input))) {
    if (gVerbosity > 1 && n->type == kNaClSexpCons) {
      printf("Program list length: %"NACL_PRIdS"\n",
             NaClSexpListLength(n->u.cval));
    }
    if (gVerbosity) {
      /* echo the input form before its value */
      NaClSexpPrintNode(stdout, n);
      printf(" => ");
    }
    val = Eval(n, &ws);
    NaClSexpFreeNode(n);
    NaClSexpPrintNode(stdout, val);
    printf("\n");
    NaClSexpFreeNode(val);
  }
  /* teardown order matters: stop work, join threads, then destroy state */
  AnnounceEndOfWork(&ws);
  ReapThreads(&ws);
  WorkStateDtor(&ws);
}
| bsd-3-clause |
h8liu/golang | src/pkg/runtime/cgo/gcc_netbsd_arm.c | 1559 | // Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include <sys/types.h>
#include <pthread.h>
#include <signal.h>
#include <string.h>
#include "libcgo.h"
static void *threadentry(void*);
static void (*setmg_gcc)(void*, void*);
/*
 * Runtime bootstrap hook for the initial thread: records the Go
 * runtime's setmg callback for use by later threads, and derives a
 * stack guard for g from the current stack pointer (&attr) minus the
 * default pthread stack size, plus 4096 bytes of slack.
 */
void
x_cgo_init(G *g, void (*setmg)(void*, void*))
{
	pthread_attr_t attr;
	size_t size;

	setmg_gcc = setmg;
	pthread_attr_init(&attr);
	pthread_attr_getstacksize(&attr, &size);
	/* &attr approximates the current stack pointer */
	g->stackguard = (uintptr)&attr - size + 4096;
	pthread_attr_destroy(&attr);
}
/*
 * Start a new OS thread running |ts|.  All signals are blocked
 * around pthread_create so the child begins life with a full signal
 * mask; the caller's mask is restored afterwards.  ts->g->stackguard
 * temporarily holds the stack *size*; threadentry converts it into
 * an actual guard pointer once the child knows its own stack base.
 */
void
_cgo_sys_thread_start(ThreadStart *ts)
{
	pthread_attr_t attr;
	sigset_t ign, oset;
	pthread_t p;
	size_t size;
	int err;

	sigfillset(&ign);
	pthread_sigmask(SIG_SETMASK, &ign, &oset);

	pthread_attr_init(&attr);
	pthread_attr_getstacksize(&attr, &size);
	/* stashed as a size; fixed up in threadentry */
	ts->g->stackguard = size;
	err = pthread_create(&p, &attr, threadentry, ts);

	pthread_sigmask(SIG_SETMASK, &oset, nil);

	if (err != 0) {
		fprintf(stderr, "runtime/cgo: pthread_create failed: %s\n", strerror(err));
		abort();
	}
}
extern void crosscall_arm2(void (*fn)(void), void (*setmg_gcc)(void*, void*), void *g, void *m);
/*
 * Entry point for threads created by _cgo_sys_thread_start: copy the
 * ThreadStart onto this thread's own stack, free the heap copy,
 * record the stack bounds on g, and transfer control to the Go
 * runtime via the assembly trampoline.
 */
static void*
threadentry(void *v)
{
	ThreadStart ts;

	ts = *(ThreadStart*)v;
	free(v);

	ts.g->stackbase = (uintptr)&ts;

	/*
	 * _cgo_sys_thread_start set stackguard to stack size;
	 * change to actual guard pointer.
	 */
	ts.g->stackguard = (uintptr)&ts - ts.g->stackguard + 4096 * 2;

	/*
	 * NOTE(review): crosscall_arm2 is declared with trailing
	 * parameters (void *g, void *m), but is passed (ts.m, ts.g)
	 * here -- confirm the argument order against the assembly
	 * implementation of crosscall_arm2.
	 */
	crosscall_arm2(ts.fn, setmg_gcc, (void*)ts.m, (void*)ts.g);
	return nil;
}
| bsd-3-clause |
chromium/chromium | third_party/blink/web_tests/external/wpt/css/css-text/word-spacing/word-spacing-computed-001.html | 2639 | <!DOCTYPE html>
<meta charset="UTF-8">
<title>CSS Text Test: computed value of 'word-spacing: normal' and of various <length></title>
<link rel="author" title="Gérard Talbot" href="http://www.gtalbot.org/BrowserBugsSection/css21testsuite/">
<link rel="help" href="https://www.w3.org/TR/css-text-3/#word-spacing-property">
<meta content="This test checks that the computed value of 'normal' for the property word-spacing is zero. We also check the computed value of various <length>." name="assert">
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<style>
div#target
{
font-size: 20px;
}
</style>
<div id="target">A Z</div>
<div id="log"></div>
<script>
// Runs 12 sub-tests, each asserting the computed value of
// word-spacing on #target for one specified value.
function startTesting()
{
  var targetElement = document.getElementById("target");

  // Sets word-spacing to |specified_value| on #target and asserts
  // that its computed value equals |expected_value|.
  function verifyComputedStyle(property_name, specified_value, expected_value, description)
  {
    test(function()
    {
      targetElement.style.setProperty(property_name, "91px");

      /*
      The purpose of setting the property to an arbitrary value
      is to act as a fallback value in case the specified value
      fails. Since we are running 12 consecutive tests on the
      same element, then it is necessary to 'reset' its property
      to an arbitrary value.
      */

      targetElement.style.setProperty(property_name, specified_value);

      assert_equals(getComputedStyle(targetElement)[property_name], expected_value);
    }, description);
  }

  // Expected pixel values follow from font-size: 20px on #target
  // (for em) and absolute-unit conversions (1in = 96px, etc.).
  verifyComputedStyle("word-spacing", "normal", "0px", "testing word-spacing: normal");

  verifyComputedStyle("word-spacing", "0", "0px", "testing word-spacing: 0");

  verifyComputedStyle("word-spacing", "1.27cm", "48px", "testing word-spacing: 1.27cm");

  verifyComputedStyle("word-spacing", "1em", "20px", "testing word-spacing: 1em");

  verifyComputedStyle("word-spacing", "0.5in", "48px", "testing word-spacing: 0.5in");

  verifyComputedStyle("word-spacing", "25.4mm", "96px", "testing word-spacing: 25.4mm");

  verifyComputedStyle("word-spacing", "5pc", "80px", "testing word-spacing: 5pc");

  verifyComputedStyle("word-spacing", "18pt", "24px", "testing word-spacing: 18pt");

  verifyComputedStyle("word-spacing", "7px", "7px", "testing word-spacing: 7px");

  verifyComputedStyle("word-spacing", "101.6Q", "96px", "testing word-spacing: 101.6Q");

  verifyComputedStyle("word-spacing", "3rem", "48px", "testing word-spacing: 3rem");

  verifyComputedStyle("word-spacing", "0ch", "0px", "testing word-spacing: 0ch");
}

startTesting();
</script>
| bsd-3-clause |
kmate/HaRe | old/testing/simplifyExpr/IfThenElseIn1_TokOut.hs | 143 | module IfThenElseIn1 where
-- refactorer should give an error!
f x@(y:ys) = if x == [] then error "Error!"
else y
| bsd-3-clause |
HasClass0/webmin | fdisk/help/k.pl.UTF-8.html | 451 | <header>Zachowaj ustawienia podczas restartu</header>
Ustawia dla napędu znacznik <i>keep_settings_over_reset</i>. <p>
Gdy ten znacznik jest ustawiony, sterownik zachowa ustawienia opcji
<b>Korzystanie z DMA</b>, <b>Niemaskowanie przerwań</b>
i <b>Odczyty i zapisy wielosektorowe</b> po miękkim restarcie,
(jak też się dzieje w przypadku wystąpienia błędu). <p>
Nie wszystkie napędy wspierają tę właściwość.
<hr>
| bsd-3-clause |
webdev1001/webmin | sendmail/help/alias_enabled.zh_TW.Big5.html | 101 | <header>¬O§_¨Ï¥Î§O¦W</header>
¨M©w¬O§_n¨Ï¥Î³oÓ§O¦W. ³QÃö³¬ªº§O¦W±N¤£·|³Q¥Î¦b¶i¤Jªº¹q¤l¶l¥ó.
<hr>
| bsd-3-clause |
irubi/rabel | db/migrate/20111209080432_add_plane_id_to_nodes.rb | 148 | class AddPlaneIdToNodes < ActiveRecord::Migration
def change
add_column :nodes, :plane_id, :integer
add_index :nodes, :plane_id
end
end
| bsd-3-clause |
lneves/FrameworkBenchmarks | frameworks/Swift/perfect/perfect-mysql.dockerfile | 461 | FROM swift:4.1
ADD . /perfect
WORKDIR /perfect
RUN apt-get update -yqq && apt-get install -yqq libpq-dev && apt-get install -y xsltproc docbook-xsl uuid-dev clang pkg-config libicu-dev libpython2.7 libxml2-dev wget git libssl-dev uuid-dev libsqlite3-dev libpq-dev libmysqlclient-dev libbson-dev libmongoc-dev libcurl4-openssl-dev && apt-get -y install libmysqlclient-dev libmongoc-1.0-0 libbson-1.0
RUN swift build
EXPOSE 8080
CMD .build/debug/Perfect-MySQL
| bsd-3-clause |
WeRockStar/java-design-patterns | front-controller/src/main/java/com/iluwatar/front/controller/ArcherCommand.java | 1344 | /**
* The MIT License
* Copyright (c) 2014 Ilkka Seppälä
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.iluwatar.front.controller;
/**
*
* Command for archers.
*
*/
public class ArcherCommand implements Command {
@Override
public void process() {
new ArcherView().display();
}
}
| mit |
vraravam/activeadmin | lib/active_admin/view_helpers/auto_link_helper.rb | 1517 | module ActiveAdmin
module ViewHelpers
module AutoLinkHelper
# Automatically links objects to their resource controllers. If
# the resource has not been registered, a string representation of
# the object is returned.
#
# The default content in the link is returned from ActiveAdmin::ViewHelpers::DisplayHelper#display_name
#
# You can pass in the content to display
# eg: auto_link(@post, "My Link")
#
def auto_link(resource, content = display_name(resource))
if url = auto_url_for(resource)
link_to content, url
else
content
end
end
# Like `auto_link`, except that it only returns a URL instead of a full <a> tag
def auto_url_for(resource)
config = active_admin_resource_for(resource.class)
return unless config
if config.controller.action_methods.include?("show") &&
authorized?(ActiveAdmin::Auth::READ, resource)
url_for config.route_instance_path resource, url_options
elsif config.controller.action_methods.include?("edit") &&
authorized?(ActiveAdmin::Auth::UPDATE, resource)
url_for config.route_edit_instance_path resource, url_options
end
end
# Returns the ActiveAdmin::Resource instance for a class
def active_admin_resource_for(klass)
if respond_to? :active_admin_namespace
active_admin_namespace.resource_for klass
end
end
end
end
end
| mit |
WeRockStar/java-design-patterns | iterator/src/test/java/com/iluwatar/iterator/TreasureChestTest.java | 4546 | /**
* The MIT License
* Copyright (c) 2014 Ilkka Seppälä
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.iluwatar.iterator;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import java.util.ArrayList;
import java.util.List;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.fail;
/**
* Date: 12/14/15 - 2:58 PM
*
* @author Jeroen Meulemeester
*/
@RunWith(Parameterized.class)
public class TreasureChestTest {
/**
* Create a list of all expected items in the chest.
*
* @return The set of all expected items in the chest
*/
@Parameterized.Parameters
public static List<Object[]> data() {
final List<Object[]> parameters = new ArrayList<>();
parameters.add(new Object[]{new Item(ItemType.POTION, "Potion of courage")});
parameters.add(new Object[]{new Item(ItemType.RING, "Ring of shadows")});
parameters.add(new Object[]{new Item(ItemType.POTION, "Potion of wisdom")});
parameters.add(new Object[]{new Item(ItemType.POTION, "Potion of blood")});
parameters.add(new Object[]{new Item(ItemType.WEAPON, "Sword of silver +1")});
parameters.add(new Object[]{new Item(ItemType.POTION, "Potion of rust")});
parameters.add(new Object[]{new Item(ItemType.POTION, "Potion of healing")});
parameters.add(new Object[]{new Item(ItemType.RING, "Ring of armor")});
parameters.add(new Object[]{new Item(ItemType.WEAPON, "Steel halberd")});
parameters.add(new Object[]{new Item(ItemType.WEAPON, "Dagger of poison")});
return parameters;
}
  /**
   * One of the expected items in the chest; supplied per-run by the
   * Parameterized runner from {@link #data()}.
   */
  private final Item expectedItem;

  /**
   * Create a new test instance, test if the given expected item can be retrieved from the chest
   *
   * @param expectedItem One of the items that should be in the chest
   */
  public TreasureChestTest(final Item expectedItem) {
    this.expectedItem = expectedItem;
  }
  /**
   * Test if the expected item can be retrieved from the chest using the {@link ItemIterator}
   */
  @Test
  public void testIterator() {
    final TreasureChest chest = new TreasureChest();
    final ItemIterator iterator = chest.iterator(expectedItem.getType());
    assertNotNull(iterator);

    // Linear scan: the iterator is filtered by type, so every item it
    // yields must match the expected type; return on the first name hit.
    while (iterator.hasNext()) {
      final Item item = iterator.next();
      assertNotNull(item);
      assertEquals(this.expectedItem.getType(), item.getType());

      final String name = item.toString();
      assertNotNull(name);
      if (this.expectedItem.toString().equals(name)) {
        return;
      }
    }

    fail("Expected to find item [" + this.expectedItem + "] using iterator, but we didn't.");
  }
/**
* Test if the expected item can be retrieved from the chest using the {@link
* TreasureChest#getItems()} method
*/
@Test
public void testGetItems() throws Exception {
final TreasureChest chest = new TreasureChest();
final List<Item> items = chest.getItems();
assertNotNull(items);
for (final Item item : items) {
assertNotNull(item);
assertNotNull(item.getType());
assertNotNull(item.toString());
final boolean sameType = this.expectedItem.getType() == item.getType();
final boolean sameName = this.expectedItem.toString().equals(item.toString());
if (sameType && sameName) {
return;
}
}
fail("Expected to find item [" + this.expectedItem + "] in the item list, but we didn't.");
}
} | mit |
g0ddish/webpack | test/cases/parsing/harmony-duplicate-export/d.js | 17 | exports.x = "d";
| mit |
lwthatcher/Compass | web/node_modules/npm/node_modules/npm-registry-client/test/star.js | 2885 | var test = require("tap").test
var server = require("./lib/server.js")
var common = require("./lib/common.js")
var client = common.freshClient()
var cache = require("./fixtures/underscore/cache.json")
function nop () {}
var URI = "https://npm.registry:8043/rewrite"
var STARRED = true
var USERNAME = "username"
var PASSWORD = "%1234@asdf%"
var EMAIL = "[email protected]"
var AUTH = {
username : USERNAME,
password : PASSWORD,
email : EMAIL
}
var PARAMS = {
starred : STARRED,
auth : AUTH
}
// Argument-validation contract: malformed URIs, params, and callbacks
// must throw synchronously; token-based auth is rejected via the
// callback because star() requires username/password credentials.
test("star call contract", function (t) {
  t.throws(function () {
    client.star(undefined, PARAMS, nop)
  }, "requires a URI")

  t.throws(function () {
    client.star([], PARAMS, nop)
  }, "requires URI to be a string")

  t.throws(function () {
    client.star(URI, undefined, nop)
  }, "requires params object")

  t.throws(function () {
    client.star(URI, "", nop)
  }, "params must be object")

  t.throws(function () {
    client.star(URI, PARAMS, undefined)
  }, "requires callback")

  t.throws(function () {
    client.star(URI, PARAMS, "callback")
  }, "callback must be function")

  t.throws(
    function () {
      var params = {
        starred : STARRED
      }
      client.star(URI, params, nop)
    },
    { name : "AssertionError", message : "must pass auth to star" },
    "params must include auth"
  )

  t.test("token auth disallowed in star", function (t) {
    var params = {
      auth : {
        token : "lol"
      }
    }
    client.star(URI, params, function (err) {
      t.equal(
        err && err.message,
        "This operation is unsupported for token-based auth",
        "star doesn't support token-based auth"
      )
      t.end()
    })
  })

  t.end()
})
// Happy path: star() first GETs the package document, then PUTs it back with
// the current user added to the "users" (stargazers) map.
test("star a package", function (t) {
  // First request: the client fetches /underscore?write=true; serve the
  // canned package document from the fixture cache.
  server.expect("GET", "/underscore?write=true", function (req, res) {
    t.equal(req.method, "GET")
    res.json(cache)
  })
  // Second request: the client PUTs the updated document; buffer the body and
  // verify its contents before acknowledging.
  server.expect("PUT", "/underscore", function (req, res) {
    t.equal(req.method, "PUT")
    var b = ""
    req.setEncoding("utf8")
    req.on("data", function (d) {
      b += d
    })
    req.on("end", function () {
      var updated = JSON.parse(b)
      // Users already present in the fixture must survive the update — a star
      // must be additive, never clobbering existing stargazers.
      var already = [
        "vesln", "mvolkmann", "lancehunt", "mikl", "linus", "vasc", "bat",
        "dmalam", "mbrevoort", "danielr", "rsimoes", "thlorenz"
      ]
      for (var i = 0; i < already.length; i++) {
        var current = already[i]
        t.ok(
          updated.users[current],
          current + " still likes this package"
        )
      }
      t.ok(updated.users[USERNAME], "user is in the starred list")
      res.statusCode = 201
      res.json({starred:true})
    })
  })
  var params = {
    starred : STARRED,
    auth : AUTH
  }
  client.star("http://localhost:1337/underscore", params, function (error, data) {
    t.ifError(error, "no errors")
    t.ok(data.starred, "was starred")
    t.end()
  })
})
| mit |
DevFactory/java-design-patterns | facade/src/main/java/com/iluwatar/facade/App.java | 2123 | /**
* The MIT License
* Copyright (c) 2014 Ilkka Seppälä
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.iluwatar.facade;
/**
*
* The Facade design pattern is often used when a system is very complex or difficult to understand
* because the system has a large number of interdependent classes or its source code is
* unavailable. This pattern hides the complexities of the larger system and provides a simpler
* interface to the client. It typically involves a single wrapper class which contains a set of
* members required by client. These members access the system on behalf of the facade client and
* hide the implementation details.
* <p>
* In this example the Facade is ({@link DwarvenGoldmineFacade}) and it provides a simpler interface
* to the goldmine subsystem.
*
*/
public class App {

  /**
   * Program entry point
   *
   * @param args command line args
   */
  public static void main(String[] args) {
    // The facade is the single entry point into the goldmine subsystem;
    // callers never touch the underlying worker classes directly.
    final DwarvenGoldmineFacade goldMine = new DwarvenGoldmineFacade();
    goldMine.startNewDay();
    goldMine.digOutGold();
    goldMine.endDay();
  }
}
| mit |
ScottHolden/azure-sdk-for-net | src/SDKs/Graph.RBAC/Graph.RBAC/Generated/Models/ServicePrincipalCreateParameters.cs | 3231 | // Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0
// Changes may cause incorrect behavior and will be lost if the code is
// regenerated.
namespace Microsoft.Azure.Graph.RBAC.Models
{
using Microsoft.Azure;
using Microsoft.Azure.Graph;
using Microsoft.Azure.Graph.RBAC;
using Microsoft.Rest;
using Newtonsoft.Json;
using System.Collections;
using System.Collections.Generic;
using System.Linq;
/// <summary>
/// Request parameters for creating a new service principal.
/// </summary>
public partial class ServicePrincipalCreateParameters
{
/// <summary>
/// Initializes a new instance of the ServicePrincipalCreateParameters
/// class.
/// </summary>
public ServicePrincipalCreateParameters()
{
CustomInit();
}
/// <summary>
/// Initializes a new instance of the ServicePrincipalCreateParameters
/// class.
/// </summary>
/// <param name="appId">application Id</param>
/// <param name="accountEnabled">Whether the account is enabled</param>
/// <param name="keyCredentials">A collection of KeyCredential
/// objects.</param>
/// <param name="passwordCredentials">A collection of
/// PasswordCredential objects</param>
public ServicePrincipalCreateParameters(string appId, bool accountEnabled, IList<KeyCredential> keyCredentials = default(IList<KeyCredential>), IList<PasswordCredential> passwordCredentials = default(IList<PasswordCredential>))
{
AppId = appId;
AccountEnabled = accountEnabled;
KeyCredentials = keyCredentials;
PasswordCredentials = passwordCredentials;
CustomInit();
}
/// <summary>
/// An initialization method that performs custom operations like setting defaults
/// </summary>
partial void CustomInit();
/// <summary>
/// Gets or sets application Id
/// </summary>
[JsonProperty(PropertyName = "appId")]
public string AppId { get; set; }
/// <summary>
/// Gets or sets whether the account is enabled
/// </summary>
[JsonProperty(PropertyName = "accountEnabled")]
public bool AccountEnabled { get; set; }
/// <summary>
/// Gets or sets a collection of KeyCredential objects.
/// </summary>
[JsonProperty(PropertyName = "keyCredentials")]
public IList<KeyCredential> KeyCredentials { get; set; }
/// <summary>
/// Gets or sets a collection of PasswordCredential objects
/// </summary>
[JsonProperty(PropertyName = "passwordCredentials")]
public IList<PasswordCredential> PasswordCredentials { get; set; }
/// <summary>
/// Validate the object.
/// </summary>
/// <exception cref="ValidationException">
/// Thrown if validation fails
/// </exception>
public virtual void Validate()
{
if (AppId == null)
{
throw new ValidationException(ValidationRules.CannotBeNull, "AppId");
}
}
}
}
| mit |
sergey-shandar/autorest | src/generator/AutoRest.CSharp.Azure.Fluent.Tests/Expected/AcceptanceTests/Lro/LRORetrysOperations.cs | 67705 | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for
// license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is
// regenerated.
namespace Fixtures.Azure.AcceptanceTestsLro
{
using Fixtures.Azure;
using Microsoft.Rest;
using Microsoft.Rest.Azure;
using Models;
using Newtonsoft.Json;
using System.Collections;
using System.Collections.Generic;
using System.Linq;
using System.Net;
using System.Net.Http;
using System.Threading;
using System.Threading.Tasks;
/// <summary>
/// LRORetrysOperations operations.
/// </summary>
internal partial class LRORetrysOperations : IServiceOperations<AutoRestLongRunningOperationTestService>, ILRORetrysOperations
{
/// <summary>
/// Initializes a new instance of the LRORetrysOperations class.
/// </summary>
/// <param name='client'>
/// Reference to the service client.
/// </param>
/// <exception cref="System.ArgumentNullException">
/// Thrown when a required parameter is null
/// </exception>
internal LRORetrysOperations(AutoRestLongRunningOperationTestService client)
{
if (client == null)
{
throw new System.ArgumentNullException("client");
}
Client = client;
}
        /// <summary>
        /// Gets a reference to the AutoRestLongRunningOperationTestService
        /// </summary>
        /// <remarks>
        /// Assigned once by the constructor; every operation in this class
        /// reads it for base URI, credentials, and serialization settings.
        /// </remarks>
        public AutoRestLongRunningOperationTestService Client { get; private set; }
/// <summary>
/// Long running put request, service returns a 500, then a 201 to the initial
/// request, with an entity that contains ProvisioningState=’Creating’. Polls
/// return this value until the last poll returns a ‘200’ with
/// ProvisioningState=’Succeeded’
/// </summary>
/// <param name='product'>
/// Product to put
/// </param>
/// <param name='customHeaders'>
/// The headers that will be added to request.
/// </param>
/// <param name='cancellationToken'>
/// The cancellation token.
/// </param>
public async Task<AzureOperationResponse<ProductInner>> Put201CreatingSucceeded200WithHttpMessagesAsync(ProductInner product = default(ProductInner), Dictionary<string, List<string>> customHeaders = null, CancellationToken cancellationToken = default(CancellationToken))
{
// Send Request
AzureOperationResponse<ProductInner> _response = await BeginPut201CreatingSucceeded200WithHttpMessagesAsync(product, customHeaders, cancellationToken).ConfigureAwait(false);
return await Client.GetPutOrPatchOperationResultAsync(_response, customHeaders, cancellationToken).ConfigureAwait(false);
}
/// <summary>
/// Long running put request, service returns a 500, then a 200 to the initial
/// request, with an entity that contains ProvisioningState=’Creating’. Poll
/// the endpoint indicated in the Azure-AsyncOperation header for operation
/// status
/// </summary>
/// <param name='product'>
/// Product to put
/// </param>
/// <param name='customHeaders'>
/// The headers that will be added to request.
/// </param>
/// <param name='cancellationToken'>
/// The cancellation token.
/// </param>
public async Task<AzureOperationResponse<ProductInner,LRORetrysPutAsyncRelativeRetrySucceededHeadersInner>> PutAsyncRelativeRetrySucceededWithHttpMessagesAsync(ProductInner product = default(ProductInner), Dictionary<string, List<string>> customHeaders = null, CancellationToken cancellationToken = default(CancellationToken))
{
// Send Request
AzureOperationResponse<ProductInner,LRORetrysPutAsyncRelativeRetrySucceededHeadersInner> _response = await BeginPutAsyncRelativeRetrySucceededWithHttpMessagesAsync(product, customHeaders, cancellationToken).ConfigureAwait(false);
return await Client.GetPutOrPatchOperationResultAsync(_response, customHeaders, cancellationToken).ConfigureAwait(false);
}
/// <summary>
/// Long running delete request, service returns a 500, then a 202 to the
/// initial request, with an entity that contains ProvisioningState=’Accepted’.
/// Polls return this value until the last poll returns a ‘200’ with
/// ProvisioningState=’Succeeded’
/// </summary>
/// <param name='customHeaders'>
/// The headers that will be added to request.
/// </param>
/// <param name='cancellationToken'>
/// The cancellation token.
/// </param>
public async Task<AzureOperationResponse<ProductInner,LRORetrysDeleteProvisioning202Accepted200SucceededHeadersInner>> DeleteProvisioning202Accepted200SucceededWithHttpMessagesAsync(Dictionary<string, List<string>> customHeaders = null, CancellationToken cancellationToken = default(CancellationToken))
{
// Send request
AzureOperationResponse<ProductInner,LRORetrysDeleteProvisioning202Accepted200SucceededHeadersInner> _response = await BeginDeleteProvisioning202Accepted200SucceededWithHttpMessagesAsync(customHeaders, cancellationToken).ConfigureAwait(false);
return await Client.GetPostOrDeleteOperationResultAsync(_response, customHeaders, cancellationToken).ConfigureAwait(false);
}
/// <summary>
/// Long running delete request, service returns a 500, then a 202 to the
/// initial request. Polls return this value until the last poll returns a
/// ‘200’ with ProvisioningState=’Succeeded’
/// </summary>
/// <param name='customHeaders'>
/// The headers that will be added to request.
/// </param>
/// <param name='cancellationToken'>
/// The cancellation token.
/// </param>
public async Task<AzureOperationHeaderResponse<LRORetrysDelete202Retry200HeadersInner>> Delete202Retry200WithHttpMessagesAsync(Dictionary<string, List<string>> customHeaders = null, CancellationToken cancellationToken = default(CancellationToken))
{
// Send request
AzureOperationHeaderResponse<LRORetrysDelete202Retry200HeadersInner> _response = await BeginDelete202Retry200WithHttpMessagesAsync(customHeaders, cancellationToken).ConfigureAwait(false);
return await Client.GetPostOrDeleteOperationResultAsync(_response, customHeaders, cancellationToken).ConfigureAwait(false);
}
/// <summary>
/// Long running delete request, service returns a 500, then a 202 to the
/// initial request. Poll the endpoint indicated in the Azure-AsyncOperation
/// header for operation status
/// </summary>
/// <param name='customHeaders'>
/// The headers that will be added to request.
/// </param>
/// <param name='cancellationToken'>
/// The cancellation token.
/// </param>
public async Task<AzureOperationHeaderResponse<LRORetrysDeleteAsyncRelativeRetrySucceededHeadersInner>> DeleteAsyncRelativeRetrySucceededWithHttpMessagesAsync(Dictionary<string, List<string>> customHeaders = null, CancellationToken cancellationToken = default(CancellationToken))
{
// Send request
AzureOperationHeaderResponse<LRORetrysDeleteAsyncRelativeRetrySucceededHeadersInner> _response = await BeginDeleteAsyncRelativeRetrySucceededWithHttpMessagesAsync(customHeaders, cancellationToken).ConfigureAwait(false);
return await Client.GetPostOrDeleteOperationResultAsync(_response, customHeaders, cancellationToken).ConfigureAwait(false);
}
/// <summary>
/// Long running post request, service returns a 500, then a 202 to the initial
/// request, with 'Location' and 'Retry-After' headers, Polls return a 200 with
/// a response body after success
/// </summary>
/// <param name='product'>
/// Product to put
/// </param>
/// <param name='customHeaders'>
/// The headers that will be added to request.
/// </param>
/// <param name='cancellationToken'>
/// The cancellation token.
/// </param>
public async Task<AzureOperationHeaderResponse<LRORetrysPost202Retry200HeadersInner>> Post202Retry200WithHttpMessagesAsync(ProductInner product = default(ProductInner), Dictionary<string, List<string>> customHeaders = null, CancellationToken cancellationToken = default(CancellationToken))
{
// Send request
AzureOperationHeaderResponse<LRORetrysPost202Retry200HeadersInner> _response = await BeginPost202Retry200WithHttpMessagesAsync(product, customHeaders, cancellationToken).ConfigureAwait(false);
return await Client.GetPostOrDeleteOperationResultAsync(_response, customHeaders, cancellationToken).ConfigureAwait(false);
}
/// <summary>
/// Long running post request, service returns a 500, then a 202 to the initial
/// request, with an entity that contains ProvisioningState=’Creating’. Poll
/// the endpoint indicated in the Azure-AsyncOperation header for operation
/// status
/// </summary>
/// <param name='product'>
/// Product to put
/// </param>
/// <param name='customHeaders'>
/// The headers that will be added to request.
/// </param>
/// <param name='cancellationToken'>
/// The cancellation token.
/// </param>
public async Task<AzureOperationHeaderResponse<LRORetrysPostAsyncRelativeRetrySucceededHeadersInner>> PostAsyncRelativeRetrySucceededWithHttpMessagesAsync(ProductInner product = default(ProductInner), Dictionary<string, List<string>> customHeaders = null, CancellationToken cancellationToken = default(CancellationToken))
{
// Send request
AzureOperationHeaderResponse<LRORetrysPostAsyncRelativeRetrySucceededHeadersInner> _response = await BeginPostAsyncRelativeRetrySucceededWithHttpMessagesAsync(product, customHeaders, cancellationToken).ConfigureAwait(false);
return await Client.GetPostOrDeleteOperationResultAsync(_response, customHeaders, cancellationToken).ConfigureAwait(false);
}
        /// <summary>
        /// Long running put request, service returns a 500, then a 201 to the initial
        /// request, with an entity that contains ProvisioningState=’Creating’. Polls
        /// return this value until the last poll returns a ‘200’ with
        /// ProvisioningState=’Succeeded’
        /// </summary>
        /// <param name='product'>
        /// Product to put
        /// </param>
        /// <param name='customHeaders'>
        /// Headers that will be added to request.
        /// </param>
        /// <param name='cancellationToken'>
        /// The cancellation token.
        /// </param>
        /// <exception cref="CloudException">
        /// Thrown when the operation returned an invalid status code
        /// </exception>
        /// <exception cref="SerializationException">
        /// Thrown when unable to deserialize the response
        /// </exception>
        /// <return>
        /// A response object containing the response body and response headers.
        /// </return>
        public async Task<AzureOperationResponse<ProductInner>> BeginPut201CreatingSucceeded200WithHttpMessagesAsync(ProductInner product = default(ProductInner), Dictionary<string, List<string>> customHeaders = null, CancellationToken cancellationToken = default(CancellationToken))
        {
            // Tracing
            // A unique invocation id correlates the Enter/SendRequest/
            // ReceiveResponse/Exit (or Error) events for this call.
            bool _shouldTrace = ServiceClientTracing.IsEnabled;
            string _invocationId = null;
            if (_shouldTrace)
            {
                _invocationId = ServiceClientTracing.NextInvocationId.ToString();
                Dictionary<string, object> tracingParameters = new Dictionary<string, object>();
                tracingParameters.Add("product", product);
                tracingParameters.Add("cancellationToken", cancellationToken);
                ServiceClientTracing.Enter(_invocationId, this, "BeginPut201CreatingSucceeded200", tracingParameters);
            }
            // Construct URL
            // The operation path is resolved relative to the client's base URI.
            var _baseUrl = Client.BaseUri.AbsoluteUri;
            var _url = new System.Uri(new System.Uri(_baseUrl + (_baseUrl.EndsWith("/") ? "" : "/")), "lro/retryerror/put/201/creating/succeeded/200").ToString();
            List<string> _queryParameters = new List<string>();
            if (_queryParameters.Count > 0)
            {
                _url += (_url.Contains("?") ? "&" : "?") + string.Join("&", _queryParameters);
            }
            // Create HTTP transport objects
            var _httpRequest = new HttpRequestMessage();
            HttpResponseMessage _httpResponse = null;
            _httpRequest.Method = new HttpMethod("PUT");
            _httpRequest.RequestUri = new System.Uri(_url);
            // Set Headers
            // Attach a fresh x-ms-client-request-id GUID (when enabled) so the
            // request can be correlated server-side.
            if (Client.GenerateClientRequestId != null && Client.GenerateClientRequestId.Value)
            {
                _httpRequest.Headers.TryAddWithoutValidation("x-ms-client-request-id", System.Guid.NewGuid().ToString());
            }
            if (Client.AcceptLanguage != null)
            {
                if (_httpRequest.Headers.Contains("accept-language"))
                {
                    _httpRequest.Headers.Remove("accept-language");
                }
                _httpRequest.Headers.TryAddWithoutValidation("accept-language", Client.AcceptLanguage);
            }
            // Caller-supplied headers override any defaults set above.
            if (customHeaders != null)
            {
                foreach(var _header in customHeaders)
                {
                    if (_httpRequest.Headers.Contains(_header.Key))
                    {
                        _httpRequest.Headers.Remove(_header.Key);
                    }
                    _httpRequest.Headers.TryAddWithoutValidation(_header.Key, _header.Value);
                }
            }
            // Serialize Request
            string _requestContent = null;
            if(product != null)
            {
                _requestContent = Microsoft.Rest.Serialization.SafeJsonConvert.SerializeObject(product, Client.SerializationSettings);
                _httpRequest.Content = new StringContent(_requestContent, System.Text.Encoding.UTF8);
                _httpRequest.Content.Headers.ContentType =System.Net.Http.Headers.MediaTypeHeaderValue.Parse("application/json; charset=utf-8");
            }
            // Set Credentials
            if (Client.Credentials != null)
            {
                cancellationToken.ThrowIfCancellationRequested();
                await Client.Credentials.ProcessHttpRequestAsync(_httpRequest, cancellationToken).ConfigureAwait(false);
            }
            // Send Request
            if (_shouldTrace)
            {
                ServiceClientTracing.SendRequest(_invocationId, _httpRequest);
            }
            cancellationToken.ThrowIfCancellationRequested();
            _httpResponse = await Client.HttpClient.SendAsync(_httpRequest, cancellationToken).ConfigureAwait(false);
            if (_shouldTrace)
            {
                ServiceClientTracing.ReceiveResponse(_invocationId, _httpResponse);
            }
            HttpStatusCode _statusCode = _httpResponse.StatusCode;
            cancellationToken.ThrowIfCancellationRequested();
            string _responseContent = null;
            // Anything other than 200/201 is a failure: wrap the CloudError
            // payload (when one can be parsed) in a CloudException, dispose the
            // transport objects, and throw.
            if ((int)_statusCode != 200 && (int)_statusCode != 201)
            {
                var ex = new CloudException(string.Format("Operation returned an invalid status code '{0}'", _statusCode));
                try
                {
                    _responseContent = await _httpResponse.Content.ReadAsStringAsync().ConfigureAwait(false);
                    CloudError _errorBody = Microsoft.Rest.Serialization.SafeJsonConvert.DeserializeObject<CloudError>(_responseContent, Client.DeserializationSettings);
                    if (_errorBody != null)
                    {
                        ex = new CloudException(_errorBody.Message);
                        ex.Body = _errorBody;
                    }
                }
                catch (JsonException)
                {
                    // Ignore the exception
                }
                ex.Request = new HttpRequestMessageWrapper(_httpRequest, _requestContent);
                ex.Response = new HttpResponseMessageWrapper(_httpResponse, _responseContent);
                if (_httpResponse.Headers.Contains("x-ms-request-id"))
                {
                    ex.RequestId = _httpResponse.Headers.GetValues("x-ms-request-id").FirstOrDefault();
                }
                if (_shouldTrace)
                {
                    ServiceClientTracing.Error(_invocationId, ex);
                }
                _httpRequest.Dispose();
                if (_httpResponse != null)
                {
                    _httpResponse.Dispose();
                }
                throw ex;
            }
            // Create Result
            var _result = new AzureOperationResponse<ProductInner>();
            _result.Request = _httpRequest;
            _result.Response = _httpResponse;
            if (_httpResponse.Headers.Contains("x-ms-request-id"))
            {
                _result.RequestId = _httpResponse.Headers.GetValues("x-ms-request-id").FirstOrDefault();
            }
            // Deserialize Response
            // Both success codes (200 and 201) carry a Product body; deserialize
            // whichever was returned.
            if ((int)_statusCode == 200)
            {
                _responseContent = await _httpResponse.Content.ReadAsStringAsync().ConfigureAwait(false);
                try
                {
                    _result.Body = Microsoft.Rest.Serialization.SafeJsonConvert.DeserializeObject<ProductInner>(_responseContent, Client.DeserializationSettings);
                }
                catch (JsonException ex)
                {
                    _httpRequest.Dispose();
                    if (_httpResponse != null)
                    {
                        _httpResponse.Dispose();
                    }
                    throw new SerializationException("Unable to deserialize the response.", _responseContent, ex);
                }
            }
            // Deserialize Response
            if ((int)_statusCode == 201)
            {
                _responseContent = await _httpResponse.Content.ReadAsStringAsync().ConfigureAwait(false);
                try
                {
                    _result.Body = Microsoft.Rest.Serialization.SafeJsonConvert.DeserializeObject<ProductInner>(_responseContent, Client.DeserializationSettings);
                }
                catch (JsonException ex)
                {
                    _httpRequest.Dispose();
                    if (_httpResponse != null)
                    {
                        _httpResponse.Dispose();
                    }
                    throw new SerializationException("Unable to deserialize the response.", _responseContent, ex);
                }
            }
            if (_shouldTrace)
            {
                ServiceClientTracing.Exit(_invocationId, _result);
            }
            return _result;
        }
        /// <summary>
        /// Long running put request, service returns a 500, then a 200 to the initial
        /// request, with an entity that contains ProvisioningState=’Creating’. Poll
        /// the endpoint indicated in the Azure-AsyncOperation header for operation
        /// status
        /// </summary>
        /// <param name='product'>
        /// Product to put
        /// </param>
        /// <param name='customHeaders'>
        /// Headers that will be added to request.
        /// </param>
        /// <param name='cancellationToken'>
        /// The cancellation token.
        /// </param>
        /// <exception cref="CloudException">
        /// Thrown when the operation returned an invalid status code
        /// </exception>
        /// <exception cref="SerializationException">
        /// Thrown when unable to deserialize the response
        /// </exception>
        /// <return>
        /// A response object containing the response body and response headers.
        /// </return>
        public async Task<AzureOperationResponse<ProductInner,LRORetrysPutAsyncRelativeRetrySucceededHeadersInner>> BeginPutAsyncRelativeRetrySucceededWithHttpMessagesAsync(ProductInner product = default(ProductInner), Dictionary<string, List<string>> customHeaders = null, CancellationToken cancellationToken = default(CancellationToken))
        {
            // Tracing
            // A unique invocation id correlates the Enter/SendRequest/
            // ReceiveResponse/Exit (or Error) events for this call.
            bool _shouldTrace = ServiceClientTracing.IsEnabled;
            string _invocationId = null;
            if (_shouldTrace)
            {
                _invocationId = ServiceClientTracing.NextInvocationId.ToString();
                Dictionary<string, object> tracingParameters = new Dictionary<string, object>();
                tracingParameters.Add("product", product);
                tracingParameters.Add("cancellationToken", cancellationToken);
                ServiceClientTracing.Enter(_invocationId, this, "BeginPutAsyncRelativeRetrySucceeded", tracingParameters);
            }
            // Construct URL
            // The operation path is resolved relative to the client's base URI.
            var _baseUrl = Client.BaseUri.AbsoluteUri;
            var _url = new System.Uri(new System.Uri(_baseUrl + (_baseUrl.EndsWith("/") ? "" : "/")), "lro/retryerror/putasync/retry/succeeded").ToString();
            List<string> _queryParameters = new List<string>();
            if (_queryParameters.Count > 0)
            {
                _url += (_url.Contains("?") ? "&" : "?") + string.Join("&", _queryParameters);
            }
            // Create HTTP transport objects
            var _httpRequest = new HttpRequestMessage();
            HttpResponseMessage _httpResponse = null;
            _httpRequest.Method = new HttpMethod("PUT");
            _httpRequest.RequestUri = new System.Uri(_url);
            // Set Headers
            // Attach a fresh x-ms-client-request-id GUID (when enabled) so the
            // request can be correlated server-side.
            if (Client.GenerateClientRequestId != null && Client.GenerateClientRequestId.Value)
            {
                _httpRequest.Headers.TryAddWithoutValidation("x-ms-client-request-id", System.Guid.NewGuid().ToString());
            }
            if (Client.AcceptLanguage != null)
            {
                if (_httpRequest.Headers.Contains("accept-language"))
                {
                    _httpRequest.Headers.Remove("accept-language");
                }
                _httpRequest.Headers.TryAddWithoutValidation("accept-language", Client.AcceptLanguage);
            }
            // Caller-supplied headers override any defaults set above.
            if (customHeaders != null)
            {
                foreach(var _header in customHeaders)
                {
                    if (_httpRequest.Headers.Contains(_header.Key))
                    {
                        _httpRequest.Headers.Remove(_header.Key);
                    }
                    _httpRequest.Headers.TryAddWithoutValidation(_header.Key, _header.Value);
                }
            }
            // Serialize Request
            string _requestContent = null;
            if(product != null)
            {
                _requestContent = Microsoft.Rest.Serialization.SafeJsonConvert.SerializeObject(product, Client.SerializationSettings);
                _httpRequest.Content = new StringContent(_requestContent, System.Text.Encoding.UTF8);
                _httpRequest.Content.Headers.ContentType =System.Net.Http.Headers.MediaTypeHeaderValue.Parse("application/json; charset=utf-8");
            }
            // Set Credentials
            if (Client.Credentials != null)
            {
                cancellationToken.ThrowIfCancellationRequested();
                await Client.Credentials.ProcessHttpRequestAsync(_httpRequest, cancellationToken).ConfigureAwait(false);
            }
            // Send Request
            if (_shouldTrace)
            {
                ServiceClientTracing.SendRequest(_invocationId, _httpRequest);
            }
            cancellationToken.ThrowIfCancellationRequested();
            _httpResponse = await Client.HttpClient.SendAsync(_httpRequest, cancellationToken).ConfigureAwait(false);
            if (_shouldTrace)
            {
                ServiceClientTracing.ReceiveResponse(_invocationId, _httpResponse);
            }
            HttpStatusCode _statusCode = _httpResponse.StatusCode;
            cancellationToken.ThrowIfCancellationRequested();
            string _responseContent = null;
            // 200 is the only success code for this operation; everything else
            // is wrapped in a CloudException and thrown after cleanup.
            if ((int)_statusCode != 200)
            {
                var ex = new CloudException(string.Format("Operation returned an invalid status code '{0}'", _statusCode));
                try
                {
                    _responseContent = await _httpResponse.Content.ReadAsStringAsync().ConfigureAwait(false);
                    CloudError _errorBody = Microsoft.Rest.Serialization.SafeJsonConvert.DeserializeObject<CloudError>(_responseContent, Client.DeserializationSettings);
                    if (_errorBody != null)
                    {
                        ex = new CloudException(_errorBody.Message);
                        ex.Body = _errorBody;
                    }
                }
                catch (JsonException)
                {
                    // Ignore the exception
                }
                ex.Request = new HttpRequestMessageWrapper(_httpRequest, _requestContent);
                ex.Response = new HttpResponseMessageWrapper(_httpResponse, _responseContent);
                if (_httpResponse.Headers.Contains("x-ms-request-id"))
                {
                    ex.RequestId = _httpResponse.Headers.GetValues("x-ms-request-id").FirstOrDefault();
                }
                if (_shouldTrace)
                {
                    ServiceClientTracing.Error(_invocationId, ex);
                }
                _httpRequest.Dispose();
                if (_httpResponse != null)
                {
                    _httpResponse.Dispose();
                }
                throw ex;
            }
            // Create Result
            var _result = new AzureOperationResponse<ProductInner,LRORetrysPutAsyncRelativeRetrySucceededHeadersInner>();
            _result.Request = _httpRequest;
            _result.Response = _httpResponse;
            if (_httpResponse.Headers.Contains("x-ms-request-id"))
            {
                _result.RequestId = _httpResponse.Headers.GetValues("x-ms-request-id").FirstOrDefault();
            }
            // Deserialize Response
            if ((int)_statusCode == 200)
            {
                _responseContent = await _httpResponse.Content.ReadAsStringAsync().ConfigureAwait(false);
                try
                {
                    _result.Body = Microsoft.Rest.Serialization.SafeJsonConvert.DeserializeObject<ProductInner>(_responseContent, Client.DeserializationSettings);
                }
                catch (JsonException ex)
                {
                    _httpRequest.Dispose();
                    if (_httpResponse != null)
                    {
                        _httpResponse.Dispose();
                    }
                    throw new SerializationException("Unable to deserialize the response.", _responseContent, ex);
                }
            }
            // Project the raw response headers into the operation's typed
            // headers object via JSON round-tripping.
            try
            {
                _result.Headers = _httpResponse.GetHeadersAsJson().ToObject<LRORetrysPutAsyncRelativeRetrySucceededHeadersInner>(JsonSerializer.Create(Client.DeserializationSettings));
            }
            catch (JsonException ex)
            {
                _httpRequest.Dispose();
                if (_httpResponse != null)
                {
                    _httpResponse.Dispose();
                }
                throw new SerializationException("Unable to deserialize the headers.", _httpResponse.GetHeadersAsJson().ToString(), ex);
            }
            if (_shouldTrace)
            {
                ServiceClientTracing.Exit(_invocationId, _result);
            }
            return _result;
        }
/// <summary>
/// Long running delete request, service returns a 500, then a 202 to the
/// initial request, with an entity that contains ProvisioningState=’Accepted’.
/// Polls return this value until the last poll returns a ‘200’ with
/// ProvisioningState=’Succeeded’
/// </summary>
/// <param name='customHeaders'>
/// Headers that will be added to request.
/// </param>
/// <param name='cancellationToken'>
/// The cancellation token.
/// </param>
/// <exception cref="CloudException">
/// Thrown when the operation returned an invalid status code
/// </exception>
/// <exception cref="SerializationException">
/// Thrown when unable to deserialize the response
/// </exception>
/// <return>
/// A response object containing the response body and response headers.
/// </return>
public async Task<AzureOperationResponse<ProductInner,LRORetrysDeleteProvisioning202Accepted200SucceededHeadersInner>> BeginDeleteProvisioning202Accepted200SucceededWithHttpMessagesAsync(Dictionary<string, List<string>> customHeaders = null, CancellationToken cancellationToken = default(CancellationToken))
{
// Tracing
bool _shouldTrace = ServiceClientTracing.IsEnabled;
string _invocationId = null;
if (_shouldTrace)
{
_invocationId = ServiceClientTracing.NextInvocationId.ToString();
Dictionary<string, object> tracingParameters = new Dictionary<string, object>();
tracingParameters.Add("cancellationToken", cancellationToken);
ServiceClientTracing.Enter(_invocationId, this, "BeginDeleteProvisioning202Accepted200Succeeded", tracingParameters);
}
// Construct URL
var _baseUrl = Client.BaseUri.AbsoluteUri;
var _url = new System.Uri(new System.Uri(_baseUrl + (_baseUrl.EndsWith("/") ? "" : "/")), "lro/retryerror/delete/provisioning/202/accepted/200/succeeded").ToString();
List<string> _queryParameters = new List<string>();
if (_queryParameters.Count > 0)
{
_url += (_url.Contains("?") ? "&" : "?") + string.Join("&", _queryParameters);
}
// Create HTTP transport objects
var _httpRequest = new HttpRequestMessage();
HttpResponseMessage _httpResponse = null;
_httpRequest.Method = new HttpMethod("DELETE");
_httpRequest.RequestUri = new System.Uri(_url);
// Set Headers
if (Client.GenerateClientRequestId != null && Client.GenerateClientRequestId.Value)
{
_httpRequest.Headers.TryAddWithoutValidation("x-ms-client-request-id", System.Guid.NewGuid().ToString());
}
if (Client.AcceptLanguage != null)
{
if (_httpRequest.Headers.Contains("accept-language"))
{
_httpRequest.Headers.Remove("accept-language");
}
_httpRequest.Headers.TryAddWithoutValidation("accept-language", Client.AcceptLanguage);
}
if (customHeaders != null)
{
foreach(var _header in customHeaders)
{
if (_httpRequest.Headers.Contains(_header.Key))
{
_httpRequest.Headers.Remove(_header.Key);
}
_httpRequest.Headers.TryAddWithoutValidation(_header.Key, _header.Value);
}
}
// Serialize Request
string _requestContent = null;
// Set Credentials
if (Client.Credentials != null)
{
cancellationToken.ThrowIfCancellationRequested();
await Client.Credentials.ProcessHttpRequestAsync(_httpRequest, cancellationToken).ConfigureAwait(false);
}
// Send Request
if (_shouldTrace)
{
ServiceClientTracing.SendRequest(_invocationId, _httpRequest);
}
cancellationToken.ThrowIfCancellationRequested();
_httpResponse = await Client.HttpClient.SendAsync(_httpRequest, cancellationToken).ConfigureAwait(false);
if (_shouldTrace)
{
ServiceClientTracing.ReceiveResponse(_invocationId, _httpResponse);
}
HttpStatusCode _statusCode = _httpResponse.StatusCode;
cancellationToken.ThrowIfCancellationRequested();
string _responseContent = null;
if ((int)_statusCode != 200 && (int)_statusCode != 202)
{
var ex = new CloudException(string.Format("Operation returned an invalid status code '{0}'", _statusCode));
try
{
_responseContent = await _httpResponse.Content.ReadAsStringAsync().ConfigureAwait(false);
CloudError _errorBody = Microsoft.Rest.Serialization.SafeJsonConvert.DeserializeObject<CloudError>(_responseContent, Client.DeserializationSettings);
if (_errorBody != null)
{
ex = new CloudException(_errorBody.Message);
ex.Body = _errorBody;
}
}
catch (JsonException)
{
// Ignore the exception
}
ex.Request = new HttpRequestMessageWrapper(_httpRequest, _requestContent);
ex.Response = new HttpResponseMessageWrapper(_httpResponse, _responseContent);
if (_httpResponse.Headers.Contains("x-ms-request-id"))
{
ex.RequestId = _httpResponse.Headers.GetValues("x-ms-request-id").FirstOrDefault();
}
if (_shouldTrace)
{
ServiceClientTracing.Error(_invocationId, ex);
}
_httpRequest.Dispose();
if (_httpResponse != null)
{
_httpResponse.Dispose();
}
throw ex;
}
// Create Result
var _result = new AzureOperationResponse<ProductInner,LRORetrysDeleteProvisioning202Accepted200SucceededHeadersInner>();
_result.Request = _httpRequest;
_result.Response = _httpResponse;
if (_httpResponse.Headers.Contains("x-ms-request-id"))
{
_result.RequestId = _httpResponse.Headers.GetValues("x-ms-request-id").FirstOrDefault();
}
// Deserialize Response
if ((int)_statusCode == 200)
{
_responseContent = await _httpResponse.Content.ReadAsStringAsync().ConfigureAwait(false);
try
{
_result.Body = Microsoft.Rest.Serialization.SafeJsonConvert.DeserializeObject<ProductInner>(_responseContent, Client.DeserializationSettings);
}
catch (JsonException ex)
{
_httpRequest.Dispose();
if (_httpResponse != null)
{
_httpResponse.Dispose();
}
throw new SerializationException("Unable to deserialize the response.", _responseContent, ex);
}
}
// Deserialize Response
if ((int)_statusCode == 202)
{
_responseContent = await _httpResponse.Content.ReadAsStringAsync().ConfigureAwait(false);
try
{
_result.Body = Microsoft.Rest.Serialization.SafeJsonConvert.DeserializeObject<ProductInner>(_responseContent, Client.DeserializationSettings);
}
catch (JsonException ex)
{
_httpRequest.Dispose();
if (_httpResponse != null)
{
_httpResponse.Dispose();
}
throw new SerializationException("Unable to deserialize the response.", _responseContent, ex);
}
}
try
{
_result.Headers = _httpResponse.GetHeadersAsJson().ToObject<LRORetrysDeleteProvisioning202Accepted200SucceededHeadersInner>(JsonSerializer.Create(Client.DeserializationSettings));
}
catch (JsonException ex)
{
_httpRequest.Dispose();
if (_httpResponse != null)
{
_httpResponse.Dispose();
}
throw new SerializationException("Unable to deserialize the headers.", _httpResponse.GetHeadersAsJson().ToString(), ex);
}
if (_shouldTrace)
{
ServiceClientTracing.Exit(_invocationId, _result);
}
return _result;
}
/// <summary>
/// Long running delete request, service returns a 500, then a 202 to the
/// initial request. Polls return this value until the last poll returns a
/// '200' with ProvisioningState='Succeeded'
/// </summary>
/// <param name='customHeaders'>
/// Headers that will be added to request.
/// </param>
/// <param name='cancellationToken'>
/// The cancellation token.
/// </param>
/// <exception cref="CloudException">
/// Thrown when the operation returned an invalid status code
/// </exception>
/// <return>
/// A response object containing the response body and response headers.
/// </return>
/// <remarks>
/// Auto-generated (AutoRest) "Begin" method: it issues only the initial
/// DELETE request; long-running-operation polling is performed by the
/// corresponding non-Begin wrapper elsewhere. Avoid hand-editing the
/// request/response plumbing below — it must stay in sync with the generator.
/// </remarks>
public async Task<AzureOperationHeaderResponse<LRORetrysDelete202Retry200HeadersInner>> BeginDelete202Retry200WithHttpMessagesAsync(Dictionary<string, List<string>> customHeaders = null, CancellationToken cancellationToken = default(CancellationToken))
{
// Tracing
bool _shouldTrace = ServiceClientTracing.IsEnabled;
string _invocationId = null;
if (_shouldTrace)
{
_invocationId = ServiceClientTracing.NextInvocationId.ToString();
Dictionary<string, object> tracingParameters = new Dictionary<string, object>();
tracingParameters.Add("cancellationToken", cancellationToken);
ServiceClientTracing.Enter(_invocationId, this, "BeginDelete202Retry200", tracingParameters);
}
// Construct URL
var _baseUrl = Client.BaseUri.AbsoluteUri;
var _url = new System.Uri(new System.Uri(_baseUrl + (_baseUrl.EndsWith("/") ? "" : "/")), "lro/retryerror/delete/202/retry/200").ToString();
// This operation takes no query parameters; the list is generator boilerplate.
List<string> _queryParameters = new List<string>();
if (_queryParameters.Count > 0)
{
_url += (_url.Contains("?") ? "&" : "?") + string.Join("&", _queryParameters);
}
// Create HTTP transport objects
var _httpRequest = new HttpRequestMessage();
HttpResponseMessage _httpResponse = null;
_httpRequest.Method = new HttpMethod("DELETE");
_httpRequest.RequestUri = new System.Uri(_url);
// Set Headers
if (Client.GenerateClientRequestId != null && Client.GenerateClientRequestId.Value)
{
_httpRequest.Headers.TryAddWithoutValidation("x-ms-client-request-id", System.Guid.NewGuid().ToString());
}
if (Client.AcceptLanguage != null)
{
if (_httpRequest.Headers.Contains("accept-language"))
{
_httpRequest.Headers.Remove("accept-language");
}
_httpRequest.Headers.TryAddWithoutValidation("accept-language", Client.AcceptLanguage);
}
// Caller-supplied headers override any defaults set above.
if (customHeaders != null)
{
foreach(var _header in customHeaders)
{
if (_httpRequest.Headers.Contains(_header.Key))
{
_httpRequest.Headers.Remove(_header.Key);
}
_httpRequest.Headers.TryAddWithoutValidation(_header.Key, _header.Value);
}
}
// Serialize Request
// DELETE carries no body; _requestContent stays null and is used only when
// building the error wrapper below.
string _requestContent = null;
// Set Credentials
if (Client.Credentials != null)
{
cancellationToken.ThrowIfCancellationRequested();
await Client.Credentials.ProcessHttpRequestAsync(_httpRequest, cancellationToken).ConfigureAwait(false);
}
// Send Request
if (_shouldTrace)
{
ServiceClientTracing.SendRequest(_invocationId, _httpRequest);
}
cancellationToken.ThrowIfCancellationRequested();
_httpResponse = await Client.HttpClient.SendAsync(_httpRequest, cancellationToken).ConfigureAwait(false);
if (_shouldTrace)
{
ServiceClientTracing.ReceiveResponse(_invocationId, _httpResponse);
}
HttpStatusCode _statusCode = _httpResponse.StatusCode;
cancellationToken.ThrowIfCancellationRequested();
string _responseContent = null;
// 202 Accepted is the only success status for the initial request of this LRO.
if ((int)_statusCode != 202)
{
var ex = new CloudException(string.Format("Operation returned an invalid status code '{0}'", _statusCode));
try
{
// Best effort: replace the generic message with the service's CloudError body.
_responseContent = await _httpResponse.Content.ReadAsStringAsync().ConfigureAwait(false);
CloudError _errorBody = Microsoft.Rest.Serialization.SafeJsonConvert.DeserializeObject<CloudError>(_responseContent, Client.DeserializationSettings);
if (_errorBody != null)
{
ex = new CloudException(_errorBody.Message);
ex.Body = _errorBody;
}
}
catch (JsonException)
{
// Ignore the exception
}
ex.Request = new HttpRequestMessageWrapper(_httpRequest, _requestContent);
ex.Response = new HttpResponseMessageWrapper(_httpResponse, _responseContent);
if (_httpResponse.Headers.Contains("x-ms-request-id"))
{
ex.RequestId = _httpResponse.Headers.GetValues("x-ms-request-id").FirstOrDefault();
}
if (_shouldTrace)
{
ServiceClientTracing.Error(_invocationId, ex);
}
// Dispose transport objects before throwing so the exception path does not leak them.
_httpRequest.Dispose();
if (_httpResponse != null)
{
_httpResponse.Dispose();
}
throw ex;
}
// Create Result
var _result = new AzureOperationHeaderResponse<LRORetrysDelete202Retry200HeadersInner>();
_result.Request = _httpRequest;
_result.Response = _httpResponse;
if (_httpResponse.Headers.Contains("x-ms-request-id"))
{
_result.RequestId = _httpResponse.Headers.GetValues("x-ms-request-id").FirstOrDefault();
}
try
{
// Project the raw HTTP headers into the typed headers model for this operation.
_result.Headers = _httpResponse.GetHeadersAsJson().ToObject<LRORetrysDelete202Retry200HeadersInner>(JsonSerializer.Create(Client.DeserializationSettings));
}
catch (JsonException ex)
{
_httpRequest.Dispose();
if (_httpResponse != null)
{
_httpResponse.Dispose();
}
throw new SerializationException("Unable to deserialize the headers.", _httpResponse.GetHeadersAsJson().ToString(), ex);
}
if (_shouldTrace)
{
ServiceClientTracing.Exit(_invocationId, _result);
}
return _result;
}
/// <summary>
/// Long running delete request, service returns a 500, then a 202 to the
/// initial request. Poll the endpoint indicated in the Azure-AsyncOperation
/// header for operation status
/// </summary>
/// <param name='customHeaders'>
/// Headers that will be added to request.
/// </param>
/// <param name='cancellationToken'>
/// The cancellation token.
/// </param>
/// <exception cref="CloudException">
/// Thrown when the operation returned an invalid status code
/// </exception>
/// <return>
/// A response object containing the response body and response headers.
/// </return>
/// <remarks>
/// Auto-generated (AutoRest) "Begin" method: issues only the initial DELETE;
/// polling of the Azure-AsyncOperation endpoint is handled by the non-Begin
/// wrapper. Do not hand-edit the request/response plumbing.
/// </remarks>
public async Task<AzureOperationHeaderResponse<LRORetrysDeleteAsyncRelativeRetrySucceededHeadersInner>> BeginDeleteAsyncRelativeRetrySucceededWithHttpMessagesAsync(Dictionary<string, List<string>> customHeaders = null, CancellationToken cancellationToken = default(CancellationToken))
{
// Tracing
bool _shouldTrace = ServiceClientTracing.IsEnabled;
string _invocationId = null;
if (_shouldTrace)
{
_invocationId = ServiceClientTracing.NextInvocationId.ToString();
Dictionary<string, object> tracingParameters = new Dictionary<string, object>();
tracingParameters.Add("cancellationToken", cancellationToken);
ServiceClientTracing.Enter(_invocationId, this, "BeginDeleteAsyncRelativeRetrySucceeded", tracingParameters);
}
// Construct URL
var _baseUrl = Client.BaseUri.AbsoluteUri;
var _url = new System.Uri(new System.Uri(_baseUrl + (_baseUrl.EndsWith("/") ? "" : "/")), "lro/retryerror/deleteasync/retry/succeeded").ToString();
// This operation takes no query parameters; the list is generator boilerplate.
List<string> _queryParameters = new List<string>();
if (_queryParameters.Count > 0)
{
_url += (_url.Contains("?") ? "&" : "?") + string.Join("&", _queryParameters);
}
// Create HTTP transport objects
var _httpRequest = new HttpRequestMessage();
HttpResponseMessage _httpResponse = null;
_httpRequest.Method = new HttpMethod("DELETE");
_httpRequest.RequestUri = new System.Uri(_url);
// Set Headers
if (Client.GenerateClientRequestId != null && Client.GenerateClientRequestId.Value)
{
_httpRequest.Headers.TryAddWithoutValidation("x-ms-client-request-id", System.Guid.NewGuid().ToString());
}
if (Client.AcceptLanguage != null)
{
if (_httpRequest.Headers.Contains("accept-language"))
{
_httpRequest.Headers.Remove("accept-language");
}
_httpRequest.Headers.TryAddWithoutValidation("accept-language", Client.AcceptLanguage);
}
// Caller-supplied headers override any defaults set above.
if (customHeaders != null)
{
foreach(var _header in customHeaders)
{
if (_httpRequest.Headers.Contains(_header.Key))
{
_httpRequest.Headers.Remove(_header.Key);
}
_httpRequest.Headers.TryAddWithoutValidation(_header.Key, _header.Value);
}
}
// Serialize Request
// DELETE carries no body; _requestContent stays null and is used only for error reporting.
string _requestContent = null;
// Set Credentials
if (Client.Credentials != null)
{
cancellationToken.ThrowIfCancellationRequested();
await Client.Credentials.ProcessHttpRequestAsync(_httpRequest, cancellationToken).ConfigureAwait(false);
}
// Send Request
if (_shouldTrace)
{
ServiceClientTracing.SendRequest(_invocationId, _httpRequest);
}
cancellationToken.ThrowIfCancellationRequested();
_httpResponse = await Client.HttpClient.SendAsync(_httpRequest, cancellationToken).ConfigureAwait(false);
if (_shouldTrace)
{
ServiceClientTracing.ReceiveResponse(_invocationId, _httpResponse);
}
HttpStatusCode _statusCode = _httpResponse.StatusCode;
cancellationToken.ThrowIfCancellationRequested();
string _responseContent = null;
// 202 Accepted is the only success status for the initial request of this LRO.
if ((int)_statusCode != 202)
{
var ex = new CloudException(string.Format("Operation returned an invalid status code '{0}'", _statusCode));
try
{
// Best effort: replace the generic message with the service's CloudError body.
_responseContent = await _httpResponse.Content.ReadAsStringAsync().ConfigureAwait(false);
CloudError _errorBody = Microsoft.Rest.Serialization.SafeJsonConvert.DeserializeObject<CloudError>(_responseContent, Client.DeserializationSettings);
if (_errorBody != null)
{
ex = new CloudException(_errorBody.Message);
ex.Body = _errorBody;
}
}
catch (JsonException)
{
// Ignore the exception
}
ex.Request = new HttpRequestMessageWrapper(_httpRequest, _requestContent);
ex.Response = new HttpResponseMessageWrapper(_httpResponse, _responseContent);
if (_httpResponse.Headers.Contains("x-ms-request-id"))
{
ex.RequestId = _httpResponse.Headers.GetValues("x-ms-request-id").FirstOrDefault();
}
if (_shouldTrace)
{
ServiceClientTracing.Error(_invocationId, ex);
}
// Dispose transport objects before throwing so the error path does not leak them.
_httpRequest.Dispose();
if (_httpResponse != null)
{
_httpResponse.Dispose();
}
throw ex;
}
// Create Result
var _result = new AzureOperationHeaderResponse<LRORetrysDeleteAsyncRelativeRetrySucceededHeadersInner>();
_result.Request = _httpRequest;
_result.Response = _httpResponse;
if (_httpResponse.Headers.Contains("x-ms-request-id"))
{
_result.RequestId = _httpResponse.Headers.GetValues("x-ms-request-id").FirstOrDefault();
}
try
{
// Project the raw HTTP headers into the typed headers model for this operation.
_result.Headers = _httpResponse.GetHeadersAsJson().ToObject<LRORetrysDeleteAsyncRelativeRetrySucceededHeadersInner>(JsonSerializer.Create(Client.DeserializationSettings));
}
catch (JsonException ex)
{
_httpRequest.Dispose();
if (_httpResponse != null)
{
_httpResponse.Dispose();
}
throw new SerializationException("Unable to deserialize the headers.", _httpResponse.GetHeadersAsJson().ToString(), ex);
}
if (_shouldTrace)
{
ServiceClientTracing.Exit(_invocationId, _result);
}
return _result;
}
/// <summary>
/// Long running post request, service returns a 500, then a 202 to the initial
/// request, with 'Location' and 'Retry-After' headers, Polls return a 200 with
/// a response body after success
/// </summary>
/// <param name='product'>
/// Product to post
/// </param>
/// <param name='customHeaders'>
/// Headers that will be added to request.
/// </param>
/// <param name='cancellationToken'>
/// The cancellation token.
/// </param>
/// <exception cref="CloudException">
/// Thrown when the operation returned an invalid status code
/// </exception>
/// <return>
/// A response object containing the response body and response headers.
/// </return>
/// <remarks>
/// Auto-generated (AutoRest) "Begin" method: issues only the initial POST;
/// Location-header polling is handled by the non-Begin wrapper. Do not
/// hand-edit the request/response plumbing.
/// </remarks>
public async Task<AzureOperationHeaderResponse<LRORetrysPost202Retry200HeadersInner>> BeginPost202Retry200WithHttpMessagesAsync(ProductInner product = default(ProductInner), Dictionary<string, List<string>> customHeaders = null, CancellationToken cancellationToken = default(CancellationToken))
{
// Tracing
bool _shouldTrace = ServiceClientTracing.IsEnabled;
string _invocationId = null;
if (_shouldTrace)
{
_invocationId = ServiceClientTracing.NextInvocationId.ToString();
Dictionary<string, object> tracingParameters = new Dictionary<string, object>();
tracingParameters.Add("product", product);
tracingParameters.Add("cancellationToken", cancellationToken);
ServiceClientTracing.Enter(_invocationId, this, "BeginPost202Retry200", tracingParameters);
}
// Construct URL
var _baseUrl = Client.BaseUri.AbsoluteUri;
var _url = new System.Uri(new System.Uri(_baseUrl + (_baseUrl.EndsWith("/") ? "" : "/")), "lro/retryerror/post/202/retry/200").ToString();
// This operation takes no query parameters; the list is generator boilerplate.
List<string> _queryParameters = new List<string>();
if (_queryParameters.Count > 0)
{
_url += (_url.Contains("?") ? "&" : "?") + string.Join("&", _queryParameters);
}
// Create HTTP transport objects
var _httpRequest = new HttpRequestMessage();
HttpResponseMessage _httpResponse = null;
_httpRequest.Method = new HttpMethod("POST");
_httpRequest.RequestUri = new System.Uri(_url);
// Set Headers
if (Client.GenerateClientRequestId != null && Client.GenerateClientRequestId.Value)
{
_httpRequest.Headers.TryAddWithoutValidation("x-ms-client-request-id", System.Guid.NewGuid().ToString());
}
if (Client.AcceptLanguage != null)
{
if (_httpRequest.Headers.Contains("accept-language"))
{
_httpRequest.Headers.Remove("accept-language");
}
_httpRequest.Headers.TryAddWithoutValidation("accept-language", Client.AcceptLanguage);
}
// Caller-supplied headers override any defaults set above.
if (customHeaders != null)
{
foreach(var _header in customHeaders)
{
if (_httpRequest.Headers.Contains(_header.Key))
{
_httpRequest.Headers.Remove(_header.Key);
}
_httpRequest.Headers.TryAddWithoutValidation(_header.Key, _header.Value);
}
}
// Serialize Request
// The body is optional: when product is null the POST is sent with no content.
string _requestContent = null;
if(product != null)
{
_requestContent = Microsoft.Rest.Serialization.SafeJsonConvert.SerializeObject(product, Client.SerializationSettings);
_httpRequest.Content = new StringContent(_requestContent, System.Text.Encoding.UTF8);
_httpRequest.Content.Headers.ContentType =System.Net.Http.Headers.MediaTypeHeaderValue.Parse("application/json; charset=utf-8");
}
// Set Credentials
if (Client.Credentials != null)
{
cancellationToken.ThrowIfCancellationRequested();
await Client.Credentials.ProcessHttpRequestAsync(_httpRequest, cancellationToken).ConfigureAwait(false);
}
// Send Request
if (_shouldTrace)
{
ServiceClientTracing.SendRequest(_invocationId, _httpRequest);
}
cancellationToken.ThrowIfCancellationRequested();
_httpResponse = await Client.HttpClient.SendAsync(_httpRequest, cancellationToken).ConfigureAwait(false);
if (_shouldTrace)
{
ServiceClientTracing.ReceiveResponse(_invocationId, _httpResponse);
}
HttpStatusCode _statusCode = _httpResponse.StatusCode;
cancellationToken.ThrowIfCancellationRequested();
string _responseContent = null;
// 202 Accepted is the only success status for the initial request of this LRO.
if ((int)_statusCode != 202)
{
var ex = new CloudException(string.Format("Operation returned an invalid status code '{0}'", _statusCode));
try
{
// Best effort: replace the generic message with the service's CloudError body.
_responseContent = await _httpResponse.Content.ReadAsStringAsync().ConfigureAwait(false);
CloudError _errorBody = Microsoft.Rest.Serialization.SafeJsonConvert.DeserializeObject<CloudError>(_responseContent, Client.DeserializationSettings);
if (_errorBody != null)
{
ex = new CloudException(_errorBody.Message);
ex.Body = _errorBody;
}
}
catch (JsonException)
{
// Ignore the exception
}
ex.Request = new HttpRequestMessageWrapper(_httpRequest, _requestContent);
ex.Response = new HttpResponseMessageWrapper(_httpResponse, _responseContent);
if (_httpResponse.Headers.Contains("x-ms-request-id"))
{
ex.RequestId = _httpResponse.Headers.GetValues("x-ms-request-id").FirstOrDefault();
}
if (_shouldTrace)
{
ServiceClientTracing.Error(_invocationId, ex);
}
// Dispose transport objects before throwing so the error path does not leak them.
_httpRequest.Dispose();
if (_httpResponse != null)
{
_httpResponse.Dispose();
}
throw ex;
}
// Create Result
var _result = new AzureOperationHeaderResponse<LRORetrysPost202Retry200HeadersInner>();
_result.Request = _httpRequest;
_result.Response = _httpResponse;
if (_httpResponse.Headers.Contains("x-ms-request-id"))
{
_result.RequestId = _httpResponse.Headers.GetValues("x-ms-request-id").FirstOrDefault();
}
try
{
// Project the raw HTTP headers into the typed headers model for this operation.
_result.Headers = _httpResponse.GetHeadersAsJson().ToObject<LRORetrysPost202Retry200HeadersInner>(JsonSerializer.Create(Client.DeserializationSettings));
}
catch (JsonException ex)
{
_httpRequest.Dispose();
if (_httpResponse != null)
{
_httpResponse.Dispose();
}
throw new SerializationException("Unable to deserialize the headers.", _httpResponse.GetHeadersAsJson().ToString(), ex);
}
if (_shouldTrace)
{
ServiceClientTracing.Exit(_invocationId, _result);
}
return _result;
}
/// <summary>
/// Long running post request, service returns a 500, then a 202 to the initial
/// request, with an entity that contains ProvisioningState='Creating'. Poll
/// the endpoint indicated in the Azure-AsyncOperation header for operation
/// status
/// </summary>
/// <param name='product'>
/// Product to post
/// </param>
/// <param name='customHeaders'>
/// Headers that will be added to request.
/// </param>
/// <param name='cancellationToken'>
/// The cancellation token.
/// </param>
/// <exception cref="CloudException">
/// Thrown when the operation returned an invalid status code
/// </exception>
/// <return>
/// A response object containing the response body and response headers.
/// </return>
/// <remarks>
/// Auto-generated (AutoRest) "Begin" method: issues only the initial POST;
/// polling of the Azure-AsyncOperation endpoint is handled by the non-Begin
/// wrapper. Do not hand-edit the request/response plumbing.
/// </remarks>
public async Task<AzureOperationHeaderResponse<LRORetrysPostAsyncRelativeRetrySucceededHeadersInner>> BeginPostAsyncRelativeRetrySucceededWithHttpMessagesAsync(ProductInner product = default(ProductInner), Dictionary<string, List<string>> customHeaders = null, CancellationToken cancellationToken = default(CancellationToken))
{
// Tracing
bool _shouldTrace = ServiceClientTracing.IsEnabled;
string _invocationId = null;
if (_shouldTrace)
{
_invocationId = ServiceClientTracing.NextInvocationId.ToString();
Dictionary<string, object> tracingParameters = new Dictionary<string, object>();
tracingParameters.Add("product", product);
tracingParameters.Add("cancellationToken", cancellationToken);
ServiceClientTracing.Enter(_invocationId, this, "BeginPostAsyncRelativeRetrySucceeded", tracingParameters);
}
// Construct URL
var _baseUrl = Client.BaseUri.AbsoluteUri;
var _url = new System.Uri(new System.Uri(_baseUrl + (_baseUrl.EndsWith("/") ? "" : "/")), "lro/retryerror/postasync/retry/succeeded").ToString();
// This operation takes no query parameters; the list is generator boilerplate.
List<string> _queryParameters = new List<string>();
if (_queryParameters.Count > 0)
{
_url += (_url.Contains("?") ? "&" : "?") + string.Join("&", _queryParameters);
}
// Create HTTP transport objects
var _httpRequest = new HttpRequestMessage();
HttpResponseMessage _httpResponse = null;
_httpRequest.Method = new HttpMethod("POST");
_httpRequest.RequestUri = new System.Uri(_url);
// Set Headers
if (Client.GenerateClientRequestId != null && Client.GenerateClientRequestId.Value)
{
_httpRequest.Headers.TryAddWithoutValidation("x-ms-client-request-id", System.Guid.NewGuid().ToString());
}
if (Client.AcceptLanguage != null)
{
if (_httpRequest.Headers.Contains("accept-language"))
{
_httpRequest.Headers.Remove("accept-language");
}
_httpRequest.Headers.TryAddWithoutValidation("accept-language", Client.AcceptLanguage);
}
// Caller-supplied headers override any defaults set above.
if (customHeaders != null)
{
foreach(var _header in customHeaders)
{
if (_httpRequest.Headers.Contains(_header.Key))
{
_httpRequest.Headers.Remove(_header.Key);
}
_httpRequest.Headers.TryAddWithoutValidation(_header.Key, _header.Value);
}
}
// Serialize Request
// The body is optional: when product is null the POST is sent with no content.
string _requestContent = null;
if(product != null)
{
_requestContent = Microsoft.Rest.Serialization.SafeJsonConvert.SerializeObject(product, Client.SerializationSettings);
_httpRequest.Content = new StringContent(_requestContent, System.Text.Encoding.UTF8);
_httpRequest.Content.Headers.ContentType =System.Net.Http.Headers.MediaTypeHeaderValue.Parse("application/json; charset=utf-8");
}
// Set Credentials
if (Client.Credentials != null)
{
cancellationToken.ThrowIfCancellationRequested();
await Client.Credentials.ProcessHttpRequestAsync(_httpRequest, cancellationToken).ConfigureAwait(false);
}
// Send Request
if (_shouldTrace)
{
ServiceClientTracing.SendRequest(_invocationId, _httpRequest);
}
cancellationToken.ThrowIfCancellationRequested();
_httpResponse = await Client.HttpClient.SendAsync(_httpRequest, cancellationToken).ConfigureAwait(false);
if (_shouldTrace)
{
ServiceClientTracing.ReceiveResponse(_invocationId, _httpResponse);
}
HttpStatusCode _statusCode = _httpResponse.StatusCode;
cancellationToken.ThrowIfCancellationRequested();
string _responseContent = null;
// 202 Accepted is the only success status for the initial request of this LRO.
if ((int)_statusCode != 202)
{
var ex = new CloudException(string.Format("Operation returned an invalid status code '{0}'", _statusCode));
try
{
// Best effort: replace the generic message with the service's CloudError body.
_responseContent = await _httpResponse.Content.ReadAsStringAsync().ConfigureAwait(false);
CloudError _errorBody = Microsoft.Rest.Serialization.SafeJsonConvert.DeserializeObject<CloudError>(_responseContent, Client.DeserializationSettings);
if (_errorBody != null)
{
ex = new CloudException(_errorBody.Message);
ex.Body = _errorBody;
}
}
catch (JsonException)
{
// Ignore the exception
}
ex.Request = new HttpRequestMessageWrapper(_httpRequest, _requestContent);
ex.Response = new HttpResponseMessageWrapper(_httpResponse, _responseContent);
if (_httpResponse.Headers.Contains("x-ms-request-id"))
{
ex.RequestId = _httpResponse.Headers.GetValues("x-ms-request-id").FirstOrDefault();
}
if (_shouldTrace)
{
ServiceClientTracing.Error(_invocationId, ex);
}
// Dispose transport objects before throwing so the error path does not leak them.
_httpRequest.Dispose();
if (_httpResponse != null)
{
_httpResponse.Dispose();
}
throw ex;
}
// Create Result
var _result = new AzureOperationHeaderResponse<LRORetrysPostAsyncRelativeRetrySucceededHeadersInner>();
_result.Request = _httpRequest;
_result.Response = _httpResponse;
if (_httpResponse.Headers.Contains("x-ms-request-id"))
{
_result.RequestId = _httpResponse.Headers.GetValues("x-ms-request-id").FirstOrDefault();
}
try
{
// Project the raw HTTP headers into the typed headers model for this operation.
_result.Headers = _httpResponse.GetHeadersAsJson().ToObject<LRORetrysPostAsyncRelativeRetrySucceededHeadersInner>(JsonSerializer.Create(Client.DeserializationSettings));
}
catch (JsonException ex)
{
_httpRequest.Dispose();
if (_httpResponse != null)
{
_httpResponse.Dispose();
}
throw new SerializationException("Unable to deserialize the headers.", _httpResponse.GetHeadersAsJson().ToString(), ex);
}
if (_shouldTrace)
{
ServiceClientTracing.Exit(_invocationId, _result);
}
return _result;
}
}
}
// License: MIT (dataset metadata row, preserved as a comment)
// Source: rolandzwaga/DefinitelyTyped — types/enzyme/enzyme-tests.tsx (25738 bytes)
import * as React from "react";
import {
shallow,
mount,
render,
ShallowWrapper,
ReactWrapper,
configure,
EnzymeAdapter,
ShallowRendererProps,
ComponentClass as EnzymeComponentClass
} from "enzyme";
import { Component, ReactElement, HTMLAttributes, ComponentClass, StatelessComponent } from "react";
// Help classes/interfaces
// Props for the stateful MyComponent; both members are required.
interface MyComponentProps {
stringProp: string;
numberProp: number;
}
// Props for AnotherComponent; all members are optional.
interface AnotherComponentProps {
anotherStringProp?: string;
anotherNumberProp?: number;
}
// Props for the stateless MyStatelessComponent.
interface StatelessProps {
stateless: string;
}
// Props for the stateless AnotherStatelessComponent.
interface AnotherStatelessProps {
anotherStateless: string;
}
// State shape used by MyComponent.
interface MyComponentState {
stateProperty: string;
}
// Stateful test component; setState is stubbed to log its arguments
// instead of performing a real state update.
class MyComponent extends Component<MyComponentProps, MyComponentState> {
    setState(...updates: any[]) {
        console.log(updates);
    }
}
// Second test component (props-only, no declared state); setState is
// stubbed to log its arguments rather than update state.
class AnotherComponent extends Component<AnotherComponentProps> {
    setState(...updates: any[]) {
        console.log(updates);
    }
}
// Minimal stateless component used to exercise enzyme's SFC typings.
const MyStatelessComponent = (props: StatelessProps) => {
    return <span />;
};
// Second minimal stateless component for selector/typing tests.
const AnotherStatelessComponent = (props: AnotherStatelessProps) => {
    return <span />;
};
// Enzyme.configure
function configureTest() {
    // Minimal configuration: adapter only.
    const adapterOnly: { adapter: EnzymeAdapter } = { adapter: {} };
    configure(adapterOnly);
    // Adapter plus the disableLifecycleMethods shallow-renderer option.
    const adapterWithLifecycleOptOut: typeof adapterOnly & Pick<ShallowRendererProps, "disableLifecycleMethods"> = {
        adapter: {},
        disableLifecycleMethods: true,
    };
    configure(adapterWithLifecycleOptOut);
}
// ShallowWrapper
function ShallowWrapperTest() {
let shallowWrapper: ShallowWrapper<MyComponentProps, MyComponentState> =
shallow<MyComponentProps, MyComponentState>(<MyComponent stringProp="value" numberProp={1} />);
let reactElement: ReactElement<any>;
let reactElements: Array<ReactElement<any>>;
let domElement: Element;
let boolVal: boolean;
let stringVal: string;
let numOrStringVal: number | string | undefined;
let elementWrapper: ShallowWrapper<HTMLAttributes<{}>>;
let anotherStatelessWrapper: ShallowWrapper<AnotherStatelessProps, never>;
let anotherComponentWrapper: ShallowWrapper<AnotherComponentProps, any>;
function test_props_state_inferring() {
let wrapper: ShallowWrapper<MyComponentProps, MyComponentState>;
wrapper = shallow(<MyComponent stringProp="value" numberProp={1} />);
wrapper.state().stateProperty;
wrapper.props().stringProp.toUpperCase();
}
function test_shallow_options() {
shallow(<MyComponent stringProp="1" numberProp={1} />, {
context: {
test: "a",
},
lifecycleExperimental: true,
disableLifecycleMethods: true
});
}
function test_find() {
anotherComponentWrapper = shallowWrapper.find(AnotherComponent);
anotherStatelessWrapper = shallowWrapper.find(AnotherStatelessComponent);
shallowWrapper = shallowWrapper.find({ prop: 'value' });
elementWrapper = shallowWrapper.find('.selector');
// Since AnotherComponent does not have a constructor, it cannot match the
// previous selector overload of ComponentClass { new(props?, contenxt? ) }
const s1: EnzymeComponentClass<AnotherComponentProps> = AnotherComponent;
}
function test_findWhere() {
shallowWrapper =
shallowWrapper.findWhere((aShallowWrapper: ShallowWrapper<MyComponentProps, MyComponentState>) => true);
}
function test_filter() {
anotherComponentWrapper = shallowWrapper.filter(AnotherComponent);
anotherStatelessWrapper = shallowWrapper.filter(AnotherStatelessComponent);
// NOTE: The following calls to filter do not narrow down the possible type of the result based
// on the type of the param, so the return type should not be different than the original
// "this". This is a special case for "filter" vs other methods like "find", because "filter"
// is guaranteed to return only a subset of the existing list of components/elements without
// finding/adding more.
shallowWrapper = shallowWrapper.filter({ numberProp: 12 });
shallowWrapper = shallowWrapper.filter('.selector');
}
function test_filterWhere() {
shallowWrapper =
shallowWrapper.filterWhere(wrapper => {
wrapper.props().stringProp;
return true;
});
}
function test_contains() {
boolVal = shallowWrapper.contains(<div className="foo bar" />);
}
function test_containsMatchingElement() {
boolVal = shallowWrapper.contains(<div className="foo bar" />);
}
function test_containsAllMatchingElements() {
boolVal = shallowWrapper.containsAllMatchingElements([<div className="foo bar" />]);
}
function test_containsAnyMatchingElement() {
boolVal = shallowWrapper.containsAnyMatchingElements([<div className="foo bar" />]);
}
function test_dive() {
interface TmpProps {
foo: any;
}
interface TmpState {
bar: any;
}
const diveWrapper: ShallowWrapper<TmpProps, TmpState> = shallowWrapper.dive<TmpProps, TmpState>({ context: { foobar: 'barfoo' } });
}
function test_hostNodes() {
shallowWrapper.hostNodes();
}
function test_equals() {
boolVal = shallowWrapper.equals(<div className="foo bar" />);
}
function test_matchesElement() {
boolVal = shallowWrapper.matchesElement(<div className="foo bar" />);
}
function test_hasClass() {
boolVal = shallowWrapper.find('.my-button').hasClass('disabled');
}
function test_is() {
boolVal = shallowWrapper.is('.some-class');
}
function test_isEmpty() {
boolVal = shallowWrapper.isEmpty();
}
function test_exists() {
boolVal = shallowWrapper.exists();
}
function test_not() {
elementWrapper = shallowWrapper.find('.foo').not('.bar');
}
function test_children() {
shallowWrapper = shallowWrapper.children();
anotherComponentWrapper = shallowWrapper.children(AnotherComponent);
anotherStatelessWrapper = shallowWrapper.children(AnotherStatelessComponent);
shallowWrapper = shallowWrapper.children({ prop: 'value' });
elementWrapper = shallowWrapper.children('.selector');
}
function test_childAt() {
const childWrapper: ShallowWrapper<any, any> = shallowWrapper.childAt(0);
interface TmpType1 {
foo: any;
}
interface TmpType2 {
bar: any;
}
const childWrapper2: ShallowWrapper<TmpType1, TmpType2> = shallowWrapper.childAt<TmpType1, TmpType2>(0);
}
function test_parents() {
shallowWrapper = shallowWrapper.parents();
anotherComponentWrapper = shallowWrapper.parents(AnotherComponent);
anotherStatelessWrapper = shallowWrapper.parents(AnotherStatelessComponent);
shallowWrapper = shallowWrapper.parents({ prop: 'myprop' });
elementWrapper = shallowWrapper.parents('.selector');
}
function test_parent() {
shallowWrapper = shallowWrapper.parent();
}
function test_closest() {
anotherComponentWrapper = shallowWrapper.closest(AnotherComponent);
anotherStatelessWrapper = shallowWrapper.closest(AnotherStatelessComponent);
shallowWrapper = shallowWrapper.closest({ prop: 'myprop' });
elementWrapper = shallowWrapper.closest('.selector');
}
function test_shallow() {
shallowWrapper = shallowWrapper.shallow();
}
function test_unmount() {
shallowWrapper = shallowWrapper.unmount();
}
function test_text() {
stringVal = shallowWrapper.text();
}
function test_html() {
stringVal = shallowWrapper.html();
}
function test_get() {
reactElement = shallowWrapper.get(1);
}
function test_getNode() {
reactElement = shallowWrapper.getNode();
}
function test_getNodes() {
reactElements = shallowWrapper.getNodes();
}
function test_getElement() {
reactElement = shallowWrapper.getElement();
}
function test_getElements() {
reactElements = shallowWrapper.getElements();
}
function test_getDOMNode() {
domElement = shallowWrapper.getDOMNode();
}
function test_at() {
shallowWrapper = shallowWrapper.at(1);
}
function test_first() {
shallowWrapper = shallowWrapper.first();
}
function test_last() {
shallowWrapper = shallowWrapper.last();
}
function test_slice() {
shallowWrapper = shallowWrapper.slice(1);
shallowWrapper = shallowWrapper.slice(1, 3);
}
function test_tap() {
shallowWrapper.tap((intercepter) => { });
}
function test_state() {
const state: MyComponentState = shallowWrapper.state();
const prop: string = shallowWrapper.state('stateProperty');
const prop2: number = shallowWrapper.state<number>('key');
const prop3 = shallowWrapper.state('key');
}
function test_context() {
shallowWrapper.context();
shallowWrapper.context('key');
const tmp: string = shallowWrapper.context<string>('key');
}
function test_props() {
const props: MyComponentProps = shallowWrapper.props();
const props2: AnotherComponentProps = shallowWrapper.find(AnotherComponent).props();
const props3: AnotherStatelessProps = shallowWrapper.find(AnotherStatelessComponent).props();
const props4: HTMLAttributes<any> = shallowWrapper.find('.selector').props();
}
function test_prop() {
const tmp: number = shallowWrapper.prop('numberProp');
const tmp2: string = shallowWrapper.prop<string>('key');
const tmp3 = shallowWrapper.prop('key');
}
function test_key() {
stringVal = shallowWrapper.key();
}
function test_simulate(...args: any[]) {
shallowWrapper.simulate('click');
shallowWrapper.simulate('click', args);
}
function test_setState() {
shallowWrapper = shallowWrapper.setState({ stateProperty: 'state' }, () => console.log('state updated'));
}
function test_setProps() {
shallowWrapper = shallowWrapper.setProps({ stringProp: 'foo' });
}
function test_setContext() {
shallowWrapper = shallowWrapper.setContext({ name: 'baz' });
}
function test_instance() {
const myComponent: MyComponent = shallowWrapper.instance();
}
function test_update() {
shallowWrapper = shallowWrapper.update();
}
function test_debug() {
stringVal = shallowWrapper.debug();
}
function test_type() {
const type: string | StatelessComponent<MyComponentProps> | ComponentClass<MyComponentProps> = shallowWrapper.type();
}
function test_name() {
stringVal = shallowWrapper.name();
}
function test_forEach() {
shallowWrapper =
shallowWrapper.forEach(wrapper => wrapper.shallow().props().stringProp);
}
function test_map() {
const arrayNumbers: number[] =
shallowWrapper.map(wrapper => wrapper.props().numberProp);
}
function test_reduce() {
const total: number =
shallowWrapper.reduce(
(amount: number, n: ShallowWrapper<MyComponentProps, MyComponentState>) => amount + n.props().numberProp
);
}
function test_reduceRight() {
const total: number =
shallowWrapper.reduceRight<number>(
(amount: number, n: ShallowWrapper<MyComponentProps, MyComponentState>) => amount + n.prop('numberProp')
);
}
function test_some() {
boolVal = shallowWrapper.some(AnotherComponent);
boolVal = shallowWrapper.some(AnotherStatelessComponent);
boolVal = shallowWrapper.some({ prop: 'myprop' });
boolVal = shallowWrapper.some('.selector');
}
function test_someWhere() {
boolVal = shallowWrapper.someWhere((aShallowWrapper: ShallowWrapper<MyComponentProps, MyComponentState>) => true);
}
function test_every() {
boolVal = shallowWrapper.every(AnotherComponent);
boolVal = shallowWrapper.every(AnotherStatelessComponent);
boolVal = shallowWrapper.every({ prop: 'myprop' });
boolVal = shallowWrapper.every('.selector');
}
function test_everyWhere() {
boolVal = shallowWrapper.everyWhere((aShallowWrapper: ShallowWrapper<MyComponentProps, MyComponentState>) => true);
}
function test_isEmptyRender() {
boolVal = shallowWrapper.isEmptyRender();
}
function test_svg() {
numOrStringVal = shallowWrapper.find('svg').props().strokeWidth;
}
function test_constructor() {
let anyWrapper: ShallowWrapper;
anyWrapper = new ShallowWrapper(<MyComponent stringProp="1" numberProp={1} />);
shallowWrapper = new ShallowWrapper<MyComponentProps, MyComponentState>(<MyComponent stringProp="1" numberProp={1} />);
shallowWrapper = new ShallowWrapper<MyComponentProps, MyComponentState>([<MyComponent stringProp="1" numberProp={1} />, <MyComponent stringProp="1" numberProp={1} />]);
shallowWrapper = new ShallowWrapper<MyComponentProps, MyComponentState>(<MyComponent stringProp="1" numberProp={1} />, shallowWrapper);
shallowWrapper = new ShallowWrapper<MyComponentProps, MyComponentState>(<MyComponent stringProp="1" numberProp={1} />, undefined, { lifecycleExperimental: true });
shallowWrapper = new ShallowWrapper<MyComponentProps, MyComponentState>(<MyComponent stringProp="1" numberProp={1} />, shallowWrapper, { lifecycleExperimental: true });
}
}
// ReactWrapper
function ReactWrapperTest() {
let reactWrapper: ReactWrapper<MyComponentProps, MyComponentState> =
mount<MyComponentProps, MyComponentState>(<MyComponent stringProp="value" numberProp={1} />);
let reactElement: ReactElement<any>;
let reactElements: Array<ReactElement<any>>;
let domElement: Element;
let boolVal: boolean;
let stringVal: string;
let elementWrapper: ReactWrapper<HTMLAttributes<{}>>;
let anotherStatelessWrapper: ReactWrapper<AnotherStatelessProps, never>;
let anotherComponentWrapper: ReactWrapper<AnotherComponentProps, any>;
function test_prop_state_inferring() {
let wrapper: ReactWrapper<MyComponentProps, MyComponentState>;
wrapper = mount(<MyComponent stringProp="value" numberProp={1} />);
wrapper.state().stateProperty;
wrapper.props().stringProp.toUpperCase();
}
function test_unmount() {
reactWrapper = reactWrapper.unmount();
}
function test_mount() {
reactWrapper = reactWrapper.mount();
mount(<MyComponent stringProp='1' numberProp={1} />, {
attachTo: document.getElementById('test'),
context: {
a: "b"
}
});
}
function test_ref() {
reactWrapper = reactWrapper.ref('refName');
interface TmpType1 {
foo: string;
}
interface TmpType2 {
bar: string;
}
const tmp: ReactWrapper<TmpType1, TmpType2> = reactWrapper.ref<TmpType1, TmpType2>('refName');
}
function test_detach() {
reactWrapper.detach();
}
function test_hostNodes() {
reactWrapper.hostNodes();
}
function test_find() {
elementWrapper = reactWrapper.find('.selector');
anotherComponentWrapper = reactWrapper.find(AnotherComponent);
anotherStatelessWrapper = reactWrapper.find(AnotherStatelessComponent);
reactWrapper = reactWrapper.find({ prop: 'myprop' });
}
function test_findWhere() {
reactWrapper =
reactWrapper.findWhere((aReactWrapper: ReactWrapper<MyComponentProps, MyComponentState>) => true);
}
function test_filter() {
anotherComponentWrapper = reactWrapper.filter(AnotherComponent);
anotherStatelessWrapper = reactWrapper.filter(AnotherStatelessComponent);
// NOTE: The following calls to filter do not narrow down the possible type of the result based
// on the type of the param, so the return type should not be different than the original
// "this". This is a special case for "filter" vs other methods like "find", because "filter"
// is guaranteed to return only a subset of the existing list of components/elements without
// finding/adding more.
reactWrapper = reactWrapper.filter({ numberProp: 12 });
reactWrapper = reactWrapper.filter('.selector');
}
function test_filterWhere() {
reactWrapper =
reactWrapper.filterWhere(wrapper => {
wrapper.props().stringProp;
return true;
});
}
function test_contains() {
boolVal = reactWrapper.contains(<div className="foo bar" />);
}
function test_containsMatchingElement() {
boolVal = reactWrapper.contains(<div className="foo bar" />);
}
function test_containsAllMatchingElements() {
boolVal = reactWrapper.containsAllMatchingElements([<div className="foo bar" />]);
}
function test_containsAnyMatchingElement() {
boolVal = reactWrapper.containsAnyMatchingElements([<div className="foo bar" />]);
}
function test_equals() {
boolVal = reactWrapper.equals(<div className="foo bar" />);
}
function test_matchesElement() {
boolVal = reactWrapper.matchesElement(<div className="foo bar" />);
}
function test_hasClass() {
boolVal = reactWrapper.find('.my-button').hasClass('disabled');
}
function test_is() {
boolVal = reactWrapper.is('.some-class');
}
function test_isEmpty() {
boolVal = reactWrapper.isEmpty();
}
function test_not() {
elementWrapper = reactWrapper.find('.foo').not('.bar');
}
function test_children() {
reactWrapper = reactWrapper.children();
anotherComponentWrapper = reactWrapper.children(AnotherComponent);
anotherStatelessWrapper = reactWrapper.children(AnotherStatelessComponent);
reactWrapper = reactWrapper.children({ prop: 'myprop' });
elementWrapper = reactWrapper.children('.selector');
}
function test_childAt() {
const childWrapper: ReactWrapper<any, any> = reactWrapper.childAt(0);
interface TmpType1 {
foo: any;
}
interface TmpType2 {
bar: any;
}
const childWrapper2: ReactWrapper<TmpType1, TmpType2> = reactWrapper.childAt<TmpType1, TmpType2>(0);
}
function test_parents() {
reactWrapper = reactWrapper.parents();
anotherComponentWrapper = reactWrapper.parents(AnotherComponent);
anotherStatelessWrapper = reactWrapper.parents(AnotherStatelessComponent);
reactWrapper = reactWrapper.parents({ prop: 'myprop' });
elementWrapper = reactWrapper.parents('.selector');
}
function test_parent() {
reactWrapper = reactWrapper.parent();
}
function test_closest() {
anotherComponentWrapper = reactWrapper.closest(AnotherComponent);
anotherStatelessWrapper = reactWrapper.closest(AnotherStatelessComponent);
reactWrapper = reactWrapper.closest({ prop: 'myprop' });
elementWrapper = reactWrapper.closest('.selector');
}
function test_text() {
stringVal = reactWrapper.text();
}
function test_html() {
stringVal = reactWrapper.html();
}
function test_get() {
reactElement = reactWrapper.get(1);
}
function test_getNode() {
reactElement = reactWrapper.getNode();
}
function test_getNodes() {
reactElements = reactWrapper.getNodes();
}
function test_getElement() {
reactElement = reactWrapper.getElement();
}
function test_getElements() {
reactElements = reactWrapper.getElements();
}
function test_getDOMNode() {
domElement = reactWrapper.getDOMNode();
}
function test_at() {
reactWrapper = reactWrapper.at(1);
}
function test_first() {
reactWrapper = reactWrapper.first();
}
function test_last() {
reactWrapper = reactWrapper.last();
}
function test_slice() {
reactWrapper = reactWrapper.slice(1);
reactWrapper = reactWrapper.slice(1, 3);
}
function test_tap() {
reactWrapper.tap((intercepter) => { });
}
function test_state() {
const state: MyComponentState = reactWrapper.state();
const prop: string = reactWrapper.state('stateProperty');
const prop2: number = reactWrapper.state<number>('key');
const prop3 = reactWrapper.state('key');
}
function test_context() {
reactWrapper.context();
reactWrapper.context('key');
const tmp: string = reactWrapper.context<string>('key');
}
function test_props() {
const props: MyComponentProps = reactWrapper.props();
const props2: AnotherComponentProps = reactWrapper.find(AnotherComponent).props();
const props3: AnotherStatelessProps = reactWrapper.find(AnotherStatelessComponent).props();
const props4: HTMLAttributes<any> = reactWrapper.find('.selector').props();
}
function test_prop() {
const tmp: number = reactWrapper.prop('numberProp');
const tmp2: string = reactWrapper.prop<string>('key');
const tmp3 = reactWrapper.prop('key');
}
function test_key() {
stringVal = reactWrapper.key();
}
function test_simulate(...args: any[]) {
reactWrapper.simulate('click');
reactWrapper.simulate('click', args);
}
function test_setState() {
reactWrapper = reactWrapper.setState({ stateProperty: 'state' });
}
function test_setProps() {
reactWrapper = reactWrapper.setProps({ stringProp: 'foo' }, () => { });
}
function test_setContext() {
reactWrapper = reactWrapper.setContext({ name: 'baz' });
}
function test_instance() {
const myComponent: MyComponent = reactWrapper.instance();
}
function test_update() {
reactWrapper = reactWrapper.update();
}
function test_debug() {
stringVal = reactWrapper.debug();
}
function test_type() {
const type: string | StatelessComponent<MyComponentProps> | ComponentClass<MyComponentProps> = reactWrapper.type();
}
function test_name() {
stringVal = reactWrapper.name();
}
function test_forEach() {
reactWrapper =
reactWrapper.forEach(wrapper => wrapper.props().stringProp);
}
function test_map() {
const arrayNumbers: number[] =
reactWrapper.map(wrapper => wrapper.props().numberProp);
}
function test_reduce() {
const total: number =
reactWrapper.reduce<number>(
(amount: number, n: ReactWrapper<MyComponentProps, MyComponentState>) => amount + n.prop('numberProp')
);
}
function test_reduceRight() {
const total: number =
reactWrapper.reduceRight<number>(
(amount: number, n: ReactWrapper<MyComponentProps, MyComponentState>) => amount + n.prop('numberProp')
);
}
function test_some() {
boolVal = reactWrapper.some(AnotherComponent);
boolVal = reactWrapper.some(AnotherStatelessComponent);
boolVal = reactWrapper.some({ prop: 'myprop' });
boolVal = reactWrapper.some('.selector');
}
function test_someWhere() {
boolVal = reactWrapper.someWhere((aReactWrapper: ReactWrapper<MyComponentProps, MyComponentState>) => true);
}
function test_every() {
boolVal = reactWrapper.every(AnotherComponent);
boolVal = reactWrapper.every(AnotherStatelessComponent);
boolVal = reactWrapper.every({ prop: 'myprop' });
boolVal = reactWrapper.every('.selector');
}
function test_everyWhere() {
boolVal = reactWrapper.everyWhere((aReactWrapper: ReactWrapper<MyComponentProps, MyComponentState>) => true);
}
function test_isEmptyRender() {
boolVal = reactWrapper.isEmptyRender();
}
function test_constructor() {
let anyWrapper: ReactWrapper;
anyWrapper = new ReactWrapper(<MyComponent stringProp="1" numberProp={1} />);
reactWrapper = new ReactWrapper<MyComponentProps, MyComponentState>(<MyComponent stringProp="1" numberProp={1} />);
reactWrapper = new ReactWrapper<MyComponentProps, MyComponentState>([<MyComponent stringProp="1" numberProp={1} />, <MyComponent stringProp="1" numberProp={1} />]);
reactWrapper = new ReactWrapper<MyComponentProps, MyComponentState>(<MyComponent stringProp="1" numberProp={1} />, reactWrapper);
reactWrapper = new ReactWrapper<MyComponentProps, MyComponentState>(<MyComponent stringProp="1" numberProp={1} />, undefined, { attachTo: document.createElement('div') });
reactWrapper = new ReactWrapper<MyComponentProps, MyComponentState>(<MyComponent stringProp="1" numberProp={1} />, reactWrapper, { attachTo: document.createElement('div') });
}
}
// CheerioWrapper
function CheerioWrapperTest() {
const wrapper: Cheerio =
shallow(<div />).render() ||
mount(<div />).render();
wrapper.toggleClass('className');
}
| mit |
borisyankov/DefinitelyTyped | types/amap-js-api/index.d.ts | 2198 | // Type definitions for non-npm package amap-js-api 1.4
// Project: https://lbs.amap.com/api/javascript-api/summary
// Definitions by: breeze9527 <https://github.com/breeze9527>
// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped
// TypeScript Version: 2.8
/// <reference path="array-bounds.d.ts" />
/// <reference path="bounds.d.ts" />
/// <reference path="browser.d.ts" />
/// <reference path="common.d.ts" />
/// <reference path="convert-from.d.ts" />
/// <reference path="dom-util.d.ts" />
/// <reference path="event.d.ts" />
/// <reference path="geometry-util.d.ts" />
/// <reference path="lngLat.d.ts" />
/// <reference path="map.d.ts" />
/// <reference path="pixel.d.ts" />
/// <reference path="size.d.ts" />
/// <reference path="type-util.d.ts" />
/// <reference path="util.d.ts" />
/// <reference path="view2D.d.ts" />
/// <reference path="layer/building.d.ts" />
/// <reference path="layer/flexible.d.ts" />
/// <reference path="layer/labelsLayer.d.ts" />
/// <reference path="layer/layer.d.ts" />
/// <reference path="layer/layerGroup.d.ts" />
/// <reference path="layer/massMarks.d.ts" />
/// <reference path="layer/mediaLayer.d.ts" />
/// <reference path="layer/tileLayer.d.ts" />
/// <reference path="layer/wms.d.ts" />
/// <reference path="layer/wmts.d.ts" />
/// <reference path="overlay/bezierCurve.d.ts" />
/// <reference path="overlay/circle.d.ts" />
/// <reference path="overlay/circleMarker.d.ts" />
/// <reference path="overlay/contextMenu.d.ts" />
/// <reference path="overlay/ellipse.d.ts" />
/// <reference path="overlay/geoJSON.d.ts" />
/// <reference path="overlay/icon.d.ts" />
/// <reference path="overlay/infoWindow.d.ts" />
/// <reference path="overlay/labelMarker.d.ts" />
/// <reference path="overlay/marker.d.ts" />
/// <reference path="overlay/markerShape.d.ts" />
/// <reference path="overlay/overlay.d.ts" />
/// <reference path="overlay/overlayGroup.d.ts" />
/// <reference path="overlay/pathOverlay.d.ts" />
/// <reference path="overlay/polygon.d.ts" />
/// <reference path="overlay/polyline.d.ts" />
/// <reference path="overlay/rectangle.d.ts" />
/// <reference path="overlay/shapeOverlay.d.ts" />
/// <reference path="overlay/text.d.ts" />
| mit |
dgilroy77/Car.ly | node_modules/eslint-plugin-jsx-a11y/.eslintrc.js | 4092 | module.exports = {
"env": {
"node": true,
"commonjs": true,
"es6": true
},
"ecmaFeatures": {
"arrowFunctions": true,
"binaryLiterals": false,
"blockBindings": true,
"classes": false,
"defaultParams": true,
"destructuring": true,
"forOf": false,
"generators": false,
"modules": true,
"objectLiteralComputedProperties": false,
"objectLiteralDuplicateProperties": false,
"objectLiteralShorthandMethods": true,
"objectLiteralShorthandProperties": true,
"octalLiterals": false,
"regexUFlag": false,
"regexYFlag": false,
"restParams": true,
"spread": true,
"superInFunctions": true,
"templateStrings": true,
"unicodeCodePointEscapes": false,
"globalReturns": false,
"jsx": true,
"experimentalObjectRestSpread": true
},
"parser": "babel-eslint",
"rules": {
"array-bracket-spacing": [ 2, "always" ],
"arrow-body-style": [ 2, "as-needed" ],
"arrow-parens": [ 2, "as-needed" ],
"block-scoped-var": 2,
"block-spacing": [ 2, "always" ],
"brace-style": [ 2, "1tbs", {
"allowSingleLine": false
} ],
"comma-dangle": [ 2, "never" ],
"comma-spacing": [ 2, {
"after": true,
"before": false
} ],
"consistent-return": 2,
"consistent-this": [ 2, "self" ],
"constructor-super": 2,
"curly": [ 2, "all" ],
"dot-location": [ 2, "property" ],
"dot-notation": 2,
"eol-last": 2,
"indent": [ 2, 2, {
"SwitchCase": 1
} ],
"jsx-quotes": [ 2, "prefer-double" ],
"max-len": [ 2, 125, 2, {
"ignorePattern": "((^import[^;]+;$)|(^\\s*it\\())",
"ignoreUrls": true
} ],
"new-cap": 2,
"no-alert": 2,
"no-confusing-arrow": 2,
"no-caller": 2,
"no-class-assign": 2,
"no-cond-assign": [ 2, "always" ],
"no-console": 1,
"no-const-assign": 2,
"no-constant-condition": 2,
"no-control-regex": 2,
"no-debugger": 1,
"no-dupe-args": 2,
"no-dupe-class-members": 2,
"no-dupe-keys": 2,
"no-duplicate-case": 2,
"no-else-return": 2,
"no-empty": 2,
"no-empty-character-class": 2,
"no-eq-null": 2,
"no-eval": 2,
"no-ex-assign": 2,
"no-extend-native": 2,
"no-extra-bind": 2,
"no-extra-boolean-cast": 2,
"no-extra-parens": [ 2, "functions" ],
"no-extra-semi": 2,
"no-func-assign": 2,
"no-implicit-coercion": [ 2, {
"boolean": true,
"number": true,
"string": true
} ],
"no-implied-eval": 2,
"no-inner-declarations": [ 2, "both" ],
"no-invalid-regexp": 2,
"no-invalid-this": 2,
"no-irregular-whitespace": 2,
"no-lonely-if": 2,
"no-negated-condition": 2,
"no-negated-in-lhs": 2,
"no-new": 2,
"no-new-func": 2,
"no-new-object": 2,
"no-obj-calls": 2,
"no-proto": 2,
"no-redeclare": 2,
"no-regex-spaces": 2,
"no-return-assign": 2,
"no-self-compare": 2,
"no-sequences": 2,
"no-sparse-arrays": 2,
"no-this-before-super": 2,
"no-trailing-spaces": 2,
"no-undef": 2,
"no-unexpected-multiline": 2,
"no-unneeded-ternary": 2,
"no-unreachable": 2,
"no-unused-vars": [ 1, {
"args": "after-used",
"vars": "all",
"varsIgnorePattern": "React"
} ],
"no-use-before-define": 2,
"no-useless-call": 2,
"no-useless-concat": 2,
"no-var": 2,
"no-warning-comments": [ 1, {
"location": "anywhere",
"terms": [
"todo"
]
} ],
"no-with": 2,
"object-curly-spacing": [ 2, "always" ],
"object-shorthand": 2,
"one-var": [ 2, "never" ],
"prefer-arrow-callback": 2,
"prefer-const": 2,
"prefer-spread": 2,
"prefer-template": 2,
"radix": 2,
"semi": 2,
"sort-vars": [ 2, {
"ignoreCase": true
} ],
"keyword-spacing": 2,
"space-before-blocks": [ 2, "always" ],
"space-before-function-paren": [ 2, "never" ],
"space-infix-ops": [ 2, {
"int32Hint": false
} ],
"use-isnan": 2,
"valid-typeof": 2,
"vars-on-top": 2,
"yoda": [ 2, "never" ]
}
};
| mit |
Subsets and Splits