repo_name
stringlengths 4
116
| path
stringlengths 3
942
| size
stringlengths 1
7
| content
stringlengths 3
1.05M
| license
stringclasses 15
values |
---|---|---|---|---|
carlobar/uclinux_leon3_UD | user/klibc/usr/include/arch/sparc64/klibc/archsignal.h | 296 | /*
* arch/sparc64/include/klibc/archsignal.h
*
* Architecture-specific signal definitions
*
*/
#ifndef _KLIBC_ARCHSIGNAL_H
#define _KLIBC_ARCHSIGNAL_H
/* Request the POSIX.1b (real-time) signal definitions from <asm/signal.h>. */
#define __WANT_POSIX1B_SIGNALS__
#include <asm/signal.h>
/* Not actually used by the kernel... */
#define SA_RESTORER 0x80000000
#endif
| gpl-2.0 |
mickael-guene/gcc | libstdc++-v3/testsuite/25_algorithms/for_each/requirements/explicit_instantiation/2.cc | 1221 | // { dg-do compile }
// 2007-09-20 Benjamin Kosnik <[email protected]>
// Copyright (C) 2007-2017 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING3. If not see
// <http://www.gnu.org/licenses/>.
#include <algorithm>
#include <functional>
#include <testsuite_api.h>
// Explicit instantiation of std::for_each with a value type that is not
// default constructible; this is a compile-only test, so the instantiation
// itself is the assertion.
namespace std
{
using __gnu_test::NonDefaultConstructible;
typedef NonDefaultConstructible value_type;
typedef value_type* iterator_type;
typedef std::pointer_to_unary_function<value_type, void> function_type;
// Force full instantiation of the template (not just a declaration).
template function_type for_each(iterator_type, iterator_type,
function_type);
}
| gpl-2.0 |
holyangel/LGE_G3 | lib/mpi/mpi-mul.c | 4642 | /* mpi-mul.c - MPI functions
* Copyright (C) 1994, 1996 Free Software Foundation, Inc.
* Copyright (C) 1998, 2001 Free Software Foundation, Inc.
*
* This file is part of GnuPG.
*
* GnuPG is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* GnuPG is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*
* Note: This code is heavily based on the GNU MP Library.
* Actually it's the same code with only minor changes in the
* way the data is stored; this is to support the abstraction
* of an optional secure memory allocation which may be used
* to avoid revealing of sensitive data due to paging etc.
* The GNU MP Library itself is published under the LGPL;
* however I decided to publish this code under the plain GPL.
*/
#include "mpi-internal.h"
/*
 * mpi_mul_ui() - multiply an MPI by a small unsigned constant.
 * @prod:       destination MPI (resized as needed)
 * @mult:       multiplicand
 * @small_mult: single-limb multiplier
 *
 * Returns 0 on success, -ENOMEM if the destination cannot be grown.
 * The sign of the result follows the sign of @mult.
 */
int mpi_mul_ui(MPI prod, MPI mult, unsigned long small_mult)
{
	mpi_size_t nlimbs = mult->nlimbs;
	int result_sign = mult->sign;
	mpi_ptr_t dst;
	mpi_limb_t carry;

	/* Either factor being zero makes the product zero. */
	if (!nlimbs || !small_mult) {
		prod->nlimbs = 0;
		prod->sign = 0;
		return 0;
	}

	/* The product needs at most one limb more than the multiplicand. */
	if (prod->alloced < nlimbs + 1)
		if (mpi_resize(prod, nlimbs + 1) < 0)
			return -ENOMEM;

	dst = prod->d;
	carry = mpihelp_mul_1(dst, mult->d, nlimbs, (mpi_limb_t) small_mult);
	if (carry)
		dst[nlimbs++] = carry;	/* result spilled into the extra limb */
	prod->nlimbs = nlimbs;
	prod->sign = result_sign;
	return 0;
}
/*
 * mpi_mul_2exp() - w = u * 2^cnt (left shift by cnt bits).
 * @w:   destination MPI (may alias @u)
 * @u:   value to shift
 * @cnt: shift amount in bits
 *
 * Returns 0 on success, -ENOMEM if @w cannot be resized.
 */
int mpi_mul_2exp(MPI w, MPI u, unsigned long cnt)
{
mpi_size_t usize, wsize, limb_cnt;
mpi_ptr_t wp;
mpi_limb_t wlimb;
int usign, wsign;
usize = u->nlimbs;
usign = u->sign;
/* Shifting zero yields zero regardless of cnt. */
if (!usize) {
w->nlimbs = 0;
w->sign = 0;
return 0;
}
/* Split the shift into whole limbs plus a sub-limb bit count. */
limb_cnt = cnt / BITS_PER_MPI_LIMB;
/* Worst case: one extra limb for bits shifted out of the top. */
wsize = usize + limb_cnt + 1;
if (w->alloced < wsize)
if (mpi_resize(w, wsize) < 0)
return -ENOMEM;
wp = w->d;
wsize = usize + limb_cnt;
wsign = usign;
cnt %= BITS_PER_MPI_LIMB;
if (cnt) {
wlimb = mpihelp_lshift(wp + limb_cnt, u->d, usize, cnt);
if (wlimb) {
/* Bits carried out of the top limb occupy the extra limb. */
wp[wsize] = wlimb;
wsize++;
}
} else {
/* Pure limb shift; DECR variant presumably copies high-to-low so an
 * overlapping w == u is safe — TODO confirm MPN_COPY_DECR semantics. */
MPN_COPY_DECR(wp + limb_cnt, u->d, usize);
}
/* Zero the low-order limbs vacated by the shift.
 */
MPN_ZERO(wp, limb_cnt);
w->nlimbs = wsize;
w->sign = wsign;
return 0;
}
/*
 * mpi_mul() - w = u * v.
 * @w: destination MPI; may alias @u and/or @v
 * @u: first factor
 * @v: second factor
 *
 * Returns 0 on success, -ENOMEM on allocation failure.  Handles all
 * aliasing combinations by copying an operand (or redirecting the
 * result) through temporary limb space when needed.
 */
int mpi_mul(MPI w, MPI u, MPI v)
{
int rc = -ENOMEM;
mpi_size_t usize, vsize, wsize;
mpi_ptr_t up, vp, wp;
mpi_limb_t cy;
int usign, vsign, sign_product;
int assign_wp = 0;
mpi_ptr_t tmp_limb = NULL;
if (u->nlimbs < v->nlimbs) { /* Swap so that u is the operand with more limbs. */
usize = v->nlimbs;
usign = v->sign;
up = v->d;
vsize = u->nlimbs;
vsign = u->sign;
vp = u->d;
} else {
usize = u->nlimbs;
usign = u->sign;
up = u->d;
vsize = v->nlimbs;
vsign = v->sign;
vp = v->d;
}
sign_product = usign ^ vsign;
wp = w->d;
/* The product needs at most usize + vsize limbs. */
wsize = usize + vsize;
if (w->alloced < (size_t) wsize) {
if (wp == up || wp == vp) {
/* w aliases an operand: build the result in fresh limb space
 * and install it afterwards instead of resizing in place. */
wp = mpi_alloc_limb_space(wsize);
if (!wp)
goto nomem;
assign_wp = 1;
} else {
if (mpi_resize(w, wsize) < 0)
goto nomem;
wp = w->d;
}
} else { /* w is big enough; make sure the operands don't overlap it. */
if (wp == up) {
/* w aliases u: copy u aside before overwriting w. */
up = tmp_limb = mpi_alloc_limb_space(usize);
if (!up)
goto nomem;
/* If w also aliases v, point v at the same copy. */
if (wp == vp)
vp = up;
/* Preserve the operand's limbs before wp is clobbered. */
MPN_COPY(up, wp, usize);
} else if (wp == vp) {
/* w aliases only v: copy v aside. */
vp = tmp_limb = mpi_alloc_limb_space(vsize);
if (!vp)
goto nomem;
/* Preserve the operand's limbs before wp is clobbered. */
MPN_COPY(vp, wp, vsize);
}
}
if (!vsize)
wsize = 0;
else {
if (mpihelp_mul(wp, up, usize, vp, vsize, &cy) < 0)
goto nomem;
/* Drop the top limb if no carry made it significant. */
wsize -= cy ? 0 : 1;
}
if (assign_wp)
mpi_assign_limb_space(w, wp, wsize);
w->nlimbs = wsize;
w->sign = sign_product;
rc = 0;
nomem:
if (tmp_limb)
mpi_free_limb_space(tmp_limb);
return rc;
}
/*
 * mpi_mulm() - modular multiplication: w = (u * v) mod m.
 * @w: destination MPI (also holds the intermediate product)
 * @u: first factor
 * @v: second factor
 * @m: modulus
 *
 * Returns 0 on success or a negative error code.  Unlike the previous
 * version, a failure from mpi_mul() is propagated as-is rather than
 * being rewritten to a hard-coded -ENOMEM, so callers see the real
 * error cause if mpi_mul() ever returns other codes.
 */
int mpi_mulm(MPI w, MPI u, MPI v, MPI m)
{
	int rc;

	rc = mpi_mul(w, u, v);
	if (rc < 0)
		return rc;	/* propagate the actual error code */
	return mpi_fdiv_r(w, w, m);
}
| gpl-2.0 |
seank/FreeScale-s12x-binutils-jm | ld/emulparams/armelf.sh | 1130 | MACHINE=
# Linker emulation parameters for bare-metal ARM ELF (consumed by the
# ld emultempl machinery; each variable feeds the generated linker script).
SCRIPT_NAME=elf
# Little-endian by default; big-endian available via -EB.
OUTPUT_FORMAT="elf32-littlearm"
BIG_OUTPUT_FORMAT="elf32-bigarm"
LITTLE_OUTPUT_FORMAT="elf32-littlearm"
TEXT_START_ADDR=0x8000
TEMPLATE_NAME=elf32
EXTRA_EM_FILE=armelf
# ARM/Thumb interworking glue and VFP11 erratum veneers live in .text.
OTHER_TEXT_SECTIONS='*(.glue_7t) *(.glue_7) *(.vfp11_veneer)'
OTHER_BSS_SYMBOLS='__bss_start__ = .;'
OTHER_BSS_END_SYMBOLS='_bss_end__ = . ; __bss_end__ = . ;'
OTHER_END_SYMBOLS='__end__ = . ;'
OTHER_SECTIONS='.note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) }'
ATTRS_SECTIONS='.ARM.attributes 0 : { KEEP (*(.ARM.attributes)) KEEP (*(.gnu.attributes)) }'
# Exception-handling index/table sections with __exidx_{start,end} markers
# (only emitted when relocating, i.e. in the final link).
OTHER_READONLY_SECTIONS="
.ARM.extab ${RELOCATING-0} : { *(.ARM.extab${RELOCATING+* .gnu.linkonce.armextab.*}) }
${RELOCATING+ __exidx_start = .; }
.ARM.exidx ${RELOCATING-0} : { *(.ARM.exidx${RELOCATING+* .gnu.linkonce.armexidx.*}) }
${RELOCATING+ __exidx_end = .; }"
DATA_START_SYMBOLS='__data_start = . ;';
GENERATE_SHLIB_SCRIPT=yes
ARCH=arm
MACHINE=
MAXPAGESIZE="CONSTANT (MAXPAGESIZE)"
ENTRY=_start
EMBEDDED=yes
# This sets the stack to the top of the simulator memory (2^19 bytes).
STACK_ADDR=0x80000
# ARM does not support .s* sections.
NO_SMALL_DATA=yes
| gpl-2.0 |
BCLinux/qga-bc | qemu-win/include/qemu/memfd.h | 764 | #ifndef QEMU_MEMFD_H
#define QEMU_MEMFD_H
#include "config-host.h"
#include <stdbool.h>
/* Fallback definitions for hosts whose <fcntl.h> lacks the Linux-specific
 * file-sealing constants. */
#ifndef F_LINUX_SPECIFIC_BASE
#define F_LINUX_SPECIFIC_BASE 1024
#endif
#ifndef F_ADD_SEALS
#define F_ADD_SEALS (F_LINUX_SPECIFIC_BASE + 9)
#define F_GET_SEALS (F_LINUX_SPECIFIC_BASE + 10)
#define F_SEAL_SEAL 0x0001 /* prevent further seals from being set */
#define F_SEAL_SHRINK 0x0002 /* prevent file from shrinking */
#define F_SEAL_GROW 0x0004 /* prevent file from growing */
#define F_SEAL_WRITE 0x0008 /* prevent writes */
#endif
/* NOTE(review): implementations not visible here — presumably allocates a
 * memfd-backed mapping of @size bytes with @seals applied, returning the
 * mapping and the descriptor via *fd; verify against util/memfd.c. */
void *qemu_memfd_alloc(const char *name, size_t size, unsigned int seals,
int *fd);
/* Releases a mapping/descriptor pair obtained from qemu_memfd_alloc(). */
void qemu_memfd_free(void *ptr, size_t size, int fd);
/* Reports whether the host supports the memfd mechanism. */
bool qemu_memfd_check(void);
#endif /* QEMU_MEMFD_H */
| gpl-2.0 |
CreativeCimmons/ORB-SLAM-Android-app | slam_ext/Thirdparty/g2o/g2o/types/slam3d/vertex_pointxyz.cpp | 3993 | // g2o - General Graph Optimization
// Copyright (C) 2011 R. Kuemmerle, G. Grisetti, W. Burgard
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
// TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "vertex_pointxyz.h"
#include <stdio.h>
#ifdef G2O_HAVE_OPENGL
#include "../../stuff/opengl_wrapper.h"
#endif
#include <typeinfo>
namespace g2o {
bool VertexPointXYZ::read(std::istream& is) {
Vector3d lv;
for (int i=0; i<3; i++)
is >> lv[i];
setEstimate(lv);
return true;
}
bool VertexPointXYZ::write(std::ostream& os) const {
Vector3d lv=estimate();
for (int i=0; i<3; i++){
os << lv[i] << " ";
}
return os.good();
}
#ifdef G2O_HAVE_OPENGL
// Register this draw action for elements whose typeid matches VertexPointXYZ.
VertexPointXYZDrawAction::VertexPointXYZDrawAction(): DrawAction(typeid(VertexPointXYZ).name()){
}
// Refresh the cached property pointers from the action parameters.
// When parameters are available, (re)creates the POINT_SIZE float
// property with a default of 1.0; otherwise clears the cached pointer.
bool VertexPointXYZDrawAction::refreshPropertyPtrs(HyperGraphElementAction::Parameters* params_){
if (! DrawAction::refreshPropertyPtrs(params_))
return false;
if (_previousParams){
_pointSize = _previousParams->makeProperty<FloatProperty>(_typeName + "::POINT_SIZE", 1.);
} else {
_pointSize = 0;
}
return true;
}
// Render the vertex as a single OpenGL point, honoring the SHOW and
// POINT_SIZE properties.  Returns 0 when the element type does not
// match; otherwise returns this (even when drawing is skipped).
HyperGraphElementAction* VertexPointXYZDrawAction::operator()(HyperGraph::HyperGraphElement* element,
HyperGraphElementAction::Parameters* params ){
if (typeid(*element).name()!=_typeName)
return 0;
refreshPropertyPtrs(params);
if (! _previousParams)
return this;
// Respect the SHOW toggle when it exists and is off.
if (_show && !_show->value())
return this;
VertexPointXYZ* that = static_cast<VertexPointXYZ*>(element);
// Save point/lighting state so drawing leaves GL state unchanged.
glPushAttrib(GL_ENABLE_BIT | GL_POINT_BIT);
glDisable(GL_LIGHTING);
glColor3f(0.8f,0.5f,0.3f);
if (_pointSize) {
glPointSize(_pointSize->value());
}
glBegin(GL_POINTS);
glVertex3f((float)that->estimate()(0),(float)that->estimate()(1),(float)that->estimate()(2));
glEnd();
glPopAttrib();
return this;
}
#endif
// Register this gnuplot-output action for VertexPointXYZ elements.
VertexPointXYZWriteGnuplotAction::VertexPointXYZWriteGnuplotAction() :
WriteGnuplotAction(typeid(VertexPointXYZ).name())
{
}
// Write the vertex estimate as "x y z" to the gnuplot output stream.
// Returns this on success; 0 when the element type does not match or no
// usable output stream was supplied.
HyperGraphElementAction* VertexPointXYZWriteGnuplotAction::operator()(HyperGraph::HyperGraphElement* element, HyperGraphElementAction::Parameters* params_ )
{
  if (typeid(*element).name()!=_typeName)
    return 0;
  // Bug fix: guard against a null parameter object before casting and
  // dereferencing it (previously params->os dereferenced a null pointer).
  if (!params_){
    std::cerr << __PRETTY_FUNCTION__ << ": warning, no valid os specified" << std::endl;
    return 0;
  }
  WriteGnuplotAction::Parameters* params=static_cast<WriteGnuplotAction::Parameters*>(params_);
  if (!params->os){
    std::cerr << __PRETTY_FUNCTION__ << ": warning, no valid os specified" << std::endl;
    return 0;
  }
  VertexPointXYZ* v = static_cast<VertexPointXYZ*>(element);
  *(params->os) << v->estimate().x() << " " << v->estimate().y() << " " << v->estimate().z() << " " << std::endl;
  return this;
}
}
| gpl-2.0 |
SlimRoms/kernel_cyanogen_msm8916 | drivers/mfd/moon-tables.c | 117373 | /*
* moon-tables.c -- data tables for MOON class codecs
*
* Copyright 2015 Cirrus Logic
*
* Author: Nikesh Oswal <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/mfd/arizona/core.h>
#include <linux/mfd/arizona/registers.h>
#include <linux/device.h>
#include "arizona.h"
/*
 * Register patch applied to MOON rev A devices by moon_patch() below.
 * Entries are { register, value } pairs written in order.
 * NOTE(review): the 0x8A writes (0x5555/0xAAAA at the start, 0xCCCC/0x3333
 * at the end) look like an unlock/relock key sequence bracketing the
 * patch — confirm against the codec datasheet.
 */
static const struct reg_sequence moon_reva_16_patch[] = {
{ 0x8A, 0x5555 },
{ 0x8A, 0xAAAA },
{ 0x4CF, 0x0700 },
{ 0x171, 0x0003 },
{ 0x101, 0x0444 },
{ 0x159, 0x0002 },
{ 0x120, 0x0444 },
{ 0x1D1, 0x0004 },
{ 0x1E0, 0xC084 },
{ 0x159, 0x0000 },
{ 0x120, 0x0404 },
{ 0x101, 0x0404 },
{ 0x171, 0x0002 },
{ 0x17A, 0x2906 },
{ 0x19A, 0x2906 },
{ 0x441, 0xC750 },
{ 0x340, 0x0001 },
{ 0x112, 0x0405 },
{ 0x124, 0x0C49 },
{ 0x1300, 0x050E },
{ 0x1302, 0x0101 },
{ 0x1380, 0x0425 },
{ 0x1381, 0xF6D8 },
{ 0x1382, 0x0632 },
{ 0x1383, 0xFEC8 },
{ 0x1390, 0x042F },
{ 0x1391, 0xF6CA },
{ 0x1392, 0x0637 },
{ 0x1393, 0xFEC8 },
{ 0x281, 0x0000 },
{ 0x282, 0x0000 },
{ 0x4EA, 0x0100 },
{ 0x8A, 0xCCCC },
{ 0x8A, 0x3333 },
};
/* We use a function so we can use ARRAY_SIZE() */
/*
 * moon_patch() - apply the MOON rev A register patch via regmap.
 * Returns 0 on success or the error code from regmap_register_patch().
 *
 * NOTE(review): patch16 is declared as struct reg_default * but is
 * assigned the struct reg_sequence table above — confirm the two
 * layouts are interchangeable on this kernel.  The "if (patch16)" test
 * is always true after the unconditional assignment; it appears to be
 * kept for future revision-conditional patch selection.
 */
int moon_patch(struct arizona *arizona)
{
int ret;
const struct reg_default *patch16 = NULL;
unsigned int num16;
patch16 = moon_reva_16_patch;
num16 = ARRAY_SIZE(moon_reva_16_patch);
if (patch16) {
ret = regmap_register_patch(arizona->regmap, patch16, num16);
if (ret < 0) {
dev_err(arizona->dev,
"Error in applying 16-bit patch: %d\n", ret);
return ret;
}
}
return 0;
}
EXPORT_SYMBOL_GPL(moon_patch);
/*
 * Per-IRQ descriptors for the MOON codec: each entry maps an arizona
 * IRQ number to the status/mask register offset (relative to the bases
 * in moon_irq below) and the bit within that register.
 */
static const struct regmap_irq moon_irqs[ARIZONA_NUM_IRQ] = {
[ARIZONA_IRQ_BOOT_DONE] = { .reg_offset = 0,
.mask = CLEARWATER_BOOT_DONE_EINT1 },
[ARIZONA_IRQ_CTRLIF_ERR] = { .reg_offset = 0,
.mask = CLEARWATER_CTRLIF_ERR_EINT1 },
[ARIZONA_IRQ_FLL1_CLOCK_OK] = { .reg_offset = 1,
.mask = CLEARWATER_FLL1_LOCK_EINT1 },
[ARIZONA_IRQ_FLL2_CLOCK_OK] = { .reg_offset = 1,
.mask = CLEARWATER_FLL2_LOCK_EINT1},
[MOON_IRQ_FLLAO_CLOCK_OK] = { .reg_offset = 1,
.mask = MOON_FLLAO_LOCK_EINT1},
[ARIZONA_IRQ_MICDET] = { .reg_offset = 5,
.mask = CLEARWATER_MICDET_EINT1 },
[MOON_IRQ_MICDET2] = { .reg_offset = 5,
.mask = MOON_MICDET2_EINT1 },
[ARIZONA_IRQ_HPDET] = { .reg_offset = 5,
.mask = CLEARWATER_HPDET_EINT1},
[ARIZONA_IRQ_MICD_CLAMP_RISE] = { .reg_offset = 6,
.mask = CLEARWATER_MICD_CLAMP_RISE_EINT1 },
[ARIZONA_IRQ_MICD_CLAMP_FALL] = { .reg_offset = 6,
.mask = CLEARWATER_MICD_CLAMP_FALL_EINT1 },
[ARIZONA_IRQ_JD_FALL] = { .reg_offset = 6,
.mask = CLEARWATER_JD1_FALL_EINT1 },
[ARIZONA_IRQ_JD_RISE] = { .reg_offset = 6,
.mask = CLEARWATER_JD1_RISE_EINT1 },
[ARIZONA_IRQ_ASRC2_LOCK] = { .reg_offset = 8,
.mask = CLEARWATER_ASRC2_IN1_LOCK_EINT1 },
[ARIZONA_IRQ_ASRC1_LOCK] = { .reg_offset = 8,
.mask = CLEARWATER_ASRC1_IN1_LOCK_EINT1 },
[ARIZONA_IRQ_DRC2_SIG_DET] = { .reg_offset = 8,
.mask = CLEARWATER_DRC2_SIG_DET_EINT1 },
[ARIZONA_IRQ_DRC1_SIG_DET] = { .reg_offset = 8,
.mask = CLEARWATER_DRC1_SIG_DET_EINT1 },
[ARIZONA_IRQ_DSP_IRQ1] = { .reg_offset = 10,
.mask = CLEARWATER_DSP_IRQ1_EINT1},
[ARIZONA_IRQ_DSP_IRQ2] = { .reg_offset = 10,
.mask = CLEARWATER_DSP_IRQ2_EINT1},
[ARIZONA_IRQ_DSP_IRQ3] = { .reg_offset = 10,
.mask = CLEARWATER_DSP_IRQ3_EINT1},
[ARIZONA_IRQ_DSP_IRQ4] = { .reg_offset = 10,
.mask = CLEARWATER_DSP_IRQ4_EINT1},
[ARIZONA_IRQ_DSP_IRQ5] = { .reg_offset = 10,
.mask = CLEARWATER_DSP_IRQ5_EINT1},
[ARIZONA_IRQ_DSP_IRQ6] = { .reg_offset = 10,
.mask = CLEARWATER_DSP_IRQ6_EINT1},
[ARIZONA_IRQ_DSP_IRQ7] = { .reg_offset = 10,
.mask = CLEARWATER_DSP_IRQ7_EINT1},
[ARIZONA_IRQ_DSP_IRQ8] = { .reg_offset = 10,
.mask = CLEARWATER_DSP_IRQ8_EINT1},
[ARIZONA_IRQ_GP1] = { .reg_offset = 16,
.mask = CLEARWATER_GP1_EINT1},
[ARIZONA_IRQ_GP2] = { .reg_offset = 16,
.mask = CLEARWATER_GP2_EINT1},
[ARIZONA_IRQ_GP3] = { .reg_offset = 16,
.mask = CLEARWATER_GP3_EINT1},
[ARIZONA_IRQ_GP4] = { .reg_offset = 16,
.mask = CLEARWATER_GP4_EINT1},
[ARIZONA_IRQ_GP5] = { .reg_offset = 16,
.mask = CLEARWATER_GP5_EINT1},
[ARIZONA_IRQ_GP6] = { .reg_offset = 16,
.mask = CLEARWATER_GP6_EINT1},
[ARIZONA_IRQ_GP7] = { .reg_offset = 16,
.mask = CLEARWATER_GP7_EINT1},
[ARIZONA_IRQ_GP8] = { .reg_offset = 16,
.mask = CLEARWATER_GP8_EINT1},
[MOON_IRQ_DSP1_BUS_ERROR] = { .reg_offset = 32,
.mask = MOON_ADSP_ERROR_STATUS_DSP1},
[MOON_IRQ_DSP2_BUS_ERROR] = { .reg_offset = 32,
.mask = MOON_ADSP_ERROR_STATUS_DSP2},
[MOON_IRQ_DSP3_BUS_ERROR] = { .reg_offset = 32,
.mask = MOON_ADSP_ERROR_STATUS_DSP3},
[MOON_IRQ_DSP4_BUS_ERROR] = { .reg_offset = 32,
.mask = MOON_ADSP_ERROR_STATUS_DSP4},
[MOON_IRQ_DSP5_BUS_ERROR] = { .reg_offset = 32,
.mask = MOON_ADSP_ERROR_STATUS_DSP5},
[MOON_IRQ_DSP6_BUS_ERROR] = { .reg_offset = 32,
.mask = MOON_ADSP_ERROR_STATUS_DSP6},
[MOON_IRQ_DSP7_BUS_ERROR] = { .reg_offset = 32,
.mask = MOON_ADSP_ERROR_STATUS_DSP7},
};
/*
 * regmap IRQ chip description for the MOON codec.  Status and ack share
 * the same base register block; 33 registers cover all status banks
 * referenced by the reg_offset values in moon_irqs.
 */
const struct regmap_irq_chip moon_irq = {
.name = "moon IRQ",
.status_base = CLEARWATER_IRQ1_STATUS_1,
.mask_base = CLEARWATER_IRQ1_MASK_1,
.ack_base = CLEARWATER_IRQ1_STATUS_1,
.num_regs = 33,
.irqs = moon_irqs,
.num_irqs = ARRAY_SIZE(moon_irqs),
};
EXPORT_SYMBOL_GPL(moon_irq);
static const struct reg_default moon_reg_default[] = {
{ 0x00000008, 0x0308 }, /* R8 - Ctrl IF CFG 1 */
{ 0x00000009, 0x0200 }, /* R9 - Ctrl IF CFG 2 */
{ 0x0000000A, 0x0308 }, /* R10 - Ctrl IF CFG 3 */
{ 0x00000020, 0x0000 }, /* R32 (0x20) - Tone Generator 1 */
{ 0x00000021, 0x1000 }, /* R33 (0x21) - Tone Generator 2 */
{ 0x00000022, 0x0000 }, /* R34 (0x22) - Tone Generator 3 */
{ 0x00000023, 0x1000 }, /* R35 (0x23) - Tone Generator 4 */
{ 0x00000024, 0x0000 }, /* R36 (0x24) - Tone Generator 5 */
{ 0x00000030, 0x0000 }, /* R48 (0x30) - PWM Drive 1 */
{ 0x00000031, 0x0100 }, /* R49 (0x31) - PWM Drive 2 */
{ 0x00000032, 0x0100 }, /* R50 (0x32) - PWM Drive 3 */
{ 0x00000041, 0x0000 }, /* R65 (0x41) - Sequence control */
{ 0x00000061, 0x01ff }, /* R97 (0x61) - Sample Rate Sequence Select 1 */
{ 0x00000062, 0x01ff }, /* R98 (0x62) - Sample Rate Sequence Select 2 */
{ 0x00000063, 0x01ff }, /* R99 (0x63) - Sample Rate Sequence Select 3 */
{ 0x00000064, 0x01ff }, /* R100 (0x64) - Sample Rate Sequence Select 4 */
{ 0x00000066, 0x01ff },
{ 0x00000067, 0x01ff },
{ 0x00000068, 0x01ff }, /* R104 (0x68) - Always On Triggers Sequence Select 1 */
{ 0x00000069, 0x01ff }, /* R105 (0x69) - Always On Triggers Sequence Select 2 */
{ 0x0000006a, 0x01ff }, /* R106 (0x6A) - Always On Triggers Sequence Select 3 */
{ 0x0000006b, 0x01ff }, /* R107 (0x6B) - Always On Triggers Sequence Select 4 */
{ 0x00000090, 0x0000 }, /* R144 (0x90) - Haptics Control 1 */
{ 0x00000091, 0x7fff }, /* R145 (0x91) - Haptics Control 2 */
{ 0x00000092, 0x0000 }, /* R146 (0x92) - Haptics phase 1 intensity */
{ 0x00000093, 0x0000 }, /* R147 (0x93) - Haptics phase 1 duration */
{ 0x00000094, 0x0000 }, /* R148 (0x94) - Haptics phase 2 intensity */
{ 0x00000095, 0x0000 }, /* R149 (0x95) - Haptics phase 2 duration */
{ 0x00000096, 0x0000 }, /* R150 (0x96) - Haptics phase 3 intensity */
{ 0x00000097, 0x0000 }, /* R151 (0x97) - Haptics phase 3 duration */
{ 0x000000A0, 0x0000 }, /* R160 (0xA0) - Clearwater Comfort Noise Generator */
{ 0x00000100, 0x0002 }, /* R256 (0x100) - Clock 32k 1 */
{ 0x00000101, 0x0404 }, /* R257 (0x101) - System Clock 1 */
{ 0x00000102, 0x0011 }, /* R258 (0x102) - Sample rate 1 */
{ 0x00000103, 0x0011 }, /* R259 (0x103) - Sample rate 2 */
{ 0x00000104, 0x0011 }, /* R260 (0x104) - Sample rate 3 */
{ 0x00000112, 0x0405 }, /* R274 (0x112) - Async clock 1 */
{ 0x00000113, 0x0011 }, /* R275 (0x113) - Async sample rate 1 */
{ 0x00000114, 0x0011 }, /* R276 (0x114) - Async sample rate 2 */
{ 0x00000120, 0x0404 },
{ 0x00000122, 0x0000 },
{ 0x00000149, 0x0000 }, /* R329 (0x149) - Output system clock */
{ 0x0000014a, 0x0000 }, /* R330 (0x14A) - Output async clock */
{ 0x00000152, 0x0000 }, /* R338 (0x152) - Rate Estimator 1 */
{ 0x00000153, 0x0000 }, /* R339 (0x153) - Rate Estimator 2 */
{ 0x00000154, 0x0000 }, /* R340 (0x154) - Rate Estimator 3 */
{ 0x00000155, 0x0000 }, /* R341 (0x155) - Rate Estimator 4 */
{ 0x00000156, 0x0000 }, /* R342 (0x156) - Rate Estimator 5 */
{ 0x00000171, 0x0002 }, /* R369 (0x171) - FLL1 Control 1 */
{ 0x00000172, 0x0008 }, /* R370 (0x172) - FLL1 Control 2 */
{ 0x00000173, 0x0018 }, /* R371 (0x173) - FLL1 Control 3 */
{ 0x00000174, 0x007d }, /* R372 (0x174) - FLL1 Control 4 */
{ 0x00000175, 0x0000 }, /* R373 - FLL1 Control 5 */
{ 0x00000176, 0x0000 }, /* R374 (0x176) - FLL1 Control 6 */
{ 0x00000177, 0x0281 }, /* R375 (0x177) - FLL1 Loop Filter Test 1 */
{ 0x00000178, 0x0000 },
{ 0x00000179, 0x0000 }, /* R377 (0x179) - FLL1 Control 7 */
{ 0x0000017a, 0x2906 }, /* R377 (0x17A) - FLL1 Efs 2 */
{ 0x00000181, 0x0000 }, /* R385 (0x181) - FLL1 Synchroniser 1 */
{ 0x00000182, 0x0000 }, /* R386 (0x182) - FLL1 Synchroniser 2 */
{ 0x00000183, 0x0000 }, /* R387 (0x183) - FLL1 Synchroniser 3 */
{ 0x00000184, 0x0000 }, /* R388 (0x184) - FLL1 Synchroniser 4 */
{ 0x00000185, 0x0000 }, /* R389 (0x185) - FLL1 Synchroniser 5 */
{ 0x00000186, 0x0000 }, /* R390 (0x186) - FLL1 Synchroniser 6 */
{ 0x00000187, 0x0001 }, /* R391 (0x187) - FLL1 Synchroniser 7 */
{ 0x00000189, 0x0000 }, /* R393 (0x189) - FLL1 Spread Spectrum */
{ 0x0000018a, 0x0004 }, /* R394 (0x18A) - FLL1 GPIO Clock */
{ 0x00000191, 0x0002 }, /* R401 (0x191) - FLL2 Control 1 */
{ 0x00000192, 0x0008 }, /* R402 (0x192) - FLL2 Control 2 */
{ 0x00000193, 0x0018 }, /* R403 (0x193) - FLL2 Control 3 */
{ 0x00000194, 0x007d }, /* R404 (0x194) - FLL2 Control 4 */
{ 0x00000195, 0x0000 }, /* R405 - FLL2 Control 5 */
{ 0x00000196, 0x0000 }, /* R406 (0x196) - FLL2 Control 6 */
{ 0x00000197, 0x0281 }, /* R407 (0x197) - FLL2 Loop Filter Test 1 */
{ 0x00000198, 0x0000 },
{ 0x00000199, 0x0000 }, /* R409 (0x199) - FLL2 Control 7 */
{ 0x0000019a, 0x2906 }, /* R410 (0x19A) - FLL2 Efs 2 */
{ 0x000001a1, 0x0000 }, /* R417 (0x1A1) - FLL2 Synchroniser 1 */
{ 0x000001a2, 0x0000 }, /* R418 (0x1A2) - FLL2 Synchroniser 2 */
{ 0x000001a3, 0x0000 }, /* R419 (0x1A3) - FLL2 Synchroniser 3 */
{ 0x000001a4, 0x0000 }, /* R420 (0x1A4) - FLL2 Synchroniser 4 */
{ 0x000001a5, 0x0000 }, /* R421 (0x1A5) - FLL2 Synchroniser 5 */
{ 0x000001a6, 0x0000 }, /* R422 (0x1A6) - FLL2 Synchroniser 6 */
{ 0x000001a7, 0x0001 }, /* R423 (0x1A7) - FLL2 Synchroniser 7 */
{ 0x000001a9, 0x0000 }, /* R425 (0x1A9) - FLL2 Spread Spectrum */
{ 0x000001aa, 0x0004 }, /* R426 (0x1AA) - FLL2 GPIO Clock */
{ 0x000001D1, 0x0004 }, /* R465 - FLLAO_CONTROL_1 */
{ 0x000001D2, 0x0004 }, /* R466 - FLLAO_CONTROL_2 */
{ 0x000001D3, 0x0000 }, /* R467 - FLLAO_CONTROL_3 */
{ 0x000001D4, 0x0000 }, /* R468 - FLLAO_CONTROL_4 */
{ 0x000001D5, 0x0001 }, /* R469 - FLLAO_CONTROL_5 */
{ 0x000001D6, 0x8004 }, /* R470 - FLLAO_CONTROL_6 */
{ 0x000001D8, 0x0000 }, /* R472 - FLLAO_CONTROL_7 */
{ 0x000001DA, 0x0070 }, /* R474 - FLLAO_CONTROL_8 */
{ 0x000001DB, 0x0000 }, /* R475 - FLLAO_CONTROL_9 */
{ 0x000001DC, 0x06DA }, /* R476 - FLLAO_CONTROL_10 */
{ 0x000001DD, 0x0011 }, /* R477 - FLLAO_CONTROL_11 */
{ 0x00000200, 0x0006 }, /* R512 (0x200) - Mic Charge Pump 1 */
{ 0x00000213, 0x03e4 }, /* R531 (0x213) - LDO2 Control 1 */
{ 0x00000218, 0x00e6 }, /* R536 (0x218) - Mic Bias Ctrl 1 */
{ 0x00000219, 0x00e6 }, /* R537 (0x219) - Mic Bias Ctrl 2 */
{ 0x0000021C, 0x2222 }, /* R540 - Mic Bias Ctrl 5 */
{ 0x0000021E, 0x2222 }, /* R542 - Mic Bias Ctrl 6 */
{ 0x0000027e, 0x0000 }, /* R638 (0x27E) - Clearwater EDRE HP stereo control */
{ 0x00000293, 0x0080 }, /* R659 (0x293) - Accessory Detect Mode 1 */
{ 0x00000299, 0x0000 }, /* R665 (0x299) - Headphone Detect 0 */
{ 0x0000029b, 0x0000 }, /* R667 (0x29B) - Headphone Detect 1 */
{ 0x000002a2, 0x0010 }, /* R674 (0x2A2) - Mic Detect 0 */
{ 0x000002a3, 0x1102 }, /* R675 (0x2A3) - Mic Detect 1 */
{ 0x000002a4, 0x009f }, /* R676 (0x2A4) - Mic Detect 2 */
{ 0x000002a6, 0x3d3d },
{ 0x000002a7, 0x3d3d },
{ 0x000002a8, 0x333d },
{ 0x000002a9, 0x202d },
{ 0x000002b2, 0x0010 }, /* R690 (0x2B2) - MicDetect2-0 */
{ 0x000002b3, 0x1102 }, /* R691 (0x2B3) - MicDetect2-1 */
{ 0x000002b4, 0x009f }, /* R692 (0x2B4) - MicDetect2-2 */
{ 0x000002b6, 0x3D3D },
{ 0x000002b7, 0x3D3D },
{ 0x000002b8, 0x333D },
{ 0x000002b9, 0x202D },
{ 0x000002c6, 0x0010 },
{ 0x000002c8, 0x0000 }, /* R712 (0x2C8) - GP switch 1 */
{ 0x000002d3, 0x0000 }, /* R723 (0x2D3) - Jack detect analogue */
{ 0x00000300, 0x0000 }, /* R666 (0x300) - input_enable */
{ 0x00000308, 0x0400 }, /* R776 (0x308) - Input Rate */
{ 0x00000309, 0x0022 }, /* R777 (0x309) - Input Volume Ramp */
{ 0x0000030c, 0x0002 }, /* R780 (0x30C) - HPF Control */
{ 0x00000310, 0x0080 }, /* R784 (0x310) - IN1L Control */
{ 0x00000311, 0x0180 }, /* R785 (0x311) - ADC Digital Volume 1L */
{ 0x00000312, 0x0500 }, /* R786 (0x312) - DMIC1L Control */
{ 0x00000313, 0x0000 }, /* R787 (0x313) - IN1L Rate Control */
{ 0x00000314, 0x0080 }, /* R788 (0x314) - IN1R Control */
{ 0x00000315, 0x0180 }, /* R789 (0x315) - ADC Digital Volume 1R */
{ 0x00000316, 0x0000 }, /* R790 (0x316) - DMIC1R Control */
{ 0x00000317, 0x0000 }, /* R791 (0x317) - IN1R Rate Control */
{ 0x00000318, 0x0080 }, /* R792 (0x318) - IN2L Control */
{ 0x00000319, 0x0180 }, /* R793 (0x319) - ADC Digital Volume 2L */
{ 0x0000031a, 0x0500 }, /* R794 (0x31A) - DMIC2L Control */
{ 0x0000031b, 0x0000 }, /* R795 (0x31B) - IN2L Rate Control */
{ 0x0000031c, 0x0080 }, /* R796 (0x31C) - IN2R Control */
{ 0x0000031d, 0x0180 }, /* R797 (0x31D) - ADC Digital Volume 2R */
{ 0x0000031e, 0x0000 }, /* R798 (0x31E) - DMIC2R Control */
{ 0x0000031f, 0x0000 }, /* R799 (0x31F) - IN2R Rate Control */
{ 0x00000320, 0x0000 }, /* R800 (0x320) - IN3L Control */
{ 0x00000321, 0x0180 }, /* R801 (0x321) - ADC Digital Volume 3L */
{ 0x00000322, 0x0500 }, /* R802 (0x322) - DMIC3L Control */
{ 0x00000323, 0x0000 }, /* R803 (0x323) - IN3L Rate Control */
{ 0x00000324, 0x0000 }, /* R804 (0x324) - IN3R Control */
{ 0x00000325, 0x0180 }, /* R805 (0x325) - ADC Digital Volume 3R */
{ 0x00000326, 0x0000 }, /* R806 (0x326) - DMIC3R Control */
{ 0x00000327, 0x0000 }, /* R807 (0x327) - IN3R Rate Control */
{ 0x00000328, 0x0000 }, /* R808 (0x328) - IN4 Control */
{ 0x00000329, 0x0180 }, /* R809 (0x329) - ADC Digital Volume 4L */
{ 0x0000032a, 0x0500 }, /* R810 (0x32A) - DMIC4L Control */
{ 0x0000032b, 0x0000 }, /* R811 (0x32B) - IN4L Rate Control */
{ 0x0000032c, 0x0000 }, /* R812 (0x32C) - IN4R Control */
{ 0x0000032d, 0x0180 }, /* R813 (0x32D) - ADC Digital Volume 4R */
{ 0x0000032e, 0x0000 }, /* R814 (0x32E) - DMIC4R Control */
{ 0x0000032f, 0x0000 }, /* R815 (0x32F) - IN4R Rate Control */
{ 0x00000330, 0x0000 }, /* R816 - IN5L Control */
{ 0x00000331, 0x0180 }, /* R817 - ADC Digital Volume 5L */
{ 0x00000332, 0x0500 }, /* R818 - DMIC5L Control */
{ 0x00000333, 0x0000 }, /* R819 (0x333) - IN5L Rate Control */
{ 0x00000334, 0x0000 }, /* R820 - IN5R Control */
{ 0x00000335, 0x0180 }, /* R821 - ADC Digital Volume 5R */
{ 0x00000336, 0x0000 }, /* R822 - DMIC5R Control */
{ 0x00000337, 0x0000 }, /* R823 (0x337) - IN5R Rate Control */
{ 0x00000400, 0x0000 }, /* R1024 (0x400) - Output Enables 1 */
{ 0x00000408, 0x0000 }, /* R1032 (0x408) - Output Rate 1 */
{ 0x00000409, 0x0022 }, /* R1033 (0x409) - Output Volume Ramp */
{ 0x00000410, 0x0080 }, /* R1040 (0x410) - Output Path Config 1L */
{ 0x00000411, 0x0180 }, /* R1041 (0x411) - DAC Digital Volume 1L */
{ 0x00000412, 0x0000 }, /* R1042 (0x412) - moon_out1_config */
{ 0x00000413, 0x0001 }, /* R1043 (0x413) - Noise Gate Select 1L */
{ 0x00000414, 0x0080 }, /* R1044 (0x414) - Output Path Config 1R */
{ 0x00000415, 0x0180 }, /* R1045 (0x415) - DAC Digital Volume 1R */
{ 0x00000417, 0x0002 }, /* R1047 (0x417) - Noise Gate Select 1R */
{ 0x00000418, 0x0080 }, /* R1048 (0x418) - Output Path Config 2L */
{ 0x00000419, 0x0180 }, /* R1049 (0x419) - DAC Digital Volume 2L */
{ 0x0000041A, 0x0002 }, /* R1050 (0x41A) - moon_out2_config */
{ 0x0000041b, 0x0004 }, /* R1051 (0x41B) - Noise Gate Select 2L */
{ 0x0000041c, 0x0080 }, /* R1052 (0x41C) - Output Path Config 2R */
{ 0x0000041d, 0x0180 }, /* R1053 (0x41D) - DAC Digital Volume 2R */
{ 0x0000041f, 0x0008 }, /* R1055 (0x41F) - Noise Gate Select 2R */
{ 0x00000420, 0x0080 }, /* R1056 (0x420) - Output Path Config 3L */
{ 0x00000421, 0x0180 }, /* R1057 (0x421) - DAC Digital Volume 3L */
{ 0x00000423, 0x0010 }, /* R1059 (0x423) - Noise Gate Select 3L */
{ 0x00000424, 0x0080 }, /* R1060 (0x424) - Output Path Config 3R */
{ 0x00000425, 0x0180 }, /* R1061 (0x425) - DAC Digital Volume 3R */
{ 0x00000427, 0x0020 },
{ 0x00000430, 0x0000 }, /* R1072 (0x430) - Output Path Config 5L */
{ 0x00000431, 0x0180 }, /* R1073 (0x431) - DAC Digital Volume 5L */
{ 0x00000433, 0x0100 }, /* R1075 (0x433) - Noise Gate Select 5L */
{ 0x00000434, 0x0000 }, /* R1076 (0x434) - Output Path Config 5R */
{ 0x00000435, 0x0180 }, /* R1077 (0x435) - DAC Digital Volume 5R */
{ 0x00000437, 0x0200 }, /* R1079 (0x437) - Noise Gate Select 5R */
{ 0x00000440, 0x003f }, /* R1088 (0x440) - DRE Enable */
{ 0x00000448, 0x003f }, /* R1096 (0x448) - eDRE Enable */
{ 0x00000450, 0x0000 }, /* R1104 (0x450) - DAC AEC Control 1 */
{ 0x00000458, 0x0000 }, /* R1112 (0x458) - Noise Gate Control */
{ 0x00000490, 0x0069 }, /* R1168 (0x490) - PDM SPK1 CTRL 1 */
{ 0x00000491, 0x0000 }, /* R1169 (0x491) - PDM SPK1 CTRL 2 */
{ 0x000004A0, 0x3080 }, /* R1184 - HP1 Short Circuit Ctrl */
{ 0x000004A1, 0x3000 }, /* R1185 - HP2 Short Circuit Ctrl */
{ 0x000004A2, 0x3000 }, /* R1186 - HP3 Short Circuit Ctrl */
{ 0x000004A8, 0x7120 }, /* R1192 - HP Test Ctrl 5 */
{ 0x000004A9, 0x7120 }, /* R1193 - HP Test Ctrl 6 */
{ 0x00000500, 0x000c }, /* R1280 (0x500) - AIF1 BCLK Ctrl */
{ 0x00000501, 0x0000 }, /* R1281 (0x501) - AIF1 Tx Pin Ctrl */
{ 0x00000502, 0x0000 }, /* R1282 (0x502) - AIF1 Rx Pin Ctrl */
{ 0x00000503, 0x0000 }, /* R1283 (0x503) - AIF1 Rate Ctrl */
{ 0x00000504, 0x0000 }, /* R1284 (0x504) - AIF1 Format */
{ 0x00000505, 0x0040 }, /* R1285 (0x505) - AIF1 Tx BCLK Rate */
{ 0x00000506, 0x0040 }, /* R1286 (0x506) - AIF1 Rx BCLK Rate */
{ 0x00000507, 0x1818 }, /* R1287 (0x507) - AIF1 Frame Ctrl 1 */
{ 0x00000508, 0x1818 }, /* R1288 (0x508) - AIF1 Frame Ctrl 2 */
{ 0x00000509, 0x0000 }, /* R1289 (0x509) - AIF1 Frame Ctrl 3 */
{ 0x0000050a, 0x0001 }, /* R1290 (0x50A) - AIF1 Frame Ctrl 4 */
{ 0x0000050b, 0x0002 }, /* R1291 (0x50B) - AIF1 Frame Ctrl 5 */
{ 0x0000050c, 0x0003 }, /* R1292 (0x50C) - AIF1 Frame Ctrl 6 */
{ 0x0000050d, 0x0004 }, /* R1293 (0x50D) - AIF1 Frame Ctrl 7 */
{ 0x0000050e, 0x0005 }, /* R1294 (0x50E) - AIF1 Frame Ctrl 8 */
{ 0x0000050f, 0x0006 }, /* R1295 (0x50F) - AIF1 Frame Ctrl 9 */
{ 0x00000510, 0x0007 }, /* R1296 (0x510) - AIF1 Frame Ctrl 10 */
{ 0x00000511, 0x0000 }, /* R1297 (0x511) - AIF1 Frame Ctrl 11 */
{ 0x00000512, 0x0001 }, /* R1298 (0x512) - AIF1 Frame Ctrl 12 */
{ 0x00000513, 0x0002 }, /* R1299 (0x513) - AIF1 Frame Ctrl 13 */
{ 0x00000514, 0x0003 }, /* R1300 (0x514) - AIF1 Frame Ctrl 14 */
{ 0x00000515, 0x0004 }, /* R1301 (0x515) - AIF1 Frame Ctrl 15 */
{ 0x00000516, 0x0005 }, /* R1302 (0x516) - AIF1 Frame Ctrl 16 */
{ 0x00000517, 0x0006 }, /* R1303 (0x517) - AIF1 Frame Ctrl 17 */
{ 0x00000518, 0x0007 }, /* R1304 (0x518) - AIF1 Frame Ctrl 18 */
{ 0x00000519, 0x0000 }, /* R1305 (0x519) - AIF1 Tx Enables */
{ 0x0000051a, 0x0000 }, /* R1306 (0x51A) - AIF1 Rx Enables */
{ 0x00000540, 0x000c }, /* R1344 (0x540) - AIF2 BCLK Ctrl */
{ 0x00000541, 0x0000 }, /* R1345 (0x541) - AIF2 Tx Pin Ctrl */
{ 0x00000542, 0x0000 }, /* R1346 (0x542) - AIF2 Rx Pin Ctrl */
{ 0x00000543, 0x0000 }, /* R1347 (0x543) - AIF2 Rate Ctrl */
{ 0x00000544, 0x0000 }, /* R1348 (0x544) - AIF2 Format */
{ 0x00000545, 0x0040 }, /* R1349 (0x545) - AIF2 Tx BCLK Rate */
{ 0x00000546, 0x0040 }, /* R1350 (0x546) - AIF2 Rx BCLK Rate */
{ 0x00000547, 0x1818 }, /* R1351 (0x547) - AIF2 Frame Ctrl 1 */
{ 0x00000548, 0x1818 }, /* R1352 (0x548) - AIF2 Frame Ctrl 2 */
{ 0x00000549, 0x0000 }, /* R1353 (0x549) - AIF2 Frame Ctrl 3 */
{ 0x0000054a, 0x0001 }, /* R1354 (0x54A) - AIF2 Frame Ctrl 4 */
{ 0x0000054b, 0x0002 }, /* R1355 (0x54B) - AIF2 Frame Ctrl 5 */
{ 0x0000054c, 0x0003 }, /* R1356 (0x54C) - AIF2 Frame Ctrl 6 */
{ 0x0000054d, 0x0004 }, /* R1357 (0x54D) - AIF2 Frame Ctrl 7 */
{ 0x0000054e, 0x0005 }, /* R1358 (0x54E) - AIF2 Frame Ctrl 8 */
{ 0x0000054F, 0x0006 }, /* R1359 - AIF2 Frame Ctrl 9 */
{ 0x00000550, 0x0007 }, /* R1360 - AIF2 Frame Ctrl 10 */
{ 0x00000551, 0x0000 }, /* R1361 (0x551) - AIF2 Frame Ctrl 11 */
{ 0x00000552, 0x0001 }, /* R1362 (0x552) - AIF2 Frame Ctrl 12 */
{ 0x00000553, 0x0002 }, /* R1363 (0x553) - AIF2 Frame Ctrl 13 */
{ 0x00000554, 0x0003 }, /* R1364 (0x554) - AIF2 Frame Ctrl 14 */
{ 0x00000555, 0x0004 }, /* R1365 (0x555) - AIF2 Frame Ctrl 15 */
{ 0x00000556, 0x0005 }, /* R1366 (0x556) - AIF2 Frame Ctrl 16 */
{ 0x00000557, 0x0006 }, /* R1367 - AIF2 Frame Ctrl 17 */
{ 0x00000558, 0x0007 }, /* R1368 - AIF2 Frame Ctrl 18 */
{ 0x00000559, 0x0000 }, /* R1369 (0x559) - AIF2 Tx Enables */
{ 0x0000055a, 0x0000 }, /* R1370 (0x55A) - AIF2 Rx Enables */
{ 0x00000580, 0x000c }, /* R1408 (0x580) - AIF3 BCLK Ctrl */
{ 0x00000581, 0x0000 }, /* R1409 (0x581) - AIF3 Tx Pin Ctrl */
{ 0x00000582, 0x0000 }, /* R1410 (0x582) - AIF3 Rx Pin Ctrl */
{ 0x00000583, 0x0000 }, /* R1411 (0x583) - AIF3 Rate Ctrl */
{ 0x00000584, 0x0000 }, /* R1412 (0x584) - AIF3 Format */
{ 0x00000585, 0x0040 }, /* R1413 (0x585) - AIF3 Tx BCLK Rate */
{ 0x00000586, 0x0040 }, /* R1414 (0x586) - AIF3 Rx BCLK Rate */
{ 0x00000587, 0x1818 }, /* R1415 (0x587) - AIF3 Frame Ctrl 1 */
{ 0x00000588, 0x1818 }, /* R1416 (0x588) - AIF3 Frame Ctrl 2 */
{ 0x00000589, 0x0000 }, /* R1417 (0x589) - AIF3 Frame Ctrl 3 */
{ 0x0000058a, 0x0001 }, /* R1418 (0x58A) - AIF3 Frame Ctrl 4 */
{ 0x00000591, 0x0000 }, /* R1425 (0x591) - AIF3 Frame Ctrl 11 */
{ 0x00000592, 0x0001 }, /* R1426 (0x592) - AIF3 Frame Ctrl 12 */
{ 0x00000599, 0x0000 }, /* R1433 (0x599) - AIF3 Tx Enables */
{ 0x0000059a, 0x0000 }, /* R1434 (0x59A) - AIF3 Rx Enables */
{ 0x000005a0, 0x000c }, /* R1440 - AIF4 BCLK Ctrl */
{ 0x000005a1, 0x0000 }, /* R1441 - AIF4 Tx Pin Ctrl */
{ 0x000005a2, 0x0000 }, /* R1442 - AIF4 Rx Pin Ctrl */
{ 0x000005a3, 0x0000 }, /* R1443 - AIF4 Rate Ctrl */
{ 0x000005a4, 0x0000 }, /* R1444 - AIF4 Format */
{ 0x000005a5, 0x0040 }, /* R1445 - AIF4 Tx BCLK Rate */
{ 0x000005a6, 0x0040 }, /* R1446 - AIF4 Rx BCLK Rate */
{ 0x000005a7, 0x1818 }, /* R1447 - AIF4 Frame Ctrl 1 */
{ 0x000005a8, 0x1818 }, /* R1448 - AIF4 Frame Ctrl 2 */
{ 0x000005a9, 0x0000 }, /* R1449 - AIF4 Frame Ctrl 3 */
{ 0x000005aa, 0x0001 }, /* R1450 - AIF4 Frame Ctrl 4 */
{ 0x000005b1, 0x0000 }, /* R1457 - AIF4 Frame Ctrl 11 */
{ 0x000005b2, 0x0001 }, /* R1458 - AIF4 Frame Ctrl 12 */
{ 0x000005b9, 0x0000 }, /* R1465 - AIF4 Tx Enables */
{ 0x000005ba, 0x0000 }, /* R1466 - AIF4 Rx Enables */
{ 0x000005C2, 0x0000 }, /* R1474 - SPD1 TX Control */
{ 0x000005e3, 0x0000 }, /* R1507 (0x5E3) - SLIMbus Framer Ref Gear */
{ 0x000005e5, 0x0000 }, /* R1509 (0x5E5) - SLIMbus Rates 1 */
{ 0x000005e6, 0x0000 }, /* R1510 (0x5E6) - SLIMbus Rates 2 */
{ 0x000005e7, 0x0000 }, /* R1511 (0x5E7) - SLIMbus Rates 3 */
{ 0x000005e8, 0x0000 }, /* R1512 (0x5E8) - SLIMbus Rates 4 */
{ 0x000005e9, 0x0000 }, /* R1513 (0x5E9) - SLIMbus Rates 5 */
{ 0x000005ea, 0x0000 }, /* R1514 (0x5EA) - SLIMbus Rates 6 */
{ 0x000005eb, 0x0000 }, /* R1515 (0x5EB) - SLIMbus Rates 7 */
{ 0x000005ec, 0x0000 }, /* R1516 (0x5EC) - SLIMbus Rates 8 */
{ 0x000005f5, 0x0000 }, /* R1525 (0x5F5) - SLIMbus RX Channel Enable */
{ 0x000005f6, 0x0000 }, /* R1526 (0x5F6) - SLIMbus TX Channel Enable */
{ 0x00000640, 0x0000 },
{ 0x00000641, 0x0080 },
{ 0x00000642, 0x0000 },
{ 0x00000643, 0x0080 },
{ 0x00000644, 0x0000 },
{ 0x00000645, 0x0080 },
{ 0x00000646, 0x0000 },
{ 0x00000647, 0x0080 },
{ 0x00000648, 0x0000 },
{ 0x00000649, 0x0080 },
{ 0x0000064a, 0x0000 },
{ 0x0000064b, 0x0080 },
{ 0x0000064c, 0x0000 },
{ 0x0000064d, 0x0080 },
{ 0x0000064e, 0x0000 },
{ 0x0000064f, 0x0080 },
{ 0x00000680, 0x0000 },
{ 0x00000681, 0x0080 },
{ 0x00000682, 0x0000 },
{ 0x00000683, 0x0080 },
{ 0x00000684, 0x0000 },
{ 0x00000685, 0x0080 },
{ 0x00000686, 0x0000 },
{ 0x00000687, 0x0080 },
{ 0x00000688, 0x0000 },
{ 0x00000689, 0x0080 },
{ 0x0000068a, 0x0000 },
{ 0x0000068b, 0x0080 },
{ 0x0000068c, 0x0000 },
{ 0x0000068d, 0x0080 },
{ 0x0000068e, 0x0000 },
{ 0x0000068f, 0x0080 },
{ 0x00000690, 0x0000 },
{ 0x00000691, 0x0080 },
{ 0x00000692, 0x0000 },
{ 0x00000693, 0x0080 },
{ 0x00000694, 0x0000 },
{ 0x00000695, 0x0080 },
{ 0x00000696, 0x0000 },
{ 0x00000697, 0x0080 },
{ 0x00000698, 0x0000 },
{ 0x00000699, 0x0080 },
{ 0x0000069a, 0x0000 },
{ 0x0000069b, 0x0080 },
{ 0x0000069c, 0x0000 },
{ 0x0000069d, 0x0080 },
{ 0x0000069e, 0x0000 },
{ 0x0000069f, 0x0080 },
{ 0x000006a0, 0x0000 },
{ 0x000006a1, 0x0080 },
{ 0x000006a2, 0x0000 },
{ 0x000006a3, 0x0080 },
{ 0x000006a4, 0x0000 },
{ 0x000006a5, 0x0080 },
{ 0x000006a6, 0x0000 },
{ 0x000006a7, 0x0080 },
{ 0x000006a8, 0x0000 },
{ 0x000006a9, 0x0080 },
{ 0x000006aa, 0x0000 },
{ 0x000006ab, 0x0080 },
{ 0x000006ac, 0x0000 },
{ 0x000006ad, 0x0080 },
{ 0x000006ae, 0x0000 },
{ 0x000006af, 0x0080 },
{ 0x000006c0, 0x0000 },
{ 0x000006c1, 0x0080 },
{ 0x000006c2, 0x0000 },
{ 0x000006c3, 0x0080 },
{ 0x000006c4, 0x0000 },
{ 0x000006c5, 0x0080 },
{ 0x000006c6, 0x0000 },
{ 0x000006c7, 0x0080 },
{ 0x000006c8, 0x0000 },
{ 0x000006c9, 0x0080 },
{ 0x000006ca, 0x0000 },
{ 0x000006cb, 0x0080 },
{ 0x000006cc, 0x0000 },
{ 0x000006cd, 0x0080 },
{ 0x000006ce, 0x0000 },
{ 0x000006cf, 0x0080 },
{ 0x00000700, 0x0000 },
{ 0x00000701, 0x0080 },
{ 0x00000702, 0x0000 },
{ 0x00000703, 0x0080 },
{ 0x00000704, 0x0000 },
{ 0x00000705, 0x0080 },
{ 0x00000706, 0x0000 },
{ 0x00000707, 0x0080 },
{ 0x00000708, 0x0000 },
{ 0x00000709, 0x0080 },
{ 0x0000070a, 0x0000 },
{ 0x0000070b, 0x0080 },
{ 0x0000070c, 0x0000 },
{ 0x0000070d, 0x0080 },
{ 0x0000070e, 0x0000 },
{ 0x0000070f, 0x0080 },
{ 0x00000710, 0x0000 },
{ 0x00000711, 0x0080 },
{ 0x00000712, 0x0000 },
{ 0x00000713, 0x0080 },
{ 0x00000714, 0x0000 },
{ 0x00000715, 0x0080 },
{ 0x00000716, 0x0000 },
{ 0x00000717, 0x0080 },
{ 0x00000718, 0x0000 },
{ 0x00000719, 0x0080 },
{ 0x0000071a, 0x0000 },
{ 0x0000071b, 0x0080 },
{ 0x0000071c, 0x0000 },
{ 0x0000071d, 0x0080 },
{ 0x0000071e, 0x0000 },
{ 0x0000071f, 0x0080 },
{ 0x00000720, 0x0000 },
{ 0x00000721, 0x0080 },
{ 0x00000722, 0x0000 },
{ 0x00000723, 0x0080 },
{ 0x00000724, 0x0000 },
{ 0x00000725, 0x0080 },
{ 0x00000726, 0x0000 },
{ 0x00000727, 0x0080 },
{ 0x00000728, 0x0000 },
{ 0x00000729, 0x0080 },
{ 0x0000072a, 0x0000 },
{ 0x0000072b, 0x0080 },
{ 0x0000072c, 0x0000 },
{ 0x0000072d, 0x0080 },
{ 0x0000072e, 0x0000 },
{ 0x0000072f, 0x0080 },
{ 0x00000730, 0x0000 },
{ 0x00000731, 0x0080 },
{ 0x00000732, 0x0000 },
{ 0x00000733, 0x0080 },
{ 0x00000734, 0x0000 },
{ 0x00000735, 0x0080 },
{ 0x00000736, 0x0000 },
{ 0x00000737, 0x0080 },
{ 0x00000738, 0x0000 },
{ 0x00000739, 0x0080 },
{ 0x0000073a, 0x0000 },
{ 0x0000073b, 0x0080 },
{ 0x0000073c, 0x0000 },
{ 0x0000073d, 0x0080 },
{ 0x0000073e, 0x0000 },
{ 0x0000073f, 0x0080 },
{ 0x00000740, 0x0000 },
{ 0x00000741, 0x0080 },
{ 0x00000742, 0x0000 },
{ 0x00000743, 0x0080 },
{ 0x00000744, 0x0000 },
{ 0x00000745, 0x0080 },
{ 0x00000746, 0x0000 },
{ 0x00000747, 0x0080 },
{ 0x00000748, 0x0000 },
{ 0x00000749, 0x0080 },
{ 0x0000074a, 0x0000 },
{ 0x0000074b, 0x0080 },
{ 0x0000074c, 0x0000 },
{ 0x0000074d, 0x0080 },
{ 0x0000074e, 0x0000 },
{ 0x0000074f, 0x0080 },
{ 0x00000750, 0x0000 },
{ 0x00000751, 0x0080 },
{ 0x00000752, 0x0000 },
{ 0x00000753, 0x0080 },
{ 0x00000754, 0x0000 },
{ 0x00000755, 0x0080 },
{ 0x00000756, 0x0000 },
{ 0x00000757, 0x0080 },
{ 0x00000758, 0x0000 },
{ 0x00000759, 0x0080 },
{ 0x0000075a, 0x0000 },
{ 0x0000075b, 0x0080 },
{ 0x0000075c, 0x0000 },
{ 0x0000075d, 0x0080 },
{ 0x0000075e, 0x0000 },
{ 0x0000075f, 0x0080 },
{ 0x00000760, 0x0000 },
{ 0x00000761, 0x0080 },
{ 0x00000762, 0x0000 },
{ 0x00000763, 0x0080 },
{ 0x00000764, 0x0000 },
{ 0x00000765, 0x0080 },
{ 0x00000766, 0x0000 },
{ 0x00000767, 0x0080 },
{ 0x00000768, 0x0000 },
{ 0x00000769, 0x0080 },
{ 0x0000076a, 0x0000 },
{ 0x0000076b, 0x0080 },
{ 0x0000076c, 0x0000 },
{ 0x0000076d, 0x0080 },
{ 0x0000076e, 0x0000 },
{ 0x0000076f, 0x0080 },
{ 0x00000770, 0x0000 },
{ 0x00000771, 0x0080 },
{ 0x00000772, 0x0000 },
{ 0x00000773, 0x0080 },
{ 0x00000774, 0x0000 },
{ 0x00000775, 0x0080 },
{ 0x00000776, 0x0000 },
{ 0x00000777, 0x0080 },
{ 0x00000778, 0x0000 },
{ 0x00000779, 0x0080 },
{ 0x0000077a, 0x0000 },
{ 0x0000077b, 0x0080 },
{ 0x0000077c, 0x0000 },
{ 0x0000077d, 0x0080 },
{ 0x0000077e, 0x0000 },
{ 0x0000077f, 0x0080 },
{ 0x00000780, 0x0000 },
{ 0x00000781, 0x0080 },
{ 0x00000782, 0x0000 },
{ 0x00000783, 0x0080 },
{ 0x00000784, 0x0000 },
{ 0x00000785, 0x0080 },
{ 0x00000786, 0x0000 },
{ 0x00000787, 0x0080 },
{ 0x00000788, 0x0000 },
{ 0x00000789, 0x0080 },
{ 0x0000078a, 0x0000 },
{ 0x0000078b, 0x0080 },
{ 0x0000078c, 0x0000 },
{ 0x0000078d, 0x0080 },
{ 0x0000078e, 0x0000 },
{ 0x0000078f, 0x0080 },
{ 0x000007a0, 0x0000 }, /* R1952 - AIF4TX1MIX Input 1 Source */
{ 0x000007a1, 0x0080 }, /* R1953 - AIF4TX1MIX Input 1 Volume */
{ 0x000007a2, 0x0000 }, /* R1954 - AIF4TX1MIX Input 2 Source */
{ 0x000007a3, 0x0080 }, /* R1955 - AIF4TX1MIX Input 2 Volume */
{ 0x000007a4, 0x0000 }, /* R1956 - AIF4TX1MIX Input 3 Source */
{ 0x000007a5, 0x0080 }, /* R1957 - AIF4TX1MIX Input 3 Volume */
{ 0x000007a6, 0x0000 }, /* R1958 - AIF4TX1MIX Input 4 Source */
{ 0x000007a7, 0x0080 }, /* R1959 - AIF4TX1MIX Input 4 Volume */
{ 0x000007a8, 0x0000 }, /* R1960 - AIF4TX2MIX Input 1 Source */
{ 0x000007a9, 0x0080 }, /* R1961 - AIF4TX2MIX Input 1 Volume */
{ 0x000007aa, 0x0000 }, /* R1962 - AIF4TX2MIX Input 2 Source */
{ 0x000007ab, 0x0080 }, /* R1963 - AIF4TX2MIX Input 2 Volume */
{ 0x000007ac, 0x0000 }, /* R1964 - AIF4TX2MIX Input 3 Source */
{ 0x000007ad, 0x0080 }, /* R1965 - AIF4TX2MIX Input 3 Volume */
{ 0x000007ae, 0x0000 }, /* R1966 - AIF4TX2MIX Input 4 Source */
{ 0x000007af, 0x0080 }, /* R1967 - AIF4TX2MIX Input 4 Volume */
{ 0x000007c0, 0x0000 },
{ 0x000007c1, 0x0080 },
{ 0x000007c2, 0x0000 },
{ 0x000007c3, 0x0080 },
{ 0x000007c4, 0x0000 },
{ 0x000007c5, 0x0080 },
{ 0x000007c6, 0x0000 },
{ 0x000007c7, 0x0080 },
{ 0x000007c8, 0x0000 },
{ 0x000007c9, 0x0080 },
{ 0x000007ca, 0x0000 },
{ 0x000007cb, 0x0080 },
{ 0x000007cc, 0x0000 },
{ 0x000007cd, 0x0080 },
{ 0x000007ce, 0x0000 },
{ 0x000007cf, 0x0080 },
{ 0x000007d0, 0x0000 },
{ 0x000007d1, 0x0080 },
{ 0x000007d2, 0x0000 },
{ 0x000007d3, 0x0080 },
{ 0x000007d4, 0x0000 },
{ 0x000007d5, 0x0080 },
{ 0x000007d6, 0x0000 },
{ 0x000007d7, 0x0080 },
{ 0x000007d8, 0x0000 },
{ 0x000007d9, 0x0080 },
{ 0x000007da, 0x0000 },
{ 0x000007db, 0x0080 },
{ 0x000007dc, 0x0000 },
{ 0x000007dd, 0x0080 },
{ 0x000007de, 0x0000 },
{ 0x000007df, 0x0080 },
{ 0x000007e0, 0x0000 },
{ 0x000007e1, 0x0080 },
{ 0x000007e2, 0x0000 },
{ 0x000007e3, 0x0080 },
{ 0x000007e4, 0x0000 },
{ 0x000007e5, 0x0080 },
{ 0x000007e6, 0x0000 },
{ 0x000007e7, 0x0080 },
{ 0x000007e8, 0x0000 },
{ 0x000007e9, 0x0080 },
{ 0x000007ea, 0x0000 },
{ 0x000007eb, 0x0080 },
{ 0x000007ec, 0x0000 },
{ 0x000007ed, 0x0080 },
{ 0x000007ee, 0x0000 },
{ 0x000007ef, 0x0080 },
{ 0x000007f0, 0x0000 },
{ 0x000007f1, 0x0080 },
{ 0x000007f2, 0x0000 },
{ 0x000007f3, 0x0080 },
{ 0x000007f4, 0x0000 },
{ 0x000007f5, 0x0080 },
{ 0x000007f6, 0x0000 },
{ 0x000007f7, 0x0080 },
{ 0x000007f8, 0x0000 },
{ 0x000007f9, 0x0080 },
{ 0x000007fa, 0x0000 },
{ 0x000007fb, 0x0080 },
{ 0x000007fc, 0x0000 },
{ 0x000007fd, 0x0080 },
{ 0x000007fe, 0x0000 },
{ 0x000007ff, 0x0080 },
{ 0x00000800, 0x0000 },
{ 0x00000801, 0x0080 },
{ 0x00000808, 0x0000 },
{ 0x00000809, 0x0080 },
{ 0x00000880, 0x0000 },
{ 0x00000881, 0x0080 },
{ 0x00000882, 0x0000 },
{ 0x00000883, 0x0080 },
{ 0x00000884, 0x0000 },
{ 0x00000885, 0x0080 },
{ 0x00000886, 0x0000 },
{ 0x00000887, 0x0080 },
{ 0x00000888, 0x0000 },
{ 0x00000889, 0x0080 },
{ 0x0000088a, 0x0000 },
{ 0x0000088b, 0x0080 },
{ 0x0000088c, 0x0000 },
{ 0x0000088d, 0x0080 },
{ 0x0000088e, 0x0000 },
{ 0x0000088f, 0x0080 },
{ 0x00000890, 0x0000 },
{ 0x00000891, 0x0080 },
{ 0x00000892, 0x0000 },
{ 0x00000893, 0x0080 },
{ 0x00000894, 0x0000 },
{ 0x00000895, 0x0080 },
{ 0x00000896, 0x0000 },
{ 0x00000897, 0x0080 },
{ 0x00000898, 0x0000 },
{ 0x00000899, 0x0080 },
{ 0x0000089a, 0x0000 },
{ 0x0000089b, 0x0080 },
{ 0x0000089c, 0x0000 },
{ 0x0000089d, 0x0080 },
{ 0x0000089e, 0x0000 },
{ 0x0000089f, 0x0080 },
{ 0x000008c0, 0x0000 },
{ 0x000008c1, 0x0080 },
{ 0x000008c2, 0x0000 },
{ 0x000008c3, 0x0080 },
{ 0x000008c4, 0x0000 },
{ 0x000008c5, 0x0080 },
{ 0x000008c6, 0x0000 },
{ 0x000008c7, 0x0080 },
{ 0x000008c8, 0x0000 },
{ 0x000008c9, 0x0080 },
{ 0x000008ca, 0x0000 },
{ 0x000008cb, 0x0080 },
{ 0x000008cc, 0x0000 },
{ 0x000008cd, 0x0080 },
{ 0x000008ce, 0x0000 },
{ 0x000008cf, 0x0080 },
{ 0x000008d0, 0x0000 },
{ 0x000008d1, 0x0080 },
{ 0x000008d2, 0x0000 },
{ 0x000008d3, 0x0080 },
{ 0x000008d4, 0x0000 },
{ 0x000008d5, 0x0080 },
{ 0x000008d6, 0x0000 },
{ 0x000008d7, 0x0080 },
{ 0x000008d8, 0x0000 },
{ 0x000008d9, 0x0080 },
{ 0x000008da, 0x0000 },
{ 0x000008db, 0x0080 },
{ 0x000008dc, 0x0000 },
{ 0x000008dd, 0x0080 },
{ 0x000008de, 0x0000 },
{ 0x000008df, 0x0080 },
{ 0x00000900, 0x0000 },
{ 0x00000901, 0x0080 },
{ 0x00000902, 0x0000 },
{ 0x00000903, 0x0080 },
{ 0x00000904, 0x0000 },
{ 0x00000905, 0x0080 },
{ 0x00000906, 0x0000 },
{ 0x00000907, 0x0080 },
{ 0x00000908, 0x0000 },
{ 0x00000909, 0x0080 },
{ 0x0000090a, 0x0000 },
{ 0x0000090b, 0x0080 },
{ 0x0000090c, 0x0000 },
{ 0x0000090d, 0x0080 },
{ 0x0000090e, 0x0000 },
{ 0x0000090f, 0x0080 },
{ 0x00000910, 0x0000 },
{ 0x00000911, 0x0080 },
{ 0x00000912, 0x0000 },
{ 0x00000913, 0x0080 },
{ 0x00000914, 0x0000 },
{ 0x00000915, 0x0080 },
{ 0x00000916, 0x0000 },
{ 0x00000917, 0x0080 },
{ 0x00000918, 0x0000 },
{ 0x00000919, 0x0080 },
{ 0x0000091a, 0x0000 },
{ 0x0000091b, 0x0080 },
{ 0x0000091c, 0x0000 },
{ 0x0000091d, 0x0080 },
{ 0x0000091e, 0x0000 },
{ 0x0000091f, 0x0080 },
{ 0x00000940, 0x0000 },
{ 0x00000941, 0x0080 },
{ 0x00000942, 0x0000 },
{ 0x00000943, 0x0080 },
{ 0x00000944, 0x0000 },
{ 0x00000945, 0x0080 },
{ 0x00000946, 0x0000 },
{ 0x00000947, 0x0080 },
{ 0x00000948, 0x0000 },
{ 0x00000949, 0x0080 },
{ 0x0000094a, 0x0000 },
{ 0x0000094b, 0x0080 },
{ 0x0000094c, 0x0000 },
{ 0x0000094d, 0x0080 },
{ 0x0000094e, 0x0000 },
{ 0x0000094f, 0x0080 },
{ 0x00000950, 0x0000 },
{ 0x00000958, 0x0000 },
{ 0x00000960, 0x0000 },
{ 0x00000968, 0x0000 },
{ 0x00000970, 0x0000 },
{ 0x00000978, 0x0000 },
{ 0x00000980, 0x0000 },
{ 0x00000981, 0x0080 },
{ 0x00000982, 0x0000 },
{ 0x00000983, 0x0080 },
{ 0x00000984, 0x0000 },
{ 0x00000985, 0x0080 },
{ 0x00000986, 0x0000 },
{ 0x00000987, 0x0080 },
{ 0x00000988, 0x0000 },
{ 0x00000989, 0x0080 },
{ 0x0000098a, 0x0000 },
{ 0x0000098b, 0x0080 },
{ 0x0000098c, 0x0000 },
{ 0x0000098d, 0x0080 },
{ 0x0000098e, 0x0000 },
{ 0x0000098f, 0x0080 },
{ 0x00000990, 0x0000 },
{ 0x00000998, 0x0000 },
{ 0x000009a0, 0x0000 },
{ 0x000009a8, 0x0000 },
{ 0x000009b0, 0x0000 },
{ 0x000009b8, 0x0000 },
{ 0x000009c0, 0x0000 },
{ 0x000009c1, 0x0080 },
{ 0x000009c2, 0x0000 },
{ 0x000009c3, 0x0080 },
{ 0x000009c4, 0x0000 },
{ 0x000009c5, 0x0080 },
{ 0x000009c6, 0x0000 },
{ 0x000009c7, 0x0080 },
{ 0x000009c8, 0x0000 },
{ 0x000009c9, 0x0080 },
{ 0x000009ca, 0x0000 },
{ 0x000009cb, 0x0080 },
{ 0x000009cc, 0x0000 },
{ 0x000009cd, 0x0080 },
{ 0x000009ce, 0x0000 },
{ 0x000009cf, 0x0080 },
{ 0x000009d0, 0x0000 },
{ 0x000009d8, 0x0000 },
{ 0x000009e0, 0x0000 },
{ 0x000009e8, 0x0000 },
{ 0x000009f0, 0x0000 },
{ 0x000009f8, 0x0000 },
{ 0x00000a00, 0x0000 },
{ 0x00000a01, 0x0080 },
{ 0x00000a02, 0x0000 },
{ 0x00000a03, 0x0080 },
{ 0x00000a04, 0x0000 },
{ 0x00000a05, 0x0080 },
{ 0x00000a06, 0x0000 },
{ 0x00000a07, 0x0080 },
{ 0x00000a08, 0x0000 },
{ 0x00000a09, 0x0080 },
{ 0x00000a0a, 0x0000 },
{ 0x00000a0b, 0x0080 },
{ 0x00000a0c, 0x0000 },
{ 0x00000a0d, 0x0080 },
{ 0x00000a0e, 0x0000 },
{ 0x00000a0f, 0x0080 },
{ 0x00000a10, 0x0000 },
{ 0x00000a18, 0x0000 },
{ 0x00000a20, 0x0000 },
{ 0x00000a28, 0x0000 },
{ 0x00000a30, 0x0000 },
{ 0x00000a38, 0x0000 },
{ 0x00000a40, 0x0000 },
{ 0x00000a41, 0x0080 },
{ 0x00000a42, 0x0000 },
{ 0x00000a43, 0x0080 },
{ 0x00000a44, 0x0000 },
{ 0x00000a45, 0x0080 },
{ 0x00000a46, 0x0000 },
{ 0x00000a47, 0x0080 },
{ 0x00000a48, 0x0000 },
{ 0x00000a49, 0x0080 },
{ 0x00000a4a, 0x0000 },
{ 0x00000a4b, 0x0080 },
{ 0x00000a4c, 0x0000 },
{ 0x00000a4d, 0x0080 },
{ 0x00000a4e, 0x0000 },
{ 0x00000a4f, 0x0080 },
{ 0x00000a50, 0x0000 },
{ 0x00000a58, 0x0000 },
{ 0x00000a60, 0x0000 },
{ 0x00000a68, 0x0000 },
{ 0x00000a70, 0x0000 },
{ 0x00000a78, 0x0000 },
{ 0x00000a80, 0x0000 },
{ 0x00000a88, 0x0000 },
{ 0x00000a90, 0x0000 },
{ 0x00000a98, 0x0000 },
{ 0x00000aa0, 0x0000 },
{ 0x00000aa8, 0x0000 },
{ 0x00000ab0, 0x0000 },
{ 0x00000ab8, 0x0000 },
{ 0x00000b00, 0x0000 },
{ 0x00000b08, 0x0000 },
{ 0x00000b10, 0x0000 },
{ 0x00000b18, 0x0000 },
{ 0x00000b20, 0x0000 },
{ 0x00000b28, 0x0000 },
{ 0x00000b30, 0x0000 },
{ 0x00000b38, 0x0000 },
{ 0x00000b40, 0x0000 },
{ 0x00000b48, 0x0000 },
{ 0x00000b50, 0x0000 },
{ 0x00000b58, 0x0000 },
{ 0x00000b60, 0x0000 },
{ 0x00000b68, 0x0000 },
{ 0x00000b70, 0x0000 },
{ 0x00000b78, 0x0000 },
{ 0x00000b80, 0x0000 },
{ 0x00000b88, 0x0000 },
{ 0x00000ba0, 0x0000 },
{ 0x00000ba8, 0x0000 },
{ 0x00000bc0, 0x0000 }, /* R3008 - ISRC4DEC1MIX Input 1 Source */
{ 0x00000bc8, 0x0000 }, /* R3016 - ISRC4DEC2MIX Input 1 Source */
{ 0x00000be0, 0x0000 }, /* R3040 - ISRC4INT1MIX Input 1 Source */
{ 0x00000be8, 0x0000 }, /* R3048 - ISRC4INT2MIX Input 1 Source */
{ 0x00000c00, 0x0000 },
{ 0x00000c01, 0x0080 },
{ 0x00000c02, 0x0000 },
{ 0x00000c03, 0x0080 },
{ 0x00000c04, 0x0000 },
{ 0x00000c05, 0x0080 },
{ 0x00000c06, 0x0000 },
{ 0x00000c07, 0x0080 },
{ 0x00000c08, 0x0000 },
{ 0x00000c09, 0x0080 },
{ 0x00000c0a, 0x0000 },
{ 0x00000c0b, 0x0080 },
{ 0x00000c0c, 0x0000 },
{ 0x00000c0d, 0x0080 },
{ 0x00000c0e, 0x0000 },
{ 0x00000c0f, 0x0080 },
{ 0x00000c10, 0x0000 }, /* R3088 (0xC10) - DSP6AUX1MIX Input 1 */
	{ 0x00000c18, 0x0000 }, /* R3096 (0xC18) - DSP6AUX2MIX Input 1 */
	{ 0x00000c20, 0x0000 }, /* R3104 (0xC20) - DSP6AUX3MIX Input 1 */
	{ 0x00000c28, 0x0000 }, /* R3112 (0xC28) - DSP6AUX4MIX Input 1 */
	{ 0x00000c30, 0x0000 }, /* R3120 (0xC30) - DSP6AUX5MIX Input 1 */
	{ 0x00000c38, 0x0000 }, /* R3128 (0xC38) - DSP6AUX6MIX Input 1 */
{ 0x00000c40, 0x0000 },
{ 0x00000c41, 0x0080 },
{ 0x00000c42, 0x0000 },
{ 0x00000c43, 0x0080 },
{ 0x00000c44, 0x0000 },
{ 0x00000c45, 0x0080 },
{ 0x00000c46, 0x0000 },
{ 0x00000c47, 0x0080 },
{ 0x00000c48, 0x0000 },
{ 0x00000c49, 0x0080 },
{ 0x00000c4a, 0x0000 },
{ 0x00000c4b, 0x0080 },
{ 0x00000c4c, 0x0000 },
{ 0x00000c4d, 0x0080 },
{ 0x00000c4e, 0x0000 },
{ 0x00000c4f, 0x0080 },
{ 0x00000c50, 0x0000 },
{ 0x00000c58, 0x0000 },
{ 0x00000c60, 0x0000 },
{ 0x00000c68, 0x0000 },
{ 0x00000c70, 0x0000 },
{ 0x00000c78, 0x0000 },
{ 0x00000dc0, 0x0000 },
{ 0x00000dc8, 0x0000 },
{ 0x00000dd0, 0x0000 },
{ 0x00000dd8, 0x0000 },
{ 0x00000de0, 0x0000 },
{ 0x00000de8, 0x0000 },
{ 0x00000df0, 0x0000 },
{ 0x00000df8, 0x0000 },
{ 0x00000e00, 0x0000 }, /* R3584 (0xE00) - FX_Ctrl1 */
{ 0x00000e10, 0x6318 }, /* R3600 (0xE10) - EQ1_1 */
{ 0x00000e11, 0x6300 }, /* R3601 (0xE11) - EQ1_2 */
{ 0x00000e12, 0x0fc8 }, /* R3602 (0xE12) - EQ1_3 */
{ 0x00000e13, 0x03fe }, /* R3603 (0xE13) - EQ1_4 */
{ 0x00000e14, 0x00e0 }, /* R3604 (0xE14) - EQ1_5 */
{ 0x00000e15, 0x1ec4 }, /* R3605 (0xE15) - EQ1_6 */
{ 0x00000e16, 0xf136 }, /* R3606 (0xE16) - EQ1_7 */
{ 0x00000e17, 0x0409 }, /* R3607 (0xE17) - EQ1_8 */
{ 0x00000e18, 0x04cc }, /* R3608 (0xE18) - EQ1_9 */
{ 0x00000e19, 0x1c9b }, /* R3609 (0xE19) - EQ1_10 */
{ 0x00000e1a, 0xf337 }, /* R3610 (0xE1A) - EQ1_11 */
{ 0x00000e1b, 0x040b }, /* R3611 (0xE1B) - EQ1_12 */
{ 0x00000e1c, 0x0cbb }, /* R3612 (0xE1C) - EQ1_13 */
{ 0x00000e1d, 0x16f8 }, /* R3613 (0xE1D) - EQ1_14 */
{ 0x00000e1e, 0xf7d9 }, /* R3614 (0xE1E) - EQ1_15 */
{ 0x00000e1f, 0x040a }, /* R3615 (0xE1F) - EQ1_16 */
{ 0x00000e20, 0x1f14 }, /* R3616 (0xE20) - EQ1_17 */
{ 0x00000e21, 0x058c }, /* R3617 (0xE21) - EQ1_18 */
{ 0x00000e22, 0x0563 }, /* R3618 (0xE22) - EQ1_19 */
{ 0x00000e23, 0x4000 }, /* R3619 (0xE23) - EQ1_20 */
{ 0x00000e24, 0x0b75 }, /* R3620 (0xE24) - EQ1_21 */
{ 0x00000e26, 0x6318 }, /* R3622 (0xE26) - EQ2_1 */
{ 0x00000e27, 0x6300 }, /* R3623 (0xE27) - EQ2_2 */
{ 0x00000e28, 0x0fc8 }, /* R3624 (0xE28) - EQ2_3 */
{ 0x00000e29, 0x03fe }, /* R3625 (0xE29) - EQ2_4 */
{ 0x00000e2a, 0x00e0 }, /* R3626 (0xE2A) - EQ2_5 */
{ 0x00000e2b, 0x1ec4 }, /* R3627 (0xE2B) - EQ2_6 */
{ 0x00000e2c, 0xf136 }, /* R3628 (0xE2C) - EQ2_7 */
{ 0x00000e2d, 0x0409 }, /* R3629 (0xE2D) - EQ2_8 */
{ 0x00000e2e, 0x04cc }, /* R3630 (0xE2E) - EQ2_9 */
{ 0x00000e2f, 0x1c9b }, /* R3631 (0xE2F) - EQ2_10 */
{ 0x00000e30, 0xf337 }, /* R3632 (0xE30) - EQ2_11 */
{ 0x00000e31, 0x040b }, /* R3633 (0xE31) - EQ2_12 */
{ 0x00000e32, 0x0cbb }, /* R3634 (0xE32) - EQ2_13 */
{ 0x00000e33, 0x16f8 }, /* R3635 (0xE33) - EQ2_14 */
{ 0x00000e34, 0xf7d9 }, /* R3636 (0xE34) - EQ2_15 */
{ 0x00000e35, 0x040a }, /* R3637 (0xE35) - EQ2_16 */
{ 0x00000e36, 0x1f14 }, /* R3638 (0xE36) - EQ2_17 */
{ 0x00000e37, 0x058c }, /* R3639 (0xE37) - EQ2_18 */
{ 0x00000e38, 0x0563 }, /* R3640 (0xE38) - EQ2_19 */
{ 0x00000e39, 0x4000 }, /* R3641 (0xE39) - EQ2_20 */
{ 0x00000e3a, 0x0b75 }, /* R3642 (0xE3A) - EQ2_21 */
{ 0x00000e3c, 0x6318 }, /* R3644 (0xE3C) - EQ3_1 */
{ 0x00000e3d, 0x6300 }, /* R3645 (0xE3D) - EQ3_2 */
{ 0x00000e3e, 0x0fc8 }, /* R3646 (0xE3E) - EQ3_3 */
{ 0x00000e3f, 0x03fe }, /* R3647 (0xE3F) - EQ3_4 */
{ 0x00000e40, 0x00e0 }, /* R3648 (0xE40) - EQ3_5 */
{ 0x00000e41, 0x1ec4 }, /* R3649 (0xE41) - EQ3_6 */
{ 0x00000e42, 0xf136 }, /* R3650 (0xE42) - EQ3_7 */
{ 0x00000e43, 0x0409 }, /* R3651 (0xE43) - EQ3_8 */
{ 0x00000e44, 0x04cc }, /* R3652 (0xE44) - EQ3_9 */
{ 0x00000e45, 0x1c9b }, /* R3653 (0xE45) - EQ3_10 */
{ 0x00000e46, 0xf337 }, /* R3654 (0xE46) - EQ3_11 */
{ 0x00000e47, 0x040b }, /* R3655 (0xE47) - EQ3_12 */
{ 0x00000e48, 0x0cbb }, /* R3656 (0xE48) - EQ3_13 */
{ 0x00000e49, 0x16f8 }, /* R3657 (0xE49) - EQ3_14 */
{ 0x00000e4a, 0xf7d9 }, /* R3658 (0xE4A) - EQ3_15 */
{ 0x00000e4b, 0x040a }, /* R3659 (0xE4B) - EQ3_16 */
{ 0x00000e4c, 0x1f14 }, /* R3660 (0xE4C) - EQ3_17 */
{ 0x00000e4d, 0x058c }, /* R3661 (0xE4D) - EQ3_18 */
{ 0x00000e4e, 0x0563 }, /* R3662 (0xE4E) - EQ3_19 */
{ 0x00000e4f, 0x4000 }, /* R3663 (0xE4F) - EQ3_20 */
{ 0x00000e50, 0x0b75 }, /* R3664 (0xE50) - EQ3_21 */
{ 0x00000e52, 0x6318 }, /* R3666 (0xE52) - EQ4_1 */
{ 0x00000e53, 0x6300 }, /* R3667 (0xE53) - EQ4_2 */
{ 0x00000e54, 0x0fc8 }, /* R3668 (0xE54) - EQ4_3 */
{ 0x00000e55, 0x03fe }, /* R3669 (0xE55) - EQ4_4 */
{ 0x00000e56, 0x00e0 }, /* R3670 (0xE56) - EQ4_5 */
{ 0x00000e57, 0x1ec4 }, /* R3671 (0xE57) - EQ4_6 */
{ 0x00000e58, 0xf136 }, /* R3672 (0xE58) - EQ4_7 */
{ 0x00000e59, 0x0409 }, /* R3673 (0xE59) - EQ4_8 */
{ 0x00000e5a, 0x04cc }, /* R3674 (0xE5A) - EQ4_9 */
{ 0x00000e5b, 0x1c9b }, /* R3675 (0xE5B) - EQ4_10 */
{ 0x00000e5c, 0xf337 }, /* R3676 (0xE5C) - EQ4_11 */
{ 0x00000e5d, 0x040b }, /* R3677 (0xE5D) - EQ4_12 */
{ 0x00000e5e, 0x0cbb }, /* R3678 (0xE5E) - EQ4_13 */
{ 0x00000e5f, 0x16f8 }, /* R3679 (0xE5F) - EQ4_14 */
{ 0x00000e60, 0xf7d9 }, /* R3680 (0xE60) - EQ4_15 */
{ 0x00000e61, 0x040a }, /* R3681 (0xE61) - EQ4_16 */
{ 0x00000e62, 0x1f14 }, /* R3682 (0xE62) - EQ4_17 */
{ 0x00000e63, 0x058c }, /* R3683 (0xE63) - EQ4_18 */
{ 0x00000e64, 0x0563 }, /* R3684 (0xE64) - EQ4_19 */
{ 0x00000e65, 0x4000 }, /* R3685 (0xE65) - EQ4_20 */
{ 0x00000e66, 0x0b75 }, /* R3686 (0xE66) - EQ4_21 */
{ 0x00000e80, 0x0018 }, /* R3712 (0xE80) - DRC1 ctrl1 */
{ 0x00000e81, 0x0933 }, /* R3713 (0xE81) - DRC1 ctrl2 */
{ 0x00000e82, 0x0018 }, /* R3714 (0xE82) - DRC1 ctrl3 */
{ 0x00000e83, 0x0000 }, /* R3715 (0xE83) - DRC1 ctrl4 */
{ 0x00000e84, 0x0000 }, /* R3716 (0xE84) - DRC1 ctrl5 */
{ 0x00000e88, 0x0018 }, /* R3720 (0xE88) - DRC2 ctrl1 */
{ 0x00000e89, 0x0933 }, /* R3721 (0xE89) - DRC2 ctrl2 */
{ 0x00000e8a, 0x0018 }, /* R3722 (0xE8A) - DRC2 ctrl3 */
{ 0x00000e8b, 0x0000 }, /* R3723 (0xE8B) - DRC2 ctrl4 */
{ 0x00000e8c, 0x0000 }, /* R3724 (0xE8C) - DRC2 ctrl5 */
{ 0x00000ec0, 0x0000 }, /* R3776 (0xEC0) - HPLPF1_1 */
{ 0x00000ec1, 0x0000 }, /* R3777 (0xEC1) - HPLPF1_2 */
{ 0x00000ec4, 0x0000 }, /* R3780 (0xEC4) - HPLPF2_1 */
{ 0x00000ec5, 0x0000 }, /* R3781 (0xEC5) - HPLPF2_2 */
{ 0x00000ec8, 0x0000 }, /* R3784 (0xEC8) - HPLPF3_1 */
{ 0x00000ec9, 0x0000 }, /* R3785 (0xEC9) - HPLPF3_2 */
{ 0x00000ecc, 0x0000 }, /* R3788 (0xECC) - HPLPF4_1 */
{ 0x00000ecd, 0x0000 }, /* R3789 (0xECD) - HPLPF4_2 */
{ 0x00000ed0, 0x0000 }, /* R3792 (0xED0) - ASRC2_ENABLE */
{ 0x00000ed2, 0x0000 }, /* R3794 (0xED2) - ASRC2_RATE1 */
{ 0x00000ed3, 0x4000 }, /* R3795 (0xED3) - ASRC2_RATE2 */
{ 0x00000ee0, 0x0000 }, /* R3808 (0xEE0) - ASRC1_ENABLE */
{ 0x00000ee2, 0x0000 }, /* R3810 (0xEE2) - ASRC1_RATE1 */
{ 0x00000ee3, 0x4000 }, /* R3811 (0xEE3) - ASRC1_RATE2 */
{ 0x00000ef0, 0x0000 }, /* R3824 (0xEF0) - ISRC 1 CTRL 1 */
{ 0x00000ef1, 0x0001 }, /* R3825 (0xEF1) - ISRC 1 CTRL 2 */
{ 0x00000ef2, 0x0000 }, /* R3826 (0xEF2) - ISRC 1 CTRL 3 */
{ 0x00000ef3, 0x0000 }, /* R3827 (0xEF3) - ISRC 2 CTRL 1 */
{ 0x00000ef4, 0x0001 }, /* R3828 (0xEF4) - ISRC 2 CTRL 2 */
{ 0x00000ef5, 0x0000 }, /* R3829 (0xEF5) - ISRC 2 CTRL 3 */
{ 0x00000ef6, 0x0000 }, /* R3830 (0xEF6) - ISRC 3 CTRL 1 */
{ 0x00000ef7, 0x0001 }, /* R3831 (0xEF7) - ISRC 3 CTRL 2 */
{ 0x00000ef8, 0x0000 }, /* R3832 (0xEF8) - ISRC 3 CTRL 3 */
{ 0x00000ef9, 0x0000 }, /* R3833 - ISRC 4 CTRL 1 */
{ 0x00000efa, 0x0001 }, /* R3834 - ISRC 4 CTRL 2 */
{ 0x00000efb, 0x0000 }, /* R3835 - ISRC 4 CTRL 3 */
{ 0x00000F01, 0x0000 }, /* R3841 - ANC_SRC */
{ 0x00000F02, 0x0000 }, /* R3842 - Arizona DSP Status */
{ 0x00000F08, 0x001c }, /* R3848 - ANC Coefficient */
{ 0x00000F09, 0x0000 }, /* R3849 - ANC Coefficient */
{ 0x00000F0A, 0x0000 }, /* R3850 - ANC Coefficient */
{ 0x00000F0B, 0x0000 }, /* R3851 - ANC Coefficient */
{ 0x00000F0C, 0x0000 }, /* R3852 - ANC Coefficient */
{ 0x00000F0D, 0x0000 }, /* R3853 - ANC Coefficient */
{ 0x00000F0E, 0x0000 }, /* R3854 - ANC Coefficient */
{ 0x00000F0F, 0x0000 }, /* R3855 - ANC Coefficient */
{ 0x00000F10, 0x0000 }, /* R3856 - ANC Coefficient */
{ 0x00000F11, 0x0000 }, /* R3857 - ANC Coefficient */
{ 0x00000F12, 0x0000 }, /* R3858 - ANC Coefficient */
{ 0x00000F15, 0x0000 }, /* R3861 - FCL Filter Control */
{ 0x00000F17, 0x0004 }, /* R3863 - FCL ADC Reformatter Control */
{ 0x00000F18, 0x0004 }, /* R3864 - ANC Coefficient */
{ 0x00000F19, 0x0002 }, /* R3865 - ANC Coefficient */
{ 0x00000F1A, 0x0000 }, /* R3866 - ANC Coefficient */
{ 0x00000F1B, 0x0010 }, /* R3867 - ANC Coefficient */
{ 0x00000F1C, 0x0000 }, /* R3868 - ANC Coefficient */
{ 0x00000F1D, 0x0000 }, /* R3869 - ANC Coefficient */
{ 0x00000F1E, 0x0000 }, /* R3870 - ANC Coefficient */
{ 0x00000F1F, 0x0000 }, /* R3871 - ANC Coefficient */
{ 0x00000F20, 0x0000 }, /* R3872 - ANC Coefficient */
{ 0x00000F21, 0x0000 }, /* R3873 - ANC Coefficient */
{ 0x00000F22, 0x0000 }, /* R3874 - ANC Coefficient */
{ 0x00000F23, 0x0000 }, /* R3875 - ANC Coefficient */
{ 0x00000F24, 0x0000 }, /* R3876 - ANC Coefficient */
{ 0x00000F25, 0x0000 }, /* R3877 - ANC Coefficient */
{ 0x00000F26, 0x0000 }, /* R3878 - ANC Coefficient */
{ 0x00000F27, 0x0000 }, /* R3879 - ANC Coefficient */
{ 0x00000F28, 0x0000 }, /* R3880 - ANC Coefficient */
{ 0x00000F29, 0x0000 }, /* R3881 - ANC Coefficient */
{ 0x00000F2A, 0x0000 }, /* R3882 - ANC Coefficient */
{ 0x00000F2B, 0x0000 }, /* R3883 - ANC Coefficient */
{ 0x00000F2C, 0x0000 }, /* R3884 - ANC Coefficient */
{ 0x00000F2D, 0x0000 }, /* R3885 - ANC Coefficient */
{ 0x00000F2E, 0x0000 }, /* R3886 - ANC Coefficient */
{ 0x00000F2F, 0x0000 }, /* R3887 - ANC Coefficient */
{ 0x00000F30, 0x0000 }, /* R3888 - ANC Coefficient */
{ 0x00000F31, 0x0000 }, /* R3889 - ANC Coefficient */
{ 0x00000F32, 0x0000 }, /* R3890 - ANC Coefficient */
{ 0x00000F33, 0x0000 }, /* R3891 - ANC Coefficient */
{ 0x00000F34, 0x0000 }, /* R3892 - ANC Coefficient */
{ 0x00000F35, 0x0000 }, /* R3893 - ANC Coefficient */
{ 0x00000F36, 0x0000 }, /* R3894 - ANC Coefficient */
{ 0x00000F37, 0x0000 }, /* R3895 - ANC Coefficient */
{ 0x00000F38, 0x0000 }, /* R3896 - ANC Coefficient */
{ 0x00000F39, 0x0000 }, /* R3897 - ANC Coefficient */
{ 0x00000F3A, 0x0000 }, /* R3898 - ANC Coefficient */
{ 0x00000F3B, 0x0000 }, /* R3899 - ANC Coefficient */
{ 0x00000F3C, 0x0000 }, /* R3900 - ANC Coefficient */
{ 0x00000F3D, 0x0000 }, /* R3901 - ANC Coefficient */
{ 0x00000F3E, 0x0000 }, /* R3902 - ANC Coefficient */
{ 0x00000F3F, 0x0000 }, /* R3903 - ANC Coefficient */
{ 0x00000F40, 0x0000 }, /* R3904 - ANC Coefficient */
{ 0x00000F41, 0x0000 }, /* R3905 - ANC Coefficient */
{ 0x00000F42, 0x0000 }, /* R3906 - ANC Coefficient */
{ 0x00000F43, 0x0000 }, /* R3907 - ANC Coefficient */
{ 0x00000F44, 0x0000 }, /* R3908 - ANC Coefficient */
{ 0x00000F45, 0x0000 }, /* R3909 - ANC Coefficient */
{ 0x00000F46, 0x0000 }, /* R3910 - ANC Coefficient */
{ 0x00000F47, 0x0000 }, /* R3911 - ANC Coefficient */
{ 0x00000F48, 0x0000 }, /* R3912 - ANC Coefficient */
{ 0x00000F49, 0x0000 }, /* R3913 - ANC Coefficient */
{ 0x00000F4A, 0x0000 }, /* R3914 - ANC Coefficient */
{ 0x00000F4B, 0x0000 }, /* R3915 - ANC Coefficient */
{ 0x00000F4C, 0x0000 }, /* R3916 - ANC Coefficient */
{ 0x00000F4D, 0x0000 }, /* R3917 - ANC Coefficient */
{ 0x00000F4E, 0x0000 }, /* R3918 - ANC Coefficient */
{ 0x00000F4F, 0x0000 }, /* R3919 - ANC Coefficient */
{ 0x00000F50, 0x0000 }, /* R3920 - ANC Coefficient */
{ 0x00000F51, 0x0000 }, /* R3921 - ANC Coefficient */
{ 0x00000F52, 0x0000 }, /* R3922 - ANC Coefficient */
{ 0x00000F53, 0x0000 }, /* R3923 - ANC Coefficient */
{ 0x00000F54, 0x0000 }, /* R3924 - ANC Coefficient */
{ 0x00000F55, 0x0000 }, /* R3925 - ANC Coefficient */
{ 0x00000F56, 0x0000 }, /* R3926 - ANC Coefficient */
{ 0x00000F57, 0x0000 }, /* R3927 - ANC Coefficient */
{ 0x00000F58, 0x0000 }, /* R3928 - ANC Coefficient */
{ 0x00000F59, 0x0000 }, /* R3929 - ANC Coefficient */
{ 0x00000F5A, 0x0000 }, /* R3930 - ANC Coefficient */
{ 0x00000F5B, 0x0000 }, /* R3931 - ANC Coefficient */
{ 0x00000F5C, 0x0000 }, /* R3932 - ANC Coefficient */
{ 0x00000F5D, 0x0000 }, /* R3933 - ANC Coefficient */
{ 0x00000F5E, 0x0000 }, /* R3934 - ANC Coefficient */
{ 0x00000F5F, 0x0000 }, /* R3935 - ANC Coefficient */
{ 0x00000F60, 0x0000 }, /* R3936 - ANC Coefficient */
{ 0x00000F61, 0x0000 }, /* R3937 - ANC Coefficient */
{ 0x00000F62, 0x0000 }, /* R3938 - ANC Coefficient */
{ 0x00000F63, 0x0000 }, /* R3939 - ANC Coefficient */
{ 0x00000F64, 0x0000 }, /* R3940 - ANC Coefficient */
{ 0x00000F65, 0x0000 }, /* R3941 - ANC Coefficient */
{ 0x00000F66, 0x0000 }, /* R3942 - ANC Coefficient */
{ 0x00000F67, 0x0000 }, /* R3943 - ANC Coefficient */
{ 0x00000F68, 0x0000 }, /* R3944 - ANC Coefficient */
{ 0x00000F69, 0x0000 }, /* R3945 - ANC Coefficient */
{ 0x00000F71, 0x0000 }, /* R3953 - FCR Filter Control */
{ 0x00000F73, 0x0004 }, /* R3955 - FCR ADC Reformatter Control */
{ 0x00000F74, 0x0004 }, /* R3956 - ANC Coefficient */
{ 0x00000F75, 0x0002 }, /* R3957 - ANC Coefficient */
{ 0x00000F76, 0x0000 }, /* R3958 - ANC Coefficient */
{ 0x00000F77, 0x0010 }, /* R3959 - ANC Coefficient */
{ 0x00000F78, 0x0000 }, /* R3960 - ANC Coefficient */
{ 0x00000F79, 0x0000 }, /* R3961 - ANC Coefficient */
{ 0x00000F7A, 0x0000 }, /* R3962 - ANC Coefficient */
{ 0x00000F7B, 0x0000 }, /* R3963 - ANC Coefficient */
{ 0x00000F7C, 0x0000 }, /* R3964 - ANC Coefficient */
{ 0x00000F7D, 0x0000 }, /* R3965 - ANC Coefficient */
{ 0x00000F7E, 0x0000 }, /* R3966 - ANC Coefficient */
{ 0x00000F7F, 0x0000 }, /* R3967 - ANC Coefficient */
{ 0x00000F80, 0x0000 }, /* R3968 - ANC Coefficient */
{ 0x00000F81, 0x0000 }, /* R3969 - ANC Coefficient */
{ 0x00000F82, 0x0000 }, /* R3970 - ANC Coefficient */
{ 0x00000F83, 0x0000 }, /* R3971 - ANC Coefficient */
{ 0x00000F84, 0x0000 }, /* R3972 - ANC Coefficient */
{ 0x00000F85, 0x0000 }, /* R3973 - ANC Coefficient */
{ 0x00000F86, 0x0000 }, /* R3974 - ANC Coefficient */
{ 0x00000F87, 0x0000 }, /* R3975 - ANC Coefficient */
{ 0x00000F88, 0x0000 }, /* R3976 - ANC Coefficient */
{ 0x00000F89, 0x0000 }, /* R3977 - ANC Coefficient */
{ 0x00000F8A, 0x0000 }, /* R3978 - ANC Coefficient */
{ 0x00000F8B, 0x0000 }, /* R3979 - ANC Coefficient */
{ 0x00000F8C, 0x0000 }, /* R3980 - ANC Coefficient */
{ 0x00000F8D, 0x0000 }, /* R3981 - ANC Coefficient */
{ 0x00000F8E, 0x0000 }, /* R3982 - ANC Coefficient */
{ 0x00000F8F, 0x0000 }, /* R3983 - ANC Coefficient */
{ 0x00000F90, 0x0000 }, /* R3984 - ANC Coefficient */
{ 0x00000F91, 0x0000 }, /* R3985 - ANC Coefficient */
{ 0x00000F92, 0x0000 }, /* R3986 - ANC Coefficient */
{ 0x00000F93, 0x0000 }, /* R3987 - ANC Coefficient */
{ 0x00000F94, 0x0000 }, /* R3988 - ANC Coefficient */
{ 0x00000F95, 0x0000 }, /* R3989 - ANC Coefficient */
{ 0x00000F96, 0x0000 }, /* R3990 - ANC Coefficient */
{ 0x00000F97, 0x0000 }, /* R3991 - ANC Coefficient */
{ 0x00000F98, 0x0000 }, /* R3992 - ANC Coefficient */
{ 0x00000F99, 0x0000 }, /* R3993 - ANC Coefficient */
{ 0x00000F9A, 0x0000 }, /* R3994 - ANC Coefficient */
{ 0x00000F9B, 0x0000 }, /* R3995 - ANC Coefficient */
{ 0x00000F9C, 0x0000 }, /* R3996 - ANC Coefficient */
{ 0x00000F9D, 0x0000 }, /* R3997 - ANC Coefficient */
{ 0x00000F9E, 0x0000 }, /* R3998 - ANC Coefficient */
{ 0x00000F9F, 0x0000 }, /* R3999 - ANC Coefficient */
{ 0x00000FA0, 0x0000 }, /* R4000 - ANC Coefficient */
{ 0x00000FA1, 0x0000 }, /* R4001 - ANC Coefficient */
{ 0x00000FA2, 0x0000 }, /* R4002 - ANC Coefficient */
{ 0x00000FA3, 0x0000 }, /* R4003 - ANC Coefficient */
{ 0x00000FA4, 0x0000 }, /* R4004 - ANC Coefficient */
{ 0x00000FA5, 0x0000 }, /* R4005 - ANC Coefficient */
{ 0x00000FA6, 0x0000 }, /* R4006 - ANC Coefficient */
{ 0x00000FA7, 0x0000 }, /* R4007 - ANC Coefficient */
{ 0x00000FA8, 0x0000 }, /* R4008 - ANC Coefficient */
{ 0x00000FA9, 0x0000 }, /* R4009 - ANC Coefficient */
{ 0x00000FAA, 0x0000 }, /* R4010 - ANC Coefficient */
{ 0x00000FAB, 0x0000 }, /* R4011 - ANC Coefficient */
{ 0x00000FAC, 0x0000 }, /* R4012 - ANC Coefficient */
{ 0x00000FAD, 0x0000 }, /* R4013 - ANC Coefficient */
{ 0x00000FAE, 0x0000 }, /* R4014 - ANC Coefficient */
{ 0x00000FAF, 0x0000 }, /* R4015 - ANC Coefficient */
{ 0x00000FB0, 0x0000 }, /* R4016 - ANC Coefficient */
{ 0x00000FB1, 0x0000 }, /* R4017 - ANC Coefficient */
{ 0x00000FB2, 0x0000 }, /* R4018 - ANC Coefficient */
{ 0x00000FB3, 0x0000 }, /* R4019 - ANC Coefficient */
{ 0x00000FB4, 0x0000 }, /* R4020 - ANC Coefficient */
{ 0x00000FB5, 0x0000 }, /* R4021 - ANC Coefficient */
{ 0x00000FB6, 0x0000 }, /* R4022 - ANC Coefficient */
{ 0x00000FB7, 0x0000 }, /* R4023 - ANC Coefficient */
{ 0x00000FB8, 0x0000 }, /* R4024 - ANC Coefficient */
{ 0x00000FB9, 0x0000 }, /* R4025 - ANC Coefficient */
{ 0x00000FBA, 0x0000 }, /* R4026 - ANC Coefficient */
{ 0x00000FBB, 0x0000 }, /* R4027 - ANC Coefficient */
{ 0x00000FBC, 0x0000 }, /* R4028 - ANC Coefficient */
{ 0x00000FBD, 0x0000 }, /* R4029 - ANC Coefficient */
{ 0x00000FBE, 0x0000 }, /* R4030 - ANC Coefficient */
{ 0x00000FBF, 0x0000 }, /* R4031 - ANC Coefficient */
{ 0x00000FC0, 0x0000 }, /* R4032 - ANC Coefficient */
{ 0x00000FC1, 0x0000 }, /* R4033 - ANC Coefficient */
{ 0x00000FC2, 0x0000 }, /* R4034 - ANC Coefficient */
{ 0x00000FC3, 0x0000 }, /* R4035 - ANC Coefficient */
{ 0x00000FC4, 0x0000 }, /* R4036 - ANC Coefficient */
{ 0x00000FC5, 0x0000 }, /* R4037 - ANC Coefficient */
{ 0x00001300, 0x050E }, /* R4864 - DAC Comp 1 */
{ 0x00001302, 0x0101 }, /* R4866 - DAC Comp 2 */
{ 0x00001380, 0x0425 },
{ 0x00001381, 0xF6D8 },
{ 0x00001382, 0x0632 },
{ 0x00001383, 0xFEC8 },
{ 0x00001390, 0x042F },
{ 0x00001391, 0xF6CA },
{ 0x00001392, 0x0637 },
{ 0x00001393, 0xFEC8 },
{ 0x000013a0, 0x0000 },
{ 0x000013a1, 0x0000 },
{ 0x000013a2, 0x0000 },
{ 0x000013a3, 0x0000 },
{ 0x000013b0, 0x0000 },
{ 0x000013b1, 0x0000 },
{ 0x000013b2, 0x0000 },
{ 0x000013b3, 0x0000 },
{ 0x000013c0, 0x0000 },
{ 0x000013c1, 0x0000 },
{ 0x000013c2, 0x0000 },
{ 0x000013c3, 0x0000 },
{ 0x000013d0, 0x0000 },
{ 0x000013d1, 0x0000 },
{ 0x000013d2, 0x0000 },
{ 0x000013d3, 0x0000 },
{ 0x00001400, 0x0000 },
{ 0x00001401, 0x0000 },
{ 0x00001402, 0x0000 },
{ 0x00001403, 0x0000 },
{ 0x00001410, 0x0000 },
{ 0x00001411, 0x0000 },
{ 0x00001412, 0x0000 },
{ 0x00001413, 0x0000 },
{ 0x00001480, 0x0000 }, /*R5248 - DFC1_CTRL*/
{ 0x00001482, 0x1F00 }, /*R5250 - DFC1_RX*/
{ 0x00001484, 0x1F00 }, /*R5252 - DFC1_TX*/
{ 0x00001486, 0x0000 },
{ 0x00001488, 0x1F00 },
{ 0x0000148A, 0x1F00 },
{ 0x0000148C, 0x0000 },
{ 0x0000148E, 0x1F00 },
{ 0x00001490, 0x1F00 },
{ 0x00001492, 0x0000 },
{ 0x00001494, 0x1F00 },
{ 0x00001496, 0x1F00 },
{ 0x00001498, 0x0000 },
{ 0x0000149A, 0x1F00 },
{ 0x0000149C, 0x1F00 },
{ 0x0000149E, 0x0000 },
{ 0x000014A0, 0x1F00 },
{ 0x000014A2, 0x1F00 },
{ 0x000014A4, 0x0000 },
{ 0x000014A6, 0x1F00 },
{ 0x000014A8, 0x1F00 },
{ 0x000014AA, 0x0000 },
{ 0x000014AC, 0x1F00 },
{ 0x000014AE, 0x1F00 }, /*R5294 - DFC8_TX */
{ 0x00001701, 0xF000 }, /* R5889 - GPIO1 Control 2 */
{ 0x00001703, 0xF000 }, /* R5891 - GPIO2 Control 2 */
{ 0x00001705, 0xF000 }, /* R5893 - GPIO3 Control 2 */
{ 0x00001707, 0xF000 }, /* R5895 - GPIO4 Control 2 */
{ 0x00001709, 0xF000 }, /* R5897 - GPIO5 Control 2 */
{ 0x0000170B, 0xF000 }, /* R5899 - GPIO6 Control 2 */
{ 0x0000170D, 0xF000 }, /* R5901 - GPIO7 Control 2 */
{ 0x0000170F, 0xF000 }, /* R5903 - GPIO8 Control 2 */
{ 0x00001711, 0xF000 }, /* R5905 - GPIO9 Control 2 */
{ 0x00001713, 0xF000 }, /* R5907 - GPIO10 Control 2 */
{ 0x00001715, 0xF000 }, /* R5909 - GPIO11 Control 2 */
{ 0x00001717, 0xF000 }, /* R5911 - GPIO12 Control 2 */
{ 0x00001719, 0xF000 }, /* R5913 - GPIO13 Control 2 */
{ 0x0000171B, 0xF000 }, /* R5915 - GPIO14 Control 2 */
{ 0x0000171D, 0xF000 }, /* R5917 - GPIO15 Control 2 */
{ 0x0000171F, 0xF000 }, /* R5919 - GPIO16 Control 2 */
{ 0x00001721, 0xF000 }, /* R5921 - GPIO17 Control 2 */
{ 0x00001723, 0xF000 }, /* R5923 - GPIO18 Control 2 */
{ 0x00001725, 0xF000 }, /* R5925 - GPIO19 Control 2 */
{ 0x00001727, 0xF000 }, /* R5927 - GPIO20 Control 2 */
{ 0x00001729, 0xF000 }, /* R5929 - GPIO21 Control 2 */
{ 0x0000172B, 0xF000 }, /* R5931 - GPIO22 Control 2 */
{ 0x0000172D, 0xF000 }, /* R5933 - GPIO23 Control 2 */
{ 0x0000172F, 0xF000 }, /* R5935 - GPIO24 Control 2 */
{ 0x00001731, 0xF000 }, /* R5937 - GPIO25 Control 2 */
{ 0x00001733, 0xF000 }, /* R5939 - GPIO26 Control 2 */
{ 0x00001735, 0xF000 }, /* R5941 - GPIO27 Control 2 */
{ 0x00001737, 0xF000 }, /* R5943 - GPIO28 Control 2 */
{ 0x00001739, 0xF000 }, /* R5945 - GPIO29 Control 2 */
{ 0x0000173B, 0xF000 }, /* R5947 - GPIO30 Control 2 */
{ 0x0000173D, 0xF000 }, /* R5949 - GPIO31 Control 2 */
{ 0x0000173F, 0xF000 }, /* R5951 - GPIO32 Control 2 */
{ 0x00001741, 0xF000 }, /* R5953 - GPIO33 Control 2 */
{ 0x00001743, 0xF000 }, /* R5955 - GPIO34 Control 2 */
{ 0x00001745, 0xF000 }, /* R5957 - GPIO35 Control 2 */
{ 0x00001747, 0xF000 }, /* R5959 - GPIO36 Control 2 */
{ 0x00001749, 0xF000 }, /* R5961 - GPIO37 Control 2 */
{ 0x0000174B, 0xF000 }, /* R5963 - GPIO38 Control 2 */
{ 0x00001840, 0x9200 }, /* R6208 - IRQ1 Mask 1 */
{ 0x00001841, 0xFB00 }, /* R6209 - IRQ1 Mask 2 */
{ 0x00001842, 0xFFFF }, /* R6210 - IRQ1 Mask 3 */
{ 0x00001843, 0xFFFF }, /* R6211 - IRQ1 Mask 4 */
{ 0x00001844, 0xFFFF }, /* R6212 - IRQ1 Mask 5 */
{ 0x00001845, 0x0301 }, /* R6213 - IRQ1 Mask 6 */
{ 0x00001846, 0x003F }, /* R6214 - IRQ1 Mask 7 */
{ 0x00001847, 0xFFFF }, /* R6215 - IRQ1 Mask 8 */
{ 0x00001848, 0x0F07 }, /* R6216 - IRQ1 Mask 9 */
{ 0x00001849, 0xFFFF }, /* R6217 - IRQ1 Mask 10 */
{ 0x0000184A, 0xFFFF }, /* R6218 - IRQ1 Mask 11 */
{ 0x0000184B, 0x003F }, /* R6219 - IRQ1 Mask 12 */
{ 0x0000184C, 0x003F }, /* R6220 - IRQ1 Mask 13 */
{ 0x0000184D, 0x003F }, /* R6221 - IRQ1 Mask 14 */
{ 0x0000184E, 0xFFFF }, /* R6222 - IRQ1 Mask 15 */
{ 0x0000184F, 0xFFFF }, /* R6223 - IRQ1 Mask 16 */
{ 0x00001850, 0xFFFF }, /* R6224 - IRQ1 Mask 17 */
{ 0x00001851, 0xFFFF }, /* R6225 - IRQ1 Mask 18 */
{ 0x00001852, 0x003F }, /* R6226 - IRQ1 Mask 19 */
{ 0x00001853, 0xFFFF }, /* R6227 - IRQ1 Mask 20 */
{ 0x00001854, 0x00FF }, /* R6228 - IRQ1 Mask 21 */
{ 0x00001855, 0x00FF }, /* R6229 - IRQ1 Mask 22 */
{ 0x00001856, 0x00FF }, /* R6230 - IRQ1 Mask 23 */
{ 0x00001857, 0x00FF }, /* R6231 - IRQ1 Mask 24 */
{ 0x00001858, 0x007F }, /* R6232 - IRQ1 Mask 25 */
{ 0x00001859, 0xFFFF }, /* R6233 - IRQ1 Mask 26 */
{ 0x0000185A, 0x007F }, /* R6234 - IRQ1 Mask 27 */
{ 0x0000185B, 0x007F }, /* R6235 - IRQ1 Mask 28 */
{ 0x0000185C, 0xFFFF }, /* R6236 - IRQ1 Mask 29 */
{ 0x0000185D, 0x007F }, /* R6237 - IRQ1 Mask 30 */
{ 0x0000185E, 0x0007 }, /* R6238 - IRQ1 Mask 31 */
{ 0x0000185F, 0x0007 }, /* R6239 - IRQ1 Mask 32 */
{ 0x00001860, 0x007F }, /* R6240 - IRQ1 Mask 33 */
{ 0x00001948, 0x0F07 }, /* R6472 - IRQ2 Mask 9 */
{ 0x00001A06, 0x0000 }, /* R6662 - Interrupt Debounce 7 */
{ 0x00001A80, 0x4400 }, /* R6784 - IRQ1 CTRL */
};
/*
 * moon_is_adsp_memory - test whether a register address lies in ADSP memory
 * @dev: device pointer (unused; present to match the regmap callback shape)
 * @reg: register address to test
 *
 * Returns true if @reg falls inside any of the ADSP core memory windows
 * listed below (each range is inclusive at both ends), false otherwise.
 * The windows are kept in a table so adding or auditing a range is a
 * one-line change instead of editing a long boolean chain.
 */
static bool moon_is_adsp_memory(struct device *dev, unsigned int reg)
{
	/* Inclusive [start, end] ADSP memory windows. */
	static const struct {
		unsigned int start;
		unsigned int end;
	} adsp_ranges[] = {
		{ 0x080000, 0x088ffe },
		{ 0x0a0000, 0x0a9ffe },
		{ 0x0c0000, 0x0c3ffe },
		{ 0x0e0000, 0x0e1ffe },
		{ 0x100000, 0x10effe },
		{ 0x120000, 0x12bffe },
		{ 0x136000, 0x137ffe },
		{ 0x140000, 0x14bffe },
		{ 0x160000, 0x161ffe },
		{ 0x180000, 0x18effe },
		{ 0x1a0000, 0x1b1ffe },
		{ 0x1b6000, 0x1b7ffe },
		{ 0x1c0000, 0x1cbffe },
		{ 0x1e0000, 0x1e1ffe },
		{ 0x200000, 0x208ffe },
		{ 0x220000, 0x229ffe },
		{ 0x240000, 0x243ffe },
		{ 0x260000, 0x261ffe },
		{ 0x280000, 0x288ffe },
		{ 0x2a0000, 0x2a9ffe },
		{ 0x2c0000, 0x2c3ffe },
		{ 0x2e0000, 0x2e1ffe },
		{ 0x300000, 0x308ffe },
		{ 0x320000, 0x333ffe },
		{ 0x340000, 0x353ffe },
		{ 0x360000, 0x361ffe },
		{ 0x380000, 0x388ffe },
		{ 0x3a0000, 0x3b3ffe },
		{ 0x3c0000, 0x3d3ffe },
		{ 0x3e0000, 0x3e1ffe },
	};
	size_t i;

	for (i = 0; i < sizeof(adsp_ranges) / sizeof(adsp_ranges[0]); i++) {
		if (reg >= adsp_ranges[i].start && reg <= adsp_ranges[i].end)
			return true;
	}

	return false;
}
static bool moon_16bit_readable_register(struct device *dev, unsigned int reg)
{
switch (reg) {
case ARIZONA_SOFTWARE_RESET:
case ARIZONA_DEVICE_REVISION:
case ARIZONA_CTRL_IF_SPI_CFG_1:
case ARIZONA_CTRL_IF_I2C1_CFG_1:
case ARIZONA_CTRL_IF_I2C2_CFG_1:
case ARIZONA_WRITE_SEQUENCER_CTRL_0:
case ARIZONA_WRITE_SEQUENCER_CTRL_1:
case ARIZONA_WRITE_SEQUENCER_CTRL_2:
case ARIZONA_TONE_GENERATOR_1:
case ARIZONA_TONE_GENERATOR_2:
case ARIZONA_TONE_GENERATOR_3:
case ARIZONA_TONE_GENERATOR_4:
case ARIZONA_TONE_GENERATOR_5:
case ARIZONA_PWM_DRIVE_1:
case ARIZONA_PWM_DRIVE_2:
case ARIZONA_PWM_DRIVE_3:
case ARIZONA_SEQUENCE_CONTROL:
case ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_1:
case ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_2:
case ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_3:
case ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_4:
case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_1:
case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_2:
case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_3:
case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_4:
case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_5:
case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_6:
case ARIZONA_HAPTICS_CONTROL_1:
case ARIZONA_HAPTICS_CONTROL_2:
case ARIZONA_HAPTICS_PHASE_1_INTENSITY:
case ARIZONA_HAPTICS_PHASE_1_DURATION:
case ARIZONA_HAPTICS_PHASE_2_INTENSITY:
case ARIZONA_HAPTICS_PHASE_2_DURATION:
case ARIZONA_HAPTICS_PHASE_3_INTENSITY:
case ARIZONA_HAPTICS_PHASE_3_DURATION:
case ARIZONA_HAPTICS_STATUS:
case CLEARWATER_COMFORT_NOISE_GENERATOR:
case ARIZONA_CLOCK_32K_1:
case ARIZONA_SYSTEM_CLOCK_1:
case ARIZONA_SAMPLE_RATE_1:
case ARIZONA_SAMPLE_RATE_2:
case ARIZONA_SAMPLE_RATE_3:
case ARIZONA_SAMPLE_RATE_1_STATUS:
case ARIZONA_SAMPLE_RATE_2_STATUS:
case ARIZONA_SAMPLE_RATE_3_STATUS:
case ARIZONA_ASYNC_CLOCK_1:
case ARIZONA_ASYNC_SAMPLE_RATE_1:
case ARIZONA_ASYNC_SAMPLE_RATE_1_STATUS:
case ARIZONA_ASYNC_SAMPLE_RATE_2:
case ARIZONA_ASYNC_SAMPLE_RATE_2_STATUS:
case CLEARWATER_DSP_CLOCK_1:
case CLEARWATER_DSP_CLOCK_2:
case ARIZONA_OUTPUT_SYSTEM_CLOCK:
case ARIZONA_OUTPUT_ASYNC_CLOCK:
case ARIZONA_RATE_ESTIMATOR_1:
case ARIZONA_RATE_ESTIMATOR_2:
case ARIZONA_RATE_ESTIMATOR_3:
case ARIZONA_RATE_ESTIMATOR_4:
case ARIZONA_RATE_ESTIMATOR_5:
case ARIZONA_FLL1_CONTROL_1:
case ARIZONA_FLL1_CONTROL_2:
case ARIZONA_FLL1_CONTROL_3:
case ARIZONA_FLL1_CONTROL_4:
case ARIZONA_FLL1_CONTROL_5:
case ARIZONA_FLL1_CONTROL_6:
case ARIZONA_FLL1_CONTROL_7:
case ARIZONA_FLL1_EFS_2:
case ARIZONA_FLL1_LOOP_FILTER_TEST_1:
case ARIZONA_FLL1_NCO_TEST_0:
case ARIZONA_FLL1_SYNCHRONISER_1:
case ARIZONA_FLL1_SYNCHRONISER_2:
case ARIZONA_FLL1_SYNCHRONISER_3:
case ARIZONA_FLL1_SYNCHRONISER_4:
case ARIZONA_FLL1_SYNCHRONISER_5:
case ARIZONA_FLL1_SYNCHRONISER_6:
case ARIZONA_FLL1_SYNCHRONISER_7:
case ARIZONA_FLL1_SPREAD_SPECTRUM:
case ARIZONA_FLL1_GPIO_CLOCK:
case ARIZONA_FLL2_CONTROL_1:
case ARIZONA_FLL2_CONTROL_2:
case ARIZONA_FLL2_CONTROL_3:
case ARIZONA_FLL2_CONTROL_4:
case ARIZONA_FLL2_CONTROL_5:
case ARIZONA_FLL2_CONTROL_6:
case ARIZONA_FLL2_CONTROL_7:
case ARIZONA_FLL2_EFS_2:
case ARIZONA_FLL2_LOOP_FILTER_TEST_1:
case ARIZONA_FLL2_NCO_TEST_0:
case ARIZONA_FLL2_SYNCHRONISER_1:
case ARIZONA_FLL2_SYNCHRONISER_2:
case ARIZONA_FLL2_SYNCHRONISER_3:
case ARIZONA_FLL2_SYNCHRONISER_4:
case ARIZONA_FLL2_SYNCHRONISER_5:
case ARIZONA_FLL2_SYNCHRONISER_6:
case ARIZONA_FLL2_SYNCHRONISER_7:
case ARIZONA_FLL2_SPREAD_SPECTRUM:
case ARIZONA_FLL2_GPIO_CLOCK:
case MOON_FLLAO_CONTROL_1:
case MOON_FLLAO_CONTROL_2:
case MOON_FLLAO_CONTROL_3:
case MOON_FLLAO_CONTROL_4:
case MOON_FLLAO_CONTROL_5:
case MOON_FLLAO_CONTROL_6:
case MOON_FLLAO_CONTROL_7:
case MOON_FLLAO_CONTROL_8:
case MOON_FLLAO_CONTROL_9:
case MOON_FLLAO_CONTROL_10:
case MOON_FLLAO_CONTROL_11:
case ARIZONA_MIC_CHARGE_PUMP_1:
case ARIZONA_LDO2_CONTROL_1:
case ARIZONA_MIC_BIAS_CTRL_1:
case ARIZONA_MIC_BIAS_CTRL_2:
case ARIZONA_MIC_BIAS_CTRL_5:
case ARIZONA_MIC_BIAS_CTRL_6:
case ARIZONA_HP_CTRL_1L:
case ARIZONA_HP_CTRL_1R:
case ARIZONA_HP_CTRL_2L:
case ARIZONA_HP_CTRL_2R:
case ARIZONA_HP_CTRL_3L:
case ARIZONA_HP_CTRL_3R:
case CLEARWATER_EDRE_HP_STEREO_CONTROL:
case ARIZONA_ACCESSORY_DETECT_MODE_1:
case MOON_HEADPHONE_DETECT_0:
case ARIZONA_HEADPHONE_DETECT_1:
case ARIZONA_HEADPHONE_DETECT_2:
case ARIZONA_HEADPHONE_DETECT_3:
case ARIZONA_HP_DACVAL:
case CLEARWATER_MICD_CLAMP_CONTROL:
case MOON_MIC_DETECT_0:
case ARIZONA_MIC_DETECT_1:
case ARIZONA_MIC_DETECT_2:
case ARIZONA_MIC_DETECT_3:
case ARIZONA_MIC_DETECT_4:
case ARIZONA_MIC_DETECT_LEVEL_1:
case ARIZONA_MIC_DETECT_LEVEL_2:
case ARIZONA_MIC_DETECT_LEVEL_3:
case ARIZONA_MIC_DETECT_LEVEL_4:
case MOON_MICDET2_CONTROL_0:
case MOON_MICDET2_CONTROL_1:
case MOON_MICDET2_CONTROL_2:
case MOON_MICDET2_CONTROL_3:
case MOON_MICDET2_CONTROL_4:
case MOON_MICDET2_LEVEL_1:
case MOON_MICDET2_LEVEL_2:
case MOON_MICDET2_LEVEL_3:
case MOON_MICDET2_LEVEL_4:
case CLEARWATER_GP_SWITCH_1:
case ARIZONA_JACK_DETECT_ANALOGUE:
case ARIZONA_INPUT_ENABLES:
case ARIZONA_INPUT_ENABLES_STATUS:
case ARIZONA_INPUT_RATE:
case ARIZONA_INPUT_VOLUME_RAMP:
case ARIZONA_HPF_CONTROL:
case ARIZONA_IN1L_CONTROL:
case ARIZONA_ADC_DIGITAL_VOLUME_1L:
case ARIZONA_DMIC1L_CONTROL:
case MOON_IN1L_RATE_CONTROL:
case ARIZONA_IN1R_CONTROL:
case ARIZONA_ADC_DIGITAL_VOLUME_1R:
case ARIZONA_DMIC1R_CONTROL:
case MOON_IN1R_RATE_CONTROL:
case ARIZONA_IN2L_CONTROL:
case ARIZONA_ADC_DIGITAL_VOLUME_2L:
case ARIZONA_DMIC2L_CONTROL:
case MOON_IN2L_RATE_CONTROL:
case ARIZONA_IN2R_CONTROL:
case ARIZONA_ADC_DIGITAL_VOLUME_2R:
case ARIZONA_DMIC2R_CONTROL:
case MOON_IN2R_RATE_CONTROL:
case ARIZONA_IN3L_CONTROL:
case ARIZONA_ADC_DIGITAL_VOLUME_3L:
case ARIZONA_DMIC3L_CONTROL:
case MOON_IN3L_RATE_CONTROL:
case ARIZONA_IN3R_CONTROL:
case ARIZONA_ADC_DIGITAL_VOLUME_3R:
case ARIZONA_DMIC3R_CONTROL:
case MOON_IN3R_RATE_CONTROL:
case ARIZONA_IN4L_CONTROL:
case ARIZONA_ADC_DIGITAL_VOLUME_4L:
case ARIZONA_DMIC4L_CONTROL:
case MOON_IN4L_RATE_CONTROL:
case ARIZONA_IN4R_CONTROL:
case ARIZONA_ADC_DIGITAL_VOLUME_4R:
case ARIZONA_DMIC4R_CONTROL:
case MOON_IN4R_RATE_CONTROL:
case ARIZONA_IN5L_CONTROL:
case ARIZONA_ADC_DIGITAL_VOLUME_5L:
case ARIZONA_DMIC5L_CONTROL:
case MOON_IN5L_RATE_CONTROL:
case ARIZONA_IN5R_CONTROL:
case ARIZONA_ADC_DIGITAL_VOLUME_5R:
case ARIZONA_DMIC5R_CONTROL:
case MOON_IN5R_RATE_CONTROL:
case ARIZONA_OUTPUT_ENABLES_1:
case ARIZONA_OUTPUT_STATUS_1:
case ARIZONA_RAW_OUTPUT_STATUS_1:
case ARIZONA_OUTPUT_RATE_1:
case ARIZONA_OUTPUT_VOLUME_RAMP:
case ARIZONA_OUTPUT_PATH_CONFIG_1L:
case ARIZONA_DAC_DIGITAL_VOLUME_1L:
case MOON_OUT1_CONFIG:
case ARIZONA_NOISE_GATE_SELECT_1L:
case ARIZONA_OUTPUT_PATH_CONFIG_1R:
case ARIZONA_DAC_DIGITAL_VOLUME_1R:
case ARIZONA_NOISE_GATE_SELECT_1R:
case ARIZONA_OUTPUT_PATH_CONFIG_2L:
case ARIZONA_DAC_DIGITAL_VOLUME_2L:
case MOON_OUT2_CONFIG:
case ARIZONA_NOISE_GATE_SELECT_2L:
case ARIZONA_OUTPUT_PATH_CONFIG_2R:
case ARIZONA_DAC_DIGITAL_VOLUME_2R:
case ARIZONA_NOISE_GATE_SELECT_2R:
case ARIZONA_OUTPUT_PATH_CONFIG_3L:
case ARIZONA_DAC_DIGITAL_VOLUME_3L:
case ARIZONA_NOISE_GATE_SELECT_3L:
case ARIZONA_OUTPUT_PATH_CONFIG_3R:
case ARIZONA_DAC_DIGITAL_VOLUME_3R:
case ARIZONA_NOISE_GATE_SELECT_3R:
case ARIZONA_OUTPUT_PATH_CONFIG_5L:
case ARIZONA_DAC_DIGITAL_VOLUME_5L:
case ARIZONA_NOISE_GATE_SELECT_5L:
case ARIZONA_OUTPUT_PATH_CONFIG_5R:
case ARIZONA_DAC_DIGITAL_VOLUME_5R:
case ARIZONA_NOISE_GATE_SELECT_5R:
case ARIZONA_DRE_ENABLE:
case CLEARWATER_EDRE_ENABLE:
case ARIZONA_DAC_AEC_CONTROL_1:
case ARIZONA_NOISE_GATE_CONTROL:
case ARIZONA_PDM_SPK1_CTRL_1:
case ARIZONA_PDM_SPK1_CTRL_2:
case ARIZONA_HP1_SHORT_CIRCUIT_CTRL:
case ARIZONA_HP2_SHORT_CIRCUIT_CTRL:
case ARIZONA_HP3_SHORT_CIRCUIT_CTRL:
case ARIZONA_HP_TEST_CTRL_5:
case ARIZONA_HP_TEST_CTRL_6:
case ARIZONA_AIF1_BCLK_CTRL:
case ARIZONA_AIF1_TX_PIN_CTRL:
case ARIZONA_AIF1_RX_PIN_CTRL:
case ARIZONA_AIF1_RATE_CTRL:
case ARIZONA_AIF1_FORMAT:
case ARIZONA_AIF1_TX_BCLK_RATE:
case ARIZONA_AIF1_RX_BCLK_RATE:
case ARIZONA_AIF1_FRAME_CTRL_1:
case ARIZONA_AIF1_FRAME_CTRL_2:
case ARIZONA_AIF1_FRAME_CTRL_3:
case ARIZONA_AIF1_FRAME_CTRL_4:
case ARIZONA_AIF1_FRAME_CTRL_5:
case ARIZONA_AIF1_FRAME_CTRL_6:
case ARIZONA_AIF1_FRAME_CTRL_7:
case ARIZONA_AIF1_FRAME_CTRL_8:
case ARIZONA_AIF1_FRAME_CTRL_9:
case ARIZONA_AIF1_FRAME_CTRL_10:
case ARIZONA_AIF1_FRAME_CTRL_11:
case ARIZONA_AIF1_FRAME_CTRL_12:
case ARIZONA_AIF1_FRAME_CTRL_13:
case ARIZONA_AIF1_FRAME_CTRL_14:
case ARIZONA_AIF1_FRAME_CTRL_15:
case ARIZONA_AIF1_FRAME_CTRL_16:
case ARIZONA_AIF1_FRAME_CTRL_17:
case ARIZONA_AIF1_FRAME_CTRL_18:
case ARIZONA_AIF1_TX_ENABLES:
case ARIZONA_AIF1_RX_ENABLES:
case ARIZONA_AIF2_BCLK_CTRL:
case ARIZONA_AIF2_TX_PIN_CTRL:
case ARIZONA_AIF2_RX_PIN_CTRL:
case ARIZONA_AIF2_RATE_CTRL:
case ARIZONA_AIF2_FORMAT:
case ARIZONA_AIF2_TX_BCLK_RATE:
case ARIZONA_AIF2_RX_BCLK_RATE:
case ARIZONA_AIF2_FRAME_CTRL_1:
case ARIZONA_AIF2_FRAME_CTRL_2:
case ARIZONA_AIF2_FRAME_CTRL_3:
case ARIZONA_AIF2_FRAME_CTRL_4:
case ARIZONA_AIF2_FRAME_CTRL_5:
case ARIZONA_AIF2_FRAME_CTRL_6:
case ARIZONA_AIF2_FRAME_CTRL_7:
case ARIZONA_AIF2_FRAME_CTRL_8:
case ARIZONA_AIF2_FRAME_CTRL_9:
case ARIZONA_AIF2_FRAME_CTRL_10:
case ARIZONA_AIF2_FRAME_CTRL_11:
case ARIZONA_AIF2_FRAME_CTRL_12:
case ARIZONA_AIF2_FRAME_CTRL_13:
case ARIZONA_AIF2_FRAME_CTRL_14:
case ARIZONA_AIF2_FRAME_CTRL_15:
case ARIZONA_AIF2_FRAME_CTRL_16:
case ARIZONA_AIF2_FRAME_CTRL_17:
case ARIZONA_AIF2_FRAME_CTRL_18:
case ARIZONA_AIF2_TX_ENABLES:
case ARIZONA_AIF2_RX_ENABLES:
case ARIZONA_AIF3_BCLK_CTRL:
case ARIZONA_AIF3_TX_PIN_CTRL:
case ARIZONA_AIF3_RX_PIN_CTRL:
case ARIZONA_AIF3_RATE_CTRL:
case ARIZONA_AIF3_FORMAT:
case ARIZONA_AIF3_TX_BCLK_RATE:
case ARIZONA_AIF3_RX_BCLK_RATE:
case ARIZONA_AIF3_FRAME_CTRL_1:
case ARIZONA_AIF3_FRAME_CTRL_2:
case ARIZONA_AIF3_FRAME_CTRL_3:
case ARIZONA_AIF3_FRAME_CTRL_4:
case ARIZONA_AIF3_FRAME_CTRL_11:
case ARIZONA_AIF3_FRAME_CTRL_12:
case ARIZONA_AIF3_TX_ENABLES:
case ARIZONA_AIF3_RX_ENABLES:
case ARIZONA_AIF4_BCLK_CTRL:
case ARIZONA_AIF4_TX_PIN_CTRL:
case ARIZONA_AIF4_RX_PIN_CTRL:
case ARIZONA_AIF4_RATE_CTRL:
case ARIZONA_AIF4_FORMAT:
case ARIZONA_AIF4_TX_BCLK_RATE:
case ARIZONA_AIF4_RX_BCLK_RATE:
case ARIZONA_AIF4_FRAME_CTRL_1:
case ARIZONA_AIF4_FRAME_CTRL_2:
case ARIZONA_AIF4_FRAME_CTRL_3:
case ARIZONA_AIF4_FRAME_CTRL_4:
case ARIZONA_AIF4_FRAME_CTRL_11:
case ARIZONA_AIF4_FRAME_CTRL_12:
case ARIZONA_AIF4_TX_ENABLES:
case ARIZONA_AIF4_RX_ENABLES:
case ARIZONA_SPD1_TX_CONTROL:
case ARIZONA_SPD1_TX_CHANNEL_STATUS_1:
case ARIZONA_SPD1_TX_CHANNEL_STATUS_2:
case ARIZONA_SPD1_TX_CHANNEL_STATUS_3:
case ARIZONA_SLIMBUS_FRAMER_REF_GEAR:
case ARIZONA_SLIMBUS_RATES_1:
case ARIZONA_SLIMBUS_RATES_2:
case ARIZONA_SLIMBUS_RATES_3:
case ARIZONA_SLIMBUS_RATES_4:
case ARIZONA_SLIMBUS_RATES_5:
case ARIZONA_SLIMBUS_RATES_6:
case ARIZONA_SLIMBUS_RATES_7:
case ARIZONA_SLIMBUS_RATES_8:
case ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE:
case ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE:
case ARIZONA_SLIMBUS_RX_PORT_STATUS:
case ARIZONA_SLIMBUS_TX_PORT_STATUS:
case ARIZONA_PWM1MIX_INPUT_1_SOURCE:
case ARIZONA_PWM1MIX_INPUT_1_VOLUME:
case ARIZONA_PWM1MIX_INPUT_2_SOURCE:
case ARIZONA_PWM1MIX_INPUT_2_VOLUME:
case ARIZONA_PWM1MIX_INPUT_3_SOURCE:
case ARIZONA_PWM1MIX_INPUT_3_VOLUME:
case ARIZONA_PWM1MIX_INPUT_4_SOURCE:
case ARIZONA_PWM1MIX_INPUT_4_VOLUME:
case ARIZONA_PWM2MIX_INPUT_1_SOURCE:
case ARIZONA_PWM2MIX_INPUT_1_VOLUME:
case ARIZONA_PWM2MIX_INPUT_2_SOURCE:
case ARIZONA_PWM2MIX_INPUT_2_VOLUME:
case ARIZONA_PWM2MIX_INPUT_3_SOURCE:
case ARIZONA_PWM2MIX_INPUT_3_VOLUME:
case ARIZONA_PWM2MIX_INPUT_4_SOURCE:
case ARIZONA_PWM2MIX_INPUT_4_VOLUME:
case ARIZONA_OUT1LMIX_INPUT_1_SOURCE:
case ARIZONA_OUT1LMIX_INPUT_1_VOLUME:
case ARIZONA_OUT1LMIX_INPUT_2_SOURCE:
case ARIZONA_OUT1LMIX_INPUT_2_VOLUME:
case ARIZONA_OUT1LMIX_INPUT_3_SOURCE:
case ARIZONA_OUT1LMIX_INPUT_3_VOLUME:
case ARIZONA_OUT1LMIX_INPUT_4_SOURCE:
case ARIZONA_OUT1LMIX_INPUT_4_VOLUME:
case ARIZONA_OUT1RMIX_INPUT_1_SOURCE:
case ARIZONA_OUT1RMIX_INPUT_1_VOLUME:
case ARIZONA_OUT1RMIX_INPUT_2_SOURCE:
case ARIZONA_OUT1RMIX_INPUT_2_VOLUME:
case ARIZONA_OUT1RMIX_INPUT_3_SOURCE:
case ARIZONA_OUT1RMIX_INPUT_3_VOLUME:
case ARIZONA_OUT1RMIX_INPUT_4_SOURCE:
case ARIZONA_OUT1RMIX_INPUT_4_VOLUME:
case ARIZONA_OUT2LMIX_INPUT_1_SOURCE:
case ARIZONA_OUT2LMIX_INPUT_1_VOLUME:
case ARIZONA_OUT2LMIX_INPUT_2_SOURCE:
case ARIZONA_OUT2LMIX_INPUT_2_VOLUME:
case ARIZONA_OUT2LMIX_INPUT_3_SOURCE:
case ARIZONA_OUT2LMIX_INPUT_3_VOLUME:
case ARIZONA_OUT2LMIX_INPUT_4_SOURCE:
case ARIZONA_OUT2LMIX_INPUT_4_VOLUME:
case ARIZONA_OUT2RMIX_INPUT_1_SOURCE:
case ARIZONA_OUT2RMIX_INPUT_1_VOLUME:
case ARIZONA_OUT2RMIX_INPUT_2_SOURCE:
case ARIZONA_OUT2RMIX_INPUT_2_VOLUME:
case ARIZONA_OUT2RMIX_INPUT_3_SOURCE:
case ARIZONA_OUT2RMIX_INPUT_3_VOLUME:
case ARIZONA_OUT2RMIX_INPUT_4_SOURCE:
case ARIZONA_OUT2RMIX_INPUT_4_VOLUME:
case ARIZONA_OUT3LMIX_INPUT_1_SOURCE:
case ARIZONA_OUT3LMIX_INPUT_1_VOLUME:
case ARIZONA_OUT3LMIX_INPUT_2_SOURCE:
case ARIZONA_OUT3LMIX_INPUT_2_VOLUME:
case ARIZONA_OUT3LMIX_INPUT_3_SOURCE:
case ARIZONA_OUT3LMIX_INPUT_3_VOLUME:
case ARIZONA_OUT3LMIX_INPUT_4_SOURCE:
case ARIZONA_OUT3LMIX_INPUT_4_VOLUME:
case ARIZONA_OUT3RMIX_INPUT_1_SOURCE:
case ARIZONA_OUT3RMIX_INPUT_1_VOLUME:
case ARIZONA_OUT3RMIX_INPUT_2_SOURCE:
case ARIZONA_OUT3RMIX_INPUT_2_VOLUME:
case ARIZONA_OUT3RMIX_INPUT_3_SOURCE:
case ARIZONA_OUT3RMIX_INPUT_3_VOLUME:
case ARIZONA_OUT3RMIX_INPUT_4_SOURCE:
case ARIZONA_OUT3RMIX_INPUT_4_VOLUME:
case ARIZONA_OUT5LMIX_INPUT_1_SOURCE:
case ARIZONA_OUT5LMIX_INPUT_1_VOLUME:
case ARIZONA_OUT5LMIX_INPUT_2_SOURCE:
case ARIZONA_OUT5LMIX_INPUT_2_VOLUME:
case ARIZONA_OUT5LMIX_INPUT_3_SOURCE:
case ARIZONA_OUT5LMIX_INPUT_3_VOLUME:
case ARIZONA_OUT5LMIX_INPUT_4_SOURCE:
case ARIZONA_OUT5LMIX_INPUT_4_VOLUME:
case ARIZONA_OUT5RMIX_INPUT_1_SOURCE:
case ARIZONA_OUT5RMIX_INPUT_1_VOLUME:
case ARIZONA_OUT5RMIX_INPUT_2_SOURCE:
case ARIZONA_OUT5RMIX_INPUT_2_VOLUME:
case ARIZONA_OUT5RMIX_INPUT_3_SOURCE:
case ARIZONA_OUT5RMIX_INPUT_3_VOLUME:
case ARIZONA_OUT5RMIX_INPUT_4_SOURCE:
case ARIZONA_OUT5RMIX_INPUT_4_VOLUME:
case ARIZONA_AIF1TX1MIX_INPUT_1_SOURCE:
case ARIZONA_AIF1TX1MIX_INPUT_1_VOLUME:
case ARIZONA_AIF1TX1MIX_INPUT_2_SOURCE:
case ARIZONA_AIF1TX1MIX_INPUT_2_VOLUME:
case ARIZONA_AIF1TX1MIX_INPUT_3_SOURCE:
case ARIZONA_AIF1TX1MIX_INPUT_3_VOLUME:
case ARIZONA_AIF1TX1MIX_INPUT_4_SOURCE:
case ARIZONA_AIF1TX1MIX_INPUT_4_VOLUME:
case ARIZONA_AIF1TX2MIX_INPUT_1_SOURCE:
case ARIZONA_AIF1TX2MIX_INPUT_1_VOLUME:
case ARIZONA_AIF1TX2MIX_INPUT_2_SOURCE:
case ARIZONA_AIF1TX2MIX_INPUT_2_VOLUME:
case ARIZONA_AIF1TX2MIX_INPUT_3_SOURCE:
case ARIZONA_AIF1TX2MIX_INPUT_3_VOLUME:
case ARIZONA_AIF1TX2MIX_INPUT_4_SOURCE:
case ARIZONA_AIF1TX2MIX_INPUT_4_VOLUME:
case ARIZONA_AIF1TX3MIX_INPUT_1_SOURCE:
case ARIZONA_AIF1TX3MIX_INPUT_1_VOLUME:
case ARIZONA_AIF1TX3MIX_INPUT_2_SOURCE:
case ARIZONA_AIF1TX3MIX_INPUT_2_VOLUME:
case ARIZONA_AIF1TX3MIX_INPUT_3_SOURCE:
case ARIZONA_AIF1TX3MIX_INPUT_3_VOLUME:
case ARIZONA_AIF1TX3MIX_INPUT_4_SOURCE:
case ARIZONA_AIF1TX3MIX_INPUT_4_VOLUME:
case ARIZONA_AIF1TX4MIX_INPUT_1_SOURCE:
case ARIZONA_AIF1TX4MIX_INPUT_1_VOLUME:
case ARIZONA_AIF1TX4MIX_INPUT_2_SOURCE:
case ARIZONA_AIF1TX4MIX_INPUT_2_VOLUME:
case ARIZONA_AIF1TX4MIX_INPUT_3_SOURCE:
case ARIZONA_AIF1TX4MIX_INPUT_3_VOLUME:
case ARIZONA_AIF1TX4MIX_INPUT_4_SOURCE:
case ARIZONA_AIF1TX4MIX_INPUT_4_VOLUME:
case ARIZONA_AIF1TX5MIX_INPUT_1_SOURCE:
case ARIZONA_AIF1TX5MIX_INPUT_1_VOLUME:
case ARIZONA_AIF1TX5MIX_INPUT_2_SOURCE:
case ARIZONA_AIF1TX5MIX_INPUT_2_VOLUME:
case ARIZONA_AIF1TX5MIX_INPUT_3_SOURCE:
case ARIZONA_AIF1TX5MIX_INPUT_3_VOLUME:
case ARIZONA_AIF1TX5MIX_INPUT_4_SOURCE:
case ARIZONA_AIF1TX5MIX_INPUT_4_VOLUME:
case ARIZONA_AIF1TX6MIX_INPUT_1_SOURCE:
case ARIZONA_AIF1TX6MIX_INPUT_1_VOLUME:
case ARIZONA_AIF1TX6MIX_INPUT_2_SOURCE:
case ARIZONA_AIF1TX6MIX_INPUT_2_VOLUME:
case ARIZONA_AIF1TX6MIX_INPUT_3_SOURCE:
case ARIZONA_AIF1TX6MIX_INPUT_3_VOLUME:
case ARIZONA_AIF1TX6MIX_INPUT_4_SOURCE:
case ARIZONA_AIF1TX6MIX_INPUT_4_VOLUME:
case ARIZONA_AIF1TX7MIX_INPUT_1_SOURCE:
case ARIZONA_AIF1TX7MIX_INPUT_1_VOLUME:
case ARIZONA_AIF1TX7MIX_INPUT_2_SOURCE:
case ARIZONA_AIF1TX7MIX_INPUT_2_VOLUME:
case ARIZONA_AIF1TX7MIX_INPUT_3_SOURCE:
case ARIZONA_AIF1TX7MIX_INPUT_3_VOLUME:
case ARIZONA_AIF1TX7MIX_INPUT_4_SOURCE:
case ARIZONA_AIF1TX7MIX_INPUT_4_VOLUME:
case ARIZONA_AIF1TX8MIX_INPUT_1_SOURCE:
case ARIZONA_AIF1TX8MIX_INPUT_1_VOLUME:
case ARIZONA_AIF1TX8MIX_INPUT_2_SOURCE:
case ARIZONA_AIF1TX8MIX_INPUT_2_VOLUME:
case ARIZONA_AIF1TX8MIX_INPUT_3_SOURCE:
case ARIZONA_AIF1TX8MIX_INPUT_3_VOLUME:
case ARIZONA_AIF1TX8MIX_INPUT_4_SOURCE:
case ARIZONA_AIF1TX8MIX_INPUT_4_VOLUME:
case ARIZONA_AIF2TX1MIX_INPUT_1_SOURCE:
case ARIZONA_AIF2TX1MIX_INPUT_1_VOLUME:
case ARIZONA_AIF2TX1MIX_INPUT_2_SOURCE:
case ARIZONA_AIF2TX1MIX_INPUT_2_VOLUME:
case ARIZONA_AIF2TX1MIX_INPUT_3_SOURCE:
case ARIZONA_AIF2TX1MIX_INPUT_3_VOLUME:
case ARIZONA_AIF2TX1MIX_INPUT_4_SOURCE:
case ARIZONA_AIF2TX1MIX_INPUT_4_VOLUME:
case ARIZONA_AIF2TX2MIX_INPUT_1_SOURCE:
case ARIZONA_AIF2TX2MIX_INPUT_1_VOLUME:
case ARIZONA_AIF2TX2MIX_INPUT_2_SOURCE:
case ARIZONA_AIF2TX2MIX_INPUT_2_VOLUME:
case ARIZONA_AIF2TX2MIX_INPUT_3_SOURCE:
case ARIZONA_AIF2TX2MIX_INPUT_3_VOLUME:
case ARIZONA_AIF2TX2MIX_INPUT_4_SOURCE:
case ARIZONA_AIF2TX2MIX_INPUT_4_VOLUME:
case ARIZONA_AIF2TX3MIX_INPUT_1_SOURCE:
case ARIZONA_AIF2TX3MIX_INPUT_1_VOLUME:
case ARIZONA_AIF2TX3MIX_INPUT_2_SOURCE:
case ARIZONA_AIF2TX3MIX_INPUT_2_VOLUME:
case ARIZONA_AIF2TX3MIX_INPUT_3_SOURCE:
case ARIZONA_AIF2TX3MIX_INPUT_3_VOLUME:
case ARIZONA_AIF2TX3MIX_INPUT_4_SOURCE:
case ARIZONA_AIF2TX3MIX_INPUT_4_VOLUME:
case ARIZONA_AIF2TX4MIX_INPUT_1_SOURCE:
case ARIZONA_AIF2TX4MIX_INPUT_1_VOLUME:
case ARIZONA_AIF2TX4MIX_INPUT_2_SOURCE:
case ARIZONA_AIF2TX4MIX_INPUT_2_VOLUME:
case ARIZONA_AIF2TX4MIX_INPUT_3_SOURCE:
case ARIZONA_AIF2TX4MIX_INPUT_3_VOLUME:
case ARIZONA_AIF2TX4MIX_INPUT_4_SOURCE:
case ARIZONA_AIF2TX4MIX_INPUT_4_VOLUME:
case ARIZONA_AIF2TX5MIX_INPUT_1_SOURCE:
case ARIZONA_AIF2TX5MIX_INPUT_1_VOLUME:
case ARIZONA_AIF2TX5MIX_INPUT_2_SOURCE:
case ARIZONA_AIF2TX5MIX_INPUT_2_VOLUME:
case ARIZONA_AIF2TX5MIX_INPUT_3_SOURCE:
case ARIZONA_AIF2TX5MIX_INPUT_3_VOLUME:
case ARIZONA_AIF2TX5MIX_INPUT_4_SOURCE:
case ARIZONA_AIF2TX5MIX_INPUT_4_VOLUME:
case ARIZONA_AIF2TX6MIX_INPUT_1_SOURCE:
case ARIZONA_AIF2TX6MIX_INPUT_1_VOLUME:
case ARIZONA_AIF2TX6MIX_INPUT_2_SOURCE:
case ARIZONA_AIF2TX6MIX_INPUT_2_VOLUME:
case ARIZONA_AIF2TX6MIX_INPUT_3_SOURCE:
case ARIZONA_AIF2TX6MIX_INPUT_3_VOLUME:
case ARIZONA_AIF2TX6MIX_INPUT_4_SOURCE:
case ARIZONA_AIF2TX6MIX_INPUT_4_VOLUME:
case ARIZONA_AIF2TX7MIX_INPUT_1_SOURCE:
case ARIZONA_AIF2TX7MIX_INPUT_1_VOLUME:
case ARIZONA_AIF2TX7MIX_INPUT_2_SOURCE:
case ARIZONA_AIF2TX7MIX_INPUT_2_VOLUME:
case ARIZONA_AIF2TX7MIX_INPUT_3_SOURCE:
case ARIZONA_AIF2TX7MIX_INPUT_3_VOLUME:
case ARIZONA_AIF2TX7MIX_INPUT_4_SOURCE:
case ARIZONA_AIF2TX7MIX_INPUT_4_VOLUME:
case ARIZONA_AIF2TX8MIX_INPUT_1_SOURCE:
case ARIZONA_AIF2TX8MIX_INPUT_1_VOLUME:
case ARIZONA_AIF2TX8MIX_INPUT_2_SOURCE:
case ARIZONA_AIF2TX8MIX_INPUT_2_VOLUME:
case ARIZONA_AIF2TX8MIX_INPUT_3_SOURCE:
case ARIZONA_AIF2TX8MIX_INPUT_3_VOLUME:
case ARIZONA_AIF2TX8MIX_INPUT_4_SOURCE:
case ARIZONA_AIF2TX8MIX_INPUT_4_VOLUME:
case ARIZONA_AIF3TX1MIX_INPUT_1_SOURCE:
case ARIZONA_AIF3TX1MIX_INPUT_1_VOLUME:
case ARIZONA_AIF3TX1MIX_INPUT_2_SOURCE:
case ARIZONA_AIF3TX1MIX_INPUT_2_VOLUME:
case ARIZONA_AIF3TX1MIX_INPUT_3_SOURCE:
case ARIZONA_AIF3TX1MIX_INPUT_3_VOLUME:
case ARIZONA_AIF3TX1MIX_INPUT_4_SOURCE:
case ARIZONA_AIF3TX1MIX_INPUT_4_VOLUME:
case ARIZONA_AIF3TX2MIX_INPUT_1_SOURCE:
case ARIZONA_AIF3TX2MIX_INPUT_1_VOLUME:
case ARIZONA_AIF3TX2MIX_INPUT_2_SOURCE:
case ARIZONA_AIF3TX2MIX_INPUT_2_VOLUME:
case ARIZONA_AIF3TX2MIX_INPUT_3_SOURCE:
case ARIZONA_AIF3TX2MIX_INPUT_3_VOLUME:
case ARIZONA_AIF3TX2MIX_INPUT_4_SOURCE:
case ARIZONA_AIF3TX2MIX_INPUT_4_VOLUME:
case ARIZONA_AIF4TX1MIX_INPUT_1_SOURCE:
case ARIZONA_AIF4TX1MIX_INPUT_1_VOLUME:
case ARIZONA_AIF4TX1MIX_INPUT_2_SOURCE:
case ARIZONA_AIF4TX1MIX_INPUT_2_VOLUME:
case ARIZONA_AIF4TX1MIX_INPUT_3_SOURCE:
case ARIZONA_AIF4TX1MIX_INPUT_3_VOLUME:
case ARIZONA_AIF4TX1MIX_INPUT_4_SOURCE:
case ARIZONA_AIF4TX1MIX_INPUT_4_VOLUME:
case ARIZONA_AIF4TX2MIX_INPUT_1_SOURCE:
case ARIZONA_AIF4TX2MIX_INPUT_1_VOLUME:
case ARIZONA_AIF4TX2MIX_INPUT_2_SOURCE:
case ARIZONA_AIF4TX2MIX_INPUT_2_VOLUME:
case ARIZONA_AIF4TX2MIX_INPUT_3_SOURCE:
case ARIZONA_AIF4TX2MIX_INPUT_3_VOLUME:
case ARIZONA_AIF4TX2MIX_INPUT_4_SOURCE:
case ARIZONA_AIF4TX2MIX_INPUT_4_VOLUME:
case ARIZONA_SLIMTX1MIX_INPUT_1_SOURCE:
case ARIZONA_SLIMTX1MIX_INPUT_1_VOLUME:
case ARIZONA_SLIMTX1MIX_INPUT_2_SOURCE:
case ARIZONA_SLIMTX1MIX_INPUT_2_VOLUME:
case ARIZONA_SLIMTX1MIX_INPUT_3_SOURCE:
case ARIZONA_SLIMTX1MIX_INPUT_3_VOLUME:
case ARIZONA_SLIMTX1MIX_INPUT_4_SOURCE:
case ARIZONA_SLIMTX1MIX_INPUT_4_VOLUME:
case ARIZONA_SLIMTX2MIX_INPUT_1_SOURCE:
case ARIZONA_SLIMTX2MIX_INPUT_1_VOLUME:
case ARIZONA_SLIMTX2MIX_INPUT_2_SOURCE:
case ARIZONA_SLIMTX2MIX_INPUT_2_VOLUME:
case ARIZONA_SLIMTX2MIX_INPUT_3_SOURCE:
case ARIZONA_SLIMTX2MIX_INPUT_3_VOLUME:
case ARIZONA_SLIMTX2MIX_INPUT_4_SOURCE:
case ARIZONA_SLIMTX2MIX_INPUT_4_VOLUME:
case ARIZONA_SLIMTX3MIX_INPUT_1_SOURCE:
case ARIZONA_SLIMTX3MIX_INPUT_1_VOLUME:
case ARIZONA_SLIMTX3MIX_INPUT_2_SOURCE:
case ARIZONA_SLIMTX3MIX_INPUT_2_VOLUME:
case ARIZONA_SLIMTX3MIX_INPUT_3_SOURCE:
case ARIZONA_SLIMTX3MIX_INPUT_3_VOLUME:
case ARIZONA_SLIMTX3MIX_INPUT_4_SOURCE:
case ARIZONA_SLIMTX3MIX_INPUT_4_VOLUME:
case ARIZONA_SLIMTX4MIX_INPUT_1_SOURCE:
case ARIZONA_SLIMTX4MIX_INPUT_1_VOLUME:
case ARIZONA_SLIMTX4MIX_INPUT_2_SOURCE:
case ARIZONA_SLIMTX4MIX_INPUT_2_VOLUME:
case ARIZONA_SLIMTX4MIX_INPUT_3_SOURCE:
case ARIZONA_SLIMTX4MIX_INPUT_3_VOLUME:
case ARIZONA_SLIMTX4MIX_INPUT_4_SOURCE:
case ARIZONA_SLIMTX4MIX_INPUT_4_VOLUME:
case ARIZONA_SLIMTX5MIX_INPUT_1_SOURCE:
case ARIZONA_SLIMTX5MIX_INPUT_1_VOLUME:
case ARIZONA_SLIMTX5MIX_INPUT_2_SOURCE:
case ARIZONA_SLIMTX5MIX_INPUT_2_VOLUME:
case ARIZONA_SLIMTX5MIX_INPUT_3_SOURCE:
case ARIZONA_SLIMTX5MIX_INPUT_3_VOLUME:
case ARIZONA_SLIMTX5MIX_INPUT_4_SOURCE:
case ARIZONA_SLIMTX5MIX_INPUT_4_VOLUME:
case ARIZONA_SLIMTX6MIX_INPUT_1_SOURCE:
case ARIZONA_SLIMTX6MIX_INPUT_1_VOLUME:
case ARIZONA_SLIMTX6MIX_INPUT_2_SOURCE:
case ARIZONA_SLIMTX6MIX_INPUT_2_VOLUME:
case ARIZONA_SLIMTX6MIX_INPUT_3_SOURCE:
case ARIZONA_SLIMTX6MIX_INPUT_3_VOLUME:
case ARIZONA_SLIMTX6MIX_INPUT_4_SOURCE:
case ARIZONA_SLIMTX6MIX_INPUT_4_VOLUME:
case ARIZONA_SLIMTX7MIX_INPUT_1_SOURCE:
case ARIZONA_SLIMTX7MIX_INPUT_1_VOLUME:
case ARIZONA_SLIMTX7MIX_INPUT_2_SOURCE:
case ARIZONA_SLIMTX7MIX_INPUT_2_VOLUME:
case ARIZONA_SLIMTX7MIX_INPUT_3_SOURCE:
case ARIZONA_SLIMTX7MIX_INPUT_3_VOLUME:
case ARIZONA_SLIMTX7MIX_INPUT_4_SOURCE:
case ARIZONA_SLIMTX7MIX_INPUT_4_VOLUME:
case ARIZONA_SLIMTX8MIX_INPUT_1_SOURCE:
case ARIZONA_SLIMTX8MIX_INPUT_1_VOLUME:
case ARIZONA_SLIMTX8MIX_INPUT_2_SOURCE:
case ARIZONA_SLIMTX8MIX_INPUT_2_VOLUME:
case ARIZONA_SLIMTX8MIX_INPUT_3_SOURCE:
case ARIZONA_SLIMTX8MIX_INPUT_3_VOLUME:
case ARIZONA_SLIMTX8MIX_INPUT_4_SOURCE:
case ARIZONA_SLIMTX8MIX_INPUT_4_VOLUME:
case ARIZONA_SPDIFTX1MIX_INPUT_1_SOURCE:
case ARIZONA_SPDIFTX1MIX_INPUT_1_VOLUME:
case ARIZONA_SPDIFTX2MIX_INPUT_1_SOURCE:
case ARIZONA_SPDIFTX2MIX_INPUT_1_VOLUME:
case ARIZONA_EQ1MIX_INPUT_1_SOURCE:
case ARIZONA_EQ1MIX_INPUT_1_VOLUME:
case ARIZONA_EQ1MIX_INPUT_2_SOURCE:
case ARIZONA_EQ1MIX_INPUT_2_VOLUME:
case ARIZONA_EQ1MIX_INPUT_3_SOURCE:
case ARIZONA_EQ1MIX_INPUT_3_VOLUME:
case ARIZONA_EQ1MIX_INPUT_4_SOURCE:
case ARIZONA_EQ1MIX_INPUT_4_VOLUME:
case ARIZONA_EQ2MIX_INPUT_1_SOURCE:
case ARIZONA_EQ2MIX_INPUT_1_VOLUME:
case ARIZONA_EQ2MIX_INPUT_2_SOURCE:
case ARIZONA_EQ2MIX_INPUT_2_VOLUME:
case ARIZONA_EQ2MIX_INPUT_3_SOURCE:
case ARIZONA_EQ2MIX_INPUT_3_VOLUME:
case ARIZONA_EQ2MIX_INPUT_4_SOURCE:
case ARIZONA_EQ2MIX_INPUT_4_VOLUME:
case ARIZONA_EQ3MIX_INPUT_1_SOURCE:
case ARIZONA_EQ3MIX_INPUT_1_VOLUME:
case ARIZONA_EQ3MIX_INPUT_2_SOURCE:
case ARIZONA_EQ3MIX_INPUT_2_VOLUME:
case ARIZONA_EQ3MIX_INPUT_3_SOURCE:
case ARIZONA_EQ3MIX_INPUT_3_VOLUME:
case ARIZONA_EQ3MIX_INPUT_4_SOURCE:
case ARIZONA_EQ3MIX_INPUT_4_VOLUME:
case ARIZONA_EQ4MIX_INPUT_1_SOURCE:
case ARIZONA_EQ4MIX_INPUT_1_VOLUME:
case ARIZONA_EQ4MIX_INPUT_2_SOURCE:
case ARIZONA_EQ4MIX_INPUT_2_VOLUME:
case ARIZONA_EQ4MIX_INPUT_3_SOURCE:
case ARIZONA_EQ4MIX_INPUT_3_VOLUME:
case ARIZONA_EQ4MIX_INPUT_4_SOURCE:
case ARIZONA_EQ4MIX_INPUT_4_VOLUME:
case ARIZONA_DRC1LMIX_INPUT_1_SOURCE:
case ARIZONA_DRC1LMIX_INPUT_1_VOLUME:
case ARIZONA_DRC1LMIX_INPUT_2_SOURCE:
case ARIZONA_DRC1LMIX_INPUT_2_VOLUME:
case ARIZONA_DRC1LMIX_INPUT_3_SOURCE:
case ARIZONA_DRC1LMIX_INPUT_3_VOLUME:
case ARIZONA_DRC1LMIX_INPUT_4_SOURCE:
case ARIZONA_DRC1LMIX_INPUT_4_VOLUME:
case ARIZONA_DRC1RMIX_INPUT_1_SOURCE:
case ARIZONA_DRC1RMIX_INPUT_1_VOLUME:
case ARIZONA_DRC1RMIX_INPUT_2_SOURCE:
case ARIZONA_DRC1RMIX_INPUT_2_VOLUME:
case ARIZONA_DRC1RMIX_INPUT_3_SOURCE:
case ARIZONA_DRC1RMIX_INPUT_3_VOLUME:
case ARIZONA_DRC1RMIX_INPUT_4_SOURCE:
case ARIZONA_DRC1RMIX_INPUT_4_VOLUME:
case ARIZONA_DRC2LMIX_INPUT_1_SOURCE:
case ARIZONA_DRC2LMIX_INPUT_1_VOLUME:
case ARIZONA_DRC2LMIX_INPUT_2_SOURCE:
case ARIZONA_DRC2LMIX_INPUT_2_VOLUME:
case ARIZONA_DRC2LMIX_INPUT_3_SOURCE:
case ARIZONA_DRC2LMIX_INPUT_3_VOLUME:
case ARIZONA_DRC2LMIX_INPUT_4_SOURCE:
case ARIZONA_DRC2LMIX_INPUT_4_VOLUME:
case ARIZONA_DRC2RMIX_INPUT_1_SOURCE:
case ARIZONA_DRC2RMIX_INPUT_1_VOLUME:
case ARIZONA_DRC2RMIX_INPUT_2_SOURCE:
case ARIZONA_DRC2RMIX_INPUT_2_VOLUME:
case ARIZONA_DRC2RMIX_INPUT_3_SOURCE:
case ARIZONA_DRC2RMIX_INPUT_3_VOLUME:
case ARIZONA_DRC2RMIX_INPUT_4_SOURCE:
case ARIZONA_DRC2RMIX_INPUT_4_VOLUME:
case ARIZONA_HPLP1MIX_INPUT_1_SOURCE:
case ARIZONA_HPLP1MIX_INPUT_1_VOLUME:
case ARIZONA_HPLP1MIX_INPUT_2_SOURCE:
case ARIZONA_HPLP1MIX_INPUT_2_VOLUME:
case ARIZONA_HPLP1MIX_INPUT_3_SOURCE:
case ARIZONA_HPLP1MIX_INPUT_3_VOLUME:
case ARIZONA_HPLP1MIX_INPUT_4_SOURCE:
case ARIZONA_HPLP1MIX_INPUT_4_VOLUME:
case ARIZONA_HPLP2MIX_INPUT_1_SOURCE:
case ARIZONA_HPLP2MIX_INPUT_1_VOLUME:
case ARIZONA_HPLP2MIX_INPUT_2_SOURCE:
case ARIZONA_HPLP2MIX_INPUT_2_VOLUME:
case ARIZONA_HPLP2MIX_INPUT_3_SOURCE:
case ARIZONA_HPLP2MIX_INPUT_3_VOLUME:
case ARIZONA_HPLP2MIX_INPUT_4_SOURCE:
case ARIZONA_HPLP2MIX_INPUT_4_VOLUME:
case ARIZONA_HPLP3MIX_INPUT_1_SOURCE:
case ARIZONA_HPLP3MIX_INPUT_1_VOLUME:
case ARIZONA_HPLP3MIX_INPUT_2_SOURCE:
case ARIZONA_HPLP3MIX_INPUT_2_VOLUME:
case ARIZONA_HPLP3MIX_INPUT_3_SOURCE:
case ARIZONA_HPLP3MIX_INPUT_3_VOLUME:
case ARIZONA_HPLP3MIX_INPUT_4_SOURCE:
case ARIZONA_HPLP3MIX_INPUT_4_VOLUME:
case ARIZONA_HPLP4MIX_INPUT_1_SOURCE:
case ARIZONA_HPLP4MIX_INPUT_1_VOLUME:
case ARIZONA_HPLP4MIX_INPUT_2_SOURCE:
case ARIZONA_HPLP4MIX_INPUT_2_VOLUME:
case ARIZONA_HPLP4MIX_INPUT_3_SOURCE:
case ARIZONA_HPLP4MIX_INPUT_3_VOLUME:
case ARIZONA_HPLP4MIX_INPUT_4_SOURCE:
case ARIZONA_HPLP4MIX_INPUT_4_VOLUME:
case ARIZONA_DSP1LMIX_INPUT_1_SOURCE:
case ARIZONA_DSP1LMIX_INPUT_1_VOLUME:
case ARIZONA_DSP1LMIX_INPUT_2_SOURCE:
case ARIZONA_DSP1LMIX_INPUT_2_VOLUME:
case ARIZONA_DSP1LMIX_INPUT_3_SOURCE:
case ARIZONA_DSP1LMIX_INPUT_3_VOLUME:
case ARIZONA_DSP1LMIX_INPUT_4_SOURCE:
case ARIZONA_DSP1LMIX_INPUT_4_VOLUME:
case ARIZONA_DSP1RMIX_INPUT_1_SOURCE:
case ARIZONA_DSP1RMIX_INPUT_1_VOLUME:
case ARIZONA_DSP1RMIX_INPUT_2_SOURCE:
case ARIZONA_DSP1RMIX_INPUT_2_VOLUME:
case ARIZONA_DSP1RMIX_INPUT_3_SOURCE:
case ARIZONA_DSP1RMIX_INPUT_3_VOLUME:
case ARIZONA_DSP1RMIX_INPUT_4_SOURCE:
case ARIZONA_DSP1RMIX_INPUT_4_VOLUME:
case ARIZONA_DSP1AUX1MIX_INPUT_1_SOURCE:
case ARIZONA_DSP1AUX2MIX_INPUT_1_SOURCE:
case ARIZONA_DSP1AUX3MIX_INPUT_1_SOURCE:
case ARIZONA_DSP1AUX4MIX_INPUT_1_SOURCE:
case ARIZONA_DSP1AUX5MIX_INPUT_1_SOURCE:
case ARIZONA_DSP1AUX6MIX_INPUT_1_SOURCE:
case ARIZONA_DSP2LMIX_INPUT_1_SOURCE:
case ARIZONA_DSP2LMIX_INPUT_1_VOLUME:
case ARIZONA_DSP2LMIX_INPUT_2_SOURCE:
case ARIZONA_DSP2LMIX_INPUT_2_VOLUME:
case ARIZONA_DSP2LMIX_INPUT_3_SOURCE:
case ARIZONA_DSP2LMIX_INPUT_3_VOLUME:
case ARIZONA_DSP2LMIX_INPUT_4_SOURCE:
case ARIZONA_DSP2LMIX_INPUT_4_VOLUME:
case ARIZONA_DSP2RMIX_INPUT_1_SOURCE:
case ARIZONA_DSP2RMIX_INPUT_1_VOLUME:
case ARIZONA_DSP2RMIX_INPUT_2_SOURCE:
case ARIZONA_DSP2RMIX_INPUT_2_VOLUME:
case ARIZONA_DSP2RMIX_INPUT_3_SOURCE:
case ARIZONA_DSP2RMIX_INPUT_3_VOLUME:
case ARIZONA_DSP2RMIX_INPUT_4_SOURCE:
case ARIZONA_DSP2RMIX_INPUT_4_VOLUME:
case ARIZONA_DSP2AUX1MIX_INPUT_1_SOURCE:
case ARIZONA_DSP2AUX2MIX_INPUT_1_SOURCE:
case ARIZONA_DSP2AUX3MIX_INPUT_1_SOURCE:
case ARIZONA_DSP2AUX4MIX_INPUT_1_SOURCE:
case ARIZONA_DSP2AUX5MIX_INPUT_1_SOURCE:
case ARIZONA_DSP2AUX6MIX_INPUT_1_SOURCE:
case ARIZONA_DSP3LMIX_INPUT_1_SOURCE:
case ARIZONA_DSP3LMIX_INPUT_1_VOLUME:
case ARIZONA_DSP3LMIX_INPUT_2_SOURCE:
case ARIZONA_DSP3LMIX_INPUT_2_VOLUME:
case ARIZONA_DSP3LMIX_INPUT_3_SOURCE:
case ARIZONA_DSP3LMIX_INPUT_3_VOLUME:
case ARIZONA_DSP3LMIX_INPUT_4_SOURCE:
case ARIZONA_DSP3LMIX_INPUT_4_VOLUME:
case ARIZONA_DSP3RMIX_INPUT_1_SOURCE:
case ARIZONA_DSP3RMIX_INPUT_1_VOLUME:
case ARIZONA_DSP3RMIX_INPUT_2_SOURCE:
case ARIZONA_DSP3RMIX_INPUT_2_VOLUME:
case ARIZONA_DSP3RMIX_INPUT_3_SOURCE:
case ARIZONA_DSP3RMIX_INPUT_3_VOLUME:
case ARIZONA_DSP3RMIX_INPUT_4_SOURCE:
case ARIZONA_DSP3RMIX_INPUT_4_VOLUME:
case ARIZONA_DSP3AUX1MIX_INPUT_1_SOURCE:
case ARIZONA_DSP3AUX2MIX_INPUT_1_SOURCE:
case ARIZONA_DSP3AUX3MIX_INPUT_1_SOURCE:
case ARIZONA_DSP3AUX4MIX_INPUT_1_SOURCE:
case ARIZONA_DSP3AUX5MIX_INPUT_1_SOURCE:
case ARIZONA_DSP3AUX6MIX_INPUT_1_SOURCE:
case ARIZONA_DSP4LMIX_INPUT_1_SOURCE:
case ARIZONA_DSP4LMIX_INPUT_1_VOLUME:
case ARIZONA_DSP4LMIX_INPUT_2_SOURCE:
case ARIZONA_DSP4LMIX_INPUT_2_VOLUME:
case ARIZONA_DSP4LMIX_INPUT_3_SOURCE:
case ARIZONA_DSP4LMIX_INPUT_3_VOLUME:
case ARIZONA_DSP4LMIX_INPUT_4_SOURCE:
case ARIZONA_DSP4LMIX_INPUT_4_VOLUME:
case ARIZONA_DSP4RMIX_INPUT_1_SOURCE:
case ARIZONA_DSP4RMIX_INPUT_1_VOLUME:
case ARIZONA_DSP4RMIX_INPUT_2_SOURCE:
case ARIZONA_DSP4RMIX_INPUT_2_VOLUME:
case ARIZONA_DSP4RMIX_INPUT_3_SOURCE:
case ARIZONA_DSP4RMIX_INPUT_3_VOLUME:
case ARIZONA_DSP4RMIX_INPUT_4_SOURCE:
case ARIZONA_DSP4RMIX_INPUT_4_VOLUME:
case ARIZONA_DSP4AUX1MIX_INPUT_1_SOURCE:
case ARIZONA_DSP4AUX2MIX_INPUT_1_SOURCE:
case ARIZONA_DSP4AUX3MIX_INPUT_1_SOURCE:
case ARIZONA_DSP4AUX4MIX_INPUT_1_SOURCE:
case ARIZONA_DSP4AUX5MIX_INPUT_1_SOURCE:
case ARIZONA_DSP4AUX6MIX_INPUT_1_SOURCE:
case CLEARWATER_DSP5LMIX_INPUT_1_SOURCE:
case CLEARWATER_DSP5LMIX_INPUT_1_VOLUME:
case CLEARWATER_DSP5LMIX_INPUT_2_SOURCE:
case CLEARWATER_DSP5LMIX_INPUT_2_VOLUME:
case CLEARWATER_DSP5LMIX_INPUT_3_SOURCE:
case CLEARWATER_DSP5LMIX_INPUT_3_VOLUME:
case CLEARWATER_DSP5LMIX_INPUT_4_SOURCE:
case CLEARWATER_DSP5LMIX_INPUT_4_VOLUME:
case CLEARWATER_DSP5RMIX_INPUT_1_SOURCE:
case CLEARWATER_DSP5RMIX_INPUT_1_VOLUME:
case CLEARWATER_DSP5RMIX_INPUT_2_SOURCE:
case CLEARWATER_DSP5RMIX_INPUT_2_VOLUME:
case CLEARWATER_DSP5RMIX_INPUT_3_SOURCE:
case CLEARWATER_DSP5RMIX_INPUT_3_VOLUME:
case CLEARWATER_DSP5RMIX_INPUT_4_SOURCE:
case CLEARWATER_DSP5RMIX_INPUT_4_VOLUME:
case CLEARWATER_DSP5AUX1MIX_INPUT_1_SOURCE:
case CLEARWATER_DSP5AUX2MIX_INPUT_1_SOURCE:
case CLEARWATER_DSP5AUX3MIX_INPUT_1_SOURCE:
case CLEARWATER_DSP5AUX4MIX_INPUT_1_SOURCE:
case CLEARWATER_DSP5AUX5MIX_INPUT_1_SOURCE:
case CLEARWATER_DSP5AUX6MIX_INPUT_1_SOURCE:
case CLEARWATER_ASRC1_1LMIX_INPUT_1_SOURCE:
case CLEARWATER_ASRC1_1RMIX_INPUT_1_SOURCE:
case CLEARWATER_ASRC1_2LMIX_INPUT_1_SOURCE:
case CLEARWATER_ASRC1_2RMIX_INPUT_1_SOURCE:
case CLEARWATER_ASRC2_1LMIX_INPUT_1_SOURCE:
case CLEARWATER_ASRC2_1RMIX_INPUT_1_SOURCE:
case CLEARWATER_ASRC2_2LMIX_INPUT_1_SOURCE:
case CLEARWATER_ASRC2_2RMIX_INPUT_1_SOURCE:
case ARIZONA_ISRC1DEC1MIX_INPUT_1_SOURCE:
case ARIZONA_ISRC1DEC2MIX_INPUT_1_SOURCE:
case ARIZONA_ISRC1DEC3MIX_INPUT_1_SOURCE:
case ARIZONA_ISRC1DEC4MIX_INPUT_1_SOURCE:
case ARIZONA_ISRC1INT1MIX_INPUT_1_SOURCE:
case ARIZONA_ISRC1INT2MIX_INPUT_1_SOURCE:
case ARIZONA_ISRC1INT3MIX_INPUT_1_SOURCE:
case ARIZONA_ISRC1INT4MIX_INPUT_1_SOURCE:
case ARIZONA_ISRC2DEC1MIX_INPUT_1_SOURCE:
case ARIZONA_ISRC2DEC2MIX_INPUT_1_SOURCE:
case ARIZONA_ISRC2DEC3MIX_INPUT_1_SOURCE:
case ARIZONA_ISRC2DEC4MIX_INPUT_1_SOURCE:
case ARIZONA_ISRC2INT1MIX_INPUT_1_SOURCE:
case ARIZONA_ISRC2INT2MIX_INPUT_1_SOURCE:
case ARIZONA_ISRC2INT3MIX_INPUT_1_SOURCE:
case ARIZONA_ISRC2INT4MIX_INPUT_1_SOURCE:
case ARIZONA_ISRC3DEC1MIX_INPUT_1_SOURCE:
case ARIZONA_ISRC3DEC2MIX_INPUT_1_SOURCE:
case ARIZONA_ISRC3INT1MIX_INPUT_1_SOURCE:
case ARIZONA_ISRC3INT2MIX_INPUT_1_SOURCE:
case ARIZONA_ISRC4DEC1MIX_INPUT_1_SOURCE:
case ARIZONA_ISRC4DEC2MIX_INPUT_1_SOURCE:
case ARIZONA_ISRC4INT1MIX_INPUT_1_SOURCE:
case ARIZONA_ISRC4INT2MIX_INPUT_1_SOURCE:
case CLEARWATER_DSP6LMIX_INPUT_1_SOURCE:
case CLEARWATER_DSP6LMIX_INPUT_1_VOLUME:
case CLEARWATER_DSP6LMIX_INPUT_2_SOURCE:
case CLEARWATER_DSP6LMIX_INPUT_2_VOLUME:
case CLEARWATER_DSP6LMIX_INPUT_3_SOURCE:
case CLEARWATER_DSP6LMIX_INPUT_3_VOLUME:
case CLEARWATER_DSP6LMIX_INPUT_4_SOURCE:
case CLEARWATER_DSP6LMIX_INPUT_4_VOLUME:
case CLEARWATER_DSP6RMIX_INPUT_1_SOURCE:
case CLEARWATER_DSP6RMIX_INPUT_1_VOLUME:
case CLEARWATER_DSP6RMIX_INPUT_2_SOURCE:
case CLEARWATER_DSP6RMIX_INPUT_2_VOLUME:
case CLEARWATER_DSP6RMIX_INPUT_3_SOURCE:
case CLEARWATER_DSP6RMIX_INPUT_3_VOLUME:
case CLEARWATER_DSP6RMIX_INPUT_4_SOURCE:
case CLEARWATER_DSP6RMIX_INPUT_4_VOLUME:
case CLEARWATER_DSP6AUX1MIX_INPUT_1_SOURCE:
case CLEARWATER_DSP6AUX2MIX_INPUT_1_SOURCE:
case CLEARWATER_DSP6AUX3MIX_INPUT_1_SOURCE:
case CLEARWATER_DSP6AUX4MIX_INPUT_1_SOURCE:
case CLEARWATER_DSP6AUX5MIX_INPUT_1_SOURCE:
case CLEARWATER_DSP6AUX6MIX_INPUT_1_SOURCE:
case CLEARWATER_DSP7LMIX_INPUT_1_SOURCE:
case CLEARWATER_DSP7LMIX_INPUT_1_VOLUME:
case CLEARWATER_DSP7LMIX_INPUT_2_SOURCE:
case CLEARWATER_DSP7LMIX_INPUT_2_VOLUME:
case CLEARWATER_DSP7LMIX_INPUT_3_SOURCE:
case CLEARWATER_DSP7LMIX_INPUT_3_VOLUME:
case CLEARWATER_DSP7LMIX_INPUT_4_SOURCE:
case CLEARWATER_DSP7LMIX_INPUT_4_VOLUME:
case CLEARWATER_DSP7RMIX_INPUT_1_SOURCE:
case CLEARWATER_DSP7RMIX_INPUT_1_VOLUME:
case CLEARWATER_DSP7RMIX_INPUT_2_SOURCE:
case CLEARWATER_DSP7RMIX_INPUT_2_VOLUME:
case CLEARWATER_DSP7RMIX_INPUT_3_SOURCE:
case CLEARWATER_DSP7RMIX_INPUT_3_VOLUME:
case CLEARWATER_DSP7RMIX_INPUT_4_SOURCE:
case CLEARWATER_DSP7RMIX_INPUT_4_VOLUME:
case CLEARWATER_DSP7AUX1MIX_INPUT_1_SOURCE:
case CLEARWATER_DSP7AUX2MIX_INPUT_1_SOURCE:
case CLEARWATER_DSP7AUX3MIX_INPUT_1_SOURCE:
case CLEARWATER_DSP7AUX4MIX_INPUT_1_SOURCE:
case CLEARWATER_DSP7AUX5MIX_INPUT_1_SOURCE:
case CLEARWATER_DSP7AUX6MIX_INPUT_1_SOURCE:
case MOON_DFC1MIX_INPUT_1_SOURCE:
case MOON_DFC2MIX_INPUT_1_SOURCE:
case MOON_DFC3MIX_INPUT_1_SOURCE:
case MOON_DFC4MIX_INPUT_1_SOURCE:
case MOON_DFC5MIX_INPUT_1_SOURCE:
case MOON_DFC6MIX_INPUT_1_SOURCE:
case MOON_DFC7MIX_INPUT_1_SOURCE:
case MOON_DFC8MIX_INPUT_1_SOURCE:
case ARIZONA_FX_CTRL1:
case ARIZONA_FX_CTRL2:
case ARIZONA_EQ1_1:
case ARIZONA_EQ1_2:
case ARIZONA_EQ1_3:
case ARIZONA_EQ1_4:
case ARIZONA_EQ1_5:
case ARIZONA_EQ1_6:
case ARIZONA_EQ1_7:
case ARIZONA_EQ1_8:
case ARIZONA_EQ1_9:
case ARIZONA_EQ1_10:
case ARIZONA_EQ1_11:
case ARIZONA_EQ1_12:
case ARIZONA_EQ1_13:
case ARIZONA_EQ1_14:
case ARIZONA_EQ1_15:
case ARIZONA_EQ1_16:
case ARIZONA_EQ1_17:
case ARIZONA_EQ1_18:
case ARIZONA_EQ1_19:
case ARIZONA_EQ1_20:
case ARIZONA_EQ1_21:
case ARIZONA_EQ2_1:
case ARIZONA_EQ2_2:
case ARIZONA_EQ2_3:
case ARIZONA_EQ2_4:
case ARIZONA_EQ2_5:
case ARIZONA_EQ2_6:
case ARIZONA_EQ2_7:
case ARIZONA_EQ2_8:
case ARIZONA_EQ2_9:
case ARIZONA_EQ2_10:
case ARIZONA_EQ2_11:
case ARIZONA_EQ2_12:
case ARIZONA_EQ2_13:
case ARIZONA_EQ2_14:
case ARIZONA_EQ2_15:
case ARIZONA_EQ2_16:
case ARIZONA_EQ2_17:
case ARIZONA_EQ2_18:
case ARIZONA_EQ2_19:
case ARIZONA_EQ2_20:
case ARIZONA_EQ2_21:
case ARIZONA_EQ3_1:
case ARIZONA_EQ3_2:
case ARIZONA_EQ3_3:
case ARIZONA_EQ3_4:
case ARIZONA_EQ3_5:
case ARIZONA_EQ3_6:
case ARIZONA_EQ3_7:
case ARIZONA_EQ3_8:
case ARIZONA_EQ3_9:
case ARIZONA_EQ3_10:
case ARIZONA_EQ3_11:
case ARIZONA_EQ3_12:
case ARIZONA_EQ3_13:
case ARIZONA_EQ3_14:
case ARIZONA_EQ3_15:
case ARIZONA_EQ3_16:
case ARIZONA_EQ3_17:
case ARIZONA_EQ3_18:
case ARIZONA_EQ3_19:
case ARIZONA_EQ3_20:
case ARIZONA_EQ3_21:
case ARIZONA_EQ4_1:
case ARIZONA_EQ4_2:
case ARIZONA_EQ4_3:
case ARIZONA_EQ4_4:
case ARIZONA_EQ4_5:
case ARIZONA_EQ4_6:
case ARIZONA_EQ4_7:
case ARIZONA_EQ4_8:
case ARIZONA_EQ4_9:
case ARIZONA_EQ4_10:
case ARIZONA_EQ4_11:
case ARIZONA_EQ4_12:
case ARIZONA_EQ4_13:
case ARIZONA_EQ4_14:
case ARIZONA_EQ4_15:
case ARIZONA_EQ4_16:
case ARIZONA_EQ4_17:
case ARIZONA_EQ4_18:
case ARIZONA_EQ4_19:
case ARIZONA_EQ4_20:
case ARIZONA_EQ4_21:
case ARIZONA_DRC1_CTRL1:
case ARIZONA_DRC1_CTRL2:
case ARIZONA_DRC1_CTRL3:
case ARIZONA_DRC1_CTRL4:
case ARIZONA_DRC1_CTRL5:
case CLEARWATER_DRC2_CTRL1:
case CLEARWATER_DRC2_CTRL2:
case CLEARWATER_DRC2_CTRL3:
case CLEARWATER_DRC2_CTRL4:
case CLEARWATER_DRC2_CTRL5:
case ARIZONA_HPLPF1_1:
case ARIZONA_HPLPF1_2:
case ARIZONA_HPLPF2_1:
case ARIZONA_HPLPF2_2:
case ARIZONA_HPLPF3_1:
case ARIZONA_HPLPF3_2:
case ARIZONA_HPLPF4_1:
case ARIZONA_HPLPF4_2:
case CLEARWATER_ASRC1_ENABLE:
case CLEARWATER_ASRC1_STATUS:
case CLEARWATER_ASRC1_RATE1:
case CLEARWATER_ASRC1_RATE2:
case CLEARWATER_ASRC2_ENABLE:
case CLEARWATER_ASRC2_STATUS:
case CLEARWATER_ASRC2_RATE1:
case CLEARWATER_ASRC2_RATE2:
case ARIZONA_ISRC_1_CTRL_1:
case ARIZONA_ISRC_1_CTRL_2:
case ARIZONA_ISRC_1_CTRL_3:
case ARIZONA_ISRC_2_CTRL_1:
case ARIZONA_ISRC_2_CTRL_2:
case ARIZONA_ISRC_2_CTRL_3:
case ARIZONA_ISRC_3_CTRL_1:
case ARIZONA_ISRC_3_CTRL_2:
case ARIZONA_ISRC_3_CTRL_3:
case ARIZONA_ISRC_4_CTRL_1:
case ARIZONA_ISRC_4_CTRL_2:
case ARIZONA_ISRC_4_CTRL_3:
case ARIZONA_CLOCK_CONTROL:
case ARIZONA_ANC_SRC:
case ARIZONA_DSP_STATUS:
case ARIZONA_ANC_COEFF_START ... ARIZONA_ANC_COEFF_END:
case ARIZONA_FCL_FILTER_CONTROL:
case ARIZONA_FCL_ADC_REFORMATTER_CONTROL:
case ARIZONA_FCL_COEFF_START ... ARIZONA_FCL_COEFF_END:
case CLEARWATER_FCR_FILTER_CONTROL:
case CLEARWATER_FCR_ADC_REFORMATTER_CONTROL:
case CLEARWATER_FCR_COEFF_START ... CLEARWATER_FCR_COEFF_END:
case CLEARWATER_DAC_COMP_1:
case CLEARWATER_DAC_COMP_2:
case CLEARWATER_FRF_COEFFICIENT_1L_1:
case CLEARWATER_FRF_COEFFICIENT_1L_2:
case CLEARWATER_FRF_COEFFICIENT_1L_3:
case CLEARWATER_FRF_COEFFICIENT_1L_4:
case CLEARWATER_FRF_COEFFICIENT_1R_1:
case CLEARWATER_FRF_COEFFICIENT_1R_2:
case CLEARWATER_FRF_COEFFICIENT_1R_3:
case CLEARWATER_FRF_COEFFICIENT_1R_4:
case CLEARWATER_FRF_COEFFICIENT_2L_1:
case CLEARWATER_FRF_COEFFICIENT_2L_2:
case CLEARWATER_FRF_COEFFICIENT_2L_3:
case CLEARWATER_FRF_COEFFICIENT_2L_4:
case CLEARWATER_FRF_COEFFICIENT_2R_1:
case CLEARWATER_FRF_COEFFICIENT_2R_2:
case CLEARWATER_FRF_COEFFICIENT_2R_3:
case CLEARWATER_FRF_COEFFICIENT_2R_4:
case CLEARWATER_FRF_COEFFICIENT_3L_1:
case CLEARWATER_FRF_COEFFICIENT_3L_2:
case CLEARWATER_FRF_COEFFICIENT_3L_3:
case CLEARWATER_FRF_COEFFICIENT_3L_4:
case CLEARWATER_FRF_COEFFICIENT_3R_1:
case CLEARWATER_FRF_COEFFICIENT_3R_2:
case CLEARWATER_FRF_COEFFICIENT_3R_3:
case CLEARWATER_FRF_COEFFICIENT_3R_4:
case CLEARWATER_FRF_COEFFICIENT_5L_1:
case CLEARWATER_FRF_COEFFICIENT_5L_2:
case CLEARWATER_FRF_COEFFICIENT_5L_3:
case CLEARWATER_FRF_COEFFICIENT_5L_4:
case CLEARWATER_FRF_COEFFICIENT_5R_1:
case CLEARWATER_FRF_COEFFICIENT_5R_2:
case CLEARWATER_FRF_COEFFICIENT_5R_3:
case CLEARWATER_FRF_COEFFICIENT_5R_4:
case MOON_DFC1_CTRL:
case MOON_DFC1_RX:
case MOON_DFC1_TX:
case MOON_DFC2_CTRL:
case MOON_DFC2_RX:
case MOON_DFC2_TX:
case MOON_DFC3_CTRL:
case MOON_DFC3_RX:
case MOON_DFC3_TX:
case MOON_DFC4_CTRL:
case MOON_DFC4_RX:
case MOON_DFC4_TX:
case MOON_DFC5_CTRL:
case MOON_DFC5_RX:
case MOON_DFC5_TX:
case MOON_DFC6_CTRL:
case MOON_DFC6_RX:
case MOON_DFC6_TX:
case MOON_DFC7_CTRL:
case MOON_DFC7_RX:
case MOON_DFC7_TX:
case MOON_DFC8_CTRL:
case MOON_DFC8_RX:
case MOON_DFC8_TX:
case MOON_DFC_STATUS:
case CLEARWATER_GPIO1_CTRL_1:
case CLEARWATER_GPIO1_CTRL_2:
case CLEARWATER_GPIO2_CTRL_1:
case CLEARWATER_GPIO2_CTRL_2:
case CLEARWATER_GPIO3_CTRL_1:
case CLEARWATER_GPIO3_CTRL_2:
case CLEARWATER_GPIO4_CTRL_1:
case CLEARWATER_GPIO4_CTRL_2:
case CLEARWATER_GPIO5_CTRL_1:
case CLEARWATER_GPIO5_CTRL_2:
case CLEARWATER_GPIO6_CTRL_1:
case CLEARWATER_GPIO6_CTRL_2:
case CLEARWATER_GPIO7_CTRL_1:
case CLEARWATER_GPIO7_CTRL_2:
case CLEARWATER_GPIO8_CTRL_1:
case CLEARWATER_GPIO8_CTRL_2:
case CLEARWATER_GPIO9_CTRL_1:
case CLEARWATER_GPIO9_CTRL_2:
case CLEARWATER_GPIO10_CTRL_1:
case CLEARWATER_GPIO10_CTRL_2:
case CLEARWATER_GPIO11_CTRL_1:
case CLEARWATER_GPIO11_CTRL_2:
case CLEARWATER_GPIO12_CTRL_1:
case CLEARWATER_GPIO12_CTRL_2:
case CLEARWATER_GPIO13_CTRL_1:
case CLEARWATER_GPIO13_CTRL_2:
case CLEARWATER_GPIO14_CTRL_1:
case CLEARWATER_GPIO14_CTRL_2:
case CLEARWATER_GPIO15_CTRL_1:
case CLEARWATER_GPIO15_CTRL_2:
case CLEARWATER_GPIO16_CTRL_1:
case CLEARWATER_GPIO16_CTRL_2:
case CLEARWATER_GPIO17_CTRL_1:
case CLEARWATER_GPIO17_CTRL_2:
case CLEARWATER_GPIO18_CTRL_1:
case CLEARWATER_GPIO18_CTRL_2:
case CLEARWATER_GPIO19_CTRL_1:
case CLEARWATER_GPIO19_CTRL_2:
case CLEARWATER_GPIO20_CTRL_1:
case CLEARWATER_GPIO20_CTRL_2:
case CLEARWATER_GPIO21_CTRL_1:
case CLEARWATER_GPIO21_CTRL_2:
case CLEARWATER_GPIO22_CTRL_1:
case CLEARWATER_GPIO22_CTRL_2:
case CLEARWATER_GPIO23_CTRL_1:
case CLEARWATER_GPIO23_CTRL_2:
case CLEARWATER_GPIO24_CTRL_1:
case CLEARWATER_GPIO24_CTRL_2:
case CLEARWATER_GPIO25_CTRL_1:
case CLEARWATER_GPIO25_CTRL_2:
case CLEARWATER_GPIO26_CTRL_1:
case CLEARWATER_GPIO26_CTRL_2:
case CLEARWATER_GPIO27_CTRL_1:
case CLEARWATER_GPIO27_CTRL_2:
case CLEARWATER_GPIO28_CTRL_1:
case CLEARWATER_GPIO28_CTRL_2:
case CLEARWATER_GPIO29_CTRL_1:
case CLEARWATER_GPIO29_CTRL_2:
case CLEARWATER_GPIO30_CTRL_1:
case CLEARWATER_GPIO30_CTRL_2:
case CLEARWATER_GPIO31_CTRL_1:
case CLEARWATER_GPIO31_CTRL_2:
case CLEARWATER_GPIO32_CTRL_1:
case CLEARWATER_GPIO32_CTRL_2:
case CLEARWATER_GPIO33_CTRL_1:
case CLEARWATER_GPIO33_CTRL_2:
case CLEARWATER_GPIO34_CTRL_1:
case CLEARWATER_GPIO34_CTRL_2:
case CLEARWATER_GPIO35_CTRL_1:
case CLEARWATER_GPIO35_CTRL_2:
case CLEARWATER_GPIO36_CTRL_1:
case CLEARWATER_GPIO36_CTRL_2:
case CLEARWATER_GPIO37_CTRL_1:
case CLEARWATER_GPIO37_CTRL_2:
case CLEARWATER_GPIO38_CTRL_1:
case CLEARWATER_GPIO38_CTRL_2:
case CLEARWATER_IRQ1_STATUS_1:
case CLEARWATER_IRQ1_STATUS_2:
case CLEARWATER_IRQ1_STATUS_6:
case CLEARWATER_IRQ1_STATUS_7:
case CLEARWATER_IRQ1_STATUS_9:
case CLEARWATER_IRQ1_STATUS_11:
case CLEARWATER_IRQ1_STATUS_12:
case CLEARWATER_IRQ1_STATUS_13:
case CLEARWATER_IRQ1_STATUS_14:
case CLEARWATER_IRQ1_STATUS_15:
case CLEARWATER_IRQ1_STATUS_17:
case CLEARWATER_IRQ1_STATUS_18:
case CLEARWATER_IRQ1_STATUS_19:
case CLEARWATER_IRQ1_STATUS_21:
case CLEARWATER_IRQ1_STATUS_22:
case CLEARWATER_IRQ1_STATUS_23:
case CLEARWATER_IRQ1_STATUS_24:
case CLEARWATER_IRQ1_STATUS_25:
case CLEARWATER_IRQ1_STATUS_27:
case CLEARWATER_IRQ1_STATUS_28:
case CLEARWATER_IRQ1_STATUS_30:
case CLEARWATER_IRQ1_STATUS_31:
case CLEARWATER_IRQ1_STATUS_32:
case MOON_IRQ1_STATUS_33:
case CLEARWATER_IRQ1_MASK_1:
case CLEARWATER_IRQ1_MASK_2:
case CLEARWATER_IRQ1_MASK_3:
case CLEARWATER_IRQ1_MASK_4:
case CLEARWATER_IRQ1_MASK_5:
case CLEARWATER_IRQ1_MASK_6:
case CLEARWATER_IRQ1_MASK_7:
case CLEARWATER_IRQ1_MASK_8:
case CLEARWATER_IRQ1_MASK_9:
case CLEARWATER_IRQ1_MASK_10:
case CLEARWATER_IRQ1_MASK_11:
case CLEARWATER_IRQ1_MASK_12:
case CLEARWATER_IRQ1_MASK_13:
case CLEARWATER_IRQ1_MASK_14:
case CLEARWATER_IRQ1_MASK_15:
case MOON_IRQ1_MASK_16:
case CLEARWATER_IRQ1_MASK_17:
case CLEARWATER_IRQ1_MASK_18:
case CLEARWATER_IRQ1_MASK_19:
case MOON_IRQ1_MASK_20:
case CLEARWATER_IRQ1_MASK_21:
case CLEARWATER_IRQ1_MASK_22:
case CLEARWATER_IRQ1_MASK_23:
case CLEARWATER_IRQ1_MASK_24:
case CLEARWATER_IRQ1_MASK_25:
case MOON_IRQ1_MASK_26:
case CLEARWATER_IRQ1_MASK_27:
case CLEARWATER_IRQ1_MASK_28:
case MOON_IRQ1_MASK_29:
case CLEARWATER_IRQ1_MASK_30:
case CLEARWATER_IRQ1_MASK_31:
case CLEARWATER_IRQ1_MASK_32:
case MOON_IRQ1_MASK_33:
case CLEARWATER_IRQ1_RAW_STATUS_1:
case CLEARWATER_IRQ1_RAW_STATUS_2:
case CLEARWATER_IRQ1_RAW_STATUS_7:
case CLEARWATER_IRQ1_RAW_STATUS_9:
case CLEARWATER_IRQ1_RAW_STATUS_11:
case CLEARWATER_IRQ1_RAW_STATUS_12:
case CLEARWATER_IRQ1_RAW_STATUS_13:
case CLEARWATER_IRQ1_RAW_STATUS_14:
case CLEARWATER_IRQ1_RAW_STATUS_15:
case CLEARWATER_IRQ1_RAW_STATUS_17:
case CLEARWATER_IRQ1_RAW_STATUS_18:
case CLEARWATER_IRQ1_RAW_STATUS_19:
case CLEARWATER_IRQ1_RAW_STATUS_21:
case CLEARWATER_IRQ1_RAW_STATUS_22:
case CLEARWATER_IRQ1_RAW_STATUS_23:
case CLEARWATER_IRQ1_RAW_STATUS_24:
case CLEARWATER_IRQ1_RAW_STATUS_25:
case CLEARWATER_IRQ1_RAW_STATUS_30:
case CLEARWATER_IRQ1_RAW_STATUS_31:
case CLEARWATER_IRQ1_RAW_STATUS_32:
case CLEARWATER_IRQ2_STATUS_9:
case CLEARWATER_IRQ2_MASK_9:
case CLEARWATER_IRQ2_RAW_STATUS_9:
case CLEARWATER_INTERRUPT_DEBOUNCE_7:
case CLEARWATER_IRQ1_CTRL:
return true;
default:
return false;
}
}
/*
 * moon_16bit_volatile_register - volatile predicate for the 16-bit register
 * page, used as the regmap .volatile_reg callback.
 *
 * Returns true for registers whose contents can change on their own
 * (device ID/reset, sample-rate and output status, mic/headphone detect
 * results, GPIO input state, IRQ status words); regmap must always read
 * these from the device instead of serving them from the register cache.
 * @dev is unused here but required by the regmap callback signature.
 */
static bool moon_16bit_volatile_register(struct device *dev, unsigned int reg)
{
switch (reg) {
/* chip ID, reset and write-sequencer state */
case ARIZONA_SOFTWARE_RESET:
case ARIZONA_DEVICE_REVISION:
case ARIZONA_WRITE_SEQUENCER_CTRL_0:
case ARIZONA_WRITE_SEQUENCER_CTRL_1:
case ARIZONA_WRITE_SEQUENCER_CTRL_2:
case ARIZONA_HAPTICS_STATUS:
/* sample-rate and clocking status */
case ARIZONA_SAMPLE_RATE_1_STATUS:
case ARIZONA_SAMPLE_RATE_2_STATUS:
case ARIZONA_SAMPLE_RATE_3_STATUS:
case ARIZONA_ASYNC_SAMPLE_RATE_1_STATUS:
case ARIZONA_ASYNC_SAMPLE_RATE_2_STATUS:
case ARIZONA_HP_CTRL_1L:
case ARIZONA_HP_CTRL_1R:
case ARIZONA_HP_CTRL_2L:
case ARIZONA_HP_CTRL_2R:
case ARIZONA_HP_CTRL_3L:
case ARIZONA_HP_CTRL_3R:
/* accessory-detect measurement results */
case ARIZONA_MIC_DETECT_3:
case ARIZONA_MIC_DETECT_4:
case MOON_MICDET2_CONTROL_3:
case MOON_MICDET2_CONTROL_4:
case ARIZONA_HEADPHONE_DETECT_2:
case ARIZONA_HEADPHONE_DETECT_3:
case ARIZONA_HP_DACVAL:
/* input/output path status */
case ARIZONA_INPUT_ENABLES_STATUS:
case ARIZONA_OUTPUT_STATUS_1:
case ARIZONA_RAW_OUTPUT_STATUS_1:
case ARIZONA_SPD1_TX_CHANNEL_STATUS_1:
case ARIZONA_SPD1_TX_CHANNEL_STATUS_2:
case ARIZONA_SPD1_TX_CHANNEL_STATUS_3:
case ARIZONA_SLIMBUS_RX_PORT_STATUS:
case ARIZONA_SLIMBUS_TX_PORT_STATUS:
case ARIZONA_FX_CTRL2:
case CLEARWATER_ASRC2_STATUS:
case CLEARWATER_ASRC1_STATUS:
case ARIZONA_CLOCK_CONTROL:
case MOON_DFC_STATUS:
/* GPIO CTRL_1 words reflect live pin state */
case CLEARWATER_GPIO1_CTRL_1:
case CLEARWATER_GPIO2_CTRL_1:
case CLEARWATER_GPIO3_CTRL_1:
case CLEARWATER_GPIO4_CTRL_1:
case CLEARWATER_GPIO5_CTRL_1:
case CLEARWATER_GPIO6_CTRL_1:
case CLEARWATER_GPIO7_CTRL_1:
case CLEARWATER_GPIO8_CTRL_1:
case CLEARWATER_GPIO9_CTRL_1:
case CLEARWATER_GPIO10_CTRL_1:
case CLEARWATER_GPIO11_CTRL_1:
case CLEARWATER_GPIO12_CTRL_1:
case CLEARWATER_GPIO13_CTRL_1:
case CLEARWATER_GPIO14_CTRL_1:
case CLEARWATER_GPIO15_CTRL_1:
case CLEARWATER_GPIO16_CTRL_1:
case CLEARWATER_GPIO17_CTRL_1:
case CLEARWATER_GPIO18_CTRL_1:
case CLEARWATER_GPIO19_CTRL_1:
case CLEARWATER_GPIO20_CTRL_1:
case CLEARWATER_GPIO21_CTRL_1:
case CLEARWATER_GPIO22_CTRL_1:
case CLEARWATER_GPIO23_CTRL_1:
case CLEARWATER_GPIO24_CTRL_1:
case CLEARWATER_GPIO25_CTRL_1:
case CLEARWATER_GPIO26_CTRL_1:
case CLEARWATER_GPIO27_CTRL_1:
case CLEARWATER_GPIO28_CTRL_1:
case CLEARWATER_GPIO29_CTRL_1:
case CLEARWATER_GPIO30_CTRL_1:
case CLEARWATER_GPIO31_CTRL_1:
case CLEARWATER_GPIO32_CTRL_1:
case CLEARWATER_GPIO33_CTRL_1:
case CLEARWATER_GPIO34_CTRL_1:
case CLEARWATER_GPIO35_CTRL_1:
case CLEARWATER_GPIO36_CTRL_1:
case CLEARWATER_GPIO37_CTRL_1:
case CLEARWATER_GPIO38_CTRL_1:
/* IRQ status and raw-status registers */
case CLEARWATER_IRQ1_STATUS_1:
case CLEARWATER_IRQ1_STATUS_2:
case CLEARWATER_IRQ1_STATUS_3:
case CLEARWATER_IRQ1_STATUS_4:
case CLEARWATER_IRQ1_STATUS_5:
case CLEARWATER_IRQ1_STATUS_6:
case CLEARWATER_IRQ1_STATUS_7:
case CLEARWATER_IRQ1_STATUS_8:
case CLEARWATER_IRQ1_STATUS_9:
case CLEARWATER_IRQ1_STATUS_10:
case CLEARWATER_IRQ1_STATUS_11:
case CLEARWATER_IRQ1_STATUS_12:
case CLEARWATER_IRQ1_STATUS_13:
case CLEARWATER_IRQ1_STATUS_14:
case CLEARWATER_IRQ1_STATUS_15:
case CLEARWATER_IRQ1_STATUS_16:
case CLEARWATER_IRQ1_STATUS_17:
case CLEARWATER_IRQ1_STATUS_18:
case CLEARWATER_IRQ1_STATUS_19:
case CLEARWATER_IRQ1_STATUS_20:
case CLEARWATER_IRQ1_STATUS_21:
case CLEARWATER_IRQ1_STATUS_22:
case CLEARWATER_IRQ1_STATUS_23:
case CLEARWATER_IRQ1_STATUS_24:
case CLEARWATER_IRQ1_STATUS_25:
case CLEARWATER_IRQ1_STATUS_26:
case CLEARWATER_IRQ1_STATUS_27:
case CLEARWATER_IRQ1_STATUS_28:
case CLEARWATER_IRQ1_STATUS_29:
case CLEARWATER_IRQ1_STATUS_30:
case CLEARWATER_IRQ1_STATUS_31:
case CLEARWATER_IRQ1_STATUS_32:
case MOON_IRQ1_STATUS_33:
case CLEARWATER_IRQ1_RAW_STATUS_1:
case CLEARWATER_IRQ1_RAW_STATUS_2:
case CLEARWATER_IRQ1_RAW_STATUS_7:
case CLEARWATER_IRQ1_RAW_STATUS_9:
case CLEARWATER_IRQ1_RAW_STATUS_11:
case CLEARWATER_IRQ1_RAW_STATUS_12:
case CLEARWATER_IRQ1_RAW_STATUS_13:
case CLEARWATER_IRQ1_RAW_STATUS_14:
case CLEARWATER_IRQ1_RAW_STATUS_15:
case CLEARWATER_IRQ1_RAW_STATUS_17:
case CLEARWATER_IRQ1_RAW_STATUS_18:
case CLEARWATER_IRQ1_RAW_STATUS_19:
case CLEARWATER_IRQ1_RAW_STATUS_21:
case CLEARWATER_IRQ1_RAW_STATUS_22:
case CLEARWATER_IRQ1_RAW_STATUS_23:
case CLEARWATER_IRQ1_RAW_STATUS_24:
case CLEARWATER_IRQ1_RAW_STATUS_25:
case CLEARWATER_IRQ1_RAW_STATUS_30:
case CLEARWATER_IRQ1_RAW_STATUS_31:
case CLEARWATER_IRQ1_RAW_STATUS_32:
case CLEARWATER_IRQ2_STATUS_9:
case CLEARWATER_IRQ2_RAW_STATUS_9:
return true;
default:
return false;
}
}
/*
 * moon_32bit_readable_register - readable predicate for the 32-bit register
 * page, used as the regmap .readable_reg callback.
 *
 * Readable spans are the write-sequencer memory, the OTP HPDET calibration
 * words and the per-core DSP control windows; anything else is readable
 * only if it falls inside ADSP memory (moon_is_adsp_memory()).
 */
static bool moon_32bit_readable_register(struct device *dev, unsigned int reg)
{
	if (reg >= ARIZONA_WSEQ_SEQUENCE_1 && reg <= ARIZONA_WSEQ_SEQUENCE_508)
		return true;
	if (reg >= MOON_OTP_HPDET_CALIB_1 && reg <= MOON_OTP_HPDET_CALIB_2)
		return true;
	if (reg >= CLEARWATER_DSP1_CONFIG &&
	    reg <= MOON_DSP1_PMEM_ERR_ADDR_XMEM_ERR_ADDR)
		return true;
	if (reg >= CLEARWATER_DSP2_CONFIG &&
	    reg <= MOON_DSP2_PMEM_ERR_ADDR_XMEM_ERR_ADDR)
		return true;
	if (reg >= CLEARWATER_DSP3_CONFIG &&
	    reg <= MOON_DSP3_PMEM_ERR_ADDR_XMEM_ERR_ADDR)
		return true;
	if (reg >= CLEARWATER_DSP4_CONFIG &&
	    reg <= MOON_DSP4_PMEM_ERR_ADDR_XMEM_ERR_ADDR)
		return true;
	if (reg >= CLEARWATER_DSP5_CONFIG &&
	    reg <= MOON_DSP5_PMEM_ERR_ADDR_XMEM_ERR_ADDR)
		return true;
	if (reg >= CLEARWATER_DSP6_CONFIG &&
	    reg <= MOON_DSP6_PMEM_ERR_ADDR_XMEM_ERR_ADDR)
		return true;
	if (reg >= CLEARWATER_DSP7_CONFIG &&
	    reg <= MOON_DSP7_PMEM_ERR_ADDR_XMEM_ERR_ADDR)
		return true;

	return moon_is_adsp_memory(dev, reg);
}
/*
 * moon_32bit_volatile_register - volatile predicate for the 32-bit register
 * page, used as the regmap .volatile_reg callback.
 *
 * The volatile set for the 32-bit map is exactly the readable set (the
 * original body was a byte-for-byte copy of moon_32bit_readable_register()),
 * so delegate to that function: the two tables can no longer drift apart
 * when a register range is added or removed.
 */
static bool moon_32bit_volatile_register(struct device *dev, unsigned int reg)
{
	return moon_32bit_readable_register(dev, reg);
}
/*
 * regmap configuration for the 16-bit register page over SPI.
 * 32-bit register addresses, 16-bit values; .pad_bits = 16 inserts
 * 16 padding bits between the address and data phases of each SPI
 * transaction.  Non-volatile registers are cached (rbtree) with the
 * defaults from moon_reg_default.
 */
const struct regmap_config moon_16bit_spi_regmap = {
.name = "moon_16bit",
.reg_bits = 32,
.pad_bits = 16,
.val_bits = 16,
.max_register = CLEARWATER_INTERRUPT_RAW_STATUS_1,
.readable_reg = moon_16bit_readable_register,
.volatile_reg = moon_16bit_volatile_register,
.cache_type = REGCACHE_RBTREE,
.reg_defaults = moon_reg_default,
.num_reg_defaults = ARRAY_SIZE(moon_reg_default),
};
EXPORT_SYMBOL_GPL(moon_16bit_spi_regmap);
/*
 * regmap configuration for the 16-bit register page over I2C.
 * Identical to the SPI variant except that no pad bits are needed
 * on the I2C bus.
 */
const struct regmap_config moon_16bit_i2c_regmap = {
.name = "moon_16bit",
.reg_bits = 32,
.val_bits = 16,
.max_register = CLEARWATER_INTERRUPT_RAW_STATUS_1,
.readable_reg = moon_16bit_readable_register,
.volatile_reg = moon_16bit_volatile_register,
.cache_type = REGCACHE_RBTREE,
.reg_defaults = moon_reg_default,
.num_reg_defaults = ARRAY_SIZE(moon_reg_default),
};
EXPORT_SYMBOL_GPL(moon_16bit_i2c_regmap);
/*
 * regmap configuration for the 32-bit register page over SPI.
 * 32-bit addresses and values with a stride of 2 (each 32-bit value
 * spans two 16-bit address slots); .pad_bits = 16 as on the 16-bit
 * SPI map.  No register defaults table - the cache is populated from
 * hardware reads.
 */
const struct regmap_config moon_32bit_spi_regmap = {
.name = "moon_32bit",
.reg_bits = 32,
.reg_stride = 2,
.pad_bits = 16,
.val_bits = 32,
.max_register = MOON_DSP7_PMEM_ERR_ADDR_XMEM_ERR_ADDR,
.readable_reg = moon_32bit_readable_register,
.volatile_reg = moon_32bit_volatile_register,
.cache_type = REGCACHE_RBTREE,
};
EXPORT_SYMBOL_GPL(moon_32bit_spi_regmap);
/*
 * regmap configuration for the 32-bit register page over I2C.
 * Same layout as the 32-bit SPI map but without bus pad bits.
 */
const struct regmap_config moon_32bit_i2c_regmap = {
.name = "moon_32bit",
.reg_bits = 32,
.reg_stride = 2,
.val_bits = 32,
.max_register = MOON_DSP7_PMEM_ERR_ADDR_XMEM_ERR_ADDR,
.readable_reg = moon_32bit_readable_register,
.volatile_reg = moon_32bit_volatile_register,
.cache_type = REGCACHE_RBTREE,
};
EXPORT_SYMBOL_GPL(moon_32bit_i2c_regmap);
| gpl-2.0 |
Shihta/gdisk | gpttext.h | 2515 | /*
Implementation of GPTData class derivative with basic text-mode interaction
Copyright (C) 2010-2013 Roderick W. Smith
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#ifndef __GPTDATATEXT_H
#define __GPTDATATEXT_H
#include "gpt.h"
using namespace std;
/**
 * GPTData derivative that adds an interactive text-mode (prompt-driven)
 * user interface on top of the base partition-table engine.  Declarations
 * only; implementations live in the corresponding .cc file.
 */
class GPTDataTextUI : public GPTData {
protected:
public:
GPTDataTextUI(void);
GPTDataTextUI(string filename);
~GPTDataTextUI(void);
// This one needs to be explicitly defined, even though it does nothing new....
// const GPTPart & operator[](uint32_t partNum) {return GPTData::operator[](partNum);}
// Extended (interactive) versions of some base-class functions
WhichToUse UseWhichPartitions(void);
int XFormDisklabel(void);
// Request information from the user (& possibly do something with it)
uint32_t GetPartNum(void);
void ResizePartitionTable(void);
void CreatePartition(void);
void DeletePartition(void);
void ChangePartType(void);
void ChangeUniqueGuid(void);
void SetAttributes(uint32_t partNum);
int SetName(uint32_t partNum);
int SwapPartitions(void);
int DestroyGPTwPrompt(void); // Returns 1 if user proceeds
void ShowDetails(void);
void MakeHybrid(void);
int XFormToMBR(void); // convert GPT to MBR, wiping GPT afterwards. Returns 1 if successful
// An informational function....
void WarnAboutIffyMBRPart(int partNum);
// Main menu functions (each *Menu runs an interactive loop for the named
// menu; each Show*Commands prints that menu's help text)
void MainMenu(string filename);
void ShowCommands(void);
void ExpertsMenu(string filename);
void ShowExpertCommands(void);
void RecoveryMenu(string filename);
void ShowRecoveryCommands(void);
}; // class GPTDataTextUI
// Prompt for an MBR partition type code; defType is presumably the default
// offered to the user -- confirm against the definition in gpttext.cc.
int GetMBRTypeCode(int defType);
// Read a Unicode (ICU UnicodeString) line from the user.
UnicodeString ReadUString(void);
#endif // __GPTDATATEXT_H
| gpl-2.0 |
AhmedSayedAhmed/MM_Portal | wp-content/plugins/floating-social-bar/floating-social-bar.php | 2794 | <?php
/**
* Floating Social Bar is the best social media plugin for WordPress
* that adds a floating bar with share buttons to your content
* without slowing down your site.
*
* @package Floating Social Bar
* @author Syed Balkhi
* @author Thomas Griffin
* @license GPL-2.0+
* @link http://wpbeginner.com/floating-social-bar/
* @copyright 2013 WPBeginner. All rights reserved.
*
* @wordpress-plugin
* Plugin Name: Floating Social Bar
* Plugin URI: http://wpbeginner.com/floating-social-bar/
* Description: Floating Social Bar is the best social media plugin for WordPress that adds a floating bar with share buttons to your content without slowing down your site.
* Version: 1.1.7
* Author: Syed Balkhi and Thomas Griffin
* Author URI: http://wpbeginner.com/
* Text Domain: fsb
* Contributors: smub, griffinjt
* License: GPL-2.0+
* License URI: http://www.gnu.org/licenses/gpl-2.0.txt
* Domain Path: /lang
*/
// If this file is called directly (outside of WordPress), abort.
if ( ! defined( 'WPINC' ) ) die;

// Load the main plugin class and widget class.
require_once( plugin_dir_path( __FILE__ ) . 'class-floating-social-bar.php' );

// Register hooks for activation, deactivation and uninstall instances.
// These are static callbacks on the class so they work before instantiation.
register_activation_hook( __FILE__, array( 'floating_social_bar', 'activate' ) );
register_deactivation_hook( __FILE__, array( 'floating_social_bar', 'deactivate' ) );
register_uninstall_hook( __FILE__, array( 'floating_social_bar', 'uninstall' ) );

// Initialize the plugin (singleton instance).
$floating_social_bar = floating_social_bar::get_instance();
// Generate a template tag for use in template files.
if ( ! function_exists( 'floating_social_bar' ) ) {
	/**
	 * Floating Social Bar template tag.
	 *
	 * Inserts a floating social bar anywhere in a template file by
	 * delegating to the [fsb-social-bar] shortcode. The keys currently
	 * available are 'facebook', 'twitter', 'google', 'linkedin', and
	 * 'pinterest'; set a key to true to display that service. Services
	 * are output in the order given in $args.
	 *
	 * @package Floating Social Bar
	 * @param array $args   Args used for the floating social bar.
	 * @param bool  $return Whether to return the markup instead of echoing it.
	 */
	function floating_social_bar( $args = array(), $return = false ) {
		// Translate the args array into shortcode attribute syntax
		// ("key=value" pairs separated by single spaces).
		$pairs = array();
		foreach ( $args as $key => $value ) {
			$pairs[] = $key . '=' . $value;
		}

		$markup = do_shortcode( '[fsb-social-bar ' . implode( ' ', $pairs ) . ']' );

		// Either hand the rendered bar back to the caller or print it.
		if ( $return ) {
			return $markup;
		}

		echo $markup;
	}
}
embecosm/avr32-gcc | libgfortran/generated/unpack_c10.c | 8739 | /* Specific implementation of the UNPACK intrinsic
Copyright 2008, 2009 Free Software Foundation, Inc.
Contributed by Thomas Koenig <[email protected]>, based on
unpack_generic.c by Paul Brook <[email protected]>.
This file is part of the GNU Fortran 95 runtime library (libgfortran).
Libgfortran is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public
License as published by the Free Software Foundation; either
version 3 of the License, or (at your option) any later version.
Ligbfortran is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include "libgfortran.h"
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#if defined (HAVE_GFC_COMPLEX_10)
void
unpack0_c10 (gfc_array_c10 *ret, const gfc_array_c10 *vector,
const gfc_array_l1 *mask, const GFC_COMPLEX_10 *fptr)
{
/* r.* indicates the return array. */
index_type rstride[GFC_MAX_DIMENSIONS];
index_type rstride0;
index_type rs;
GFC_COMPLEX_10 * restrict rptr;
/* v.* indicates the vector array. */
index_type vstride0;
GFC_COMPLEX_10 *vptr;
/* Value for field, this is constant. */
const GFC_COMPLEX_10 fval = *fptr;
/* m.* indicates the mask array. */
index_type mstride[GFC_MAX_DIMENSIONS];
index_type mstride0;
const GFC_LOGICAL_1 *mptr;
index_type count[GFC_MAX_DIMENSIONS];
index_type extent[GFC_MAX_DIMENSIONS];
index_type n;
index_type dim;
int empty;
int mask_kind;
empty = 0;
mptr = mask->data;
/* Use the same loop for all logical types, by using GFC_LOGICAL_1
and using shifting to address size and endian issues. */
mask_kind = GFC_DESCRIPTOR_SIZE (mask);
if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8
#ifdef HAVE_GFC_LOGICAL_16
|| mask_kind == 16
#endif
)
{
/* Do not convert a NULL pointer as we use test for NULL below. */
if (mptr)
mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind);
}
else
runtime_error ("Funny sized logical array");
if (ret->data == NULL)
{
/* The front end has signalled that we need to populate the
return array descriptor. */
dim = GFC_DESCRIPTOR_RANK (mask);
rs = 1;
for (n = 0; n < dim; n++)
{
count[n] = 0;
ret->dim[n].stride = rs;
ret->dim[n].lbound = 0;
ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound;
extent[n] = ret->dim[n].ubound + 1;
empty = empty || extent[n] <= 0;
rstride[n] = ret->dim[n].stride;
mstride[n] = mask->dim[n].stride * mask_kind;
rs *= extent[n];
}
ret->offset = 0;
ret->data = internal_malloc_size (rs * sizeof (GFC_COMPLEX_10));
}
else
{
dim = GFC_DESCRIPTOR_RANK (ret);
for (n = 0; n < dim; n++)
{
count[n] = 0;
extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound;
empty = empty || extent[n] <= 0;
rstride[n] = ret->dim[n].stride;
mstride[n] = mask->dim[n].stride * mask_kind;
}
if (rstride[0] == 0)
rstride[0] = 1;
}
if (empty)
return;
if (mstride[0] == 0)
mstride[0] = 1;
vstride0 = vector->dim[0].stride;
if (vstride0 == 0)
vstride0 = 1;
rstride0 = rstride[0];
mstride0 = mstride[0];
rptr = ret->data;
vptr = vector->data;
while (rptr)
{
if (*mptr)
{
/* From vector. */
*rptr = *vptr;
vptr += vstride0;
}
else
{
/* From field. */
*rptr = fval;
}
/* Advance to the next element. */
rptr += rstride0;
mptr += mstride0;
count[0]++;
n = 0;
while (count[n] == extent[n])
{
/* When we get to the end of a dimension, reset it and increment
the next dimension. */
count[n] = 0;
/* We could precalculate these products, but this is a less
frequently used path so probably not worth it. */
rptr -= rstride[n] * extent[n];
mptr -= mstride[n] * extent[n];
n++;
if (n >= dim)
{
/* Break out of the loop. */
rptr = NULL;
break;
}
else
{
count[n]++;
rptr += rstride[n];
mptr += mstride[n];
}
}
}
}
/* UNPACK with an array FIELD: scatter the elements of VECTOR into RET
   where MASK is true; elsewhere RET takes the corresponding element of
   FIELD.  Identical in structure to unpack0_c10 above, except that FIELD
   is traversed element-wise alongside RET and MASK instead of being a
   single scalar value.  */

void
unpack1_c10 (gfc_array_c10 *ret, const gfc_array_c10 *vector,
	     const gfc_array_l1 *mask, const gfc_array_c10 *field)
{
  /* r.* indicates the return array.  */
  index_type rstride[GFC_MAX_DIMENSIONS];
  index_type rstride0;
  index_type rs;
  GFC_COMPLEX_10 * restrict rptr;
  /* v.* indicates the vector array.  */
  index_type vstride0;
  GFC_COMPLEX_10 *vptr;
  /* f.* indicates the field array.  */
  index_type fstride[GFC_MAX_DIMENSIONS];
  index_type fstride0;
  const GFC_COMPLEX_10 *fptr;
  /* m.* indicates the mask array.  */
  index_type mstride[GFC_MAX_DIMENSIONS];
  index_type mstride0;
  const GFC_LOGICAL_1 *mptr;

  index_type count[GFC_MAX_DIMENSIONS];
  index_type extent[GFC_MAX_DIMENSIONS];
  index_type n;
  index_type dim;
  int empty;
  int mask_kind;

  empty = 0;
  mptr = mask->data;

  /* Use the same loop for all logical types, by using GFC_LOGICAL_1
     and using shifting to address size and endian issues.  */

  mask_kind = GFC_DESCRIPTOR_SIZE (mask);

  if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8
#ifdef HAVE_GFC_LOGICAL_16
      || mask_kind == 16
#endif
      )
    {
      /* Do not convert a NULL pointer as we use test for NULL below.  */
      if (mptr)
	mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind);
    }
  else
    runtime_error ("Funny sized logical array");

  if (ret->data == NULL)
    {
      /* The front end has signalled that we need to populate the
	 return array descriptor.  Shape comes from MASK; storage is
	 allocated below.  */
      dim = GFC_DESCRIPTOR_RANK (mask);
      rs = 1;
      for (n = 0; n < dim; n++)
	{
	  count[n] = 0;
	  ret->dim[n].stride = rs;
	  ret->dim[n].lbound = 0;
	  ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound;
	  extent[n] = ret->dim[n].ubound + 1;
	  empty = empty || extent[n] <= 0;
	  rstride[n] = ret->dim[n].stride;
	  fstride[n] = field->dim[n].stride;
	  /* Mask strides are scaled to bytes of GFC_LOGICAL_1.  */
	  mstride[n] = mask->dim[n].stride * mask_kind;
	  rs *= extent[n];
	}
      ret->offset = 0;
      ret->data = internal_malloc_size (rs * sizeof (GFC_COMPLEX_10));
    }
  else
    {
      /* RET already has a descriptor; just gather extents/strides.  */
      dim = GFC_DESCRIPTOR_RANK (ret);
      for (n = 0; n < dim; n++)
	{
	  count[n] = 0;
	  extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound;
	  empty = empty || extent[n] <= 0;
	  rstride[n] = ret->dim[n].stride;
	  fstride[n] = field->dim[n].stride;
	  mstride[n] = mask->dim[n].stride * mask_kind;
	}
      if (rstride[0] == 0)
	rstride[0] = 1;
    }

  /* Any zero extent means the result has no elements at all.  */
  if (empty)
    return;

  if (fstride[0] == 0)
    fstride[0] = 1;
  if (mstride[0] == 0)
    mstride[0] = 1;

  vstride0 = vector->dim[0].stride;
  if (vstride0 == 0)
    vstride0 = 1;
  rstride0 = rstride[0];
  fstride0 = fstride[0];
  mstride0 = mstride[0];
  rptr = ret->data;
  fptr = field->data;
  vptr = vector->data;

  /* Walk RET, FIELD and MASK in lockstep; VECTOR advances only on true
     mask elements.  rptr == NULL terminates the walk.  */
  while (rptr)
    {
      if (*mptr)
	{
	  /* From vector.  */
	  *rptr = *vptr;
	  vptr += vstride0;
	}
      else
	{
	  /* From field.  */
	  *rptr = *fptr;
	}
      /* Advance to the next element.  */
      rptr += rstride0;
      fptr += fstride0;
      mptr += mstride0;
      count[0]++;
      n = 0;
      while (count[n] == extent[n])
	{
	  /* When we get to the end of a dimension, reset it and increment
	     the next dimension.  */
	  count[n] = 0;
	  /* We could precalculate these products, but this is a less
	     frequently used path so probably not worth it.  */
	  rptr -= rstride[n] * extent[n];
	  fptr -= fstride[n] * extent[n];
	  mptr -= mstride[n] * extent[n];
	  n++;
	  if (n >= dim)
	    {
	      /* Break out of the loop.  */
	      rptr = NULL;
	      break;
	    }
	  else
	    {
	      count[n]++;
	      rptr += rstride[n];
	      fptr += fstride[n];
	      mptr += mstride[n];
	    }
	}
    }
}
#endif
| gpl-2.0 |
damienyong/Kernel-3.0.8 | kernel/crypto/cryptd.c | 25547 | /*
* Software async crypto daemon.
*
* Copyright (c) 2006 Herbert Xu <[email protected]>
*
* Added AEAD support to cryptd.
* Authors: Tadeusz Struk ([email protected])
* Adrian Hoban <[email protected]>
* Gabriele Paoloni <[email protected]>
* Aidan O'Mahony ([email protected])
* Copyright (c) 2010, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/cryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
/* Maximum number of requests queued per CPU before callers see backlog.  */
#define CRYPTD_MAX_CPU_QLEN 100

/* Per-CPU request queue plus the work item that drains it.  */
struct cryptd_cpu_queue {
	struct crypto_queue queue;
	struct work_struct work;
};

/* The cryptd queue: one cryptd_cpu_queue per possible CPU.  */
struct cryptd_queue {
	struct cryptd_cpu_queue __percpu *cpu_queue;
};

/* Instance context for blkcipher templates: spawn of the wrapped
   algorithm plus the shared queue.  */
struct cryptd_instance_ctx {
	struct crypto_spawn spawn;
	struct cryptd_queue *queue;
};

/* Instance context for hash templates (shash spawn).  */
struct hashd_instance_ctx {
	struct crypto_shash_spawn spawn;
	struct cryptd_queue *queue;
};

/* Instance context for AEAD templates (aead spawn).  */
struct aead_instance_ctx {
	struct crypto_aead_spawn aead_spawn;
	struct cryptd_queue *queue;
};

/* Tfm context: the synchronous child transform that does the real work.  */
struct cryptd_blkcipher_ctx {
	struct crypto_blkcipher *child;
};

/* Per-request context: saved caller completion, restored after the
   asynchronous processing finishes.  */
struct cryptd_blkcipher_request_ctx {
	crypto_completion_t complete;
};

struct cryptd_hash_ctx {
	struct crypto_shash *child;
};

/* Hash requests additionally carry the shash descriptor state.  */
struct cryptd_hash_request_ctx {
	crypto_completion_t complete;
	struct shash_desc desc;
};

struct cryptd_aead_ctx {
	struct crypto_aead *child;
};

struct cryptd_aead_request_ctx {
	crypto_completion_t complete;
};
static void cryptd_queue_worker(struct work_struct *work);

/* Allocate the per-CPU queues and attach the worker to each one.
   Returns 0 on success, -ENOMEM if the percpu allocation fails.  */
static int cryptd_init_queue(struct cryptd_queue *queue,
			     unsigned int max_cpu_qlen)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
	}
	return 0;
}

/* Free the per-CPU queues; every queue must already be empty.  */
static void cryptd_fini_queue(struct cryptd_queue *queue)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}

/* Queue a request on the current CPU's queue and kick its worker on
   that same CPU.  Returns the crypto_enqueue_request() result
   (e.g. -EINPROGRESS, or -EBUSY when backlogged).  */
static int cryptd_enqueue_request(struct cryptd_queue *queue,
				  struct crypto_async_request *request)
{
	int cpu, err;
	struct cryptd_cpu_queue *cpu_queue;

	cpu = get_cpu();
	cpu_queue = this_cpu_ptr(queue->cpu_queue);
	err = crypto_enqueue_request(&cpu_queue->queue, request);
	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
	put_cpu();

	return err;
}

/* Called in workqueue context, do one real cryption work (via
 * req->complete) and reschedule itself if there are more work to
 * do. */
static void cryptd_queue_worker(struct work_struct *work)
{
	struct cryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;

	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
	/*
	 * Only handle one request at a time to avoid hogging crypto workqueue.
	 * preempt_disable/enable is used to prevent being preempted by
	 * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent
	 * cryptd_enqueue_request() being accessed from software interrupts.
	 */
	local_bh_disable();
	preempt_disable();
	backlog = crypto_get_backlog(&cpu_queue->queue);
	req = crypto_dequeue_request(&cpu_queue->queue);
	preempt_enable();
	local_bh_enable();

	if (!req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);
	req->complete(req, 0);

	/* More requests pending: requeue ourselves for another pass.  */
	if (cpu_queue->queue.qlen)
		queue_work(kcrypto_wq, &cpu_queue->work);
}

/* Fetch the cryptd queue stashed in the tfm's instance context.  */
static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);

	return ictx->queue;
}
/* Forward a key to the child blkcipher, propagating the request flags
   down and the result flags back up to the parent.  */
static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
				   const u8 *key, unsigned int keylen)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
	struct crypto_blkcipher *child = ctx->child;
	int err;

	crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
					  CRYPTO_TFM_REQ_MASK);
	err = crypto_blkcipher_setkey(child, key, keylen);
	crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
					    CRYPTO_TFM_RES_MASK);
	return err;
}

/* Run the synchronous child cipher on behalf of an async request, then
   invoke the caller's original completion.  If err is -EINPROGRESS we
   only forward the notification and skip the cipher work.  */
static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
				   struct crypto_blkcipher *child,
				   int err,
				   int (*crypt)(struct blkcipher_desc *desc,
						struct scatterlist *dst,
						struct scatterlist *src,
						unsigned int len))
{
	struct cryptd_blkcipher_request_ctx *rctx;
	struct blkcipher_desc desc;

	rctx = ablkcipher_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc.tfm = child;
	desc.info = req->info;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypt(&desc, req->dst, req->src, req->nbytes);

	/* Restore the completion we saved at enqueue time.  */
	req->base.complete = rctx->complete;

out:
	/* Completions run with softirqs disabled.  */
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->encrypt);
}

static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->decrypt);
}

/* Save the caller's completion, substitute our own, and push the
   request onto the cryptd queue for deferred processing.  */
static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
				    crypto_completion_t complete)
{
	struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct cryptd_queue *queue;

	queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
	rctx->complete = req->base.complete;
	req->base.complete = complete;

	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
}

static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
}

/* Instantiate the synchronous child cipher and size the request ctx.  */
static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_spawn *spawn = &ictx->spawn;
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_blkcipher *cipher;

	cipher = crypto_spawn_blkcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	tfm->crt_ablkcipher.reqsize =
		sizeof(struct cryptd_blkcipher_request_ctx);
	return 0;
}

static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(ctx->child);
}
/* Allocate a crypto_instance with `head` bytes before it and `tail`
   bytes of context after it, and fill in the name/priority/blocksize
   fields derived from the wrapped algorithm.  The driver name becomes
   "cryptd(<child-driver>)".  Returns the raw allocation (the instance
   sits at offset `head`) or an ERR_PTR.  */
static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				   unsigned int tail)
{
	char *p;
	struct crypto_instance *inst;
	int err;

	p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	inst = (void *)(p + head);

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		goto out_free_inst;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	/* Boost priority so the async wrapper wins algorithm lookups.  */
	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

out:
	return p;

out_free_inst:
	kfree(p);
	p = ERR_PTR(err);
	goto out;
}

/* Build and register an ablkcipher instance wrapping a synchronous
   blkcipher; all crypto ops are routed through the cryptd queue.  */
static int cryptd_create_blkcipher(struct crypto_template *tmpl,
				   struct rtattr **tb,
				   struct cryptd_queue *queue)
{
	struct cryptd_instance_ctx *ctx;
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	int err;

	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER,
				  CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = crypto_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_spawn(&ctx->spawn, alg, inst,
				CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
	if (err)
		goto out_free_inst;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
	inst->alg.cra_type = &crypto_ablkcipher_type;

	/* Geometry is inherited from the wrapped blkcipher.  */
	inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
	inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;

	inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv;

	inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);

	inst->alg.cra_init = cryptd_blkcipher_init_tfm;
	inst->alg.cra_exit = cryptd_blkcipher_exit_tfm;

	inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
	inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
	inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;

	err = crypto_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_spawn(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}
/* Instantiate the synchronous shash child and size the ahash request
   context to hold our state plus the child's descriptor.  */
static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_shash_spawn *spawn = &ictx->spawn;
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *hash;

	hash = crypto_spawn_shash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct cryptd_hash_request_ctx) +
				 crypto_shash_descsize(hash));
	return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->child);
}

/* Forward the key to the child shash, mirroring flags both ways.  */
static int cryptd_hash_setkey(struct crypto_ahash *parent,
				   const u8 *key, unsigned int keylen)
{
	struct cryptd_hash_ctx *ctx   = crypto_ahash_ctx(parent);
	struct crypto_shash *child = ctx->child;
	int err;

	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
				      CRYPTO_TFM_REQ_MASK);
	err = crypto_shash_setkey(child, key, keylen);
	crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
				       CRYPTO_TFM_RES_MASK);
	return err;
}

/* Save the caller's completion, substitute `complete`, and queue the
   hash request for deferred processing.  */
static int cryptd_hash_enqueue(struct ahash_request *req,
				crypto_completion_t complete)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_queue *queue =
		cryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = complete;

	return cryptd_enqueue_request(queue, &req->base);
}

/* Deferred .init: set up the shash descriptor and run the child's
   init.  A bare -EINPROGRESS is only forwarded to the completion.  */
static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_shash_init(desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_init);
}

/* Deferred .update via the shash/ahash bridge helper.  */
static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_update(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_update);
}

/* Deferred .final: write the digest into req->result.  */
static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = crypto_shash_final(&rctx->desc, req->result);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_final);
}

/* Deferred .finup (update + final in one step).  */
static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_finup(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_finup_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_finup);
}

/* Deferred .digest: one-shot init + update + final on the child.  */
static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = shash_ahash_digest(req, desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

/* Export/import pass straight through to the child's descriptor state;
   these run synchronously (not queued).  */
static int cryptd_hash_export(struct ahash_request *req, void *out)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_export(&rctx->desc, out);
}

static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_import(&rctx->desc, in);
}
/* Build and register an ahash instance wrapping a synchronous shash;
   crypto operations are routed through the cryptd queue.  */
static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
	struct shash_alg *salg;
	struct crypto_alg *alg;
	int err;

	salg = shash_attr_alg(tb[1], 0, 0);
	if (IS_ERR(salg))
		return PTR_ERR(salg);

	alg = &salg->base;
	inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
				     sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_shash_spawn(&ctx->spawn, salg,
				      ahash_crypto_instance(inst));
	if (err)
		goto out_free_inst;

	inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC;

	inst->alg.halg.digestsize = salg->digestsize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

	inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
	inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;

	/* All state-changing ops are the enqueueing wrappers; export and
	   import stay synchronous.  */
	inst->alg.init   = cryptd_hash_init_enqueue;
	inst->alg.update = cryptd_hash_update_enqueue;
	inst->alg.final  = cryptd_hash_final_enqueue;
	inst->alg.finup  = cryptd_hash_finup_enqueue;
	inst->alg.export = cryptd_hash_export;
	inst->alg.import = cryptd_hash_import;
	inst->alg.setkey = cryptd_hash_setkey;
	inst->alg.digest = cryptd_hash_digest_enqueue;

	err = ahash_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_shash(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}
/* Run the child AEAD operation for a deferred request, then invoke the
   caller's original completion.  -EINPROGRESS is only forwarded.  */
static void cryptd_aead_crypt(struct aead_request *req,
			struct crypto_aead *child,
			int err,
			int (*crypt)(struct aead_request *req))
{
	struct cryptd_aead_request_ctx *rctx;
	rctx = aead_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;
	/* Point the request at the synchronous child before crypting.  */
	aead_request_set_tfm(req, child);
	err = crypt( req );
	req->base.complete = rctx->complete;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->encrypt);
}

static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->decrypt);
}

/* Save the caller's completion, substitute `complete`, and queue the
   AEAD request for deferred processing.  */
static int cryptd_aead_enqueue(struct aead_request *req,
				    crypto_completion_t complete)
{
	struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = complete;
	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_encrypt );
}

static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_decrypt );
}

/* Instantiate the child AEAD transform and size the request ctx.  */
static int cryptd_aead_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct aead_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_aead *cipher;

	cipher = crypto_spawn_aead(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	crypto_aead_set_flags(cipher, CRYPTO_TFM_REQ_MAY_SLEEP);
	ctx->child = cipher;
	tfm->crt_aead.reqsize = sizeof(struct cryptd_aead_request_ctx);
	return 0;
}

static void cryptd_aead_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_aead(ctx->child);
}
/* Build and register an async AEAD instance wrapping an existing AEAD
   algorithm; encrypt/decrypt go through the cryptd queue while setkey,
   setauthsize and the giv* ops are inherited from the child.  */
static int cryptd_create_aead(struct crypto_template *tmpl,
		              struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct aead_instance_ctx *ctx;
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	int err;

	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_AEAD,
				CRYPTO_ALG_TYPE_MASK);
        if (IS_ERR(alg))
		return PTR_ERR(alg);

	inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = crypto_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_spawn(&ctx->aead_spawn.base, alg, inst,
			CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
	if (err)
		goto out_free_inst;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
	inst->alg.cra_type = alg->cra_type;
	inst->alg.cra_ctxsize = sizeof(struct cryptd_aead_ctx);
	inst->alg.cra_init = cryptd_aead_init_tfm;
	inst->alg.cra_exit = cryptd_aead_exit_tfm;

	/* Pass-through ops come straight from the wrapped algorithm.  */
	inst->alg.cra_aead.setkey      = alg->cra_aead.setkey;
	inst->alg.cra_aead.setauthsize = alg->cra_aead.setauthsize;
	inst->alg.cra_aead.geniv       = alg->cra_aead.geniv;
	inst->alg.cra_aead.ivsize      = alg->cra_aead.ivsize;
	inst->alg.cra_aead.maxauthsize = alg->cra_aead.maxauthsize;

	/* Only encrypt/decrypt are deferred through the queue.  */
	inst->alg.cra_aead.encrypt     = cryptd_aead_encrypt_enqueue;
	inst->alg.cra_aead.decrypt     = cryptd_aead_decrypt_enqueue;
	inst->alg.cra_aead.givencrypt  = alg->cra_aead.givencrypt;
	inst->alg.cra_aead.givdecrypt  = alg->cra_aead.givdecrypt;

	err = crypto_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_spawn(&ctx->aead_spawn.base);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}
/* The single module-wide cryptd queue shared by every instance.  */
static struct cryptd_queue queue;

/* Template .create hook: dispatch on the requested algorithm type.  */
static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_BLKCIPHER:
		return cryptd_create_blkcipher(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_DIGEST:
		return cryptd_create_hash(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_AEAD:
		return cryptd_create_aead(tmpl, tb, &queue);
	}

	return -EINVAL;
}

/* Template .free hook: release the spawn matching the instance type.
   The three ctx pointers below are aliases of the same instance
   context, interpreted per the algorithm type.  */
static void cryptd_free(struct crypto_instance *inst)
{
	struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
	struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst);

	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AHASH:
		crypto_drop_shash(&hctx->spawn);
		kfree(ahash_instance(inst));
		return;
	case CRYPTO_ALG_TYPE_AEAD:
		crypto_drop_spawn(&aead_ctx->aead_spawn.base);
		kfree(inst);
		return;
	default:
		crypto_drop_spawn(&ctx->spawn);
		kfree(inst);
	}
}

static struct crypto_template cryptd_tmpl = {
	.name = "cryptd",
	.create = cryptd_create,
	.free = cryptd_free,
	.module = THIS_MODULE,
};
struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
u32 type, u32 mask)
{
char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
struct crypto_tfm *tfm;
if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
"cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
return ERR_PTR(-EINVAL);
type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
type |= CRYPTO_ALG_TYPE_BLKCIPHER;
mask &= ~CRYPTO_ALG_TYPE_MASK;
mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK);
tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
if (IS_ERR(tfm))
return ERR_CAST(tfm);
if (tfm->__crt_alg->cra_module != THIS_MODULE) {
crypto_free_tfm(tfm);
return ERR_PTR(-EINVAL);
}
return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);
/* Return the synchronous blkcipher wrapped inside @tfm. */
struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);
/* Release a wrapper obtained from cryptd_alloc_ablkcipher(). */
void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
{
	crypto_free_ablkcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);
/*
 * cryptd_alloc_ahash - allocate the async "cryptd(alg_name)" hash wrapper.
 *
 * As with the blkcipher variant, the result is rejected unless the
 * algorithm was actually instantiated by this module.
 * Returns an ERR_PTR() on failure.
 */
struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
					u32 type, u32 mask)
{
	char wrapped_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_ahash *hash;

	if (snprintf(wrapped_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);

	hash = crypto_alloc_ahash(wrapped_name, type, mask);
	if (IS_ERR(hash))
		return ERR_CAST(hash);

	if (hash->base.__crt_alg->cra_module != THIS_MODULE) {
		/* Name collision with a foreign algorithm - refuse it. */
		crypto_free_ahash(hash);
		return ERR_PTR(-EINVAL);
	}

	return __cryptd_ahash_cast(hash);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);
/* Return the synchronous shash wrapped inside @tfm. */
struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);
/* Return the shash descriptor embedded in @req's request context. */
struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	return &rctx->desc;
}
EXPORT_SYMBOL_GPL(cryptd_shash_desc);
/* Release a wrapper obtained from cryptd_alloc_ahash(). */
void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
	crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);
/*
 * cryptd_alloc_aead - allocate the async "cryptd(alg_name)" AEAD wrapper.
 *
 * Rejects the tfm unless it was instantiated by this module.
 * Returns an ERR_PTR() on failure.
 */
struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
				      u32 type, u32 mask)
{
	char wrapped_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_aead *aead;

	if (snprintf(wrapped_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);

	aead = crypto_alloc_aead(wrapped_name, type, mask);
	if (IS_ERR(aead))
		return ERR_CAST(aead);

	if (aead->base.__crt_alg->cra_module != THIS_MODULE) {
		/* Name collision with a foreign algorithm - refuse it. */
		crypto_free_aead(aead);
		return ERR_PTR(-EINVAL);
	}

	return __cryptd_aead_cast(aead);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_aead);
/* Return the synchronous AEAD transform wrapped inside @tfm. */
struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_aead_child);
/* Release a wrapper obtained from cryptd_alloc_aead(). */
void cryptd_free_aead(struct cryptd_aead *tfm)
{
	crypto_free_aead(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_aead);
/*
 * Module init: bring up the per-CPU request queue first, then register
 * the "cryptd" template; tear the queue down again if registration fails.
 */
static int __init cryptd_init(void)
{
	int rc = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN);

	if (rc)
		return rc;

	rc = crypto_register_template(&cryptd_tmpl);
	if (rc)
		cryptd_fini_queue(&queue);

	return rc;
}
/* Module exit: drain/free the request queue and unregister the template. */
static void __exit cryptd_exit(void)
{
	cryptd_fini_queue(&queue);
	crypto_unregister_template(&cryptd_tmpl);
}
module_init(cryptd_init);
module_exit(cryptd_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");
| gpl-2.0 |
infraredbg/Lenovo_A820_kernel_kk | bionic/libm/upstream-freebsd/lib/msun/src/e_lgamma.c | 730 |
/* @(#)e_lgamma.c 1.3 95/01/18 */
/*
* ====================================================
* Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
*
* Developed at SunSoft, a Sun Microsystems, Inc. business.
* Permission to use, copy, modify, and distribute this
* software is freely granted, provided that this notice
* is preserved.
* ====================================================
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/* __ieee754_lgamma(x)
* Return the logarithm of the Gamma function of x.
*
* Method: call __ieee754_lgamma_r
*/
#include "math.h"
#include "math_private.h"
/* Sign of Gamma(x) is reported to the caller through this global. */
extern int signgam;
double
__ieee754_lgamma(double x)
{
	/* Delegate to the reentrant variant, recording the sign in signgam. */
	return __ieee754_lgamma_r(x,&signgam);
}
| gpl-2.0 |
AlexanderDolgan/juliawp | wp-content/themes/node_modules/gulp-rigger/node_modules/rigger/test/input-settings/stringval.js | 18 | //=set test "Test" | gpl-2.0 |
latelee/coreboot | src/vendorcode/amd/agesa/f15/Proc/HT/htInterface.h | 18096 | /* $NoKeywords:$ */
/**
* @file
*
* Internal access to HT Interface.
*
* This file provides definitions used by HT internal modules. The
* external HT interface (in agesa.h) is accessed using these methods.
* This keeps the HT Feature implementations abstracted from the HT
* interface.
*
* This file includes the interface access constructor and interface
* support which is not removed with various build options.
*
* @xrefitem bom "File Content Label" "Release Content"
* @e project: AGESA
* @e sub-project: HyperTransport
*
*/
/*
*****************************************************************************
*
* Copyright (c) 2008 - 2012, Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Advanced Micro Devices, Inc. nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL ADVANCED MICRO DEVICES, INC. BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* ***************************************************************************
*
*/
#ifndef _HT_INTERFACE_H_
#define _HT_INTERFACE_H_
/**
* @page htimplintf HT Internal Interface Implementation Guide
*
* HT Internal Interface provides access to the HT Component external interface (see AGESA.h),
* in a manner that isolates calling code from knowledge about the external interface or which
* interfaces are supported in the current build.
*
* @par Adding a Method to HT Internal Interface
*
* To add a new method to the HT Internal Interface, follow these steps.
* <ul>
* <li> Create a typedef for the Method with the correct parameters and return type.
*
* <ul>
* <li> Name the method typedef (F_METHOD_NAME)(), where METHOD_NAME is the same name as the method table item,
* but with "_"'s and UPPERCASE, rather than mixed case.
* @n <tt> typedef VOID (F_METHOD_NAME)(); </tt> @n
*
* <li> Make a reference type for references to a method implementation:
* @n <tt> /// Reference to a Method </tt>
* @n <tt> typedef F_METHOD_NAME *PF_METHOD_NAME </tt> @n
* </ul>
*
* <li> Provide a standard doxygen function preamble for the Method typedef. Begin the
* detailed description by providing a reference to the method instances page by including
* the lines below:
* @code
* *
* * @HtInterfaceInstances
* *
* @endcode
* @note It is important to provide documentation for the method type, because the method may not
* have an implementation in any families supported by the current package. @n
*
* <li> Add to the HT_INTERFACE struct an item for the Method:
* @n <tt> PF_METHOD_NAME MethodName; ///< Method: description. </tt> @n
* </ul>
*
* @par Implementing an HT Internal Interface Instance of the method.
*
* To implement an instance of a method for a specific interface follow these steps.
*
* - In appropriate files, implement the method with the return type and parameters
* matching the method typedef.
*
* - Name the function MethodName().
*
* - Create a doxygen function preamble for the method instance. Begin the detailed description with
* an Implements command to reference the method type and add this instance to the Method Instances page.
* @code
* *
* * @HtInterfaceMethod{::F_METHOD_NAME}.
* *
* @endcode
*
* - To access other Ht internal interface routines or data as part of the method implementation, the function
* must use HtInterface->OtherMethod(). Do not directly access other HT internal interface
* routines, because in the table there may be overrides or this routine may be shared by multiple families.
*
* - Add the instance to the HT_INTERFACE instances.
*
* - If a configuration does not need an instance of the method use one of the CommonReturns from
* CommonReturns.h with the same return type.
*
* @par Invoking HT Internal Interface Methods.
*
* The first step is carried out only once by the top level HT entry point.
* @n @code
* HT_INTERFACE HtInterface;
* // Get the current HT internal interface (to HtBlock data)
* NewHtInterface (&HtInterface);
* State->HtInterface = &HtInterface;
* @endcode
*
* The following example shows how to invoke a HT Internal Interface method.
* @n @code
* State->HtInterface->MethodName ();
* @endcode
*
*/
/*----------------------------------------------------------------------------
* Mixed (DEFINITIONS AND MACROS / TYPEDEFS, STRUCTURES, ENUMS)
*
*----------------------------------------------------------------------------
*/
/*-----------------------------------------------------------------------------
* DEFINITIONS AND MACROS
*
*-----------------------------------------------------------------------------
*/
/*----------------------------------------------------------------------------
* TYPEDEFS, STRUCTURES, ENUMS
*
*----------------------------------------------------------------------------
*/
/**
 * Get limits for CPU to CPU Links.
 *
 * @HtInterfaceInstances.
 *
 * @param[in]     NodeA             One Node on which this Link is located
 * @param[in]     LinkA             The Link on this Node
 * @param[in]     NodeB             The other Node on which this Link is located
 * @param[in]     LinkB             The Link on that Node
 * @param[in,out] ABLinkWidthLimit  modify to change the Link Width In
 * @param[in,out] BALinkWidthLimit  modify to change the Link Width Out
 * @param[in,out] PcbFreqCap        modify to change the Link's frequency capability
 * @param[in]     State             the input data
 *
 */
typedef VOID F_GET_CPU_2_CPU_PCB_LIMITS (
  IN       UINT8        NodeA,
  IN       UINT8        LinkA,
  IN       UINT8        NodeB,
  IN       UINT8        LinkB,
  IN OUT   UINT8        *ABLinkWidthLimit,
  IN OUT   UINT8        *BALinkWidthLimit,
  IN OUT   UINT32       *PcbFreqCap,
  IN       STATE_DATA   *State
  );

/// Reference to a method.
typedef F_GET_CPU_2_CPU_PCB_LIMITS *PF_GET_CPU_2_CPU_PCB_LIMITS;

/**
 * Skip reganging of subLinks.
 *
 * @HtInterfaceInstances.
 *
 * @param[in] NodeA  One Node on which this Link is located
 * @param[in] LinkA  The Link on this Node
 * @param[in] NodeB  The other Node on which this Link is located
 * @param[in] LinkB  The Link on that Node
 * @param[in] State  the input data
 *
 * @retval MATCHED      leave Link unganged
 * @retval POWERED_OFF  leave Link unganged and power off the paired sublink
 * @retval UNMATCHED    regang Link automatically
 */
typedef FINAL_LINK_STATE F_GET_SKIP_REGANG (
  IN       UINT8        NodeA,
  IN       UINT8        LinkA,
  IN       UINT8        NodeB,
  IN       UINT8        LinkB,
  IN       STATE_DATA   *State
  );

/// Reference to a method.
typedef F_GET_SKIP_REGANG *PF_GET_SKIP_REGANG;
/**
 * Manually control bus number assignment.
 *
 * @HtInterfaceInstances.
 *
 * @param[in]  Node    The Node on which this chain is located
 * @param[in]  Link    The Link on the host for this chain
 * @param[out] SecBus  Secondary Bus number for this non-coherent chain
 * @param[out] SubBus  Subordinate Bus number
 * @param[in]  State   the input data
 *
 * @retval TRUE   this routine is supplying the bus numbers
 * @retval FALSE  use automatic Bus numbering
 */
typedef BOOLEAN F_GET_OVERRIDE_BUS_NUMBERS (
  IN       UINT8        Node,
  IN       UINT8        Link,
     OUT   UINT8        *SecBus,
     OUT   UINT8        *SubBus,
  IN       STATE_DATA   *State
  );

/// Reference to a method.
typedef F_GET_OVERRIDE_BUS_NUMBERS *PF_GET_OVERRIDE_BUS_NUMBERS;

/**
 * Get Manual BUID assignment list.
 *
 * @HtInterfaceInstances.
 *
 * @param[in]  Node   The Node on which this chain is located
 * @param[in]  Link   The Link on the host for this chain
 * @param[out] List   a pointer to a swap list; only valid when TRUE is returned
 * @param[in]  State  the input data
 *
 * @retval TRUE   use the manual List
 * @retval FALSE  initialize the Link automatically. List is not valid.
 */
typedef BOOLEAN F_GET_MANUAL_BUID_SWAP_LIST (
  IN       UINT8            Node,
  IN       UINT8            Link,
     OUT   BUID_SWAP_LIST   **List,
  IN       STATE_DATA       *State
  );

/// Reference to a method.
typedef F_GET_MANUAL_BUID_SWAP_LIST *PF_GET_MANUAL_BUID_SWAP_LIST;
/**
 * Override capabilities of a device.
 *
 * @HtInterfaceInstances.
 *
 * @param[in]     HostNode      The Node on which this chain is located
 * @param[in]     HostLink      The Link on the host for this chain
 * @param[in]     Depth         The Depth in the I/O chain from the Host
 * @param[in]     PciAddress    The Device's PCI config address (for callout)
 * @param[in]     DevVenId      The Device's PCI Vendor + Device ID (offset 0x00)
 * @param[in]     Revision      The Device's PCI Revision
 * @param[in]     Link          The Device's Link number (0 or 1)
 * @param[in,out] LinkWidthIn   modify to change the Link Width In
 * @param[in,out] LinkWidthOut  modify to change the Link Width Out
 * @param[in,out] FreqCap       modify to change the Link's frequency capability
 * @param[in,out] Clumping      modify to change unit id clumping capability
 * @param[in]     State         the input data
 *
 */
typedef VOID F_GET_DEVICE_CAP_OVERRIDE (
  IN       UINT8        HostNode,
  IN       UINT8        HostLink,
  IN       UINT8        Depth,
  IN       PCI_ADDR     PciAddress,
  IN       UINT32       DevVenId,
  IN       UINT8        Revision,
  IN       UINT8        Link,
  IN OUT   UINT8        *LinkWidthIn,
  IN OUT   UINT8        *LinkWidthOut,
  IN OUT   UINT32       *FreqCap,
  IN OUT   UINT32       *Clumping,
  IN       STATE_DATA   *State
  );

/// Reference to a method.
typedef F_GET_DEVICE_CAP_OVERRIDE *PF_GET_DEVICE_CAP_OVERRIDE;

/**
 * Get limits for non-coherent Links.
 *
 * @HtInterfaceInstances.
 *
 * @param[in]     HostNode                  The Node on which this Link is located
 * @param[in]     HostLink                  The Link about to be initialized
 * @param[in]     Depth                     The Depth in the I/O chain from the Host
 * @param[in,out] DownstreamLinkWidthLimit  modify to change the Link Width In
 * @param[in,out] UpstreamLinkWidthLimit    modify to change the Link Width Out
 * @param[in,out] PcbFreqCap                modify to change the Link's frequency capability
 * @param[in]     State                     the input data
 */
typedef VOID F_GET_IO_PCB_LIMITS (
  IN       UINT8        HostNode,
  IN       UINT8        HostLink,
  IN       UINT8        Depth,
  IN OUT   UINT8        *DownstreamLinkWidthLimit,
  IN OUT   UINT8        *UpstreamLinkWidthLimit,
  IN OUT   UINT32       *PcbFreqCap,
  IN       STATE_DATA   *State
  );

/// Reference to a method.
typedef F_GET_IO_PCB_LIMITS *PF_GET_IO_PCB_LIMITS;
/**
 * Get the Socket number for a given Node number.
 *
 * @HtInterfaceInstances.
 *
 * @param[in] Node   Node discovered event data.
 * @param[in] State  reference to Node to socket map
 *
 * @return the socket id
 *
 */
typedef UINT8 F_GET_SOCKET_FROM_MAP (
  IN       UINT8        Node,
  IN       STATE_DATA   *State
  );

/// Reference to a method.
typedef F_GET_SOCKET_FROM_MAP *PF_GET_SOCKET_FROM_MAP;

/**
 * Ignore a Link.
 *
 * @HtInterfaceInstances.
 *
 * @param[in] Node              The Node on which this Link is located
 * @param[in] Link              The Link about to be initialized
 * @param[in] NbIgnoreLinkList  The northbridge default ignore link list
 * @param[in] State             the input data
 *
 * @retval MATCHED      ignore this Link and skip it
 * @retval POWERED_OFF  ignore this Link and power it off.
 * @retval UNMATCHED    initialize the Link normally
 */
typedef FINAL_LINK_STATE F_GET_IGNORE_LINK (
  IN       UINT8         Node,
  IN       UINT8         Link,
  IN       IGNORE_LINK   *NbIgnoreLinkList,
  IN       STATE_DATA    *State
  );

/// Reference to a method.
typedef F_GET_IGNORE_LINK *PF_GET_IGNORE_LINK;
/**
 * Post Node id and other context info to AP cores via mailbox.
 *
 * @HtInterfaceInstances.
 *
 * @param[in] State  Our state
 */
typedef VOID F_POST_MAP_TO_AP (
  IN       STATE_DATA   *State
  );

/// Reference to a method.
typedef F_POST_MAP_TO_AP *PF_POST_MAP_TO_AP;

/**
 * Clean up the map structures after a severe event has caused a fall back to 1 node.
 *
 * @HtInterfaceInstances.
 *
 * @param[in] State  Our state
 */
typedef VOID F_CLEAN_MAPS_AFTER_ERROR (
  IN       STATE_DATA   *State
  );

/// Reference to a method.
typedef F_CLEAN_MAPS_AFTER_ERROR *PF_CLEAN_MAPS_AFTER_ERROR;

/**
 * Get a new Socket Die to Node Map.
 *
 * @HtInterfaceInstances.
 *
 * @param[in,out] State  global state
 */
typedef VOID F_NEW_NODE_AND_SOCKET_TABLES (
  IN OUT   STATE_DATA   *State
  );

/// Reference to a method.
typedef F_NEW_NODE_AND_SOCKET_TABLES *PF_NEW_NODE_AND_SOCKET_TABLES;

/**
 * Fill in the socket's Node id when a processor is discovered in that socket.
 *
 * @HtInterfaceInstances.
 *
 * @param[in] Node               Node from which a new node was discovered
 * @param[in] CurrentNodeModule  The current node's module id in its processor.
 * @param[in] PackageLink        The package level link from Node to NewNode.
 * @param[in] NewNode            The new node's id
 * @param[in] HardwareSocket     If we use the hardware method (preferred), this is the socket of the new node.
 * @param[in] Module             The new node's module id in its processor.
 * @param[in] State              our State
 */
typedef VOID F_SET_NODE_TO_SOCKET_MAP (
  IN       UINT8        Node,
  IN       UINT8        CurrentNodeModule,
  IN       UINT8        PackageLink,
  IN       UINT8        NewNode,
  IN       UINT8        HardwareSocket,
  IN       UINT8        Module,
  IN       STATE_DATA   *State
  );

/// Reference to a method.
typedef F_SET_NODE_TO_SOCKET_MAP *PF_SET_NODE_TO_SOCKET_MAP;
/**
 * Get a new, empty Hop Count Table, to make one for the installed topology.
 *
 * @HtInterfaceInstances.
 *
 * @param[in,out] State  Keeps our buffer handle.
 *
 */
typedef VOID F_NEW_HOP_COUNT_TABLE (
  IN OUT   STATE_DATA   *State
  );

/// Reference to a method.
typedef F_NEW_HOP_COUNT_TABLE *PF_NEW_HOP_COUNT_TABLE;

/**
 * Get the minimum Northbridge frequency for the system.
 *
 * @HtInterfaceInstances.
 *
 * Invokes the CPU component power mgt interface.
 *
 * @param[in] PlatformConfig  Platform profile/build option config structure.
 * @param[in] StdHeader       Config for library and services.
 *
 * @return Frequency in MHz.
 *
 */
typedef UINT32 F_GET_MIN_NB_CORE_FREQ (
  IN       PLATFORM_CONFIGURATION   *PlatformConfig,
  IN       AMD_CONFIG_PARAMS        *StdHeader
  );

/// Reference to a Method.
typedef F_GET_MIN_NB_CORE_FREQ *PF_GET_MIN_NB_CORE_FREQ;
/**
 * The HT Interface: feature code uses these methods to get interface parameters.
 */
struct _HT_INTERFACE {   // See Forward Declaration in HtFeates.h
  PF_GET_CPU_2_CPU_PCB_LIMITS GetCpu2CpuPcbLimits; /**< Method: Get link limits for coherent links. */
  PF_GET_SKIP_REGANG GetSkipRegang; /**< Method: Skip reganging for coherent links. */
  PF_NEW_HOP_COUNT_TABLE NewHopCountTable; /**< Method: Get a new hop count table. */
  PF_GET_OVERRIDE_BUS_NUMBERS GetOverrideBusNumbers; /**< Method: Control Bus number assignment. */
  PF_GET_MANUAL_BUID_SWAP_LIST GetManualBuidSwapList; /**< Method: Assign device IDs. */
  PF_GET_DEVICE_CAP_OVERRIDE GetDeviceCapOverride; /**< Method: Override Device capabilities. */
  PF_GET_IO_PCB_LIMITS GetIoPcbLimits; /**< Method: Get link limits for noncoherent links. */
  PF_GET_SOCKET_FROM_MAP GetSocketFromMap; /**< Method: Get the Socket for a node id. */
  PF_GET_IGNORE_LINK GetIgnoreLink; /**< Method: Ignore a link. */
  PF_POST_MAP_TO_AP PostMapToAp; /**< Method: Post Socket and other info to AP cores. */
  PF_NEW_NODE_AND_SOCKET_TABLES NewNodeAndSocketTables; /**< Method: Get new socket and node maps. */
  PF_CLEAN_MAPS_AFTER_ERROR CleanMapsAfterError; /**< Method: Clean up maps for forced 1P on error fall back. */
  PF_SET_NODE_TO_SOCKET_MAP SetNodeToSocketMap; /**< Method: Associate a node id with a socket. */
  PF_GET_MIN_NB_CORE_FREQ GetMinNbCoreFreq; /**< Method: Get the minimum northbridge frequency. */
} ;

/*----------------------------------------------------------------------------
 *           Prototypes to Interface from Feature Code
 *
 *----------------------------------------------------------------------------
 */

/**
 * A constructor for the internal Ht Interface.
 *
 * Fills in the supplied HT_INTERFACE with the current method implementations.
 */
VOID
NewHtInterface (
     OUT   HT_INTERFACE         *HtInterface,
  IN       AMD_CONFIG_PARAMS    *StdHeader
  );

#endif  /* _HT_INTERFACE_H_ */
| gpl-2.0 |
mturquette/linux-omap | drivers/usb/class/cdc-acm.c | 43490 | /*
* cdc-acm.c
*
* Copyright (c) 1999 Armin Fuerst <[email protected]>
* Copyright (c) 1999 Pavel Machek <[email protected]>
* Copyright (c) 1999 Johannes Erdfelt <[email protected]>
* Copyright (c) 2000 Vojtech Pavlik <[email protected]>
* Copyright (c) 2004 Oliver Neukum <[email protected]>
* Copyright (c) 2005 David Kubicek <[email protected]>
*
* USB Abstract Control Model driver for USB modems and ISDN adapters
*
* Sponsored by SuSE
*
* ChangeLog:
* v0.9 - thorough cleaning, URBification, almost a rewrite
* v0.10 - some more cleanups
* v0.11 - fixed flow control, read error doesn't stop reads
* v0.12 - added TIOCM ioctls, added break handling, made struct acm
* kmalloced
* v0.13 - added termios, added hangup
* v0.14 - sized down struct acm
* v0.15 - fixed flow control again - characters could be lost
* v0.16 - added code for modems with swapped data and control interfaces
* v0.17 - added new style probing
* v0.18 - fixed new style probing for devices with more configurations
* v0.19 - fixed CLOCAL handling (thanks to Richard Shih-Ping Chan)
* v0.20 - switched to probing on interface (rather than device) class
* v0.21 - revert to probing on device for devices with multiple configs
* v0.22 - probe only the control interface. if usbcore doesn't choose the
* config we want, sysadmin changes bConfigurationValue in sysfs.
* v0.23 - use softirq for rx processing, as needed by tty layer
* v0.24 - change probe method to evaluate CDC union descriptor
 *	v0.25 - downstream tasks parallelized to maximize throughput
* v0.26 - multiple write urbs, writesize increased
*/
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#undef DEBUG
#undef VERBOSE_DEBUG
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/serial.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/usb/cdc.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
#include <linux/list.h>
#include "cdc-acm.h"
#define ACM_CLOSE_TIMEOUT	15	/* seconds to let writes drain */

/*
 * Version Information
 */
#define DRIVER_VERSION "v0.26"
#define DRIVER_AUTHOR "Armin Fuerst, Pavel Machek, Johannes Erdfelt, Vojtech Pavlik, David Kubicek"
#define DRIVER_DESC "USB Abstract Control Model driver for USB modems and ISDN adapters"

static struct usb_driver acm_driver;
static struct tty_driver *acm_tty_driver;
/* Per-minor lookup table of active acm devices.
 * NOTE(review): presumably guarded by open_mutex - confirm against probe/disconnect. */
static struct acm *acm_table[ACM_TTY_MINORS];
/* Serializes tty open/close paths. */
static DEFINE_MUTEX(open_mutex);
/* True only while the device is present and the port is open. */
#define ACM_READY(acm)	(acm && acm->dev && acm->port.count)
static const struct tty_port_operations acm_port_ops = {
};
#ifdef VERBOSE_DEBUG
#define verbose	1
#else
#define verbose	0
#endif
/*
 * Functions for ACM control messages.
 */

/*
 * Issue a CDC ACM class-specific request on the default control pipe,
 * addressed to the control interface. Returns 0 on success or a
 * negative USB error code.
 */
static int acm_ctrl_msg(struct acm *acm, int request, int value,
							void *buf, int len)
{
	/* 5000 ms timeout for the control transfer */
	int retval = usb_control_msg(acm->dev, usb_sndctrlpipe(acm->dev, 0),
		request, USB_RT_ACM, value,
		acm->control->altsetting[0].desc.bInterfaceNumber,
		buf, len, 5000);
	dbg("acm_control_msg: rq: 0x%02x val: %#x len: %#x result: %d",
						request, value, len, retval);
	return retval < 0 ? retval : 0;
}
/* devices aren't required to support these requests.
 * the cdc acm descriptor tells whether they do...
 */
/* Set DTR/RTS state bits. */
#define acm_set_control(acm, control) \
	acm_ctrl_msg(acm, USB_CDC_REQ_SET_CONTROL_LINE_STATE, control, NULL, 0)
/* Program baud rate / framing via a usb_cdc_line_coding struct. */
#define acm_set_line(acm, line) \
	acm_ctrl_msg(acm, USB_CDC_REQ_SET_LINE_CODING, 0, line, sizeof *(line))
/* Assert break for (ms) milliseconds. */
#define acm_send_break(acm, ms) \
	acm_ctrl_msg(acm, USB_CDC_REQ_SEND_BREAK, ms, NULL, 0)
/*
 * Write buffer management.
 * All of these assume proper locks taken by the caller.
 */

/*
 * Claim the first free write buffer and return its index,
 * or -1 when all ACM_NW buffers are in flight.
 */
static int acm_wb_alloc(struct acm *acm)
{
	int idx;

	for (idx = 0; idx < ACM_NW; idx++) {
		struct acm_wb *wb = &acm->wb[idx];

		if (!wb->use) {
			wb->use = 1;
			return idx;
		}
	}
	return -1;
}
/*
 * Count the write buffers that are currently free.
 * Takes write_lock itself, so callers must not hold it.
 */
static int acm_wb_is_avail(struct acm *acm)
{
	int free_cnt = 0;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&acm->write_lock, flags);
	for (i = 0; i < ACM_NW; i++)
		if (!acm->wb[i].use)
			free_cnt++;
	spin_unlock_irqrestore(&acm->write_lock, flags);
	return free_cnt;
}
/*
 * Finish write. Caller must hold acm->write_lock
 */
static void acm_write_done(struct acm *acm, struct acm_wb *wb)
{
	/* return the buffer to the pool and account the completed transfer */
	wb->use = 0;
	acm->transmitting--;
}
/*
 * Poke write.
 *
 * the caller is responsible for locking
 */
/* Submit write buffer @wb; on submission failure the buffer is
 * immediately returned to the pool via acm_write_done(). */
static int acm_start_wb(struct acm *acm, struct acm_wb *wb)
{
	int rc;
	acm->transmitting++;
	/* point the preallocated urb at this buffer's data and DMA handle */
	wb->urb->transfer_buffer = wb->buf;
	wb->urb->transfer_dma = wb->dmah;
	wb->urb->transfer_buffer_length = wb->len;
	wb->urb->dev = acm->dev;
	rc = usb_submit_urb(wb->urb, GFP_ATOMIC);
	if (rc < 0) {
		dbg("usb_submit_urb(write bulk) failed: %d", rc);
		acm_write_done(acm, wb);
	}
	return rc;
}
/* Start transmission of write buffer @wbn, or defer it to the waker
 * work item while the device is suspended. Returns 0 or a negative
 * error code. */
static int acm_write_start(struct acm *acm, int wbn)
{
	unsigned long flags;
	struct acm_wb *wb = &acm->wb[wbn];
	int rc;
	spin_lock_irqsave(&acm->write_lock, flags);
	if (!acm->dev) {
		/* device is gone: release the buffer and bail out */
		wb->use = 0;
		spin_unlock_irqrestore(&acm->write_lock, flags);
		return -ENODEV;
	}
	dbg("%s susp_count: %d", __func__, acm->susp_count);
	if (acm->susp_count) {
		/* suspended: stash the buffer; acm_waker resumes and sends it */
		acm->delayed_wb = wb;
		schedule_work(&acm->waker);
		spin_unlock_irqrestore(&acm->write_lock, flags);
		return 0;	/* A white lie */
	}
	usb_mark_last_busy(acm->dev);
	rc = acm_start_wb(acm, wb);
	spin_unlock_irqrestore(&acm->write_lock, flags);
	return rc;
}
/*
 * attributes exported through sysfs
 */

/* sysfs: expose the bmCapabilities byte from the ACM descriptor. */
static ssize_t show_caps
(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct usb_interface *intf = to_usb_interface(dev);
	struct acm *acm = usb_get_intfdata(intf);
	return sprintf(buf, "%d", acm->ctrl_caps);
}
static DEVICE_ATTR(bmCapabilities, S_IRUGO, show_caps, NULL);
/* sysfs: expose the raw wCountryCodes array from the country functional
 * descriptor (binary, country_code_size bytes). */
static ssize_t show_country_codes
(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct usb_interface *intf = to_usb_interface(dev);
	struct acm *acm = usb_get_intfdata(intf);
	memcpy(buf, acm->country_codes, acm->country_code_size);
	return acm->country_code_size;
}
static DEVICE_ATTR(wCountryCodes, S_IRUGO, show_country_codes, NULL);
/* sysfs: expose iCountryCodeRelDate from the country functional descriptor. */
static ssize_t show_country_rel_date
(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct usb_interface *intf = to_usb_interface(dev);
	struct acm *acm = usb_get_intfdata(intf);
	return sprintf(buf, "%d", acm->country_rel_date);
}
static DEVICE_ATTR(iCountryCodeRelDate, S_IRUGO, show_country_rel_date, NULL);
/*
 * Interrupt handlers for various ACM device responses
 */

/* control interface reports status changes with "interrupt" transfers */
/*
 * URB completion handler for the interrupt-in endpoint. Decodes CDC
 * notifications (network connection, serial line state) and resubmits
 * the URB unless it was shut down.
 */
static void acm_ctrl_irq(struct urb *urb)
{
	struct acm *acm = urb->context;
	struct usb_cdc_notification *dr = urb->transfer_buffer;
	struct tty_struct *tty;
	unsigned char *data;
	int newctrl;
	int retval;
	int status = urb->status;
	switch (status) {
	case 0:
		/* success */
		break;
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
		/* this urb is terminated, clean up */
		dbg("%s - urb shutting down with status: %d", __func__, status);
		return;
	default:
		/* transient error: still resubmit below */
		dbg("%s - nonzero urb status received: %d", __func__, status);
		goto exit;
	}
	if (!ACM_READY(acm))
		goto exit;
	/* notification-specific payload follows the header */
	data = (unsigned char *)(dr + 1);
	switch (dr->bNotificationType) {
	case USB_CDC_NOTIFY_NETWORK_CONNECTION:
		dbg("%s network", dr->wValue ?
			"connected to" : "disconnected from");
		break;
	case USB_CDC_NOTIFY_SERIAL_STATE:
		tty = tty_port_tty_get(&acm->port);
		newctrl = get_unaligned_le16(data);
		if (tty) {
			/* hang up on carrier loss unless CLOCAL is set */
			if (!acm->clocal &&
				(acm->ctrlin & ~newctrl & ACM_CTRL_DCD)) {
				dbg("calling hangup");
				tty_hangup(tty);
			}
			tty_kref_put(tty);
		}
		acm->ctrlin = newctrl;
		dbg("input control lines: dcd%c dsr%c break%c ring%c framing%c parity%c overrun%c",
			acm->ctrlin & ACM_CTRL_DCD ? '+' : '-',
			acm->ctrlin & ACM_CTRL_DSR ? '+' : '-',
			acm->ctrlin & ACM_CTRL_BRK ? '+' : '-',
			acm->ctrlin & ACM_CTRL_RI ? '+' : '-',
			acm->ctrlin & ACM_CTRL_FRAMING ? '+' : '-',
			acm->ctrlin & ACM_CTRL_PARITY ? '+' : '-',
			acm->ctrlin & ACM_CTRL_OVERRUN ? '+' : '-');
		break;
	default:
		dbg("unknown notification %d received: index %d len %d data0 %d data1 %d",
			dr->bNotificationType, dr->wIndex,
			dr->wLength, data[0], data[1]);
		break;
	}
exit:
	usb_mark_last_busy(acm->dev);
	/* keep the interrupt pipe running */
	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval)
		dev_err(&urb->dev->dev, "%s - usb_submit_urb failed with "
			"result %d", __func__, retval);
}
/* data interface returns incoming bytes, or we got unthrottled */
/*
 * Completion handler for receive URBs. On success the filled buffer is
 * queued for the rx tasklet; on error the buffer goes straight back to
 * the spare pool. Either way the URB itself becomes spare again and the
 * tasklet is kicked to keep the pipeline running.
 */
static void acm_read_bulk(struct urb *urb)
{
	struct acm_rb *buf;
	struct acm_ru *rcv = urb->context;
	struct acm *acm = rcv->instance;
	int status = urb->status;
	dbg("Entering acm_read_bulk with status %d", status);
	if (!ACM_READY(acm)) {
		dev_dbg(&acm->data->dev, "Aborting, acm not ready");
		return;
	}
	usb_mark_last_busy(acm->dev);
	if (status)
		dev_dbg(&acm->data->dev, "bulk rx status %d\n", status);
	buf = rcv->buffer;
	buf->size = urb->actual_length;
	if (likely(status == 0)) {
		spin_lock(&acm->read_lock);
		acm->processing++;
		list_add_tail(&rcv->list, &acm->spare_read_urbs);
		list_add_tail(&buf->list, &acm->filled_read_bufs);
		spin_unlock(&acm->read_lock);
	} else {
		/* we drop the buffer due to an error */
		spin_lock(&acm->read_lock);
		list_add_tail(&rcv->list, &acm->spare_read_urbs);
		list_add(&buf->list, &acm->spare_read_bufs);
		spin_unlock(&acm->read_lock);
		/* nevertheless the tasklet must be kicked unconditionally
		so the queue cannot dry up */
	}
	if (likely(!acm->susp_count))
		tasklet_schedule(&acm->urb_task);
}
/*
 * RX bottom half: pushes filled buffers into the tty layer, then
 * recycles spare buffer/URB pairs by resubmitting them. Honors tty
 * throttling at both stages and suspend state when resubmitting.
 */
static void acm_rx_tasklet(unsigned long _acm)
{
	struct acm *acm = (void *)_acm;
	struct acm_rb *buf;
	struct tty_struct *tty;
	struct acm_ru *rcv;
	unsigned long flags;
	unsigned char throttled;
	dbg("Entering acm_rx_tasklet");
	if (!ACM_READY(acm)) {
		dbg("acm_rx_tasklet: ACM not ready");
		return;
	}
	spin_lock_irqsave(&acm->throttle_lock, flags);
	throttled = acm->throttle;
	spin_unlock_irqrestore(&acm->throttle_lock, flags);
	if (throttled) {
		dbg("acm_rx_tasklet: throttled");
		return;
	}
	tty = tty_port_tty_get(&acm->port);
next_buffer:
	/* stage 1: drain filled buffers into the tty flip buffer */
	spin_lock_irqsave(&acm->read_lock, flags);
	if (list_empty(&acm->filled_read_bufs)) {
		spin_unlock_irqrestore(&acm->read_lock, flags);
		goto urbs;
	}
	buf = list_entry(acm->filled_read_bufs.next,
			struct acm_rb, list);
	list_del(&buf->list);
	spin_unlock_irqrestore(&acm->read_lock, flags);
	dbg("acm_rx_tasklet: procesing buf 0x%p, size = %d", buf, buf->size);
	if (tty) {
		/* re-check throttling; it may have changed since above */
		spin_lock_irqsave(&acm->throttle_lock, flags);
		throttled = acm->throttle;
		spin_unlock_irqrestore(&acm->throttle_lock, flags);
		if (!throttled) {
			tty_buffer_request_room(tty, buf->size);
			tty_insert_flip_string(tty, buf->base, buf->size);
			tty_flip_buffer_push(tty);
		} else {
			/* throttled mid-drain: requeue the buffer and stop */
			tty_kref_put(tty);
			dbg("Throttling noticed");
			spin_lock_irqsave(&acm->read_lock, flags);
			list_add(&buf->list, &acm->filled_read_bufs);
			spin_unlock_irqrestore(&acm->read_lock, flags);
			return;
		}
	}
	spin_lock_irqsave(&acm->read_lock, flags);
	list_add(&buf->list, &acm->spare_read_bufs);
	spin_unlock_irqrestore(&acm->read_lock, flags);
	goto next_buffer;
urbs:
	tty_kref_put(tty);
	/* stage 2: pair spare buffers with spare URBs and resubmit them */
	while (!list_empty(&acm->spare_read_bufs)) {
		spin_lock_irqsave(&acm->read_lock, flags);
		if (list_empty(&acm->spare_read_urbs)) {
			acm->processing = 0;
			spin_unlock_irqrestore(&acm->read_lock, flags);
			return;
		}
		rcv = list_entry(acm->spare_read_urbs.next,
				struct acm_ru, list);
		list_del(&rcv->list);
		spin_unlock_irqrestore(&acm->read_lock, flags);
		buf = list_entry(acm->spare_read_bufs.next,
				struct acm_rb, list);
		list_del(&buf->list);
		rcv->buffer = buf;
		if (acm->is_int_ep)
			usb_fill_int_urb(rcv->urb, acm->dev,
					acm->rx_endpoint,
					buf->base,
					acm->readsize,
					acm_read_bulk, rcv, acm->bInterval);
		else
			usb_fill_bulk_urb(rcv->urb, acm->dev,
					acm->rx_endpoint,
					buf->base,
					acm->readsize,
					acm_read_bulk, rcv);
		rcv->urb->transfer_dma = buf->dma;
		rcv->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
		/* This shouldn't kill the driver as unsuccessful URBs are
		returned to the free-urbs-pool and resubmitted ASAP */
		spin_lock_irqsave(&acm->read_lock, flags);
		if (acm->susp_count ||
				usb_submit_urb(rcv->urb, GFP_ATOMIC) < 0) {
			/* suspended or submit failure: keep the pair spare */
			list_add(&buf->list, &acm->spare_read_bufs);
			list_add(&rcv->list, &acm->spare_read_urbs);
			acm->processing = 0;
			spin_unlock_irqrestore(&acm->read_lock, flags);
			return;
		} else {
			spin_unlock_irqrestore(&acm->read_lock, flags);
			dbg("acm_rx_tasklet: sending urb 0x%p, rcv 0x%p, buf 0x%p", rcv->urb, rcv, buf);
		}
	}
	spin_lock_irqsave(&acm->read_lock, flags);
	acm->processing = 0;
	spin_unlock_irqrestore(&acm->read_lock, flags);
}
/* data interface wrote those outgoing bytes */

/*
 * Write urb completion: return the write buffer to the pool and either
 * wake writers (via the softint work item) or, when the device is no
 * longer ready, wake the close path waiting on drain_wait.
 */
static void acm_write_bulk(struct urb *urb)
{
	struct acm_wb *wb = urb->context;
	struct acm *acm = wb->instance;
	unsigned long flags;

	/* Log failed or short transfers (always when verbose is set). */
	if (verbose || urb->status
			|| (urb->actual_length != urb->transfer_buffer_length))
		dev_dbg(&acm->data->dev, "tx %d/%d bytes -- > %d\n",
			urb->actual_length,
			urb->transfer_buffer_length,
			urb->status);

	spin_lock_irqsave(&acm->write_lock, flags);
	acm_write_done(acm, wb);
	spin_unlock_irqrestore(&acm->write_lock, flags);
	if (ACM_READY(acm))
		schedule_work(&acm->work);
	else
		wake_up_interruptible(&acm->drain_wait);
}
/*
 * Deferred tx wakeup, scheduled from acm_write_bulk(): tell the tty
 * layer there is room to write again.
 *
 * Fix: tty_port_tty_get() returns NULL when no tty is attached to the
 * port (e.g. it was closed after the work was scheduled); the original
 * code passed that NULL straight to tty_wakeup(), dereferencing it.
 */
static void acm_softint(struct work_struct *work)
{
	struct acm *acm = container_of(work, struct acm, work);
	struct tty_struct *tty;

	dev_vdbg(&acm->data->dev, "tx work\n");
	if (!ACM_READY(acm))
		return;
	tty = tty_port_tty_get(&acm->port);
	if (!tty)
		return;
	tty_wakeup(tty);
	tty_kref_put(tty);
}
/*
 * Work item that resumes the device via autopm and starts a write
 * buffer whose submission had to be delayed while suspended
 * (acm->delayed_wb).
 */
static void acm_waker(struct work_struct *waker)
{
	struct acm *acm = container_of(waker, struct acm, waker);
	int rv;

	rv = usb_autopm_get_interface(acm->control);
	if (rv < 0) {
		dev_err(&acm->dev->dev, "Autopm failure in %s\n", __func__);
		return;
	}
	if (acm->delayed_wb) {
		acm_start_wb(acm, acm->delayed_wb);
		acm->delayed_wb = NULL;
	}
	usb_autopm_put_interface(acm->control);
}
/*
 * TTY handlers
 */

/*
 * Open: look up the device for this minor, take an autopm reference on
 * the control interface and, on the first open only, start the
 * interrupt urb, raise DTR/RTS and prime the read path.
 * Returns 0 on success, -ENODEV if the device is gone, -EIO on
 * failure to bring the hardware up.
 */
static int acm_tty_open(struct tty_struct *tty, struct file *filp)
{
	struct acm *acm;
	int rv = -ENODEV;
	int i;
	dbg("Entering acm_tty_open.");

	mutex_lock(&open_mutex);

	acm = acm_table[tty->index];
	if (!acm || !acm->dev)
		goto err_out;
	else
		rv = 0;

	set_bit(TTY_NO_WRITE_SPLIT, &tty->flags);

	tty->driver_data = acm;
	tty_port_tty_set(&acm->port, tty);

	if (usb_autopm_get_interface(acm->control) < 0)
		goto early_bail;
	else
		acm->control->needs_remote_wakeup = 1;

	mutex_lock(&acm->mutex);
	if (acm->port.count++) {
		/* Not the first open: just drop the extra pm reference. */
		usb_autopm_put_interface(acm->control);
		goto done;
	}

	acm->ctrlurb->dev = acm->dev;
	if (usb_submit_urb(acm->ctrlurb, GFP_KERNEL)) {
		dbg("usb_submit_urb(ctrl irq) failed");
		goto bail_out;
	}

	/* Raise DTR/RTS; failure is fatal only if the device claims
	 * line-coding capability. */
	if (0 > acm_set_control(acm, acm->ctrlout = ACM_CTRL_DTR | ACM_CTRL_RTS) &&
	    (acm->ctrl_caps & USB_CDC_CAP_LINE))
		goto full_bailout;

	usb_autopm_put_interface(acm->control);

	/* Hand all read urbs and buffers to the rx tasklet and start it. */
	INIT_LIST_HEAD(&acm->spare_read_urbs);
	INIT_LIST_HEAD(&acm->spare_read_bufs);
	INIT_LIST_HEAD(&acm->filled_read_bufs);
	for (i = 0; i < acm->rx_buflimit; i++)
		list_add(&(acm->ru[i].list), &acm->spare_read_urbs);
	for (i = 0; i < acm->rx_buflimit; i++)
		list_add(&(acm->rb[i].list), &acm->spare_read_bufs);

	acm->throttle = 0;

	tasklet_schedule(&acm->urb_task);
	set_bit(ASYNCB_INITIALIZED, &acm->port.flags);
	rv = tty_port_block_til_ready(&acm->port, tty, filp);
done:
	mutex_unlock(&acm->mutex);
err_out:
	mutex_unlock(&open_mutex);
	return rv;

full_bailout:
	usb_kill_urb(acm->ctrlurb);
bail_out:
	usb_autopm_put_interface(acm->control);
	acm->port.count--;
	mutex_unlock(&acm->mutex);
early_bail:
	mutex_unlock(&open_mutex);
	tty_port_tty_set(&acm->port, NULL);
	return -EIO;
}
/*
 * Final teardown of a disconnected device once the last user is gone:
 * unregister the tty device, release the interface reference, free all
 * urbs, country-code data and the acm structure itself.  Both callers
 * (acm_tty_close, acm_disconnect) hold open_mutex.
 */
static void acm_tty_unregister(struct acm *acm)
{
	int i, nr;

	nr = acm->rx_buflimit;
	tty_unregister_device(acm_tty_driver, acm->minor);
	usb_put_intf(acm->control);
	acm_table[acm->minor] = NULL;
	usb_free_urb(acm->ctrlurb);
	for (i = 0; i < ACM_NW; i++)
		usb_free_urb(acm->wb[i].urb);
	for (i = 0; i < nr; i++)
		usb_free_urb(acm->ru[i].urb);
	kfree(acm->country_codes);
	kfree(acm);
}
static int acm_tty_chars_in_buffer(struct tty_struct *tty);

/*
 * Shut the hardware side of the port down: drop the modem control
 * lines and kill every urb.  With @drain set, first give pending
 * writes up to ACM_CLOSE_TIMEOUT seconds to complete.  The acm->dev
 * check under open_mutex guards against a concurrent disconnect.
 */
static void acm_port_down(struct acm *acm, int drain)
{
	int i, nr = acm->rx_buflimit;
	mutex_lock(&open_mutex);
	if (acm->dev) {
		usb_autopm_get_interface(acm->control);
		acm_set_control(acm, acm->ctrlout = 0);
		/* try letting the last writes drain naturally */
		if (drain) {
			wait_event_interruptible_timeout(acm->drain_wait,
				(ACM_NW == acm_wb_is_avail(acm)) || !acm->dev,
				ACM_CLOSE_TIMEOUT * HZ);
		}
		usb_kill_urb(acm->ctrlurb);
		for (i = 0; i < ACM_NW; i++)
			usb_kill_urb(acm->wb[i].urb);
		for (i = 0; i < nr; i++)
			usb_kill_urb(acm->ru[i].urb);
		acm->control->needs_remote_wakeup = 0;
		usb_autopm_put_interface(acm->control);
	}
	mutex_unlock(&open_mutex);
}
/*
 * Hangup: reset the tty_port state and shut the hardware down without
 * draining pending writes.
 * NOTE(review): tty->driver_data is used without a NULL check here,
 * unlike acm_tty_close() - confirm it cannot be NULL on this path.
 */
static void acm_tty_hangup(struct tty_struct *tty)
{
	struct acm *acm = tty->driver_data;
	tty_port_hangup(&acm->port);
	acm_port_down(acm, 0);
}
/*
 * Close: run the tty_port close protocol; on the last close shut the
 * hardware down, and free everything if the device has already been
 * disconnected (acm->dev cleared by acm_disconnect()).
 */
static void acm_tty_close(struct tty_struct *tty, struct file *filp)
{
	struct acm *acm = tty->driver_data;

	/* Perform the closing process and see if we need to do the hardware
	   shutdown */
	if (!acm || tty_port_close_start(&acm->port, tty, filp) == 0)
		return;
	acm_port_down(acm, 0);
	tty_port_close_end(&acm->port, tty);
	mutex_lock(&open_mutex);
	tty_port_tty_set(&acm->port, NULL);
	if (!acm->dev)
		acm_tty_unregister(acm);
	mutex_unlock(&open_mutex);
}
/*
 * Write: copy at most one buffer's worth (acm->writesize bytes) into a
 * free write buffer and submit it.  Returns the number of bytes
 * accepted, 0 when no buffer is free, or a negative error code.
 */
static int acm_tty_write(struct tty_struct *tty,
					const unsigned char *buf, int count)
{
	struct acm *acm = tty->driver_data;
	int stat;
	unsigned long flags;
	int wbn;
	struct acm_wb *wb;

	dbg("Entering acm_tty_write to write %d bytes,", count);

	if (!ACM_READY(acm))
		return -EINVAL;
	if (!count)
		return 0;

	spin_lock_irqsave(&acm->write_lock, flags);
	wbn = acm_wb_alloc(acm);
	if (wbn < 0) {
		/* No free buffer: accept nothing; tty layer retries later. */
		spin_unlock_irqrestore(&acm->write_lock, flags);
		return 0;
	}
	wb = &acm->wb[wbn];

	count = (count > acm->writesize) ? acm->writesize : count;
	dbg("Get %d bytes...", count);
	memcpy(wb->buf, buf, count);
	wb->len = count;
	spin_unlock_irqrestore(&acm->write_lock, flags);

	stat = acm_write_start(acm, wbn);
	if (stat < 0)
		return stat;
	return count;
}
/*
 * Report writable space: a full buffer's worth when at least one write
 * buffer is free, otherwise none.
 */
static int acm_tty_write_room(struct tty_struct *tty)
{
	struct acm *acm = tty->driver_data;

	if (!ACM_READY(acm))
		return -EINVAL;
	/*
	 * Do not let the line discipline to know that we have a reserve,
	 * or it might get too enthusiastic.
	 */
	if (acm_wb_is_avail(acm))
		return acm->writesize;
	return 0;
}
/*
 * Report how many bytes are still queued for transmission.
 * This is inaccurate (overcounts): every in-flight write buffer is
 * assumed to be completely full.
 */
static int acm_tty_chars_in_buffer(struct tty_struct *tty)
{
	struct acm *acm = tty->driver_data;
	int busy;

	if (!ACM_READY(acm))
		return 0;
	busy = ACM_NW - acm_wb_is_avail(acm);
	return busy * acm->writesize;
}
/*
 * Throttle: set the flag checked by acm_rx_tasklet(), which then stops
 * pushing data to the tty layer.
 */
static void acm_tty_throttle(struct tty_struct *tty)
{
	struct acm *acm = tty->driver_data;
	if (!ACM_READY(acm))
		return;
	spin_lock_bh(&acm->throttle_lock);
	acm->throttle = 1;
	spin_unlock_bh(&acm->throttle_lock);
}
/*
 * Unthrottle: clear the flag and reschedule the rx tasklet so queued
 * read buffers get delivered again.
 */
static void acm_tty_unthrottle(struct tty_struct *tty)
{
	struct acm *acm = tty->driver_data;
	if (!ACM_READY(acm))
		return;
	spin_lock_bh(&acm->throttle_lock);
	acm->throttle = 0;
	spin_unlock_bh(&acm->throttle_lock);
	tasklet_schedule(&acm->urb_task);
}
/*
 * Break control: a non-zero @state requests break-on for an indefinite
 * duration (0xffff per the CDC SendBreak request), zero clears it.
 */
static int acm_tty_break_ctl(struct tty_struct *tty, int state)
{
	struct acm *acm = tty->driver_data;
	int duration = 0;
	int retval;

	if (!ACM_READY(acm))
		return -EINVAL;
	if (state)
		duration = 0xffff;
	retval = acm_send_break(acm, duration);
	if (retval < 0)
		dbg("send break failed");
	return retval;
}
/*
 * Report modem control/status lines.  DTR/RTS come from the cached
 * output state, DSR/RI/DCD from the cached input state; CTS is not
 * tracked and is always reported asserted.
 */
static int acm_tty_tiocmget(struct tty_struct *tty, struct file *file)
{
	struct acm *acm = tty->driver_data;
	unsigned int result;

	if (!ACM_READY(acm))
		return -EINVAL;

	result = TIOCM_CTS;
	if (acm->ctrlout & ACM_CTRL_DTR)
		result |= TIOCM_DTR;
	if (acm->ctrlout & ACM_CTRL_RTS)
		result |= TIOCM_RTS;
	if (acm->ctrlin & ACM_CTRL_DSR)
		result |= TIOCM_DSR;
	if (acm->ctrlin & ACM_CTRL_RI)
		result |= TIOCM_RI;
	if (acm->ctrlin & ACM_CTRL_DCD)
		result |= TIOCM_CD;
	return result;
}
/*
 * Change DTR/RTS.  TIOCM_* bits are translated to the CDC ACM control
 * bits; the request is sent to the device only when the resulting
 * state actually differs from the cached one.
 */
static int acm_tty_tiocmset(struct tty_struct *tty, struct file *file,
			    unsigned int set, unsigned int clear)
{
	struct acm *acm = tty->driver_data;
	unsigned int want = 0;
	unsigned int drop = 0;
	unsigned int newctrl;

	if (!ACM_READY(acm))
		return -EINVAL;

	if (set & TIOCM_DTR)
		want |= ACM_CTRL_DTR;
	if (set & TIOCM_RTS)
		want |= ACM_CTRL_RTS;
	if (clear & TIOCM_DTR)
		drop |= ACM_CTRL_DTR;
	if (clear & TIOCM_RTS)
		drop |= ACM_CTRL_RTS;

	newctrl = (acm->ctrlout & ~drop) | want;
	if (acm->ctrlout == newctrl)
		return 0;
	return acm_set_control(acm, acm->ctrlout = newctrl);
}
/*
 * No driver-private ioctls: -ENOIOCTLCMD tells the tty core to handle
 * the command itself.
 */
static int acm_tty_ioctl(struct tty_struct *tty, struct file *file,
					unsigned int cmd, unsigned long arg)
{
	struct acm *acm = tty->driver_data;

	if (!ACM_READY(acm))
		return -EINVAL;

	return -ENOIOCTLCMD;
}
/*
 * Baud rates matching the termios Bxxx speed codes.
 * NOTE(review): not referenced in the visible code (set_termios uses
 * tty_get_baud_rate() instead) - confirm before removing.
 */
static const __u32 acm_tty_speed[] = {
	0, 50, 75, 110, 134, 150, 200, 300, 600,
	1200, 1800, 2400, 4800, 9600, 19200, 38400,
	57600, 115200, 230400, 460800, 500000, 576000,
	921600, 1000000, 1152000, 1500000, 2000000,
	2500000, 3000000, 3500000, 4000000
};

/* Data bits for the four CSIZE codes (CS5..CS8), indexed by
 * (c_cflag & CSIZE) >> 4 in acm_tty_set_termios(). */
static const __u8 acm_tty_size[] = {
	5, 6, 7, 8
};
/*
 * Push termios changes to the device as a CDC SetLineCoding request.
 * A requested rate of 0 (B0) keeps the previous rate but drops DTR,
 * following the usual UNIX convention; the request is only sent when
 * the line coding actually changed.
 */
static void acm_tty_set_termios(struct tty_struct *tty,
						struct ktermios *termios_old)
{
	struct acm *acm = tty->driver_data;
	struct ktermios *termios = tty->termios;
	struct usb_cdc_line_coding newline;
	int newctrl = acm->ctrlout;

	if (!ACM_READY(acm))
		return;

	newline.dwDTERate = cpu_to_le32(tty_get_baud_rate(tty));
	newline.bCharFormat = termios->c_cflag & CSTOPB ? 2 : 0;
	/* CDC parity codes: 0 none, 1 odd, 2 even, 3 mark, 4 space. */
	newline.bParityType = termios->c_cflag & PARENB ?
				(termios->c_cflag & PARODD ? 1 : 2) +
				(termios->c_cflag & CMSPAR ? 2 : 0) : 0;
	/* assumes CSIZE occupies bits 4-5 of c_cflag - TODO confirm per arch */
	newline.bDataBits = acm_tty_size[(termios->c_cflag & CSIZE) >> 4];
	/* FIXME: Needs to clear unsupported bits in the termios */
	acm->clocal = ((termios->c_cflag & CLOCAL) != 0);

	if (!newline.dwDTERate) {
		newline.dwDTERate = acm->line.dwDTERate;
		newctrl &= ~ACM_CTRL_DTR;
	} else
		newctrl |= ACM_CTRL_DTR;

	if (newctrl != acm->ctrlout)
		acm_set_control(acm, acm->ctrlout = newctrl);

	if (memcmp(&acm->line, &newline, sizeof newline)) {
		memcpy(&acm->line, &newline, sizeof newline);
		dbg("set line: %d %d %d %d", le32_to_cpu(newline.dwDTERate),
			newline.bCharFormat, newline.bParityType,
			newline.bDataBits);
		acm_set_line(acm, &acm->line);
	}
}
/*
 * USB probe and disconnect routines.
 */

/* Little helpers: write/read buffers free */

/* Free the ACM_NW coherent write buffers allocated by
 * acm_write_buffers_alloc(). */
static void acm_write_buffers_free(struct acm *acm)
{
	int i;
	struct acm_wb *wb;
	struct usb_device *usb_dev = interface_to_usbdev(acm->control);

	for (wb = &acm->wb[0], i = 0; i < ACM_NW; i++, wb++)
		usb_buffer_free(usb_dev, acm->writesize, wb->buf, wb->dmah);
}
/* Free the rx_buflimit coherent read buffers allocated in acm_probe(). */
static void acm_read_buffers_free(struct acm *acm)
{
	struct usb_device *usb_dev = interface_to_usbdev(acm->control);
	int i, n = acm->rx_buflimit;

	for (i = 0; i < n; i++)
		usb_buffer_free(usb_dev, acm->readsize,
			  acm->rb[i].base, acm->rb[i].dma);
}
/* Little helper: write buffers allocate */

/*
 * Allocate the ACM_NW coherent write buffers of acm->writesize bytes
 * each.  On failure, everything allocated so far is freed again and
 * -ENOMEM is returned.
 */
static int acm_write_buffers_alloc(struct acm *acm)
{
	int i;
	struct acm_wb *wb;

	for (wb = &acm->wb[0], i = 0; i < ACM_NW; i++, wb++) {
		wb->buf = usb_buffer_alloc(acm->dev, acm->writesize, GFP_KERNEL,
		    &wb->dmah);
		if (!wb->buf) {
			/* unwind the partial allocation */
			while (i != 0) {
				--i;
				--wb;
				usb_buffer_free(acm->dev, acm->writesize,
				    wb->buf, wb->dmah);
			}
			return -ENOMEM;
		}
	}
	return 0;
}
/*
 * Probe: parse the CDC class-specific descriptors to locate the
 * control and data interfaces and their endpoints, allocate all
 * buffers and urbs, and register the tty device.  Contains a number
 * of workarounds for devices that lack, misplace or combine the
 * standard descriptors (see the quirk table in acm_ids).
 */
static int acm_probe(struct usb_interface *intf,
		     const struct usb_device_id *id)
{
	struct usb_cdc_union_desc *union_header = NULL;
	struct usb_cdc_country_functional_desc *cfd = NULL;
	unsigned char *buffer = intf->altsetting->extra;
	int buflen = intf->altsetting->extralen;
	struct usb_interface *control_interface;
	struct usb_interface *data_interface;
	struct usb_endpoint_descriptor *epctrl = NULL;
	struct usb_endpoint_descriptor *epread = NULL;
	struct usb_endpoint_descriptor *epwrite = NULL;
	struct usb_device *usb_dev = interface_to_usbdev(intf);
	struct acm *acm;
	int minor;
	int ctrlsize, readsize;
	u8 *buf;
	u8 ac_management_function = 0;
	u8 call_management_function = 0;
	int call_interface_num = -1;
	int data_interface_num;
	unsigned long quirks;
	int num_rx_buf;
	int i;
	int combined_interfaces = 0;

	/* normal quirks */
	quirks = (unsigned long)id->driver_info;
	num_rx_buf = (quirks == SINGLE_RX_URB) ? 1 : ACM_NR;

	/* handle quirks deadly to normal probing*/
	if (quirks == NO_UNION_NORMAL) {
		/* assume interface 0 = control, interface 1 = data */
		data_interface = usb_ifnum_to_if(usb_dev, 1);
		control_interface = usb_ifnum_to_if(usb_dev, 0);
		goto skip_normal_probe;
	}

	/* normal probing*/
	if (!buffer) {
		dev_err(&intf->dev, "Weird descriptor references\n");
		return -EINVAL;
	}

	/* Some devices attach the extra descriptors to the endpoint
	 * instead of the interface. */
	if (!buflen) {
		if (intf->cur_altsetting->endpoint->extralen &&
				intf->cur_altsetting->endpoint->extra) {
			dev_dbg(&intf->dev,
				"Seeking extra descriptors on endpoint\n");
			buflen = intf->cur_altsetting->endpoint->extralen;
			buffer = intf->cur_altsetting->endpoint->extra;
		} else {
			dev_err(&intf->dev,
				"Zero length descriptor references\n");
			return -EINVAL;
		}
	}

	/* Walk the class-specific descriptors; buffer[0] is the length,
	 * buffer[1] the descriptor type, buffer[2] the subtype. */
	while (buflen > 0) {
		if (buffer[1] != USB_DT_CS_INTERFACE) {
			dev_err(&intf->dev, "skipping garbage\n");
			goto next_desc;
		}

		switch (buffer[2]) {
		case USB_CDC_UNION_TYPE: /* we've found it */
			if (union_header) {
				dev_err(&intf->dev, "More than one "
					"union descriptor, skipping ...\n");
				goto next_desc;
			}
			union_header = (struct usb_cdc_union_desc *)buffer;
			break;
		case USB_CDC_COUNTRY_TYPE: /* export through sysfs*/
			cfd = (struct usb_cdc_country_functional_desc *)buffer;
			break;
		case USB_CDC_HEADER_TYPE: /* maybe check version */
			break; /* for now we ignore it */
		case USB_CDC_ACM_TYPE:
			ac_management_function = buffer[3];
			break;
		case USB_CDC_CALL_MANAGEMENT_TYPE:
			call_management_function = buffer[3];
			call_interface_num = buffer[4];
			if ((call_management_function & 3) != 3)
				dev_err(&intf->dev, "This device cannot do calls on its own. It is not a modem.\n");
			break;
		default:
			/* there are LOTS more CDC descriptors that
			 * could legitimately be found here.
			 */
			dev_dbg(&intf->dev, "Ignoring descriptor: "
					"type %02x, length %d\n",
					buffer[2], buffer[0]);
			break;
		}
next_desc:
		buflen -= buffer[0];
		buffer += buffer[0];
	}

	/* Without a union descriptor, fall back to the call management
	 * descriptor, or probe for a single combined interface. */
	if (!union_header) {
		if (call_interface_num > 0) {
			dev_dbg(&intf->dev, "No union descriptor, using call management descriptor\n");
			data_interface = usb_ifnum_to_if(usb_dev, (data_interface_num = call_interface_num));
			control_interface = intf;
		} else {
			if (intf->cur_altsetting->desc.bNumEndpoints != 3) {
				dev_dbg(&intf->dev,"No union descriptor, giving up\n");
				return -ENODEV;
			} else {
				dev_warn(&intf->dev,"No union descriptor, testing for castrated device\n");
				combined_interfaces = 1;
				control_interface = data_interface = intf;
				goto look_for_collapsed_interface;
			}
		}
	} else {
		control_interface = usb_ifnum_to_if(usb_dev, union_header->bMasterInterface0);
		data_interface = usb_ifnum_to_if(usb_dev, (data_interface_num = union_header->bSlaveInterface0));
		if (!control_interface || !data_interface) {
			dev_dbg(&intf->dev, "no interfaces\n");
			return -ENODEV;
		}
	}

	if (data_interface_num != call_interface_num)
		dev_dbg(&intf->dev, "Separate call control interface. That is not fully supported.\n");

	if (control_interface == data_interface) {
		/* some broken devices designed for windows work this way */
		dev_warn(&intf->dev,"Control and data interfaces are not separated!\n");
		combined_interfaces = 1;
		/* a popular other OS doesn't use it */
		quirks |= NO_CAP_LINE;
		if (data_interface->cur_altsetting->desc.bNumEndpoints != 3) {
			dev_err(&intf->dev, "This needs exactly 3 endpoints\n");
			return -EINVAL;
		}
look_for_collapsed_interface:
		/* Identify the three endpoints by type and direction. */
		for (i = 0; i < 3; i++) {
			struct usb_endpoint_descriptor *ep;
			ep = &data_interface->cur_altsetting->endpoint[i].desc;

			if (usb_endpoint_is_int_in(ep))
				epctrl = ep;
			else if (usb_endpoint_is_bulk_out(ep))
				epwrite = ep;
			else if (usb_endpoint_is_bulk_in(ep))
				epread = ep;
			else
				return -EINVAL;
		}
		if (!epctrl || !epread || !epwrite)
			return -ENODEV;
		else
			goto made_compressed_probe;
	}

skip_normal_probe:

	/*workaround for switched interfaces */
	if (data_interface->cur_altsetting->desc.bInterfaceClass
						!= CDC_DATA_INTERFACE_TYPE) {
		if (control_interface->cur_altsetting->desc.bInterfaceClass
						== CDC_DATA_INTERFACE_TYPE) {
			struct usb_interface *t;
			dev_dbg(&intf->dev,
				"Your device has switched interfaces.\n");
			t = control_interface;
			control_interface = data_interface;
			data_interface = t;
		} else {
			return -EINVAL;
		}
	}

	/* Accept probe requests only for the control interface */
	if (!combined_interfaces && intf != control_interface)
		return -ENODEV;

	if (!combined_interfaces && usb_interface_claimed(data_interface)) {
		/* valid in this context */
		dev_dbg(&intf->dev, "The data interface isn't available\n");
		return -EBUSY;
	}


	if (data_interface->cur_altsetting->desc.bNumEndpoints < 2)
		return -EINVAL;

	epctrl = &control_interface->cur_altsetting->endpoint[0].desc;
	epread = &data_interface->cur_altsetting->endpoint[0].desc;
	epwrite = &data_interface->cur_altsetting->endpoint[1].desc;


	/* workaround for switched endpoints */
	if (!usb_endpoint_dir_in(epread)) {
		/* descriptors are swapped */
		struct usb_endpoint_descriptor *t;
		dev_dbg(&intf->dev,
			"The data interface has switched endpoints\n");
		t = epread;
		epread = epwrite;
		epwrite = t;
	}
made_compressed_probe:
	dbg("interfaces are valid");

	/* Find a free minor (acm_table slot). */
	for (minor = 0; minor < ACM_TTY_MINORS && acm_table[minor]; minor++);

	if (minor == ACM_TTY_MINORS) {
		dev_err(&intf->dev, "no more free acm devices\n");
		return -ENODEV;
	}

	acm = kzalloc(sizeof(struct acm), GFP_KERNEL);
	if (acm == NULL) {
		dev_dbg(&intf->dev, "out of memory (acm kzalloc)\n");
		goto alloc_fail;
	}

	ctrlsize = le16_to_cpu(epctrl->wMaxPacketSize);
	readsize = le16_to_cpu(epread->wMaxPacketSize) *
				(quirks == SINGLE_RX_URB ? 1 : 2);
	acm->combined_interfaces = combined_interfaces;
	acm->writesize = le16_to_cpu(epwrite->wMaxPacketSize) * 20;
	acm->control = control_interface;
	acm->data = data_interface;
	acm->minor = minor;
	acm->dev = usb_dev;
	acm->ctrl_caps = ac_management_function;
	if (quirks & NO_CAP_LINE)
		acm->ctrl_caps &= ~USB_CDC_CAP_LINE;
	acm->ctrlsize = ctrlsize;
	acm->readsize = readsize;
	acm->rx_buflimit = num_rx_buf;
	acm->urb_task.func = acm_rx_tasklet;
	acm->urb_task.data = (unsigned long) acm;
	INIT_WORK(&acm->work, acm_softint);
	INIT_WORK(&acm->waker, acm_waker);
	init_waitqueue_head(&acm->drain_wait);
	spin_lock_init(&acm->throttle_lock);
	spin_lock_init(&acm->write_lock);
	spin_lock_init(&acm->read_lock);
	mutex_init(&acm->mutex);
	acm->rx_endpoint = usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress);
	acm->is_int_ep = usb_endpoint_xfer_int(epread);
	if (acm->is_int_ep)
		acm->bInterval = epread->bInterval;
	tty_port_init(&acm->port);
	acm->port.ops = &acm_port_ops;

	/* Allocate control buffer, write buffers and all urbs. */
	buf = usb_buffer_alloc(usb_dev, ctrlsize, GFP_KERNEL, &acm->ctrl_dma);
	if (!buf) {
		dev_dbg(&intf->dev, "out of memory (ctrl buffer alloc)\n");
		goto alloc_fail2;
	}
	acm->ctrl_buffer = buf;

	if (acm_write_buffers_alloc(acm) < 0) {
		dev_dbg(&intf->dev, "out of memory (write buffer alloc)\n");
		goto alloc_fail4;
	}

	acm->ctrlurb = usb_alloc_urb(0, GFP_KERNEL);
	if (!acm->ctrlurb) {
		dev_dbg(&intf->dev, "out of memory (ctrlurb kmalloc)\n");
		goto alloc_fail5;
	}
	for (i = 0; i < num_rx_buf; i++) {
		struct acm_ru *rcv = &(acm->ru[i]);

		rcv->urb = usb_alloc_urb(0, GFP_KERNEL);
		if (rcv->urb == NULL) {
			dev_dbg(&intf->dev,
				"out of memory (read urbs usb_alloc_urb)\n");
			goto alloc_fail7;
		}

		rcv->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
		rcv->instance = acm;
	}
	for (i = 0; i < num_rx_buf; i++) {
		struct acm_rb *rb = &(acm->rb[i]);

		rb->base = usb_buffer_alloc(acm->dev, readsize,
				GFP_KERNEL, &rb->dma);
		if (!rb->base) {
			dev_dbg(&intf->dev,
				"out of memory (read bufs usb_buffer_alloc)\n");
			goto alloc_fail7;
		}
	}
	for (i = 0; i < ACM_NW; i++) {
		struct acm_wb *snd = &(acm->wb[i]);

		snd->urb = usb_alloc_urb(0, GFP_KERNEL);
		if (snd->urb == NULL) {
			dev_dbg(&intf->dev,
				"out of memory (write urbs usb_alloc_urb)");
			goto alloc_fail7;
		}

		if (usb_endpoint_xfer_int(epwrite))
			usb_fill_int_urb(snd->urb, usb_dev,
				usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress),
				NULL, acm->writesize, acm_write_bulk, snd, epwrite->bInterval);
		else
			usb_fill_bulk_urb(snd->urb, usb_dev,
				usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress),
				NULL, acm->writesize, acm_write_bulk, snd);
		snd->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
		snd->instance = acm;
	}

	usb_set_intfdata(intf, acm);

	i = device_create_file(&intf->dev, &dev_attr_bmCapabilities);
	if (i < 0)
		goto alloc_fail8;

	if (cfd) { /* export the country data */
		acm->country_codes = kmalloc(cfd->bLength - 4, GFP_KERNEL);
		if (!acm->country_codes)
			goto skip_countries;
		acm->country_code_size = cfd->bLength - 4;
		memcpy(acm->country_codes, (u8 *)&cfd->wCountyCode0,
							cfd->bLength - 4);
		acm->country_rel_date = cfd->iCountryCodeRelDate;

		i = device_create_file(&intf->dev, &dev_attr_wCountryCodes);
		if (i < 0) {
			kfree(acm->country_codes);
			goto skip_countries;
		}

		i = device_create_file(&intf->dev,
						&dev_attr_iCountryCodeRelDate);
		if (i < 0) {
			kfree(acm->country_codes);
			goto skip_countries;
		}
	}

skip_countries:
	usb_fill_int_urb(acm->ctrlurb, usb_dev,
			 usb_rcvintpipe(usb_dev, epctrl->bEndpointAddress),
			 acm->ctrl_buffer, ctrlsize, acm_ctrl_irq, acm,
			 /* works around buggy devices */
			 epctrl->bInterval ? epctrl->bInterval : 0xff);
	acm->ctrlurb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	acm->ctrlurb->transfer_dma = acm->ctrl_dma;

	dev_info(&intf->dev, "ttyACM%d: USB ACM device\n", minor);

	acm_set_control(acm, acm->ctrlout);

	/* Default line coding: 9600 8N1. */
	acm->line.dwDTERate = cpu_to_le32(9600);
	acm->line.bDataBits = 8;
	acm_set_line(acm, &acm->line);

	usb_driver_claim_interface(&acm_driver, data_interface, acm);
	usb_set_intfdata(data_interface, acm);

	usb_get_intf(control_interface);
	tty_register_device(acm_tty_driver, minor, &control_interface->dev);

	acm_table[minor] = acm;

	return 0;

	/* Error unwinding in reverse order of allocation. */
alloc_fail8:
	for (i = 0; i < ACM_NW; i++)
		usb_free_urb(acm->wb[i].urb);
alloc_fail7:
	acm_read_buffers_free(acm);
	for (i = 0; i < num_rx_buf; i++)
		usb_free_urb(acm->ru[i].urb);
	usb_free_urb(acm->ctrlurb);
alloc_fail5:
	acm_write_buffers_free(acm);
alloc_fail4:
	usb_buffer_free(usb_dev, ctrlsize, acm->ctrl_buffer, acm->ctrl_dma);
alloc_fail2:
	kfree(acm);
alloc_fail:
	return -ENOMEM;
}
/*
 * Kill every in-flight urb and flush the deferred work items.  The rx
 * tasklet is disabled around the killing so it cannot resubmit what
 * was just killed.
 */
static void stop_data_traffic(struct acm *acm)
{
	int i;

	dbg("Entering stop_data_traffic");

	tasklet_disable(&acm->urb_task);

	usb_kill_urb(acm->ctrlurb);
	for (i = 0; i < ACM_NW; i++)
		usb_kill_urb(acm->wb[i].urb);
	for (i = 0; i < acm->rx_buflimit; i++)
		usb_kill_urb(acm->ru[i].urb);

	tasklet_enable(&acm->urb_task);

	cancel_work_sync(&acm->work);
	cancel_work_sync(&acm->waker);
}
/*
 * Disconnect: remove sysfs attributes, stop traffic and free the DMA
 * buffers.  Clearing acm->dev marks the device as gone; the acm
 * structure itself is freed here only if the port is not open,
 * otherwise the final close does it.
 */
static void acm_disconnect(struct usb_interface *intf)
{
	struct acm *acm = usb_get_intfdata(intf);
	struct usb_device *usb_dev = interface_to_usbdev(intf);
	struct tty_struct *tty;

	/* sibling interface is already cleaning up */
	if (!acm)
		return;

	mutex_lock(&open_mutex);
	if (acm->country_codes) {
		device_remove_file(&acm->control->dev,
				&dev_attr_wCountryCodes);
		device_remove_file(&acm->control->dev,
				&dev_attr_iCountryCodeRelDate);
	}
	device_remove_file(&acm->control->dev, &dev_attr_bmCapabilities);
	acm->dev = NULL;
	usb_set_intfdata(acm->control, NULL);
	usb_set_intfdata(acm->data, NULL);

	stop_data_traffic(acm);

	acm_write_buffers_free(acm);
	usb_buffer_free(usb_dev, acm->ctrlsize, acm->ctrl_buffer,
			acm->ctrl_dma);
	acm_read_buffers_free(acm);

	/* Release the sibling interface (unless it is the same one). */
	if (!acm->combined_interfaces)
		usb_driver_release_interface(&acm_driver, intf == acm->control ?
					acm->data : acm->control);

	if (acm->port.count == 0) {
		acm_tty_unregister(acm);
		mutex_unlock(&open_mutex);
		return;
	}

	mutex_unlock(&open_mutex);
	tty = tty_port_tty_get(&acm->port);
	if (tty) {
		tty_hangup(tty);
		tty_kref_put(tty);
	}
}
#ifdef CONFIG_PM
/*
 * Suspend: refuse a runtime (auto) suspend while rx or tx is in
 * flight.  susp_count counts suspends of the two interfaces; only the
 * first one actually stops the data traffic.
 */
static int acm_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct acm *acm = usb_get_intfdata(intf);
	int cnt;

	if (message.event & PM_EVENT_AUTO) {
		int b;

		/* Busy when urbs are being processed or transmitted. */
		spin_lock_irq(&acm->read_lock);
		spin_lock(&acm->write_lock);
		b = acm->processing + acm->transmitting;
		spin_unlock(&acm->write_lock);
		spin_unlock_irq(&acm->read_lock);
		if (b)
			return -EBUSY;
	}

	spin_lock_irq(&acm->read_lock);
	spin_lock(&acm->write_lock);
	cnt = acm->susp_count++;
	spin_unlock(&acm->write_lock);
	spin_unlock_irq(&acm->read_lock);

	if (cnt)
		return 0;
	/*
	we treat opened interfaces differently,
	we must guard against open
	*/
	mutex_lock(&acm->mutex);

	if (acm->port.count)
		stop_data_traffic(acm);

	mutex_unlock(&acm->mutex);
	return 0;
}
/*
 * Resume: drop one suspend count; when the last one is released and
 * the port is open, restart the interrupt urb and the rx tasklet.
 */
static int acm_resume(struct usb_interface *intf)
{
	struct acm *acm = usb_get_intfdata(intf);
	int rv = 0;
	int cnt;

	spin_lock_irq(&acm->read_lock);
	acm->susp_count -= 1;
	cnt = acm->susp_count;
	spin_unlock_irq(&acm->read_lock);

	if (cnt)
		return 0;

	mutex_lock(&acm->mutex);
	if (acm->port.count) {
		rv = usb_submit_urb(acm->ctrlurb, GFP_NOIO);
		if (rv < 0)
			goto err_out;

		tasklet_schedule(&acm->urb_task);
	}

err_out:
	mutex_unlock(&acm->mutex);
	return rv;
}

#endif /* CONFIG_PM */
/*
 * USB driver structure.
 */

/* Match table: quirky/broken devices first, then any interface with a
 * recognised ACM AT-command protocol. */
static struct usb_device_id acm_ids[] = {
	/* quirky and broken devices */
	{ USB_DEVICE(0x0870, 0x0001), /* Metricom GS Modem */
	.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
	},
	{ USB_DEVICE(0x0e8d, 0x0003), /* FIREFLY, MediaTek Inc; [email protected] */
	.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
	},
	{ USB_DEVICE(0x0e8d, 0x3329), /* MediaTek Inc GPS */
	.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
	},
	{ USB_DEVICE(0x0482, 0x0203), /* KYOCERA AH-K3001V */
	.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
	},
	{ USB_DEVICE(0x079b, 0x000f), /* BT On-Air USB MODEM */
	.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
	},
	{ USB_DEVICE(0x0ace, 0x1602), /* ZyDAS 56K USB MODEM */
	.driver_info = SINGLE_RX_URB,
	},
	{ USB_DEVICE(0x0ace, 0x1608), /* ZyDAS 56K USB MODEM */
	.driver_info = SINGLE_RX_URB, /* firmware bug */
	},
	{ USB_DEVICE(0x0ace, 0x1611), /* ZyDAS 56K USB MODEM - new version */
	.driver_info = SINGLE_RX_URB, /* firmware bug */
	},
	{ USB_DEVICE(0x22b8, 0x7000), /* Motorola Q Phone */
	.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
	},
	{ USB_DEVICE(0x0803, 0x3095), /* Zoom Telephonics Model 3095F USB MODEM */
	.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
	},
	{ USB_DEVICE(0x0572, 0x1321), /* Conexant USB MODEM CX93010 */
	.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
	},
	{ USB_DEVICE(0x0572, 0x1324), /* Conexant USB MODEM RD02-D400 */
	.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
	},
	{ USB_DEVICE(0x0572, 0x1328), /* Shiro / Aztech USB MODEM UM-3100 */
	.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
	},
	{ USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */
	},
	{ USB_DEVICE(0x0572, 0x1329), /* Hummingbird huc56s (Conexant) */
	.driver_info = NO_UNION_NORMAL, /* union descriptor misplaced on
					   data interface instead of
					   communications interface.
					   Maybe we should define a new
					   quirk for this. */
	},
	{ USB_DEVICE(0x1bbb, 0x0003), /* Alcatel OT-I650 */
	.driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
	},

	/* control interfaces with various AT-command sets */
	{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
		USB_CDC_ACM_PROTO_AT_V25TER) },
	{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
		USB_CDC_ACM_PROTO_AT_PCCA101) },
	{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
		USB_CDC_ACM_PROTO_AT_PCCA101_WAKE) },
	{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
		USB_CDC_ACM_PROTO_AT_GSM) },
	{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
		USB_CDC_ACM_PROTO_AT_3G) },
	{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
		USB_CDC_ACM_PROTO_AT_CDMA) },

	/* NOTE: COMM/ACM/0xff is likely MSFT RNDIS ... NOT a modem!! */
	{ }
};

MODULE_DEVICE_TABLE(usb, acm_ids);
/* USB driver glue; suspend/resume and autosuspend only with CONFIG_PM. */
static struct usb_driver acm_driver = {
	.name =		"cdc_acm",
	.probe =	acm_probe,
	.disconnect =	acm_disconnect,
#ifdef CONFIG_PM
	.suspend =	acm_suspend,
	.resume =	acm_resume,
#endif
	.id_table =	acm_ids,
#ifdef CONFIG_PM
	.supports_autosuspend = 1,
#endif
};
/*
 * TTY driver structures.
 */

/* tty callbacks; implementations are above. */
static const struct tty_operations acm_ops = {
	.open =			acm_tty_open,
	.close =		acm_tty_close,
	.hangup =		acm_tty_hangup,
	.write =		acm_tty_write,
	.write_room =		acm_tty_write_room,
	.ioctl =		acm_tty_ioctl,
	.throttle =		acm_tty_throttle,
	.unthrottle =		acm_tty_unthrottle,
	.chars_in_buffer =	acm_tty_chars_in_buffer,
	.break_ctl =		acm_tty_break_ctl,
	.set_termios =		acm_tty_set_termios,
	.tiocmget =		acm_tty_tiocmget,
	.tiocmset =		acm_tty_tiocmset,
};
/*
 * Init / exit.
 */

/*
 * Module init: allocate and register the shared tty driver, then
 * register the USB driver; unwind in reverse order on failure.
 *
 * Fix: the run of tty-driver field assignments used the comma
 * operator instead of statement-terminating semicolons.  It compiled
 * to the same code but is fragile (a dropped trailing semicolon or an
 * inserted statement silently changes meaning); use semicolons.
 */
static int __init acm_init(void)
{
	int retval;

	acm_tty_driver = alloc_tty_driver(ACM_TTY_MINORS);
	if (!acm_tty_driver)
		return -ENOMEM;
	acm_tty_driver->owner = THIS_MODULE;
	acm_tty_driver->driver_name = "acm";
	acm_tty_driver->name = "ttyACM";
	acm_tty_driver->major = ACM_TTY_MAJOR;
	acm_tty_driver->minor_start = 0;
	acm_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
	acm_tty_driver->subtype = SERIAL_TYPE_NORMAL;
	acm_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
	/* Default line settings: 9600 8N1, receiver on, modem-control. */
	acm_tty_driver->init_termios = tty_std_termios;
	acm_tty_driver->init_termios.c_cflag = B9600 | CS8 | CREAD |
								HUPCL | CLOCAL;
	tty_set_operations(acm_tty_driver, &acm_ops);

	retval = tty_register_driver(acm_tty_driver);
	if (retval) {
		put_tty_driver(acm_tty_driver);
		return retval;
	}

	retval = usb_register(&acm_driver);
	if (retval) {
		tty_unregister_driver(acm_tty_driver);
		put_tty_driver(acm_tty_driver);
		return retval;
	}

	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
	       DRIVER_DESC "\n");

	return 0;
}
/* Module unload: unregister in reverse order of acm_init(). */
static void __exit acm_exit(void)
{
	usb_deregister(&acm_driver);
	tty_unregister_driver(acm_tty_driver);
	put_tty_driver(acm_tty_driver);
}
module_init(acm_init);
module_exit(acm_exit);

/* Module metadata. */
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV_MAJOR(ACM_TTY_MAJOR);
| gpl-2.0 |
agat63/N861_ZTE_kernel | drivers/hwmon/akm8962.c | 33334 | /* drivers/misc/akm8962.c - akm8962 compass driver
*
* Copyright (C) 2007-2008 HTC Corporation.
* Author: Hou-Kun Chen <[email protected]>
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/miscdevice.h>
#include <linux/gpio.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/input.h>
#include <linux/workqueue.h>
#include <linux/freezer.h>
#include <linux/akm8962_new.h>
/* Compile-time debug switches. */
#define AKM8962_DEBUG_IF	0
#define AKM8962_DEBUG_DATA	0

/* AKM_DATA(): log i2c register traffic; compiles away entirely unless
 * AKM8962_DEBUG_DATA is set. */
#if AKM8962_DEBUG_DATA
#define AKM_DATA(dev, ...) \
	dev_dbg((dev), ##__VA_ARGS__)
#else
#define AKM_DATA(dev, format, ...)
#endif

/* Number of accelerometer values cached in akm8962_data.accel_data. */
#define AKM_ACCEL_ITEMS 3
/* Wait timeout in millisecond */
#define AKM8962_DRDY_TIMEOUT	100
/* Per-device state for the AK8962 compass (single instance: s_akm). */
struct akm8962_data {
	struct i2c_client	*i2c;		/* bus connection */
	struct input_dev	*input;		/* input device for event reporting */
	struct device		*class_dev;
	struct class		*compass;
	struct delayed_work	work;
	wait_queue_head_t	drdy_wq;	/* presumably woken when drdy is set - confirm */
	wait_queue_head_t	open_wq;

	struct mutex sensor_mutex;		/* protects sense_data */
	int8_t sense_data[SENSOR_DATA_SIZE];
	struct mutex accel_mutex;		/* protects accel_data */
	int16_t accel_data[AKM_ACCEL_ITEMS];

	struct mutex val_mutex;			/* protects enable_flag and delay */
	uint32_t enable_flag;			/* NOTE(review): presumably one bit per sensor - verify */
	int64_t delay[AKM_NUM_SENSORS];

	atomic_t active;
	atomic_t is_busy;			/* set while an operation is in flight */
	atomic_t drdy;				/* measurement data ready flag */
	atomic_t suspend;
	char layout;				/* chip mounting orientation code */
	int irq;
};

/* Single global device instance. */
static struct akm8962_data *s_akm;
/***** I2C I/O function ***********************************************/

/*
 * Read @length bytes from the device.  On entry rxData[0] holds the
 * register address to start reading from; the buffer is overwritten
 * with the data read.  Returns 0 on success, -EIO on transfer failure.
 */
static int akm8962_i2c_rxdata(
	struct i2c_client *i2c,
	unsigned char *rxData,
	int length)
{
	struct i2c_msg msgs[] = {
		{
			.addr = i2c->addr,
			.flags = 0,
			.len = 1,
			.buf = rxData,
		},
		{
			.addr = i2c->addr,
			.flags = I2C_M_RD,
			.len = length,
			.buf = rxData,
		}, };
#if AKM8962_DEBUG_DATA
	/* save the register address before it is overwritten */
	unsigned char addr = rxData[0];
#endif

	if (i2c_transfer(i2c->adapter, msgs, 2) < 0) {
		dev_err(&i2c->dev, "%s: transfer failed.", __func__);
		return -EIO;
	}

	AKM_DATA(&i2c->dev, "RxData: len=%02x, addr=%02x, data=%02x",
		length, addr, rxData[0]);
	return 0;
}
/*
 * Write @length bytes to the device; txData[0] is the register address,
 * the remaining bytes are the payload.  Returns 0 on success, -EIO on
 * transfer failure.
 */
static int akm8962_i2c_txdata(
	struct i2c_client *i2c,
	unsigned char *txData,
	int length)
{
	struct i2c_msg msg[] = {
		{
			.addr = i2c->addr,
			.flags = 0,
			.len = length,
			.buf = txData,
		}, };

	if (i2c_transfer(i2c->adapter, msg, 1) < 0) {
		dev_err(&i2c->dev, "%s: transfer failed.", __func__);
		return -EIO;
	}

	AKM_DATA(&i2c->dev, "TxData: len=%02x, addr=%02x data=%02x",
		length, txData[0], txData[1]);
	return 0;
}
/* Probe-time sanity check: read the WIA (device ID) register and verify
 * it holds the expected company code (0x48).
 * Returns 0 on success, a negative errno otherwise. */
static int akm8962_i2c_check_device(
	struct i2c_client *client)
{
	int err;
	unsigned char wia[2];

	wia[0] = AK8962_REG_WIA;
	err = akm8962_i2c_rxdata(client, wia, 1);
	if (err < 0) {
		dev_err(&client->dev,
			"%s: Can not read WIA.", __func__);
		return err;
	}
	/* WIA must contain the AKM company code. */
	if (wia[0] != 0x48) {
		dev_err(&client->dev,
			"%s: The device is not AK8962.", __func__);
		return -ENXIO;
	}
	return err;
}
/***** akm miscdevice functions *************************************/
/* Forward declarations for the character-device entry points. */
static int AKECS_Open(struct inode *inode, struct file *file);
static int AKECS_Release(struct inode *inode, struct file *file);
static long AKECS_ioctl(struct file *file,
		unsigned int cmd, unsigned long arg);
/* File operations for the /dev/akm8962_dev control node. */
static struct file_operations AKECS_fops = {
	.owner = THIS_MODULE,
	.open = AKECS_Open,
	.release = AKECS_Release,
	.unlocked_ioctl = AKECS_ioctl,
};
/* Misc device registered in probe(); the userspace daemon talks to it. */
static struct miscdevice akm8962_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "akm8962_dev",
	.fops = &AKECS_fops,
};
/* Write "mode" to the CNTL register, claiming the busy latch first.
 * Fails with -EBUSY when a measurement is already in flight; on I2C
 * failure the latch is released again. */
static int AKECS_Set_CNTL(
	struct akm8962_data *akm,
	unsigned char mode)
{
	int err;
	unsigned char cmd[2];

	/* Only one measurement may be in flight at a time. */
	if (atomic_cmpxchg(&akm->is_busy, 0, 1) != 0) {
		dev_err(&akm->i2c->dev, "%s: device is busy.", __func__);
		return -EBUSY;
	}
	/* Any previously latched data is now stale. */
	atomic_set(&akm->drdy, 0);

	cmd[0] = AK8962_REG_CNTL;
	cmd[1] = mode;
	err = akm8962_i2c_txdata(akm->i2c, cmd, 2);
	if (err < 0) {
		dev_err(&akm->i2c->dev, "%s: Can not set CNTL.", __func__);
		atomic_set(&akm->is_busy, 0);
	} else {
		AKM_DATA(&akm->i2c->dev, "Mode is set to (%d).", mode);
	}
	return err;
}
/* Put the part into power-down mode and reset the busy/drdy state.
 * Returns the result of the I2C write (0 on success, negative errno). */
static int AKECS_Set_PowerDown(
	struct akm8962_data *akm)
{
	unsigned char buffer[2];
	int err;
	/* Set power-down mode via the CNTL register. */
	buffer[0] = AK8962_REG_CNTL;
	buffer[1] = AK8962_MODE_POWERDOWN;
	err = akm8962_i2c_txdata(akm->i2c, buffer, 2);
	if (err < 0) {
		dev_err(&akm->i2c->dev,
			"%s: Can not set to measurement mode.", __func__);
		/* The original also cleared is_busy here; redundant, since
		 * both flags are reset unconditionally below. */
	} else {
		AKM_DATA(&akm->i2c->dev, "Powerdown mode is set.");
	}
	/* Set to initial status: nothing in flight, no data latched. */
	atomic_set(&akm->is_busy, 0);
	atomic_set(&akm->drdy, 0);
	return err;
}
/* Dispatch a mode change requested via ioctl or sysfs.
 * Measurement and self-test claim the device through AKECS_Set_CNTL()
 * (and, when no DRDY irq is wired, schedule a completion poll);
 * fuse access only writes CNTL; power-down resets the state flags. */
static int AKECS_SetMode(
	struct akm8962_data *akm,
	unsigned char mode)
{
	int err;

	if ((mode == AK8962_MODE_SNG_MEASURE) ||
			(mode == AK8962_MODE_SELF_TEST)) {
		err = AKECS_Set_CNTL(akm, mode);
		/* Without a DRDY line, poll once the conversion time
		 * has elapsed. */
		if ((err >= 0) && (akm->irq == 0))
			schedule_delayed_work(
				&akm->work,
				usecs_to_jiffies(AK8962_MEASUREMENT_TIME_US));
	} else if (mode == AK8962_MODE_FUSE_ACCESS) {
		err = AKECS_Set_CNTL(akm, mode);
	} else if (mode == AK8962_MODE_POWERDOWN) {
		err = AKECS_Set_PowerDown(akm);
	} else {
		dev_err(&akm->i2c->dev,
			"%s: Unknown mode(%d).", __func__, mode);
		return -EINVAL;
	}
	/* wait at least 100us after changing mode */
	udelay(100);
	return err;
}
/* This function will block a process until the latest measurement data
 * is available (or AKM8962_DRDY_TIMEOUT milliseconds pass), then copies
 * "size" bytes of it into rbuf and clears the data-ready flag.
 * Returns 0 on success, -1 on interruption or timeout. */
static int AKECS_GetData(
	struct akm8962_data *akm,
	char *rbuf,
	int size)
{
	int err;
	/* The timeout argument is in jiffies; the original passed the raw
	 * millisecond constant, making the real timeout HZ-dependent. */
	err = wait_event_interruptible_timeout(
			akm->drdy_wq,
			atomic_read(&akm->drdy),
			msecs_to_jiffies(AKM8962_DRDY_TIMEOUT));
	if (err < 0) {
		dev_dbg(&akm->i2c->dev,
			"%s: wait_event failed (%d).", __func__, err);
		return -1;
	}
	if (!atomic_read(&akm->drdy)) {
		dev_dbg(&akm->i2c->dev,
			"%s: DRDY is not set.", __func__);
		return -1;
	}
	/* Consume the latched sample atomically w.r.t. the DRDY handler. */
	mutex_lock(&akm->sensor_mutex);
	memcpy(rbuf, akm->sense_data, size);
	atomic_set(&akm->drdy, 0);
	mutex_unlock(&akm->sensor_mutex);
	return 0;
}
/* Push a computed result (delivered by the userspace daemon through
 * ECS_IOCTL_SET_YPR) into the input subsystem.
 * rbuf layout (per the debug prints below): [0]=ready flags,
 * [1..3]=accel xyz + [4]=accel status, [5..7]=mag xyz + [8]=mag status,
 * [9..11]=yaw/pitch/roll. */
static void AKECS_SetYPR(
	struct akm8962_data *akm,
	int *rbuf)
{
	uint32_t ready;
	AKM_DATA(&akm->i2c->dev, "AKM8962 %s: flag =0x%X", __func__,
		rbuf[0]);
	AKM_DATA(&akm->input->dev, " Acceleration[LSB]: %6d,%6d,%6d stat=%d",
		rbuf[1], rbuf[2], rbuf[3], rbuf[4]);
	AKM_DATA(&akm->input->dev, " Geomagnetism[LSB]: %6d,%6d,%6d stat=%d",
		rbuf[5], rbuf[6], rbuf[7], rbuf[8]);
	AKM_DATA(&akm->input->dev, " Orientation[YPR] : %6d,%6d,%6d",
		rbuf[9], rbuf[10], rbuf[11]);
	/* No events are reported */
	if (!rbuf[0]) {
		dev_err(&akm->i2c->dev, "Don't waste a time.");
		return;
	}
	/* Only report sensors that are both enabled and flagged ready. */
	mutex_lock(&akm->val_mutex);
	ready = (akm->enable_flag & (uint32_t)rbuf[0]);
	mutex_unlock(&akm->val_mutex);
	/* Report acceleration sensor information */
	/*
	if (ready & ACC_DATA_READY) {
		input_report_abs(akm->input, ABS_X, rbuf[1]);
		input_report_abs(akm->input, ABS_Y, rbuf[2]);
		input_report_abs(akm->input, ABS_Z, rbuf[3]);
		input_report_abs(akm->input, ABS_THROTTLE, rbuf[4]);
	}
	*/
	/* Report magnetic vector information */
	if (ready & MAG_DATA_READY) {
		input_report_abs(akm->input, ABS_RX, rbuf[5]);
		input_report_abs(akm->input, ABS_RY, rbuf[6]);
		input_report_abs(akm->input, ABS_RZ, rbuf[7]);
		input_report_abs(akm->input, ABS_RUDDER, rbuf[8]);
	}
	/* Report orientation sensor information */
	if (ready & ORI_DATA_READY) {
		input_report_abs(akm->input, ABS_HAT0X, rbuf[9]);
		input_report_abs(akm->input, ABS_HAT0Y, rbuf[10]);
		input_report_abs(akm->input, ABS_HAT1X, rbuf[11]);
		input_report_abs(akm->input, ABS_HAT1Y, rbuf[8]);/* NOTE(review):
			reports the magnetism status (rbuf[8]) as the
			orientation status; the original author marked
			this "here is a bug" -- confirm the intended
			index before changing it. */
	}
	input_sync(akm->input);
}
/* Sleep until at least one sensor becomes enabled (active != 0). */
static int AKECS_GetOpenStatus(
	struct akm8962_data *akm)
{
	return wait_event_interruptible(akm->open_wq,
			(atomic_read(&akm->active) != 0));
}
/* Sleep until every sensor has been disabled (active <= 0). */
static int AKECS_GetCloseStatus(
	struct akm8962_data *akm)
{
	return wait_event_interruptible(akm->open_wq,
			(atomic_read(&akm->active) <= 0));
}
/* open(): every opener shares the singleton driver state. */
static int AKECS_Open(struct inode *inode, struct file *file)
{
	file->private_data = s_akm;
	return nonseekable_open(inode, file);
}
/* release(): nothing to tear down. */
static int AKECS_Release(struct inode *inode, struct file *file)
{
	return 0;
}
/* ioctl dispatcher for /dev/akm8962_dev.
 * Organized as three passes over "cmd":
 *   1) copy in / validate user arguments,
 *   2) perform the command,
 *   3) copy results back to userspace. */
static long
AKECS_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct akm8962_data *akm = file->private_data;
	/* NOTE: In this function the size of "char" should be 1-byte. */
	char i2c_buf[RWBUF_SIZE]; /* for READ/WRITE */
	int8_t sensor_buf[SENSOR_DATA_SIZE];/* for GETDATA */
	int32_t ypr_buf[YPR_DATA_SIZE]; /* for SET_YPR */
	int16_t acc_buf[3]; /* for GET_ACCEL */
	int64_t delay[AKM_NUM_SENSORS]; /* for GET_DELAY */
	char mode; /* for SET_MODE*/
	char layout; /* for GET_LAYOUT */
	int status; /* for OPEN/CLOSE_STATUS */
	int ret = -1; /* Return value. */
	/* Pass 1: copy in arguments from userspace. */
	switch (cmd) {
	case ECS_IOCTL_READ:
	case ECS_IOCTL_WRITE:
		if (argp == NULL) {
			dev_err(&akm->i2c->dev, "invalid argument.");
			return -EINVAL;
		}
		if (copy_from_user(&i2c_buf, argp, sizeof(i2c_buf))) {
			dev_err(&akm->i2c->dev, "copy_from_user failed.");
			return -EFAULT;
		}
		break;
	case ECS_IOCTL_SET_MODE:
		if (argp == NULL) {
			dev_err(&akm->i2c->dev, "invalid argument.");
			return -EINVAL;
		}
		if (copy_from_user(&mode, argp, sizeof(mode))) {
			dev_err(&akm->i2c->dev, "copy_from_user failed.");
			return -EFAULT;
		}
		break;
	case ECS_IOCTL_SET_YPR:
		if (argp == NULL) {
			dev_err(&akm->i2c->dev, "invalid argument.");
			return -EINVAL;
		}
		if (copy_from_user(&ypr_buf, argp, sizeof(ypr_buf))) {
			dev_err(&akm->i2c->dev, "copy_from_user failed.");
			return -EFAULT;
		}
		/* NOTE(review): no break here -- control falls through into
		 * the pointer-check cases below. Harmless (argp was already
		 * validated above) but almost certainly an accidental
		 * omission. */
	case ECS_IOCTL_GETDATA:
	case ECS_IOCTL_GET_OPEN_STATUS:
	case ECS_IOCTL_GET_CLOSE_STATUS:
	case ECS_IOCTL_GET_DELAY:
	case ECS_IOCTL_GET_LAYOUT:
	case ECS_IOCTL_GET_ACCEL:
		/* Just check buffer pointer */
		if (argp == NULL) {
			dev_err(&akm->i2c->dev, "invalid argument.");
			return -EINVAL;
		}
		break;
		/* NOTE(review): the break below is unreachable dead code. */
		break;
	default:
		break;
	}
	/* Pass 2: execute the command. */
	switch (cmd) {
	case ECS_IOCTL_READ:
		AKM_DATA(&akm->i2c->dev, "IOCTL_READ called.");
		/* i2c_buf[0] = transfer length, i2c_buf[1] = register. */
		if ((i2c_buf[0] < 1) || (i2c_buf[0] > (RWBUF_SIZE-1))) {
			dev_err(&akm->i2c->dev, "invalid argument.");
			return -EINVAL;
		}
		ret = akm8962_i2c_rxdata(akm->i2c, &i2c_buf[1], i2c_buf[0]);
		if (ret < 0)
			return ret;
		break;
	case ECS_IOCTL_WRITE:
		AKM_DATA(&akm->i2c->dev, "IOCTL_WRITE called.");
		if ((i2c_buf[0] < 2) || (i2c_buf[0] > (RWBUF_SIZE-1))) {
			dev_err(&akm->i2c->dev, "invalid argument.");
			return -EINVAL;
		}
		ret = akm8962_i2c_txdata(akm->i2c, &i2c_buf[1], i2c_buf[0]);
		if (ret < 0)
			return ret;
		break;
	case ECS_IOCTL_SET_MODE:
		AKM_DATA(&akm->i2c->dev, "IOCTL_SET_MODE called.");
		ret = AKECS_SetMode(akm, mode);
		if (ret < 0)
			return ret;
		break;
	case ECS_IOCTL_GETDATA:
		AKM_DATA(&akm->i2c->dev, "IOCTL_GETDATA called.");
		ret = AKECS_GetData(akm, sensor_buf, SENSOR_DATA_SIZE);
		if (ret < 0)
			return ret;
		break;
	case ECS_IOCTL_SET_YPR:
		AKM_DATA(&akm->i2c->dev, "IOCTL_SET_YPR called.");
		AKECS_SetYPR(akm, ypr_buf);
		break;
	case ECS_IOCTL_GET_OPEN_STATUS:
		AKM_DATA(&akm->i2c->dev, "IOCTL_GET_OPEN_STATUS called.");
		/* Blocks until a sensor is enabled. */
		ret = AKECS_GetOpenStatus(akm);
		if (ret < 0) {
			dev_err(&akm->i2c->dev,
				"Get Open returns error (%d).", ret);
		}
		break;
	case ECS_IOCTL_GET_CLOSE_STATUS:
		AKM_DATA(&akm->i2c->dev, "IOCTL_GET_CLOSE_STATUS called.");
		/* Blocks until all sensors are disabled. */
		ret = AKECS_GetCloseStatus(akm);
		if (ret < 0) {
			dev_err(&akm->i2c->dev,
				"Get Close returns error (%d).", ret);
		}
		break;
	case ECS_IOCTL_GET_DELAY:
		AKM_DATA(&akm->i2c->dev, "IOCTL_GET_DELAY called.");
		mutex_lock(&akm->val_mutex);
		delay[0] = akm->delay[0];
		delay[1] = akm->delay[1];
		delay[2] = akm->delay[2];
		mutex_unlock(&akm->val_mutex);
		break;
	case ECS_IOCTL_GET_LAYOUT:
		AKM_DATA(&akm->i2c->dev, "IOCTL_GET_LAYOUT called.");
		layout = akm->layout;
		break;
	case ECS_IOCTL_GET_ACCEL:
		AKM_DATA(&akm->i2c->dev, "IOCTL_GET_ACCEL called.");
		mutex_lock(&akm->accel_mutex);
		acc_buf[0] = akm->accel_data[0];
		acc_buf[1] = akm->accel_data[1];
		acc_buf[2] = akm->accel_data[2];
		mutex_unlock(&akm->accel_mutex);
		break;
	default:
		return -ENOTTY;
	}
	/* Pass 3: copy results back to userspace. */
	switch (cmd) {
	case ECS_IOCTL_READ:
		/* Copy the length byte plus the data that was read. */
		if (copy_to_user(argp, &i2c_buf, i2c_buf[0]+1)) {
			dev_err(&akm->i2c->dev, "copy_to_user failed.");
			return -EFAULT;
		}
		break;
	case ECS_IOCTL_GETDATA:
		if (copy_to_user(argp, &sensor_buf, sizeof(sensor_buf))) {
			dev_err(&akm->i2c->dev, "copy_to_user failed.");
			return -EFAULT;
		}
		break;
	case ECS_IOCTL_GET_OPEN_STATUS:
	case ECS_IOCTL_GET_CLOSE_STATUS:
		status = atomic_read(&akm->active);
		if (copy_to_user(argp, &status, sizeof(status))) {
			dev_err(&akm->i2c->dev, "copy_to_user failed.");
			return -EFAULT;
		}
		break;
	case ECS_IOCTL_GET_DELAY:
		if (copy_to_user(argp, &delay, sizeof(delay))) {
			dev_err(&akm->i2c->dev, "copy_to_user failed.");
			return -EFAULT;
		}
		break;
	case ECS_IOCTL_GET_LAYOUT:
		if (copy_to_user(argp, &layout, sizeof(layout))) {
			dev_err(&akm->i2c->dev, "copy_to_user failed.");
			return -EFAULT;
		}
		break;
	case ECS_IOCTL_GET_ACCEL:
		if (copy_to_user(argp, &acc_buf, sizeof(acc_buf))) {
			dev_err(&akm->i2c->dev, "copy_to_user failed.");
			return -EFAULT;
		}
		break;
	default:
		break;
	}
	return 0;
}
/***** akm sysfs functions ******************************************/
/* Create each attribute in the NULL-name-terminated "attrs" array on
 * "dev". On failure, remove the attributes created so far and return
 * the error from device_create_file(). */
static int create_device_attributes(
	struct device *dev,
	struct device_attribute *attrs)
{
	int i;
	int err = 0;
	for (i = 0 ; NULL != attrs[i].attr.name ; ++i) {
		err = device_create_file(dev, &attrs[i]);
		if (0 != err)
			break;
	}
	if (0 != err) {
		/* Roll back only the files actually created; the original
		 * also removed attrs[i], which had just failed. */
		for (--i; i >= 0 ; --i)
			device_remove_file(dev, &attrs[i]);
	}
	return err;
}
static void remove_device_attributes(
struct device *dev,
struct device_attribute *attrs)
{
int i;
for (i = 0 ; NULL != attrs[i].attr.name ; ++i)
device_remove_file(dev, &attrs[i]);
}
/* Create each binary attribute in the NULL-name-terminated "attrs"
 * array under "kobj", rolling back on failure. Returns 0 or the error
 * from sysfs_create_bin_file(). */
static int create_device_binary_attributes(
	struct kobject *kobj,
	struct bin_attribute *attrs)
{
	int i;
	int err = 0;	/* original assigned 0 twice; once is enough */
	for (i = 0 ; NULL != attrs[i].attr.name ; ++i) {
		err = sysfs_create_bin_file(kobj, &attrs[i]);
		if (0 != err)
			break;
	}
	if (0 != err) {
		/* Remove only the files that were actually created. */
		for (--i; i >= 0 ; --i)
			sysfs_remove_bin_file(kobj, &attrs[i]);
	}
	return err;
}
static void remove_device_binary_attributes(
struct kobject *kobj,
struct bin_attribute *attrs)
{
int i;
for (i = 0 ; NULL != attrs[i].attr.name ; ++i)
sysfs_remove_bin_file(kobj, &attrs[i]);
}
/* Parse a decimal ("123"), octal ("0123") or hex ("0x123") integer from
 * a sysfs buffer into *value. Returns true on success, false on parse
 * failure or when the result does not fit in an int. */
static bool get_value_as_int(char const *buf, size_t size, int *value)
{
	long tmp;
	if (size == 0)
		return false;
	/* maybe text format value */
	if ((buf[0] == '0') && (size > 1)) {
		if ((buf[1] == 'x') || (buf[1] == 'X')) {
			/* hexadecimal format */
			if (0 != strict_strtol(buf, 16, &tmp))
				return false;
		} else {
			/* octal format */
			if (0 != strict_strtol(buf, 8, &tmp))
				return false;
		}
	} else {
		/* decimal format */
		if (0 != strict_strtol(buf, 10, &tmp))
			return false;
	}
	/* Reject values outside int range; the original only checked the
	 * upper bound, letting large negative longs through on 64-bit. */
	if (tmp > INT_MAX || tmp < INT_MIN)
		return false;
	*value = tmp;
	return true;
}
/* Parse a decimal/octal/hex integer from a sysfs buffer into *value
 * (long long). Returns true on success, false on parse failure. */
static bool get_value_as_int64(char const *buf, size_t size, long long *value)
{
	long long tmp;
	if (size == 0)
		return false;
	/* maybe text format value */
	if ((buf[0] == '0') && (size > 1)) {
		if ((buf[1] == 'x') || (buf[1] == 'X')) {
			/* hexadecimal format */
			if (0 != strict_strtoll(buf, 16, &tmp))
				return false;
		} else {
			/* octal format */
			if (0 != strict_strtoll(buf, 8, &tmp))
				return false;
		}
	} else {
		/* decimal format */
		if (0 != strict_strtoll(buf, 10, &tmp))
			return false;
	}
	/* No range check needed: tmp is already a long long, so the
	 * original "tmp > LLONG_MAX" test could never be true. */
	*value = tmp;
	return true;
}
/*********************************************************************
*
* SysFS attribute functions
*
* directory : /sys/class/compass/akm8962/
* files :
* - enable_acc [rw] [t] : enable flag for accelerometer
* - enable_mag [rw] [t] : enable flag for magnetometer
* - enable_ori [rw] [t] : enable flag for orientation
* - delay_acc [rw] [t] : delay in nanosecond for accelerometer
* - delay_mag [rw] [t] : delay in nanosecond for magnetometer
* - delay_ori [rw] [t] : delay in nanosecond for orientation
* - accel [w] [b] : accelerometer data
*
* debug :
* - mode [w] [t] : AK8962's mode
* - bdata [r] [t] : raw data
* - asa [r] [t] : FUSEROM data
*
* [b] = binary format
* [t] = text format
*/
/***** sysfs enable *************************************************/
/* Re-derive the global active flag from enable_flag and wake anyone
 * blocked in AKECS_GetOpenStatus()/AKECS_GetCloseStatus() when the
 * state actually changed. */
static void akm8962_sysfs_update_active_status(
	struct akm8962_data *akm)
{
	uint32_t en;

	mutex_lock(&akm->val_mutex);
	en = akm->enable_flag;
	mutex_unlock(&akm->val_mutex);

	if (en == 0) {
		/* Last sensor disabled: deactivate and wake waiters. */
		if (atomic_cmpxchg(&akm->active, 1, 0) == 1) {
			wake_up(&akm->open_wq);
			dev_dbg(akm->class_dev, "Deactivated");
		}
	} else if (atomic_cmpxchg(&akm->active, 0, 1) == 0) {
		/* First sensor enabled: activate and wake waiters. */
		wake_up(&akm->open_wq);
		dev_dbg(akm->class_dev, "Activated");
	}
	dev_dbg(&akm->i2c->dev,
		"Status updated: enable=0x%X, active=%d",
		en, atomic_read(&akm->active));
}
/* Print the enable bit for sensor "pos" ("0\n" or "1\n"). */
static ssize_t akm8962_sysfs_enable_show(
	struct akm8962_data *akm, char *buf, int pos)
{
	int bit;

	mutex_lock(&akm->val_mutex);
	bit = (akm->enable_flag >> pos) & 1;
	mutex_unlock(&akm->val_mutex);

	return sprintf(buf, "%d\n", bit);
}
/* Parse a boolean from the sysfs buffer, update the enable bit for
 * sensor "pos", and refresh the global active state. */
static ssize_t akm8962_sysfs_enable_store(
	struct akm8962_data *akm, char const *buf, size_t count, int pos)
{
	int en = 0;

	if (buf == NULL)
		return -EINVAL;
	if (count == 0)
		return 0;
	if (!get_value_as_int(buf, count, &en))
		return -EINVAL;

	mutex_lock(&akm->val_mutex);
	if (en)
		akm->enable_flag |= ((uint32_t)1) << pos;
	else
		akm->enable_flag &= ~(((uint32_t)1) << pos);
	mutex_unlock(&akm->val_mutex);

	akm8962_sysfs_update_active_status(akm);
	return count;
}
/***** Acceleration ***/
/* sysfs show/store wrappers binding each sensor's enable file to the
 * shared helpers with its flag position. */
static ssize_t akm8962_enable_acc_show(
	struct device *dev, struct device_attribute *attr, char *buf)
{
	struct akm8962_data *akm = dev_get_drvdata(dev);

	return akm8962_sysfs_enable_show(akm, buf, ACC_DATA_FLAG);
}
static ssize_t akm8962_enable_acc_store(
	struct device *dev, struct device_attribute *attr,
	char const *buf, size_t count)
{
	struct akm8962_data *akm = dev_get_drvdata(dev);

	return akm8962_sysfs_enable_store(akm, buf, count, ACC_DATA_FLAG);
}
/***** Magnetic field ***/
static ssize_t akm8962_enable_mag_show(
	struct device *dev, struct device_attribute *attr, char *buf)
{
	struct akm8962_data *akm = dev_get_drvdata(dev);

	return akm8962_sysfs_enable_show(akm, buf, MAG_DATA_FLAG);
}
static ssize_t akm8962_enable_mag_store(
	struct device *dev, struct device_attribute *attr,
	char const *buf, size_t count)
{
	struct akm8962_data *akm = dev_get_drvdata(dev);

	return akm8962_sysfs_enable_store(akm, buf, count, MAG_DATA_FLAG);
}
/***** Orientation ***/
static ssize_t akm8962_enable_ori_show(
	struct device *dev, struct device_attribute *attr, char *buf)
{
	struct akm8962_data *akm = dev_get_drvdata(dev);

	return akm8962_sysfs_enable_show(akm, buf, ORI_DATA_FLAG);
}
static ssize_t akm8962_enable_ori_store(
	struct device *dev, struct device_attribute *attr,
	char const *buf, size_t count)
{
	struct akm8962_data *akm = dev_get_drvdata(dev);

	return akm8962_sysfs_enable_store(akm, buf, count, ORI_DATA_FLAG);
}
/***** sysfs delay **************************************************/
/* Print the stored delay for sensor "pos" as a signed 64-bit decimal. */
static ssize_t akm8962_sysfs_delay_show(
	struct akm8962_data *akm, char *buf, int pos)
{
	int64_t d;

	mutex_lock(&akm->val_mutex);
	d = akm->delay[pos];
	mutex_unlock(&akm->val_mutex);

	return sprintf(buf, "%lld\n", d);
}
/* Parse a 64-bit integer from the sysfs buffer and store it as the
 * delay for sensor "pos". */
static ssize_t akm8962_sysfs_delay_store(
	struct akm8962_data *akm, char const *buf, size_t count, int pos)
{
	long long val = 0;

	if (buf == NULL)
		return -EINVAL;
	if (count == 0)
		return 0;
	if (!get_value_as_int64(buf, count, &val))
		return -EINVAL;

	mutex_lock(&akm->val_mutex);
	akm->delay[pos] = val;
	mutex_unlock(&akm->val_mutex);

	return count;
}
/***** Accelerometer ***/
/* sysfs show/store wrappers binding each sensor's delay file to the
 * shared helpers with its flag position. */
static ssize_t akm8962_delay_acc_show(
	struct device *dev, struct device_attribute *attr, char *buf)
{
	struct akm8962_data *akm = dev_get_drvdata(dev);

	return akm8962_sysfs_delay_show(akm, buf, ACC_DATA_FLAG);
}
static ssize_t akm8962_delay_acc_store(
	struct device *dev, struct device_attribute *attr,
	char const *buf, size_t count)
{
	struct akm8962_data *akm = dev_get_drvdata(dev);

	return akm8962_sysfs_delay_store(akm, buf, count, ACC_DATA_FLAG);
}
/***** Magnetic field ***/
static ssize_t akm8962_delay_mag_show(
	struct device *dev, struct device_attribute *attr, char *buf)
{
	struct akm8962_data *akm = dev_get_drvdata(dev);

	return akm8962_sysfs_delay_show(akm, buf, MAG_DATA_FLAG);
}
static ssize_t akm8962_delay_mag_store(
	struct device *dev, struct device_attribute *attr,
	char const *buf, size_t count)
{
	struct akm8962_data *akm = dev_get_drvdata(dev);

	return akm8962_sysfs_delay_store(akm, buf, count, MAG_DATA_FLAG);
}
/***** Orientation ***/
static ssize_t akm8962_delay_ori_show(
	struct device *dev, struct device_attribute *attr, char *buf)
{
	struct akm8962_data *akm = dev_get_drvdata(dev);

	return akm8962_sysfs_delay_show(akm, buf, ORI_DATA_FLAG);
}
static ssize_t akm8962_delay_ori_store(
	struct device *dev, struct device_attribute *attr,
	char const *buf, size_t count)
{
	struct akm8962_data *akm = dev_get_drvdata(dev);

	return akm8962_sysfs_delay_store(akm, buf, count, ORI_DATA_FLAG);
}
/***** accel (binary) ***/
/* Binary sysfs "accel" write: userspace pushes one accelerometer sample
 * as three consecutive int16 values (6 bytes). */
static ssize_t akm8962_bin_accel_write(
	struct file *file,
	struct kobject *kobj,
	struct bin_attribute *attr,
	char *buf,
	loff_t pos,
	size_t size)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct akm8962_data *akm = dev_get_drvdata(dev);
	int16_t *accel_data;
	if (size == 0)
		return 0;
	/* Reject short writes: the original read 6 bytes regardless of
	 * "size", picking up garbage beyond the caller's data. */
	if (size < AKM_ACCEL_ITEMS * sizeof(int16_t))
		return -EINVAL;
	accel_data = (int16_t *)buf;
	mutex_lock(&akm->accel_mutex);
	akm->accel_data[0] = accel_data[0];
	akm->accel_data[1] = accel_data[1];
	akm->accel_data[2] = accel_data[2];
	mutex_unlock(&akm->accel_mutex);
	AKM_DATA(&akm->i2c->dev, "accel:%d,%d,%d\n",
		accel_data[0], accel_data[1], accel_data[2]);
	return size;
}
/* Debug-only sysfs hooks: "mode" (force a CNTL mode), "bdata" (dump the
 * latched raw bytes) and "asa" (read the fuse-ROM sensitivity values).
 * The original guarded these with "#ifdef AKM8962_DEBUG_IF", which is
 * true even though the flag is defined to 0 at the top of the file;
 * "#if" honors the 0/1 value, matching the AKM8962_DEBUG_DATA usage. */
#if AKM8962_DEBUG_IF
/* Write an integer mode and hand it straight to AKECS_SetMode(). */
static ssize_t akm8962_mode_store(
	struct device *dev, struct device_attribute *attr,
	char const *buf, size_t count)
{
	struct akm8962_data *akm = dev_get_drvdata(dev);
	int mode = 0;
	if (NULL == buf)
		return -EINVAL;
	if (0 == count)
		return 0;
	if (false == get_value_as_int(buf, count, &mode))
		return -EINVAL;
	if (AKECS_SetMode(akm, mode) < 0)
		return -EINVAL;
	return 1;
}
/* Dump the first eight latched raw measurement bytes in hex. */
static ssize_t akm8962_bdata_show(
	struct device *dev, struct device_attribute *attr, char *buf)
{
	struct akm8962_data *akm = dev_get_drvdata(dev);
	char rbuf[SENSOR_DATA_SIZE];
	mutex_lock(&akm->sensor_mutex);
	memcpy(&rbuf, akm->sense_data, sizeof(rbuf));
	mutex_unlock(&akm->sensor_mutex);
	return sprintf(buf,
		"0x%02X,0x%02X,0x%02X,0x%02X,"
		"0x%02X,0x%02X,0x%02X,0x%02X\n",
		rbuf[0],rbuf[1],rbuf[2],rbuf[3],
		rbuf[4],rbuf[5],rbuf[6],rbuf[7]);
}
/* Switch into fuse-access mode, read the three ASA values, and return
 * to power-down. */
static ssize_t akm8962_asa_show(
	struct device *dev, struct device_attribute *attr, char *buf)
{
	struct akm8962_data *akm = dev_get_drvdata(dev);
	int err;
	unsigned char asa[3];
	err = AKECS_SetMode(akm, AK8962_MODE_FUSE_ACCESS);
	if (err < 0)
		return err;
	asa[0] = AK8962_FUSE_ASAX;
	err = akm8962_i2c_rxdata(akm->i2c, asa, 3);
	if (err < 0)
		return err;
	err = AKECS_SetMode(akm, AK8962_MODE_POWERDOWN);
	if (err < 0)
		return err;
	return sprintf(buf, "0x%02X,0x%02X,0x%02X\n",
		asa[0], asa[1], asa[2]);
}
#endif
/* Text attributes exposed on the class device; see the directory map
 * comment above. Terminated by __ATTR_NULL. */
static struct device_attribute akm8962_attributes[] = {
	__ATTR(enable_acc, 0660, akm8962_enable_acc_show, akm8962_enable_acc_store),
	__ATTR(enable_mag, 0660, akm8962_enable_mag_show, akm8962_enable_mag_store),
	__ATTR(enable_ori, 0660, akm8962_enable_ori_show, akm8962_enable_ori_store),
	__ATTR(delay_acc, 0660, akm8962_delay_acc_show, akm8962_delay_acc_store),
	__ATTR(delay_mag, 0660, akm8962_delay_mag_show, akm8962_delay_mag_store),
	__ATTR(delay_ori, 0660, akm8962_delay_ori_show, akm8962_delay_ori_store),
#if AKM8962_DEBUG_IF
	__ATTR(mode, 0220, NULL, akm8962_mode_store),
	__ATTR(bdata, 0440, akm8962_bdata_show, NULL),
	__ATTR(asa, 0440, akm8962_asa_show, NULL),
#endif
	__ATTR_NULL,
};
/* Local initializer helpers for the bin_attribute table below. */
#define __BIN_ATTR(name_, mode_, size_, private_, read_, write_) \
	{ \
		.attr = { .name = __stringify(name_), .mode = mode_ }, \
		.size = size_, \
		.private = private_, \
		.read = read_, \
		.write = write_, \
	}
#define __BIN_ATTR_NULL \
	{ \
		.attr = { .name = NULL }, \
	}
/* "accel": write-only, 6 bytes (three int16 axes); handled by
 * akm8962_bin_accel_write(). */
static struct bin_attribute akm8962_bin_attributes[] = {
	__BIN_ATTR(accel, 0220, 6, NULL,
			NULL, akm8962_bin_accel_write),
	__BIN_ATTR_NULL
};
/* Names used when building the sysfs hierarchy. */
static char const *const compass_class_name = "compass";
static char const *const akm8962_device_name = "akm8962";
static char const *const device_link_name = "i2c";
/* Fixed device number for the class device (misc major, minor 240). */
static dev_t const akm8962_device_dev_t = MKDEV(MISC_MAJOR, 240);
/* Build the /sys/class/compass/akm8962 hierarchy: the class, the class
 * device, an "i2c" symlink back to the bus device, the text attributes
 * and the binary "accel" attribute. On any failure, everything created
 * so far is unwound via the goto ladder below. */
static int create_sysfs_interfaces(struct akm8962_data *akm)
{
	int err;
	if (NULL == akm)
		return -EINVAL;
	err = 0;
	akm->compass = class_create(THIS_MODULE, compass_class_name);
	if (IS_ERR(akm->compass)) {
		err = PTR_ERR(akm->compass);
		goto exit_class_create_failed;
	}
	akm->class_dev = device_create(
				akm->compass,
				NULL,
				akm8962_device_dev_t,
				akm,
				akm8962_device_name);
	if (IS_ERR(akm->class_dev)) {
		err = PTR_ERR(akm->class_dev);
		goto exit_class_device_create_failed;
	}
	/* Convenience symlink from the class device to the i2c device. */
	err = sysfs_create_link(
			&akm->class_dev->kobj,
			&akm->i2c->dev.kobj,
			device_link_name);
	if (0 > err)
		goto exit_sysfs_create_link_failed;
	err = create_device_attributes(
			akm->class_dev,
			akm8962_attributes);
	if (0 > err)
		goto exit_device_attributes_create_failed;
	err = create_device_binary_attributes(
			&akm->class_dev->kobj,
			akm8962_bin_attributes);
	if (0 > err)
		goto exit_device_binary_attributes_create_failed;
	return err;
	/* Unwind in reverse order of creation. */
exit_device_binary_attributes_create_failed:
	remove_device_attributes(akm->class_dev, akm8962_attributes);
exit_device_attributes_create_failed:
	sysfs_remove_link(&akm->class_dev->kobj, device_link_name);
exit_sysfs_create_link_failed:
	device_destroy(akm->compass, akm8962_device_dev_t);
exit_class_device_create_failed:
	akm->class_dev = NULL;
	class_destroy(akm->compass);
exit_class_create_failed:
	akm->compass = NULL;
	return err;
}
/* Tear down everything create_sysfs_interfaces() built, in reverse
 * order; tolerates partially-built state (NULL members are skipped). */
static void remove_sysfs_interfaces(struct akm8962_data *akm)
{
	if (akm == NULL)
		return;

	if (akm->class_dev != NULL) {
		remove_device_binary_attributes(&akm->class_dev->kobj,
				akm8962_bin_attributes);
		remove_device_attributes(akm->class_dev,
				akm8962_attributes);
		sysfs_remove_link(&akm->class_dev->kobj, device_link_name);
		akm->class_dev = NULL;
	}
	if (akm->compass != NULL) {
		device_destroy(akm->compass, akm8962_device_dev_t);
		class_destroy(akm->compass);
		akm->compass = NULL;
	}
}
/***** akm input device functions ***********************************/
/* Allocate and register the "compass" input device, declaring ABS axes
 * for acceleration (X/Y/Z plus THROTTLE status), magnetic field
 * (RX/RY/RZ plus RUDDER status) and orientation (HAT0X/HAT0Y/HAT1X plus
 * HAT1Y status). On success *input is valid and registered; on failure
 * the device is freed and the error returned. */
static int akm8962_input_init(
	struct input_dev **input)
{
	int err = 0;
	/* Declare input device */
	*input = input_allocate_device();
	if (!*input)
		return -ENOMEM;
	/* Setup input device */
	set_bit(EV_ABS, (*input)->evbit);
	/* Accelerometer (720 x 16G)*/
	input_set_abs_params(*input, ABS_X,
			-11520, 11520, 0, 0);
	input_set_abs_params(*input, ABS_Y,
			-11520, 11520, 0, 0);
	input_set_abs_params(*input, ABS_Z,
			-11520, 11520, 0, 0);
	/* Accuracy/status channel: 0..3. */
	input_set_abs_params(*input, ABS_THROTTLE,
			0, 3, 0, 0);
	/* Magnetic field (-8188, 8188)*/
	input_set_abs_params(*input, ABS_RX,
			-32768, 32767, 0, 0);
	input_set_abs_params(*input, ABS_RY,
			-32768, 32767, 0, 0);
	input_set_abs_params(*input, ABS_RZ,
			-32768, 32767, 0, 0);
	input_set_abs_params(*input, ABS_RUDDER,
			0, 3, 0, 0);
	/* Orientation (yaw:0,360 pitch:-180,180 roll:-90,90) */
	input_set_abs_params(*input, ABS_HAT0X,
			0, 23040, 0, 0);
	input_set_abs_params(*input, ABS_HAT0Y,
			-11520, 11520, 0, 0);
	input_set_abs_params(*input, ABS_HAT1X,
			-5760, 5760, 0, 0);
	input_set_abs_params(*input, ABS_HAT1Y,
			0, 3, 0, 0);
	/* Set name */
	(*input)->name = "compass";
	/* Register */
	err = input_register_device(*input);
	if (err) {
		input_free_device(*input);
		return err;
	}
	return err;
}
/***** akm functions ************************************************/
/* DRDY handler (also invoked from the polling work item): burst-read
 * SENSOR_DATA_SIZE bytes starting at ST1, verify the data-ready bit,
 * latch the sample into sense_data, release the busy latch, and wake
 * any reader blocked in AKECS_GetData(). */
static irqreturn_t akm8962_irq(int irq, void *handle)
{
	struct akm8962_data *akm = handle;
	char buffer[SENSOR_DATA_SIZE];
	int err;
	memset(buffer, 0, sizeof(buffer));
	buffer[0] = AK8962_REG_ST1;
	err = akm8962_i2c_rxdata(akm->i2c, buffer, SENSOR_DATA_SIZE);
	if (err < 0) {
		dev_err(&akm->i2c->dev, "%s failed.", __func__);
		goto work_func_end;
	}
	/* Check ST bit */
	if ((buffer[0] & 0x01) != 0x01) {
		dev_err(&akm->i2c->dev, "%s ST is not set.", __func__);
		goto work_func_end;
	}
	mutex_lock(&akm->sensor_mutex);
	memcpy(akm->sense_data, buffer, SENSOR_DATA_SIZE);
	mutex_unlock(&akm->sensor_mutex);
	/* Publish the new sample, release the measurement-busy latch. */
	atomic_set(&akm->drdy, 1);
	atomic_set(&akm->is_busy, 0);
	wake_up(&akm->drdy_wq);
work_func_end:
	return IRQ_HANDLED;
}
/* Polling path for boards without a DRDY line: once the conversion
 * delay elapses, run the same handler the interrupt would. */
static void akm8962_delayed_work(struct work_struct *work)
{
	struct akm8962_data *akm =
		container_of(work, struct akm8962_data, work.work);

	akm8962_irq(akm->irq, akm);
}
#ifdef CONFIG_PM
/* PM hooks are intentionally empty stubs: no device state is saved or
 * restored across suspend/resume here. */
static int akm8962_suspend(struct i2c_client *client, pm_message_t mesg)
{
	return 0;
}
static int akm8962_resume(struct i2c_client *client)
{
	return 0;
}
#endif
/* I2C probe: verify the chip over the bus, register the input device,
 * set up DRDY handling (threaded IRQ, or a delayed-work poll when no
 * IRQ is wired), register the misc control node, and build the sysfs
 * interface. Error paths unwind in reverse order via the exit labels. */
int akm8962_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
	struct akm8962_platform_data *pdata;
	int err = 0;
	int i;
	dev_dbg(&client->dev, "start probing.");
	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
		dev_err(&client->dev, "%s: check_functionality failed.", __func__);
		err = -ENODEV;
		goto exit0;
	}
	/* Allocate memory for driver data */
	s_akm = kzalloc(sizeof(struct akm8962_data), GFP_KERNEL);
	if (!s_akm) {
		dev_err(&client->dev, "%s: memory allocation failed.", __func__);
		err = -ENOMEM;
		goto exit1;
	}
	/***** Set layout information *****/
	pdata = client->dev.platform_data;
	if (pdata) {
		/* Platform data is available. copy its value to local. */
		s_akm->layout = pdata->layout;
	} else {
		/* Platform data is not available.
		   Layout information should be set by each application. */
		dev_dbg(&client->dev, "%s: No platform data.", __func__);
		s_akm->layout = 0;
	}
	/***** I2C initialization *****/
	s_akm->i2c = client;
	/* check connection */
	err = akm8962_i2c_check_device(client);
	if (err < 0)
		goto exit2;
	/* set client data */
	i2c_set_clientdata(client, s_akm);
	/***** input *****/
	err = akm8962_input_init(&s_akm->input);
	if (err) {
		dev_err(&client->dev,
			"%s: input_dev register failed", __func__);
		goto exit3;
	}
	input_set_drvdata(s_akm->input, s_akm);
	/**** initialize variables in akm8962_data *****/
	init_waitqueue_head(&s_akm->drdy_wq);
	init_waitqueue_head(&s_akm->open_wq);
	mutex_init(&s_akm->sensor_mutex);
	mutex_init(&s_akm->accel_mutex);
	mutex_init(&s_akm->val_mutex);
	atomic_set(&s_akm->active, 0);
	atomic_set(&s_akm->is_busy, 0);
	atomic_set(&s_akm->drdy, 0);
	atomic_set(&s_akm->suspend, 0);
	s_akm->enable_flag = 0;
	/* Delays start "unset" (-1); userspace programs them via sysfs. */
	for (i=0; i<AKM_NUM_SENSORS; i++)
		s_akm->delay[i] = -1;
	/***** IRQ setup *****/
	s_akm->irq = client->irq;
	if (s_akm->irq == 0) {
		dev_dbg(&client->dev, "%s: IRQ is not set.", __func__);
		/* Use timer to notify measurement end */
		INIT_DELAYED_WORK(&s_akm->work, akm8962_delayed_work);
	} else {
		err = request_threaded_irq(
				s_akm->irq,
				NULL,
				akm8962_irq,
				IRQF_TRIGGER_HIGH|IRQF_ONESHOT,
				dev_name(&client->dev),
				s_akm);
		if (err < 0) {
			dev_err(&client->dev,
				"%s: request irq failed.", __func__);
			goto exit4;
		}
	}
	/***** misc *****/
	err = misc_register(&akm8962_dev);
	if (err) {
		dev_err(&client->dev,
			"%s: akm8962_dev register failed", __func__);
		goto exit5;
	}
	/***** sysfs *****/
	err = create_sysfs_interfaces(s_akm);
	if (0 > err) {
		dev_err(&client->dev,
			"%s: create sysfs failed.", __func__);
		goto exit6;
	}
	dev_dbg(&client->dev, "successfully probed.");
	return 0;
	/* Error unwinding, reverse order of acquisition. */
exit6:
	misc_deregister(&akm8962_dev);
exit5:
	if (s_akm->irq)
		free_irq(s_akm->irq, s_akm);
exit4:
	input_unregister_device(s_akm->input);
exit3:
exit2:
	kfree(s_akm);
exit1:
exit0:
	return err;
}
/* I2C remove: tear everything down in reverse order of probe. */
static int akm8962_remove(struct i2c_client *client)
{
	struct akm8962_data *akm = i2c_get_clientdata(client);

	remove_sysfs_interfaces(akm);
	if (misc_deregister(&akm8962_dev) < 0)
		dev_dbg(&client->dev, "misc deregister failed.");
	if (akm->irq)
		free_irq(akm->irq, akm);
	input_unregister_device(akm->input);
	kfree(akm);

	dev_dbg(&client->dev, "successfully removed.");
	return 0;
}
/* Device-id table used for I2C bus matching. */
static const struct i2c_device_id akm8962_id[] = {
	{AKM8962_I2C_NAME, 0 },
	{ }
};
static struct i2c_driver akm8962_driver = {
	.probe = akm8962_probe,
	.remove = akm8962_remove,
#ifdef CONFIG_PM
	.suspend = akm8962_suspend,
	.resume = akm8962_resume,
#endif
	.id_table = akm8962_id,
	.driver = {
		.name = AKM8962_I2C_NAME,
	},
};
/* Module init/exit: register/unregister the I2C driver. */
static int __init akm8962_init(void)
{
	printk(KERN_INFO "AKM8962 compass driver: initialize.");
	return i2c_add_driver(&akm8962_driver);
}
static void __exit akm8962_exit(void)
{
	printk(KERN_INFO "AKM8962 compass driver: release.");
	i2c_del_driver(&akm8962_driver);
}
module_init(akm8962_init);
module_exit(akm8962_exit);
MODULE_AUTHOR("viral wang <[email protected]>");
MODULE_DESCRIPTION("AKM8962 compass driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Tilde88/android_kernel_lge_msm8996 | drivers/net/wireless/bcmdhd/src/dhd/sys/dhd_msgbuf.c | 193687 | /**
* @file definition of host message ring functionality
* Provides type definitions and function prototypes used to link the
* DHD OS, bus, and protocol modules.
*
* Copyright (C) 1999-2016, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
*
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* you also meet, for each linked independent module, the terms and conditions of
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
*
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
*
* <<Broadcom-WL-IPTag/Open:>>
*
* $Id: dhd_msgbuf.c 639091 2016-05-20 05:55:57Z $
*/
#include <typedefs.h>
#include <osl.h>
#include <bcmutils.h>
#include <bcmmsgbuf.h>
#include <bcmendian.h>
#include <dngl_stats.h>
#include <dhd.h>
#include <dhd_proto.h>
#include <dhd_bus.h>
#include <dhd_dbg.h>
#include <dhd_debug.h>
#include <siutils.h>
#include <dhd_flowring.h>
#include <pcie_core.h>
#include <bcmpcie.h>
#include <dhd_pcie.h>
#if defined(DHD_LB)
#include <linux/cpu.h>
#include <bcm_ring.h>
#define DHD_LB_WORKQ_SZ (8192)
#define DHD_LB_WORKQ_SYNC (16)
#define DHD_LB_WORK_SCHED (DHD_LB_WORKQ_SYNC * 2)
#endif /* DHD_LB */
/**
* Host configures a soft doorbell for d2h rings, by specifying a 32bit host
* address where a value must be written. Host may also interrupt coalescing
* on this soft doorbell.
* Use Case: Hosts with network processors, may register with the dongle the
* network processor's thread wakeup register and a value corresponding to the
* core/thread context. Dongle will issue a write transaction <address,value>
* to the PCIE RC which will need to be routed to the mapped register space, by
* the host.
*/
/* #define DHD_D2H_SOFT_DOORBELL_SUPPORT */
/* Dependency Check */
#if defined(IOCTLRESP_USE_CONSTMEM) && defined(DHD_USE_STATIC_CTRLBUF)
#error "DHD_USE_STATIC_CTRLBUF is NOT working with DHD_USE_OSLPKT_FOR_RESPBUF"
#endif /* IOCTLRESP_USE_CONSTMEM && DHD_USE_STATIC_CTRLBUF */
#define RETRIES 2 /* # of retries to retrieve matching ioctl response */
#define DEFAULT_RX_BUFFERS_TO_POST 256
#define RXBUFPOST_THRESHOLD 32
#define RX_BUF_BURST 32 /* Rx buffers for MSDU Data */
#define DHD_STOP_QUEUE_THRESHOLD 200
#define DHD_START_QUEUE_THRESHOLD 100
#define RX_DMA_OFFSET 8 /* Mem2mem DMA inserts an extra 8 */
#define IOCT_RETBUF_SIZE (RX_DMA_OFFSET + WLC_IOCTL_MAXLEN)
#define FLOWRING_SIZE (H2DRING_TXPOST_MAX_ITEM * H2DRING_TXPOST_ITEMSIZE)
/* flags for ioctl pending status */
#define MSGBUF_IOCTL_ACK_PENDING (1<<0)
#define MSGBUF_IOCTL_RESP_PENDING (1<<1)
#define DMA_ALIGN_LEN 4
#define DMA_D2H_SCRATCH_BUF_LEN 8
#define DMA_XFER_LEN_LIMIT 0x400000
#ifdef BCM_HOST_BUF
#ifndef DMA_HOST_BUFFER_LEN
#define DMA_HOST_BUFFER_LEN 0x200000
#endif
#endif /* BCM_HOST_BUF */
#define DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ 8192
#define DHD_FLOWRING_MAX_EVENTBUF_POST 8
#define DHD_FLOWRING_MAX_IOCTLRESPBUF_POST 8
#define DHD_PROT_FUNCS 37
/* Length of buffer in host for bus throughput measurement */
#define DHD_BUS_TPUT_BUF_LEN 2048
#define TXP_FLUSH_NITEMS
/* optimization to write "n" tx items at a time to ring */
#define TXP_FLUSH_MAX_ITEMS_FLUSH_CNT 48
#define RING_NAME_MAX_LENGTH 24
struct msgbuf_ring; /* ring context for common and flow rings */
/**
* PCIE D2H DMA Complete Sync Modes
*
* Firmware may interrupt the host, prior to the D2H Mem2Mem DMA completes into
* Host system memory. A WAR using one of 3 approaches is needed:
* 1. Dongle places a modulo-253 seqnum in last word of each D2H message
* 2. XOR Checksum, with epoch# in each work item. Dongle builds an XOR checksum
* writes in the last word of each work item. Each work item has a seqnum
* number = sequence num % 253.
*
* 3. Read Barrier: Dongle does a host memory read access prior to posting an
* interrupt, ensuring that D2H data transfer indeed completed.
* 4. Dongle DMA's all indices after producing items in the D2H ring, flushing
* ring contents before the indices.
*
* Host does not sync for DMA to complete with option #3 or #4, and a noop sync
* callback (see dhd_prot_d2h_sync_none) may be bound.
*
* Dongle advertizes host side sync mechanism requirements.
*/
#define PCIE_D2H_SYNC
#if defined(PCIE_D2H_SYNC)
#define PCIE_D2H_SYNC_WAIT_TRIES 512
/**
* Custom callback attached based upon D2H DMA Sync mode advertized by dongle.
*
* On success: return cmn_msg_hdr_t::msg_type
* On failure: return 0 (invalid msg_type)
*/
typedef uint8 (* d2h_sync_cb_t)(dhd_pub_t *dhd, struct msgbuf_ring *ring,
volatile cmn_msg_hdr_t *msg, int msglen);
#endif /* PCIE_D2H_SYNC */
/*
* +----------------------------------------------------------------------------
*
* RingIds and FlowId are not equivalent as ringids include D2H rings whereas
* flowids do not.
*
* Dongle advertizes the max H2D rings, as max_sub_queues = 'N' which includes
* the H2D common rings as well as the (N-BCMPCIE_H2D_COMMON_MSGRINGS) flowrings
*
* Here is a sample mapping for (based on PCIE Full Dongle Rev5) where,
* BCMPCIE_H2D_COMMON_MSGRINGS = 2, i.e. 2 H2D common rings,
* BCMPCIE_COMMON_MSGRINGS = 5, i.e. include 3 D2H common rings.
*
* H2D Control Submit RingId = 0 FlowId = 0 reserved never allocated
* H2D RxPost Submit RingId = 1 FlowId = 1 reserved never allocated
*
* D2H Control Complete RingId = 2
* D2H Transmit Complete RingId = 3
* D2H Receive Complete RingId = 4
*
* H2D TxPost FLOWRING RingId = 5 FlowId = 2 (1st flowring)
* H2D TxPost FLOWRING RingId = 6 FlowId = 3 (2nd flowring)
* H2D TxPost FLOWRING RingId = 5 + (N-1) FlowId = (N-1) (Nth flowring)
*
* When TxPost FlowId(s) are allocated, the FlowIds [0..FLOWID_RESERVED) are
* unused, where FLOWID_RESERVED is BCMPCIE_H2D_COMMON_MSGRINGS.
*
* Example: when a system supports 4 bc/mc and 128 uc flowrings, with
* BCMPCIE_H2D_COMMON_MSGRINGS = 2, and BCMPCIE_H2D_COMMON_MSGRINGS = 5, and the
* FlowId values would be in the range [2..133] and the corresponding
* RingId values would be in the range [5..136].
*
* The flowId allocator, may chose to, allocate Flowids:
* bc/mc (per virtual interface) in one consecutive range [2..(2+VIFS))
* X# of uc flowids in consecutive ranges (per station Id), where X is the
* packet's access category (e.g. 4 uc flowids per station).
*
* CAUTION:
* When DMA indices array feature is used, RingId=5, corresponding to the 0th
* FLOWRING, will actually use the FlowId as index into the H2D DMA index,
* since the FlowId truly represents the index in the H2D DMA indices array.
*
* Likewise, in the D2H direction, the RingId - BCMPCIE_H2D_COMMON_MSGRINGS,
* will represent the index in the D2H DMA indices array.
*
* +----------------------------------------------------------------------------
*/
/* First TxPost Flowring Id */
#define DHD_FLOWRING_START_FLOWID BCMPCIE_H2D_COMMON_MSGRINGS
/* Determine whether a ringid belongs to a TxPost flowring */
#define DHD_IS_FLOWRING(ringid) \
((ringid) >= BCMPCIE_COMMON_MSGRINGS)
/* Convert a H2D TxPost FlowId to a MsgBuf RingId */
#define DHD_FLOWID_TO_RINGID(flowid) \
(BCMPCIE_COMMON_MSGRINGS + ((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS))
/* Convert a MsgBuf RingId to a H2D TxPost FlowId */
#define DHD_RINGID_TO_FLOWID(ringid) \
(BCMPCIE_H2D_COMMON_MSGRINGS + ((ringid) - BCMPCIE_COMMON_MSGRINGS))
/* Convert a H2D MsgBuf RingId to an offset index into the H2D DMA indices array
* This may be used for the H2D DMA WR index array or H2D DMA RD index array or
* any array of H2D rings.
*/
#define DHD_H2D_RING_OFFSET(ringid) \
((DHD_IS_FLOWRING(ringid)) ? DHD_RINGID_TO_FLOWID(ringid) : (ringid))
/* Convert a D2H MsgBuf RingId to an offset index into the D2H DMA indices array
* This may be used for the D2H DMA WR index array or D2H DMA RD index array or
* any array of D2H rings.
*/
#define DHD_D2H_RING_OFFSET(ringid) \
((ringid) - BCMPCIE_H2D_COMMON_MSGRINGS)
/* Convert a D2H DMA Indices Offset to a RingId */
#define DHD_D2H_RINGID(offset) \
((offset) + BCMPCIE_H2D_COMMON_MSGRINGS)
#define DHD_DMAH_NULL ((void*)NULL)
/*
* Pad a DMA-able buffer by an additional cachline. If the end of the DMA-able
* buffer does not occupy the entire cacheline, and another object is placed
* following the DMA-able buffer, data corruption may occur if the DMA-able
* buffer is used to DMAing into (e.g. D2H direction), when HW cache coherency
* is not available.
*/
#if defined(L1_CACHE_BYTES)
#define DHD_DMA_PAD (L1_CACHE_BYTES)
#else
#define DHD_DMA_PAD (128)
#endif
/* Used in loopback tests.
 * State for one host<->dongle DMA loopback transfer; see
 * dmaxfer_prepare_dmaaddr() / dhd_msgbuf_dmaxfer_process().
 */
typedef struct dhd_dmaxfer {
	dhd_dma_buf_t srcmem;      /* DMA-able source buffer */
	dhd_dma_buf_t dstmem;      /* DMA-able destination buffer */
	uint32        srcdelay;    /* artificial delay on source side -- units dongle-defined, TODO confirm */
	uint32        destdelay;   /* artificial delay on destination side -- units dongle-defined, TODO confirm */
	uint32        len;         /* transfer length in bytes */
	bool          in_progress; /* TRUE while a loopback transfer is outstanding */
} dhd_dmaxfer_t;
/**
* msgbuf_ring : This object manages the host side ring that includes a DMA-able
* buffer, the WR and RD indices, ring parameters such as max number of items
 * and the length of each item, and other miscellaneous runtime state.
* A msgbuf_ring may be used to represent a H2D or D2H common ring or a
* H2D TxPost ring as specified in the PCIE FullDongle Spec.
* Ring parameters are conveyed to the dongle, which maintains its own peer end
* ring state. Depending on whether the DMA Indices feature is supported, the
* host will update the WR/RD index in the DMA indices array in host memory or
* directly in dongle memory.
*/
typedef struct msgbuf_ring {
	bool           inited;     /* TRUE once the ring has been initialized */
	uint16         idx;        /* ring id */
	uint16         rd;         /* read index */
	uint16         curr_rd;    /* read index for debug */
	uint16         wr;         /* write index */
	uint16         max_items;  /* maximum number of items in ring */
	uint16         item_len;   /* length of each item in the ring */
	sh_addr_t      base_addr;  /* LITTLE ENDIAN formatted: base address */
	dhd_dma_buf_t  dma_buf;    /* DMA-able buffer: pa, va, len, dmah, secdma */
	uint32         seqnum;     /* next expected item's sequence number */
#ifdef TXP_FLUSH_NITEMS
	void           *start_addr; /* presumably start of the pending (not yet flushed) items -- confirm in tx path */
	/* # of messages on ring not yet announced to dongle */
	uint16         pend_items_count;
#endif /* TXP_FLUSH_NITEMS */
	uchar          name[RING_NAME_MAX_LENGTH]; /* ring name, used in logging */
} msgbuf_ring_t;
#define DHD_RING_BGN_VA(ring) ((ring)->dma_buf.va)
#define DHD_RING_END_VA(ring) \
((uint8 *)(DHD_RING_BGN_VA((ring))) + \
(((ring)->max_items - 1) * (ring)->item_len))
/** DHD protocol handle. Is an opaque type to other DHD software layers. */
typedef struct dhd_prot {
	osl_t *osh;		/* OSL handle */
	uint16 rxbufpost;		/* rx buffers currently posted (see dhd_msgbuf_rxbuf_post) */
	uint16 max_rxbufpost;		/* posting ceiling for rx buffers */
	uint16 max_eventbufpost;	/* posting ceiling for event buffers */
	uint16 max_ioctlrespbufpost;	/* posting ceiling for ioctl response buffers */
	uint16 cur_event_bufs_posted;	/* event buffers currently posted */
	uint16 cur_ioctlresp_bufs_posted; /* ioctl response buffers currently posted */
	/* Flow control mechanism based on active transmits pending */
	uint16 active_tx_count; /* increments on every packet tx, and decrements on tx_status */
	uint16 max_tx_count;	/* upper bound on active_tx_count */
	uint16 txp_threshold;	/* optimization to write "n" tx items at a time to ring */
	/* MsgBuf Ring info: has a dhd_dma_buf that is dynamically allocated */
	msgbuf_ring_t h2dring_ctrl_subn; /* H2D ctrl message submission ring */
	msgbuf_ring_t h2dring_rxp_subn; /* H2D RxBuf post ring */
	msgbuf_ring_t d2hring_ctrl_cpln; /* D2H ctrl completion ring */
	msgbuf_ring_t d2hring_tx_cpln; /* D2H Tx complete message ring */
	msgbuf_ring_t d2hring_rx_cpln; /* D2H Rx complete message ring */
	msgbuf_ring_t *h2d_flowrings_pool; /* Pool of preallocated flowings */
	dhd_dma_buf_t flowrings_dma_buf; /* Contiguous DMA buffer for flowrings */
	uint16 h2d_rings_total; /* total H2D (common rings + flowrings) */
	uint32 rx_dataoffset;	/* data offset within rx buffers (cf. RX_DMA_OFFSET) -- TODO confirm source */
	dhd_mb_ring_t mb_ring_fn; /* called when dongle needs to be notified of new msg */
	/* ioctl related resources */
	uint8 ioctl_state;	/* bitmask of MSGBUF_IOCTL_ACK_PENDING / MSGBUF_IOCTL_RESP_PENDING */
	int16 ioctl_status;		/* status returned from dongle */
	uint16 ioctl_resplen;	/* length of the ioctl response from dongle */
	dhd_ioctl_recieved_status_t ioctl_received; /* why ioctl waiter was woken (see dhd_wakeup_ioctl_event) */
	uint curr_ioctl_cmd;	/* ioctl cmd currently outstanding */
	dhd_dma_buf_t retbuf;		/* For holding ioctl response */
	dhd_dma_buf_t ioctbuf;		/* For holding ioctl request */
	dhd_dma_buf_t d2h_dma_scratch_buf;	/* For holding d2h scratch */
	/* DMA-able arrays for holding WR and RD indices */
	uint32 rw_index_sz; /* Size of a RD or WR index in dongle */
	dhd_dma_buf_t h2d_dma_indx_wr_buf;	/* Array of H2D WR indices */
	dhd_dma_buf_t h2d_dma_indx_rd_buf;	/* Array of H2D RD indices */
	dhd_dma_buf_t d2h_dma_indx_wr_buf;	/* Array of D2H WR indices */
	dhd_dma_buf_t d2h_dma_indx_rd_buf;	/* Array of D2H RD indices */
	dhd_dma_buf_t host_bus_throughput_buf; /* bus throughput measure buffer */
	dhd_dma_buf_t *flowring_buf;    /* pool of flow ring buf */
	uint32	flowring_num;	/* number of entries in flowring_buf */
#if defined(PCIE_D2H_SYNC)
	d2h_sync_cb_t d2h_sync_cb; /* Sync on D2H DMA done: SEQNUM or XORCSUM */
	ulong d2h_sync_wait_max; /* max number of wait loops to receive one msg */
	ulong d2h_sync_wait_tot; /* total wait loops */
#endif  /* PCIE_D2H_SYNC */
	dhd_dmaxfer_t	dmaxfer; /* for test/DMA loopback */
	uint16		ioctl_seq_no;	/* seqnum for ioctl submissions -- TODO confirm epoch scheme */
	uint16		data_seq_no;	/* seqnum for data submissions -- TODO confirm epoch scheme */
	uint16		ioctl_trans_id;	/* transaction id matched against ioctl completions */
	void		*pktid_map_handle; /* a pktid maps to a packet and its metadata */
	bool		metadata_dbg;	/* presumably enables metadata debug dumps -- confirm */
	void		*pktid_map_handle_ioctl; /* presumably a separate pktid map for ioctl bufs -- confirm */
	/* Applications/utilities can read tx and rx metadata using IOVARs */
	uint16		rx_metadata_offset;
	uint16		tx_metadata_offset;
#if defined(DHD_D2H_SOFT_DOORBELL_SUPPORT)
	/* Host's soft doorbell configuration */
	bcmpcie_soft_doorbell_t soft_doorbell[BCMPCIE_D2H_COMMON_MSGRINGS];
#endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */
#if defined(DHD_LB)
	/* Work Queues to be used by the producer and the consumer, and threshold
	 * when the WRITE index must be synced to consumer's workq
	 */
#if defined(DHD_LB_TXC)
	uint32 tx_compl_prod_sync ____cacheline_aligned;
	bcm_workq_t tx_compl_prod, tx_compl_cons;
#endif /* DHD_LB_TXC */
#if defined(DHD_LB_RXC)
	uint32 rx_compl_prod_sync ____cacheline_aligned;
	bcm_workq_t rx_compl_prod, rx_compl_cons;
#endif /* DHD_LB_RXC */
#endif /* DHD_LB */
} dhd_prot_t;
/* Convert a dmaaddr_t to a base_addr with htol operations */
static INLINE void dhd_base_addr_htolpa(sh_addr_t *base_addr, dmaaddr_t pa);
/* APIs for managing a DMA-able buffer */
static int dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
static int dhd_dma_buf_alloc(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf, uint32 buf_len);
static void dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
static void dhd_dma_buf_free(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
/* msgbuf ring management */
static int dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring,
const char *name, uint16 max_items, uint16 len_item, uint16 ringid);
static void dhd_prot_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring);
static void dhd_prot_ring_reset(dhd_pub_t *dhd, msgbuf_ring_t *ring);
static void dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring);
/* Pool of pre-allocated msgbuf_ring_t with DMA-able buffers for Flowrings */
static int dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd);
static void dhd_prot_flowrings_pool_reset(dhd_pub_t *dhd);
static void dhd_prot_flowrings_pool_detach(dhd_pub_t *dhd);
/* Fetch and Release a flowring msgbuf_ring from flowring pool */
static msgbuf_ring_t *dhd_prot_flowrings_pool_fetch(dhd_pub_t *dhd,
uint16 flowid);
/* see also dhd_prot_flowrings_pool_release() in dhd_prot.h */
/* Producer: Allocate space in a msgbuf ring */
static void* dhd_prot_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring,
uint16 nitems, uint16 *alloced, bool exactly_nitems);
static void* dhd_prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems,
uint16 *alloced, bool exactly_nitems);
/* Consumer: Determine the location where the next message may be consumed */
static uint8* dhd_prot_get_read_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring,
uint32 *available_len);
/* Producer (WR index update) or Consumer (RD index update) indication */
static void dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t *ring,
void *p, uint16 len);
static void dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t *ring);
/* Allocate DMA-able memory for saving H2D/D2H WR/RD indices */
static INLINE int dhd_prot_dma_indx_alloc(dhd_pub_t *dhd, uint8 type,
dhd_dma_buf_t *dma_buf, uint32 bufsz);
/* Set/Get a RD or WR index in the array of indices */
/* See also: dhd_prot_dma_indx_init() */
static void dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type,
uint16 ringid);
static uint16 dhd_prot_dma_indx_get(dhd_pub_t *dhd, uint8 type, uint16 ringid);
/* Locate a packet given a pktid */
static INLINE void *dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid, uint8 pkttype,
bool free_pktid);
/* Locate a packet given a PktId and free it. */
static INLINE void dhd_prot_packet_free(dhd_pub_t *dhd, void *pkt, uint8 pkttype, bool send);
static int dhd_msgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd,
void *buf, uint len, uint8 action);
static int dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd,
void *buf, uint len, uint8 action);
static int dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t *dhd, uint32 len, void *buf);
static int dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd,
void *buf, int ifidx);
/* Post buffers for Rx, control ioctl response and events */
static uint16 dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, bool event_buf, uint32 max_to_post);
static void dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *pub);
static void dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *pub);
static void dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd, bool use_rsv_pktid);
static int dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid);
static void dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint32 pktid, uint32 rxcnt);
/* D2H Message handling */
static int dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 len);
/* D2H Message handlers */
static void dhd_prot_noop(dhd_pub_t *dhd, void *msg);
static void dhd_prot_txstatus_process(dhd_pub_t *dhd, void *msg);
static void dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg);
static void dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg);
static void dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg);
static void dhd_prot_genstatus_process(dhd_pub_t *dhd, void *msg);
static void dhd_prot_rxcmplt_process(dhd_pub_t *dhd, void *msg);
static void dhd_prot_event_process(dhd_pub_t *dhd, void *msg);
/* Loopback test with dongle */
static void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dma);
static int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len, uint srcdelay,
uint destdelay, dhd_dmaxfer_t *dma);
static void dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg);
/* Flowring management communication with dongle */
static void dhd_prot_flow_ring_create_response_process(dhd_pub_t *dhd, void *msg);
static void dhd_prot_flow_ring_delete_response_process(dhd_pub_t *dhd, void *msg);
static void dhd_prot_flow_ring_flush_response_process(dhd_pub_t *dhd, void *msg);
/* Configure a soft doorbell per D2H ring */
static void dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd_pub_t *dhd);
static void dhd_prot_d2h_ring_config_cmplt_process(dhd_pub_t *dhd, void *msg);
static int dhd_prot_debug_info_print(dhd_pub_t *dhd);
typedef void (*dhd_msgbuf_func_t)(dhd_pub_t *dhd, void *msg);
/** callback functions for messages generated by the dongle.
 * Indexed by cmn_msg_hdr_t::msg_type. A NULL slot means the host never
 * consumes that message type (the commented slots below presumably match the
 * MSG_TYPE_* enumeration in bcmmsgbuf.h -- verify against that header before
 * relying on individual slot positions).
 */
#define MSG_TYPE_INVALID 0

static dhd_msgbuf_func_t table_lookup[DHD_PROT_FUNCS] = {
	dhd_prot_noop, /* 0 is MSG_TYPE_INVALID */
	dhd_prot_genstatus_process, /* MSG_TYPE_GEN_STATUS */
	dhd_prot_ringstatus_process, /* MSG_TYPE_RING_STATUS */
	NULL, /* H2D-direction request; host never receives it */
	dhd_prot_flow_ring_create_response_process, /* MSG_TYPE_FLOW_RING_CREATE_CMPLT */
	NULL, /* H2D-direction request; host never receives it */
	dhd_prot_flow_ring_delete_response_process, /* MSG_TYPE_FLOW_RING_DELETE_CMPLT */
	NULL, /* H2D-direction request; host never receives it */
	dhd_prot_flow_ring_flush_response_process, /* MSG_TYPE_FLOW_RING_FLUSH_CMPLT */
	NULL, /* H2D-direction request; host never receives it */
	dhd_prot_ioctack_process, /* MSG_TYPE_IOCTLPTR_REQ_ACK */
	NULL, /* H2D-direction request; host never receives it */
	dhd_prot_ioctcmplt_process, /* MSG_TYPE_IOCTL_CMPLT */
	NULL, /* H2D-direction request; host never receives it */
	dhd_prot_event_process, /* MSG_TYPE_WL_EVENT */
	NULL, /* H2D-direction request; host never receives it */
	dhd_prot_txstatus_process, /* MSG_TYPE_TX_STATUS */
	NULL, /* H2D-direction request; host never receives it */
	dhd_prot_rxcmplt_process, /* MSG_TYPE_RX_CMPLT */
	NULL, /* H2D-direction request; host never receives it */
	dhd_msgbuf_dmaxfer_process, /* MSG_TYPE_LPBK_DMAXFER_CMPLT */
	NULL, /* MSG_TYPE_FLOW_RING_RESUME */
	NULL, /* MSG_TYPE_FLOW_RING_RESUME_CMPLT */
	NULL, /* MSG_TYPE_FLOW_RING_SUSPEND */
	NULL, /* MSG_TYPE_FLOW_RING_SUSPEND_CMPLT */
	NULL, /* MSG_TYPE_INFO_BUF_POST */
	NULL, /* MSG_TYPE_INFO_BUF_CMPLT */
	NULL, /* MSG_TYPE_H2D_RING_CREATE */
	NULL, /* MSG_TYPE_D2H_RING_CREATE */
	NULL, /* MSG_TYPE_H2D_RING_CREATE_CMPLT */
	NULL, /* MSG_TYPE_D2H_RING_CREATE_CMPLT */
	NULL, /* MSG_TYPE_H2D_RING_CONFIG */
	NULL, /* MSG_TYPE_D2H_RING_CONFIG */
	NULL, /* MSG_TYPE_H2D_RING_CONFIG_CMPLT */
	dhd_prot_d2h_ring_config_cmplt_process, /* MSG_TYPE_D2H_RING_CONFIG_CMPLT */
	NULL, /* MSG_TYPE_H2D_MAILBOX_DATA */
	NULL, /* MSG_TYPE_D2H_MAILBOX_DATA */
};
#ifdef DHD_RX_CHAINING
#define PKT_CTF_CHAINABLE(dhd, ifidx, evh, prio, h_sa, h_da, h_prio) \
(!ETHER_ISNULLDEST(((struct ether_header *)(evh))->ether_dhost) && \
!ETHER_ISMULTI(((struct ether_header *)(evh))->ether_dhost) && \
!eacmp((h_da), ((struct ether_header *)(evh))->ether_dhost) && \
!eacmp((h_sa), ((struct ether_header *)(evh))->ether_shost) && \
((h_prio) == (prio)) && (dhd_ctf_hotbrc_check((dhd), (evh), (ifidx))) && \
((((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IP)) || \
(((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IPV6))) && \
dhd_l2_filter_chainable((dhd), (evh), (ifidx)))
static INLINE void BCMFASTPATH dhd_rxchain_reset(rxchain_info_t *rxchain);
static void BCMFASTPATH dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt, uint ifidx);
static void BCMFASTPATH dhd_rxchain_commit(dhd_pub_t *dhd);
#define DHD_PKT_CTF_MAX_CHAIN_LEN 64
#endif /* DHD_RX_CHAINING */
static void dhd_prot_h2d_sync_init(dhd_pub_t *dhd);
#if defined(PCIE_D2H_SYNC) /* avoids problems related to host CPU cache */
/**
* D2H DMA to completion callback handlers. Based on the mode advertised by the
* dongle through the PCIE shared region, the appropriate callback will be
 * registered in the proto layer to be invoked prior to processing any message
* from a D2H DMA ring. If the dongle uses a read barrier or another mode that
* does not require host participation, then a noop callback handler will be
* bound that simply returns the msg_type.
*/
static void dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, msgbuf_ring_t *ring,
uint32 tries, uchar *msg, int msglen);
static uint8 dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
volatile cmn_msg_hdr_t *msg, int msglen);
static uint8 dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
volatile cmn_msg_hdr_t *msg, int msglen);
static uint8 dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
volatile cmn_msg_hdr_t *msg, int msglen);
static void dhd_prot_d2h_sync_init(dhd_pub_t *dhd);
/**
 * dhd_prot_d2h_sync_livelock - when the host determines that a DMA transfer has
 * not completed, a livelock condition occurs. Host will avert this livelock by
 * dropping this message and moving to the next. This dropped message can lead
 * to a packet leak, or even something disastrous in the case the dropped
 * message happens to be a control response.
 * Here we will log this condition. One may choose to reboot the dongle.
 *
 * @dhd    public DHD structure
 * @ring   D2H completion ring on which the sync failed
 * @tries  number of sync attempts made before giving up
 * @msg    start of the work item that failed to sync
 * @msglen length of the work item, in bytes
 */
static void
dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint32 tries,
                           uchar *msg, int msglen)
{
	uint32 seqnum = ring->seqnum;

	/* Fix: a separating space was missing between the two adjacent string
	 * literals, which fused "tot<%lu>" and "dma_buf" in the log output.
	 */
	DHD_ERROR(("LIVELOCK DHD<%p> name<%s> seqnum<%u:%u> tries<%u> max<%lu> tot<%lu> "
		"dma_buf va<%p> msg<%p> curr_rd<%d>\n",
		dhd, ring->name, seqnum, seqnum % D2H_EPOCH_MODULO, tries,
		dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot,
		ring->dma_buf.va, msg, ring->curr_rd));

	/* Dump the offending work item for post-mortem analysis */
	prhex("D2H MsgBuf Failure", (uchar *)msg, msglen);
#ifdef SUPPORT_LINKDOWN_RECOVERY
#ifdef CONFIG_ARCH_MSM
	dhd->bus->no_cfg_restore = TRUE;
#endif /* CONFIG_ARCH_MSM */
	/* Escalate: treat a livelock as a hang and ask the OS layer to recover */
	dhd->hang_reason = HANG_REASON_MSGBUF_LIVELOCK;
	dhd_os_send_hang_message(dhd);
#endif /* SUPPORT_LINKDOWN_RECOVERY */
}
/**
 * dhd_prot_d2h_sync_seqnum - Sync on a D2H DMA completion using the SEQNUM
 * mode. Sequence number is always in the last word of a message.
 *
 * Spins up to PCIE_D2H_SYNC_WAIT_TRIES iterations, re-invalidating the cache
 * line each pass, until the last word of the work item carries the expected
 * epoch sequence number. On success returns the item's msg_type; on timeout
 * logs a livelock, skips the item and returns MSG_TYPE_INVALID.
 */
static uint8 BCMFASTPATH
dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
	volatile cmn_msg_hdr_t *msg, int msglen)
{
	uint32 tries;
	uint32 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO; /* expected epoch */
	int num_words = msglen / sizeof(uint32); /* num of 32bit words */
	volatile uint32 *marker = (uint32 *)msg + (num_words - 1); /* last word */
	dhd_prot_t *prot = dhd->prot;
	ASSERT(msglen == ring->item_len);
	for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
		uint32 msg_seqnum = *marker;
		if (ltoh32(msg_seqnum) == ring_seqnum) { /* dma upto last word done */
			ring->seqnum++; /* next expected sequence number */
			goto dma_completed;
		}
		/* Track the worst-case number of spins for diagnostics */
		if (tries > prot->d2h_sync_wait_max)
			prot->d2h_sync_wait_max = tries;
		OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
		OSL_CPU_RELAX(); /* CPU relax for msg_seqnum value to update */
#if defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_SOC_EXYNOS8890)
		OSL_DELAY(50); /* For ARM there is no pause in cpu_relax, so add extra delay */
#endif /* defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_SOC_EXYNOS8890) */
	} /* for PCIE_D2H_SYNC_WAIT_TRIES */
	/* DMA never completed: drop the item (pktid may leak) and move on */
	dhd_prot_d2h_sync_livelock(dhd, ring, tries, (uchar *)msg, msglen);
	ring->seqnum++; /* skip this message ... leak of a pktid */
	return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */
dma_completed:
	prot->d2h_sync_wait_tot += tries;
	return msg->msg_type;
}
/**
 * dhd_prot_d2h_sync_xorcsum - Sync on a D2H DMA completion using the XORCSUM
 * mode. The xorcsum is placed in the last word of a message. Dongle will also
 * place a seqnum in the epoch field of the cmn_msg_hdr.
 *
 * The dongle chooses the last word so that the 32bit XOR over the whole item
 * is zero; a zero checksum plus a matching epoch proves the DMA landed fully.
 * On success returns the item's msg_type; on timeout logs a livelock, skips
 * the item and returns MSG_TYPE_INVALID.
 */
static uint8 BCMFASTPATH
dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
	volatile cmn_msg_hdr_t *msg, int msglen)
{
	uint32 tries;
	uint32 prot_checksum = 0; /* computed checksum */
	int num_words = msglen / sizeof(uint32); /* num of 32bit words */
	uint8 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO; /* expected epoch */
	dhd_prot_t *prot = dhd->prot;
	ASSERT(msglen == ring->item_len);
	for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
		prot_checksum = bcm_compute_xor32((volatile uint32 *)msg, num_words);
		if (prot_checksum == 0U) { /* checksum is OK */
			if (msg->epoch == ring_seqnum) {
				ring->seqnum++; /* next expected sequence number */
				goto dma_completed;
			}
		}
		/* Track the worst-case number of spins for diagnostics */
		if (tries > prot->d2h_sync_wait_max)
			prot->d2h_sync_wait_max = tries;
		OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
		OSL_CPU_RELAX(); /* CPU relax for msg_seqnum value to update */
#if defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_SOC_EXYNOS8890)
		OSL_DELAY(50); /* For ARM there is no pause in cpu_relax, so add extra delay */
#endif /* defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_SOC_EXYNOS8890) */
	} /* for PCIE_D2H_SYNC_WAIT_TRIES */
	/* DMA never completed: drop the item (pktid may leak) and move on */
	dhd_prot_d2h_sync_livelock(dhd, ring, tries, (uchar *)msg, msglen);
	ring->seqnum++; /* skip this message ... leak of a pktid */
	return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */
dma_completed:
	prot->d2h_sync_wait_tot += tries;
	return msg->msg_type;
}
/**
 * dhd_prot_d2h_sync_none - No-op D2H sync handler. Bound when the dongle
 * advertises neither the SEQNUM nor the XORCSUM sync mode, i.e. the dongle
 * itself ensures the DMA has completed before interrupting the host, so the
 * host does not need to wait and simply reports the message type.
 */
static uint8 BCMFASTPATH
dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
	volatile cmn_msg_hdr_t *msg, int msglen)
{
	uint8 msg_type = msg->msg_type;

	return msg_type;
}
/**
 * dhd_prot_d2h_sync_init - Select the host side D2H DMA sync callback based
 * on the sync mode the dongle advertizes, reset the per-ring epoch sequence
 * numbers and zero the sync-wait statistics.
 */
static void
dhd_prot_d2h_sync_init(dhd_pub_t *dhd)
{
	dhd_prot_t *prot = dhd->prot;

	/* Reset sync-wait statistics */
	prot->d2h_sync_wait_max = 0UL;
	prot->d2h_sync_wait_tot = 0UL;

	/* Seed the expected epoch of every D2H completion ring */
	prot->d2hring_rx_cpln.seqnum = D2H_EPOCH_INIT_VAL;
	prot->d2hring_tx_cpln.seqnum = D2H_EPOCH_INIT_VAL;
	prot->d2hring_ctrl_cpln.seqnum = D2H_EPOCH_INIT_VAL;

	/* Bind the callback matching the advertized sync mode */
	if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM)
		prot->d2h_sync_cb = dhd_prot_d2h_sync_seqnum;
	else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM)
		prot->d2h_sync_cb = dhd_prot_d2h_sync_xorcsum;
	else
		prot->d2h_sync_cb = dhd_prot_d2h_sync_none;
}
#endif /* PCIE_D2H_SYNC */
/**
 * dhd_wakeup_ioctl_event - Publish the reason an ioctl waiter is being woken
 * and wake the thread blocked on the ioctl response.
 * The two barriers order: (prior memory ops) -> ioctl_received -> wakeup, so
 * the waiter observes the reason (and everything before it) once awake.
 * Always returns 0.
 */
int INLINE
dhd_wakeup_ioctl_event(dhd_pub_t *dhd, dhd_ioctl_recieved_status_t reason)
{
	/* To synchronize with the previous memory operations call wmb() */
	OSL_SMP_WMB();
	dhd->prot->ioctl_received = reason;
	/* Call another wmb() to make sure before waking up the other event value gets updated */
	OSL_SMP_WMB();
	dhd_os_ioctl_resp_wake(dhd);
	return 0;
}
/**
 * dhd_prot_h2d_sync_init - Seed the msgbuf sequence number of each H2D
 * common (submission) ring with the initial epoch value.
 */
static void
dhd_prot_h2d_sync_init(dhd_pub_t *dhd)
{
	dhd_prot_t *prot = dhd->prot;

	prot->h2dring_ctrl_subn.seqnum = H2D_EPOCH_INIT_VAL;
	prot->h2dring_rxp_subn.seqnum = H2D_EPOCH_INIT_VAL;
}
/* +----------------- End of PCIE DHD H2D DMA SYNC ------------------------+ */
/*
* +---------------------------------------------------------------------------+
* PCIE DMA-able buffer. Sets up a dhd_dma_buf_t object, which includes the
 * virtual and physical address, the buffer length and the DMA handler.
* A secdma handler is also included in the dhd_dma_buf object.
* +---------------------------------------------------------------------------+
*/
/* Store a physical address as two little-endian formatted 32bit halves,
 * as expected by the dongle (sh_addr_t layout).
 */
static INLINE void
dhd_base_addr_htolpa(sh_addr_t *base_addr, dmaaddr_t pa)
{
	base_addr->high_addr = htol32(PHYSADDRHI(pa));
	base_addr->low_addr = htol32(PHYSADDRLO(pa));
}
/**
 * dhd_dma_buf_audit - Sanity checks on a DHD DMA Buffer: non-zero aligned
 * base, non-zero length, and no 32bit wrap of base+len (the dongle performs
 * only 32bit pointer arithmetic on this buffer).
 * Returns BCME_OK, or BCME_ERROR when the buffer spans a 32bit wrap.
 */
static int
dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
{
	uint32 base, end; /* dongle uses 32bit ptr arithmetic */

	ASSERT(dma_buf);
	base = PHYSADDRLO(dma_buf->pa);
	ASSERT(base);
	ASSERT(ISALIGNED(base, DMA_ALIGN_LEN));
	ASSERT(dma_buf->len != 0);

	/* Since base and end are uint32, a carried-over sum is simply smaller
	 * than the base it started from.
	 */
	end = base + dma_buf->len; /* end address */
	if (end < base) {
		DHD_ERROR(("%s: dma_buf %x len %d spans dongle 32bit ptr arithmetic\n",
			__FUNCTION__, base, dma_buf->len));
		return BCME_ERROR;
	}

	return BCME_OK;
}
/**
 * dhd_dma_buf_alloc - Allocate a cache coherent DMA-able buffer.
 * returns BCME_OK=0 on success
 * returns non-zero negative error value on failure.
 */
static int
dhd_dma_buf_alloc(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf, uint32 buf_len)
{
	osl_t *osh = dhd->osh;
	uint32 pad_len;

	ASSERT(dma_buf != NULL);
	ASSERT(dma_buf->va == NULL);
	ASSERT(dma_buf->len == 0);

	/* Pad the buffer length by one extra cacheline size.
	 * Required for D2H direction.
	 */
	pad_len = (buf_len % DHD_DMA_PAD) ? DHD_DMA_PAD : 0;

	dma_buf->va = DMA_ALLOC_CONSISTENT(osh, buf_len + pad_len,
		DMA_ALIGN_LEN, &dma_buf->_alloced, &dma_buf->pa, &dma_buf->dmah);
	if (dma_buf->va == NULL) {
		DHD_ERROR(("%s: buf_len %d, no memory available\n",
			__FUNCTION__, buf_len));
		return BCME_NOMEM;
	}

	dma_buf->len = buf_len; /* not including padded len */

	/* On audit failure, release the buffer just allocated */
	if (dhd_dma_buf_audit(dhd, dma_buf) != BCME_OK) {
		dhd_dma_buf_free(dhd, dma_buf);
		return BCME_ERROR;
	}

	dhd_dma_buf_reset(dhd, dma_buf); /* zero out and cache flush */

	return BCME_OK;
}
/**
 * dhd_dma_buf_reset - Zero out a cache coherent DMA-able buffer and flush
 * it from the CPU cache. No-op on a NULL or unallocated buffer.
 */
static void
dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
{
	if (dma_buf == NULL)
		return;
	if (dma_buf->va == NULL)
		return;

	(void)dhd_dma_buf_audit(dhd, dma_buf);

	/* Zero out the entire buffer and cache flush */
	memset((void*)dma_buf->va, 0, dma_buf->len);
	OSL_CACHE_FLUSH((void *)dma_buf->va, dma_buf->len);
}
/**
 * dhd_dma_buf_free - Free a DMA-able buffer that was previously allocated
 * using dhd_dma_buf_alloc(), then scrub the descriptor so stale va/pa values
 * cannot be reused. Safe to call when the earlier allocation failed.
 */
static void
dhd_dma_buf_free(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
{
	osl_t *osh = dhd->osh;

	ASSERT(dma_buf);

	if (dma_buf->va == NULL)
		return; /* Allow for free invocation, when alloc failed */

	/* DEBUG: dhd_dma_buf_reset(dhd, dma_buf) */
	(void)dhd_dma_buf_audit(dhd, dma_buf);

	/* dma buffer may have been padded at allocation */
	DMA_FREE_CONSISTENT(osh, dma_buf->va, dma_buf->_alloced,
		dma_buf->pa, dma_buf->dmah);

	memset(dma_buf, 0, sizeof(dhd_dma_buf_t));
}
/**
 * dhd_dma_buf_init - Initialize a dhd_dma_buf with specified values.
 * Do not use dhd_dma_buf_init to zero out a dhd_dma_buf_t object. Use memset 0.
 */
void
dhd_dma_buf_init(dhd_pub_t *dhd, void *dhd_dma_buf,
	void *va, uint32 len, dmaaddr_t pa, void *dmah, void *secdma)
{
	dhd_dma_buf_t *buf;

	ASSERT(dhd_dma_buf);
	buf = (dhd_dma_buf_t *)dhd_dma_buf;

	buf->va = va;
	buf->len = len;
	buf->pa = pa;
	buf->dmah = dmah;
	buf->secdma = secdma;

	/* Audit user defined configuration */
	(void)dhd_dma_buf_audit(dhd, buf);
}
/* +------------------ End of PCIE DHD DMA BUF ADT ------------------------+ */
/*
* +---------------------------------------------------------------------------+
* PktId Map: Provides a native packet pointer to unique 32bit PktId mapping.
* Main purpose is to save memory on the dongle, has other purposes as well.
* The packet id map, also includes storage for some packet parameters that
* may be saved. A native packet pointer along with the parameters may be saved
* and a unique 32bit pkt id will be returned. Later, the saved packet pointer
* and the metadata may be retrieved using the previously allocated packet id.
* +---------------------------------------------------------------------------+
*/
#define DHD_PCIE_PKTID
#define MAX_PKTID_ITEMS (3072) /* Maximum number of pktids supported */
/* On Router, the pktptr serves as a pktid. */
#if defined(PROP_TXSTATUS) && !defined(DHD_PCIE_PKTID)
#error "PKTIDMAP must be supported with PROP_TXSTATUS/WLFC"
#endif
/* Enum for marking the buffer color based on usage.
 * The pktid map records one of these per saved packet so that alloc/free
 * paths can cross-check how a buffer is being used.
 */
typedef enum dhd_pkttype {
	PKTTYPE_DATA_TX = 0,	/* tx data packet */
	PKTTYPE_DATA_RX,	/* rx data buffer */
	PKTTYPE_IOCTL_RX,	/* ioctl response buffer */
	PKTTYPE_EVENT_RX,	/* event buffer */
	/* dhd_prot_pkt_free no check, if pktid reserved and no space avail case */
	PKTTYPE_NO_CHECK
} dhd_pkttype_t;
#define DHD_PKTID_INVALID (0U)
#define DHD_IOCTL_REQ_PKTID (0xFFFE)
#define DHD_FAKE_PKTID (0xFACE)
#define DHD_PKTID_FREE_LOCKER (FALSE)
#define DHD_PKTID_RSV_LOCKER (TRUE)
typedef void * dhd_pktid_map_handle_t; /* opaque handle to a pktid map */
/* Construct a packet id mapping table, returning an opaque map handle */
static dhd_pktid_map_handle_t *dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items, uint32 index);
/* Destroy a packet id mapping table, freeing all packets active in the table */
static void dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map);
#define PKTID_MAP_HANDLE (0)
#define PKTID_MAP_HANDLE_IOCTL (1)
#define DHD_NATIVE_TO_PKTID_INIT(dhd, items, index) dhd_pktid_map_init((dhd), (items), (index))
#define DHD_NATIVE_TO_PKTID_FINI(dhd, map) dhd_pktid_map_fini((dhd), (map))
#if defined(DHD_PCIE_PKTID)
/* Determine number of pktids that are available */
static INLINE uint32 dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle);
/* Allocate a unique pktid against which a pkt and some metadata is saved */
static INLINE uint32 dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
	void *pkt);
static INLINE void dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
	void *pkt, uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dma,
	void *dmah, void *secdma, dhd_pkttype_t pkttype);
static uint32 dhd_pktid_map_alloc(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map,
	void *pkt, dmaaddr_t pa, uint32 len, uint8 dma,
	void *dmah, void *secdma, dhd_pkttype_t pkttype);
/* Return an allocated pktid, retrieving previously saved pkt and metadata */
static void *dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map,
	uint32 id, dmaaddr_t *pa, uint32 *len, void **dmah,
	void **secdma, dhd_pkttype_t pkttype, bool rsv_locker);
/*
 * DHD_PKTID_AUDIT_ENABLED: Audit of PktIds in DHD for duplicate alloc and frees
 *
 * DHD_PKTID_AUDIT_MAP: Audit the LIFO or FIFO PktIdMap allocator
 * DHD_PKTID_AUDIT_RING: Audit the pktid during producer/consumer ring operation
 *
 * CAUTION: When DHD_PKTID_AUDIT_ENABLED is defined,
 * either DHD_PKTID_AUDIT_MAP or DHD_PKTID_AUDIT_RING may be selected.
 */
#ifndef DHD_PKTID_AUDIT_ENABLED
#define DHD_PKTID_AUDIT_ENABLED 1
#endif /* DHD_PKTID_AUDIT_ENABLED */
#if defined(DHD_PKTID_AUDIT_ENABLED)
#define USE_DHD_PKTID_AUDIT_LOCK 1
/* Audit the pktidmap allocator */
/* #define DHD_PKTID_AUDIT_MAP */
/* Audit the pktid during production/consumption of workitems */
#define DHD_PKTID_AUDIT_RING
#if defined(DHD_PKTID_AUDIT_MAP) && defined(DHD_PKTID_AUDIT_RING)
#error "May only enabled audit of MAP or RING, at a time."
#endif /* DHD_PKTID_AUDIT_MAP && DHD_PKTID_AUDIT_RING */
/* Audit test codes passed as 'test_for' to dhd_pktid_audit() */
#define DHD_DUPLICATE_ALLOC 1
#define DHD_DUPLICATE_FREE 2
#define DHD_TEST_IS_ALLOC 3
#define DHD_TEST_IS_FREE 4
/* Audit-lock macros: real OS spinlock when USE_DHD_PKTID_AUDIT_LOCK is set,
 * otherwise no-ops (the (void *)(1) keeps NULL-checks on the lock happy).
 */
#ifdef USE_DHD_PKTID_AUDIT_LOCK
#define DHD_PKTID_AUDIT_LOCK_INIT(osh) dhd_os_spin_lock_init(osh)
#define DHD_PKTID_AUDIT_LOCK_DEINIT(osh, lock) dhd_os_spin_lock_deinit(osh, lock)
#define DHD_PKTID_AUDIT_LOCK(lock) dhd_os_spin_lock(lock)
#define DHD_PKTID_AUDIT_UNLOCK(lock, flags) dhd_os_spin_unlock(lock, flags)
#else
#define DHD_PKTID_AUDIT_LOCK_INIT(osh) (void *)(1)
#define DHD_PKTID_AUDIT_LOCK_DEINIT(osh, lock) do { /* noop */ } while (0)
#define DHD_PKTID_AUDIT_LOCK(lock) 0
#define DHD_PKTID_AUDIT_UNLOCK(lock, flags) do { /* noop */ } while (0)
#endif /* !USE_DHD_PKTID_AUDIT_LOCK */
#endif /* DHD_PKTID_AUDIT_ENABLED */
/* #define USE_DHD_PKTID_LOCK 1 */
#ifdef USE_DHD_PKTID_LOCK
#define DHD_PKTID_LOCK_INIT(osh) dhd_os_spin_lock_init(osh)
#define DHD_PKTID_LOCK_DEINIT(osh, lock) dhd_os_spin_lock_deinit(osh, lock)
#define DHD_PKTID_LOCK(lock) dhd_os_spin_lock(lock)
#define DHD_PKTID_UNLOCK(lock, flags) dhd_os_spin_unlock(lock, flags)
#else
#define DHD_PKTID_LOCK_INIT(osh) (void *)(1)
#define DHD_PKTID_LOCK_DEINIT(osh, lock) \
	do { \
		BCM_REFERENCE(osh); \
		BCM_REFERENCE(lock); \
	} while (0)
#define DHD_PKTID_LOCK(lock) 0
#define DHD_PKTID_UNLOCK(lock, flags) \
	do { \
		BCM_REFERENCE(lock); \
		BCM_REFERENCE(flags); \
	} while (0)
#endif /* !USE_DHD_PKTID_LOCK */
/* Packet metadata saved in packet id mapper */
/* The Locker can be 3 states
 * LOCKER_IS_FREE - Locker is free and can be allocated
 * LOCKER_IS_BUSY - Locker is assigned and is being used, values in the
 * locker (buffer address, len, phy addr etc) are populated
 * with valid values
 * LOCKER_IS_RSVD - The locker is reserved for future use, but the values
 * in the locker are not valid. Especially pkt should be
 * NULL in this state. When the user wants to re-use the
 * locker dhd_pktid_map_free can be called with a flag
 * to reserve the pktid for future use, which will clear
 * the contents of the locker. When the user calls
 * dhd_pktid_map_save the locker would move to LOCKER_IS_BUSY
 */
typedef enum dhd_locker_state {
	LOCKER_IS_FREE,
	LOCKER_IS_BUSY,
	LOCKER_IS_RSVD
} dhd_locker_state_t;
typedef struct dhd_pktid_item {
	dhd_locker_state_t state; /* tag a locker to be free, busy or reserved */
	uint8 dir; /* dma map direction (Tx=flush or Rx=invalidate) */
	dhd_pkttype_t pkttype; /* pktlists are maintained based on pkttype */
	uint16 len; /* length of mapped packet's buffer */
	void *pkt; /* opaque native pointer to a packet */
	dmaaddr_t pa; /* physical address of mapped packet's buffer */
	void *dmah; /* handle to OS specific DMA map */
	void *secdma; /* secdma context saved with the mapping */
} dhd_pktid_item_t;
typedef struct dhd_pktid_map {
	uint32 items; /* total items in map */
	uint32 avail; /* total available items */
	int failures; /* lockers unavailable count */
	/* Spinlock to protect dhd_pktid_map in process/tasklet context */
	void *pktid_lock; /* Used when USE_DHD_PKTID_LOCK is defined */
#if defined(DHD_PKTID_AUDIT_ENABLED)
	void *pktid_audit_lock;
	struct bcm_mwbmap *pktid_audit; /* multi word bitmap based audit */
#endif /* DHD_PKTID_AUDIT_ENABLED */
	uint32 keys[MAX_PKTID_ITEMS + 1]; /* stack of unique pkt ids */
	dhd_pktid_item_t lockers[0]; /* metadata storage */
} dhd_pktid_map_t;
/*
 * PktId (Locker) #0 is never allocated and is considered invalid.
 *
 * On request for a pktid, a value DHD_PKTID_INVALID must be treated as a
 * depleted pktid pool and must not be used by the caller.
 *
 * Likewise, a caller must never free a pktid of value DHD_PKTID_INVALID.
 */
#define DHD_PKTID_ITEM_SZ (sizeof(dhd_pktid_item_t))
#define DHD_PKIDMAP_ITEMS(items) (items)
/* Map size: header plus items+1 lockers (extra one for reserved locker #0) */
#define DHD_PKTID_MAP_SZ(items) (sizeof(dhd_pktid_map_t) + \
	(DHD_PKTID_ITEM_SZ * ((items) + 1)))
#define DHD_NATIVE_TO_PKTID_FINI_IOCTL(dhd, map) dhd_pktid_map_fini_ioctl((dhd), (map))
/* Convert a packet to a pktid, and save pkt pointer in busy locker */
#define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt) dhd_pktid_map_reserve((dhd), (map), (pkt))
/* Reuse a previously reserved locker to save packet params */
#define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dir, dmah, secdma, pkttype) \
	dhd_pktid_map_save((dhd), (map), (void *)(pkt), (nkey), (pa), (uint32)(len), \
		(uint8)(dir), (void *)(dmah), (void *)(secdma), \
		(dhd_pkttype_t)(pkttype))
/* Convert a packet to a pktid, and save packet params in locker */
#define DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, dir, dmah, secdma, pkttype) \
	dhd_pktid_map_alloc((dhd), (map), (void *)(pkt), (pa), (uint32)(len), \
		(uint8)(dir), (void *)(dmah), (void *)(secdma), \
		(dhd_pkttype_t)(pkttype))
/* Convert pktid to a packet, and free the locker */
#define DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
	dhd_pktid_map_free((dhd), (map), (uint32)(pktid), \
		(dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
		(void **) &secdma, (dhd_pkttype_t)(pkttype), DHD_PKTID_FREE_LOCKER)
/* Convert the pktid to a packet, empty locker, but keep it reserved */
#define DHD_PKTID_TO_NATIVE_RSV(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
	dhd_pktid_map_free((dhd), (map), (uint32)(pktid), \
		(dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
		(void **) &secdma, (dhd_pkttype_t)(pkttype), DHD_PKTID_RSV_LOCKER)
#define DHD_PKTID_AVAIL(map) dhd_pktid_map_avail_cnt(map)
#if defined(DHD_PKTID_AUDIT_ENABLED)
static int dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid,
	const int test_for, const char *errmsg);
/* Call back into OS layer to take the dongle dump and panic */
#ifdef DHD_DEBUG_PAGEALLOC
extern void dhd_pktid_audit_fail_cb(dhd_pub_t *dhdp);
#endif /* DHD_DEBUG_PAGEALLOC */
/**
 * dhd_pktid_audit - Use the mwbmap to audit validity of a pktid.
 *
 * Checks @pktid against the multiword-bitmap audit state according to
 * @test_for (duplicate alloc, duplicate free, is-alloc, is-free), and
 * updates the bitmap for the alloc/free cases.
 *
 * Returns BCME_OK when the pktid passes the audit, or when auditing is
 * not possible (map/bitmap not yet set up) or the pktid is one of the
 * reserved special values (DHD_IOCTL_REQ_PKTID, DHD_FAKE_PKTID).
 * On failure: logs, fires the configured trap (dongle dump callback or
 * ASSERT) and returns BCME_ERROR. The audit lock is held across the
 * bitmap access and released on every exit path.
 */
static int
dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid,
	const int test_for, const char *errmsg)
{
#define DHD_PKT_AUDIT_STR "ERROR: %16s Host PktId Audit: "
	const uint32 max_pktid_items = (MAX_PKTID_ITEMS);
	struct bcm_mwbmap *handle;
	uint32 flags;
	bool ignore_audit;
	if (pktid_map == (dhd_pktid_map_t *)NULL) {
		DHD_ERROR((DHD_PKT_AUDIT_STR "Pkt id map NULL\n", errmsg));
		return BCME_OK;
	}
	flags = DHD_PKTID_AUDIT_LOCK(pktid_map->pktid_audit_lock);
	handle = pktid_map->pktid_audit;
	if (handle == (struct bcm_mwbmap *)NULL) {
		DHD_ERROR((DHD_PKT_AUDIT_STR "Handle NULL\n", errmsg));
		DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
		return BCME_OK;
	}
	/* Exclude special pktids from audit (logical OR; operands are booleans) */
	ignore_audit = (pktid == DHD_IOCTL_REQ_PKTID) || (pktid == DHD_FAKE_PKTID);
	if (ignore_audit) {
		DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
		return BCME_OK;
	}
	if ((pktid == DHD_PKTID_INVALID) || (pktid > max_pktid_items)) {
		DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> invalid\n", errmsg, pktid));
		/* lock is released in "error" */
		goto error;
	}
	/* Perform audit */
	switch (test_for) {
		case DHD_DUPLICATE_ALLOC:
			if (!bcm_mwbmap_isfree(handle, pktid)) {
				DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> alloc duplicate\n",
				           errmsg, pktid));
				goto error;
			}
			bcm_mwbmap_force(handle, pktid);
			break;
		case DHD_DUPLICATE_FREE:
			if (bcm_mwbmap_isfree(handle, pktid)) {
				DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> free duplicate\n",
				           errmsg, pktid));
				goto error;
			}
			bcm_mwbmap_free(handle, pktid);
			break;
		case DHD_TEST_IS_ALLOC:
			if (bcm_mwbmap_isfree(handle, pktid)) {
				DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not allocated\n",
				           errmsg, pktid));
				goto error;
			}
			break;
		case DHD_TEST_IS_FREE:
			if (!bcm_mwbmap_isfree(handle, pktid)) {
				DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not free\n",
				           errmsg, pktid));
				goto error;
			}
			break;
		default:
			goto error;
	}
	DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
	return BCME_OK;
error:
	DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
	/* May insert any trap mechanism here ! */
#ifdef DHD_DEBUG_PAGEALLOC
	dhd_pktid_audit_fail_cb(dhd);
#else
	ASSERT(0);
#endif /* DHD_DEBUG_PAGEALLOC */
	return BCME_ERROR;
}
/* Convenience wrapper: run the audit tagged with the caller's function name */
#define DHD_PKTID_AUDIT(dhdp, map, pktid, test_for) \
	dhd_pktid_audit((dhdp), (dhd_pktid_map_t *)(map), (pktid), (test_for), __FUNCTION__)
#endif /* DHD_PKTID_AUDIT_ENABLED */
/* +------------------ End of PCIE DHD PKTID AUDIT ------------------------+ */
/**
 * +---------------------------------------------------------------------------+
 * Packet to Packet Id mapper using a <numbered_key, locker> paradigm.
 *
 * dhd_pktid_map manages a set of unique Packet Ids range[1..MAX_PKTID_ITEMS].
 *
 * dhd_pktid_map_alloc() may be used to save some packet metadata, and a unique
 * packet id is returned. This unique packet id may be used to retrieve the
 * previously saved packet metadata, using dhd_pktid_map_free(). On invocation
 * of dhd_pktid_map_free(), the unique packet id is essentially freed. A
 * subsequent call to dhd_pktid_map_alloc() may reuse this packet id.
 *
 * Implementation Note:
 * Convert this into a <key,locker> abstraction and place into bcmutils !
 * Locker abstraction should treat contents as opaque storage, and a
 * callback should be registered to handle busy lockers on destructor.
 *
 * +---------------------------------------------------------------------------+
 */
/** Allocate and initialize a mapper of num_items <numbered_key, locker> */
static dhd_pktid_map_handle_t *
dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items, uint32 index)
{
	void *osh;
	uint32 nkey;
	dhd_pktid_map_t *map;
	uint32 dhd_pktid_map_sz;
	uint32 map_items;
#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_PKTIDMAP)
	uint32 section;
#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_PKTIDMAP */
	osh = dhd->osh;
	ASSERT((num_items >= 1) && (num_items <= MAX_PKTID_ITEMS));
	dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(num_items);
#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_PKTIDMAP)
	/* Static-buffer builds carve the map out of a preallocated section;
	 * the section is selected by the map index (general vs ioctl map).
	 */
	if (index == PKTID_MAP_HANDLE) {
		section = DHD_PREALLOC_PKTID_MAP;
	} else {
		section = DHD_PREALLOC_PKTID_MAP_IOCTL;
	}
	map = (dhd_pktid_map_t *)DHD_OS_PREALLOC(dhd, section, dhd_pktid_map_sz);
#else
	map = (dhd_pktid_map_t *)MALLOC(osh, dhd_pktid_map_sz);
#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_PKTIDMAP */
	if (map == NULL) {
		DHD_ERROR(("%s:%d: MALLOC failed for size %d\n",
			__FUNCTION__, __LINE__, dhd_pktid_map_sz));
		goto error;
	}
	bzero(map, dhd_pktid_map_sz);
	/* Initialize the lock that protects this structure */
	map->pktid_lock = DHD_PKTID_LOCK_INIT(osh);
	if (map->pktid_lock == NULL) {
		DHD_ERROR(("%s:%d: Lock init failed \r\n", __FUNCTION__, __LINE__));
		goto error;
	}
	map->items = num_items;
	map->avail = num_items;
	map_items = DHD_PKIDMAP_ITEMS(map->items);
#if defined(DHD_PKTID_AUDIT_ENABLED)
	/* Incarnate a hierarchical multiword bitmap for auditing pktid allocator */
	map->pktid_audit = bcm_mwbmap_init(osh, map_items + 1);
	if (map->pktid_audit == (struct bcm_mwbmap *)NULL) {
		DHD_ERROR(("%s:%d: pktid_audit init failed\r\n", __FUNCTION__, __LINE__));
		goto error;
	} else {
		DHD_ERROR(("%s:%d: pktid_audit init succeeded %d\n",
			__FUNCTION__, __LINE__, map_items + 1));
	}
	map->pktid_audit_lock = DHD_PKTID_AUDIT_LOCK_INIT(osh);
#endif /* DHD_PKTID_AUDIT_ENABLED */
	/* Push all keys onto the free-key stack and open their lockers */
	for (nkey = 1; nkey <= map_items; nkey++) { /* locker #0 is reserved */
		map->keys[nkey] = nkey; /* populate with unique keys */
		map->lockers[nkey].state = LOCKER_IS_FREE;
		map->lockers[nkey].pkt = NULL; /* bzero: redundant */
		map->lockers[nkey].len = 0;
	}
	/* Reserve pktid #0, i.e. DHD_PKTID_INVALID to be busy */
	map->lockers[DHD_PKTID_INVALID].state = LOCKER_IS_BUSY;
	map->lockers[DHD_PKTID_INVALID].pkt = NULL; /* bzero: redundant */
	map->lockers[DHD_PKTID_INVALID].len = 0;
#if defined(DHD_PKTID_AUDIT_ENABLED)
	/* do not use dhd_pktid_audit() here, use bcm_mwbmap_force directly */
	bcm_mwbmap_force(map->pktid_audit, DHD_PKTID_INVALID);
#endif /* DHD_PKTID_AUDIT_ENABLED */
	return (dhd_pktid_map_handle_t *)map; /* opaque handle */
error:
	/* Unwind partial construction; bzero above guarantees un-initialized
	 * members are NULL, so the conditional deinits below are safe.
	 */
	if (map) {
#if defined(DHD_PKTID_AUDIT_ENABLED)
		if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
			bcm_mwbmap_fini(osh, map->pktid_audit); /* Destruct pktid_audit */
			map->pktid_audit = (struct bcm_mwbmap *)NULL;
			if (map->pktid_audit_lock)
				DHD_PKTID_AUDIT_LOCK_DEINIT(osh, map->pktid_audit_lock);
		}
#endif /* DHD_PKTID_AUDIT_ENABLED */
		if (map->pktid_lock)
			DHD_PKTID_LOCK_DEINIT(osh, map->pktid_lock);
#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_PKTIDMAP)
		DHD_OS_PREFREE(dhd, map, dhd_pktid_map_sz);
#else
		MFREE(osh, map, dhd_pktid_map_sz);
#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_PKTIDMAP */
	}
	return (dhd_pktid_map_handle_t *)NULL;
}
/**
 * Retrieve all allocated keys and free all <numbered_key, locker>.
 * Freeing implies: unmapping the buffers and freeing the native packet
 * This could have been a callback registered with the pktid mapper.
 */
static void
dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
{
	void *osh;
	uint32 nkey;
	dhd_pktid_map_t *map;
	uint32 dhd_pktid_map_sz;
	dhd_pktid_item_t *locker;
	uint32 map_items;
	uint32 flags;
	if (handle == NULL) {
		return;
	}
	map = (dhd_pktid_map_t *)handle;
	flags = DHD_PKTID_LOCK(map->pktid_lock);
	osh = dhd->osh;
	dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items);
	nkey = 1; /* skip reserved KEY #0, and start from 1 */
	locker = &map->lockers[nkey];
	map_items = DHD_PKIDMAP_ITEMS(map->items);
	/* Walk every locker: DMA-unmap and free any packet still in flight */
	for (; nkey <= map_items; nkey++, locker++) {
		if (locker->state == LOCKER_IS_BUSY) { /* numbered key still in use */
			locker->state = LOCKER_IS_FREE; /* force open the locker */
#if defined(DHD_PKTID_AUDIT_ENABLED)
			DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */
#endif /* DHD_PKTID_AUDIT_ENABLED */
			{ /* This could be a callback registered with dhd_pktid_map */
				DMA_UNMAP(osh, locker->pa, locker->len,
					locker->dir, 0, DHD_DMAH_NULL);
				dhd_prot_packet_free(dhd, (ulong*)locker->pkt,
					locker->pkttype, TRUE);
			}
		}
#if defined(DHD_PKTID_AUDIT_ENABLED)
		else {
			DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
		}
#endif /* DHD_PKTID_AUDIT_ENABLED */
		locker->pkt = NULL; /* clear saved pkt */
		locker->len = 0;
	}
#if defined(DHD_PKTID_AUDIT_ENABLED)
	if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
		bcm_mwbmap_fini(osh, map->pktid_audit); /* Destruct pktid_audit */
		map->pktid_audit = (struct bcm_mwbmap *)NULL;
		if (map->pktid_audit_lock) {
			DHD_PKTID_AUDIT_LOCK_DEINIT(osh, map->pktid_audit_lock);
		}
	}
#endif /* DHD_PKTID_AUDIT_ENABLED */
	DHD_PKTID_UNLOCK(map->pktid_lock, flags);
	DHD_PKTID_LOCK_DEINIT(osh, map->pktid_lock);
#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_PKTIDMAP)
	DHD_OS_PREFREE(dhd, handle, dhd_pktid_map_sz);
#else
	MFREE(osh, handle, dhd_pktid_map_sz);
#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_PKTIDMAP */
}
#ifdef IOCTLRESP_USE_CONSTMEM
/** Called in detach scenario. Releasing IOCTL buffers. */
static void
dhd_pktid_map_fini_ioctl(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
{
	uint32 nkey;
	dhd_pktid_map_t *map;
	uint32 dhd_pktid_map_sz;
	dhd_pktid_item_t *locker;
	uint32 map_items;
	uint32 flags;
	osl_t *osh = dhd->osh;
	if (handle == NULL) {
		return;
	}
	map = (dhd_pktid_map_t *)handle;
	flags = DHD_PKTID_LOCK(map->pktid_lock);
	dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items);
	nkey = 1; /* skip reserved KEY #0, and start from 1 */
	locker = &map->lockers[nkey];
	map_items = DHD_PKIDMAP_ITEMS(map->items);
	for (; nkey <= map_items; nkey++, locker++) {
		if (locker->state == LOCKER_IS_BUSY) { /* numbered key still in use */
			locker->state = LOCKER_IS_FREE; /* force open the locker */
#if defined(DHD_PKTID_AUDIT_ENABLED)
			DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */
#endif /* DHD_PKTID_AUDIT_ENABLED */
			{
				dhd_dma_buf_t retbuf;
				retbuf.va = locker->pkt;
				retbuf.len = locker->len;
				retbuf.pa = locker->pa;
				retbuf.dmah = locker->dmah;
				retbuf.secdma = locker->secdma;
				/* This could be a callback registered with dhd_pktid_map */
				/* Drop the pktid lock around the buffer release; the
				 * locker contents were copied into retbuf above.
				 */
				DHD_PKTID_UNLOCK(map->pktid_lock, flags);
				free_ioctl_return_buffer(dhd, &retbuf);
				flags = DHD_PKTID_LOCK(map->pktid_lock);
			}
		}
#if defined(DHD_PKTID_AUDIT_ENABLED)
		else {
			DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
		}
#endif /* DHD_PKTID_AUDIT_ENABLED */
		locker->pkt = NULL; /* clear saved pkt */
		locker->len = 0;
	}
#if defined(DHD_PKTID_AUDIT_ENABLED)
	if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
		bcm_mwbmap_fini(osh, map->pktid_audit); /* Destruct pktid_audit */
		map->pktid_audit = (struct bcm_mwbmap *)NULL;
		if (map->pktid_audit_lock) {
			DHD_PKTID_AUDIT_LOCK_DEINIT(osh, map->pktid_audit_lock);
		}
	}
#endif /* DHD_PKTID_AUDIT_ENABLED */
	DHD_PKTID_UNLOCK(map->pktid_lock, flags);
	DHD_PKTID_LOCK_DEINIT(osh, map->pktid_lock);
#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_PKTIDMAP)
	DHD_OS_PREFREE(dhd, handle, dhd_pktid_map_sz);
#else
	MFREE(osh, handle, dhd_pktid_map_sz);
#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_PKTIDMAP */
}
#endif /* IOCTLRESP_USE_CONSTMEM */
/** Return the number of pktids currently available for allocation. */
static INLINE uint32 BCMFASTPATH
dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle)
{
	dhd_pktid_map_t *pktid_map;
	uint32 lock_flags;
	uint32 free_cnt;

	ASSERT(handle != NULL);
	pktid_map = (dhd_pktid_map_t *)handle;

	/* Snapshot the counter under the pktid lock */
	lock_flags = DHD_PKTID_LOCK(pktid_map->pktid_lock);
	free_cnt = pktid_map->avail;
	DHD_PKTID_UNLOCK(pktid_map->pktid_lock, lock_flags);

	return free_cnt;
}
/**
 * __dhd_pktid_map_reserve - pop a free numbered key off the keys stack and
 * mark its locker busy. Only the pkt pointer is recorded here; the remaining
 * parameters are filled in later by __dhd_pktid_map_save().
 *
 * Not reentrant: the caller must hold the pktid lock (see the
 * dhd_pktid_map_reserve() wrapper).
 *
 * Returns DHD_PKTID_INVALID when the pktid pool is depleted; the caller must
 * treat that value as a failed allocation.
 */
static INLINE uint32
__dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt)
{
	uint32 nkey;
	dhd_pktid_map_t *map;
	dhd_pktid_item_t *locker;
	ASSERT(handle != NULL);
	map = (dhd_pktid_map_t *)handle;
	/* map->avail is unsigned; the pool is depleted exactly when it is zero */
	if (map->avail == 0) { /* no more pktids to allocate */
		map->failures++;
		DHD_INFO(("%s:%d: failed, no free keys\n", __FUNCTION__, __LINE__));
		return DHD_PKTID_INVALID; /* failed alloc request */
	}
	ASSERT(map->avail <= map->items);
	nkey = map->keys[map->avail]; /* fetch a free locker, pop stack */
	locker = &map->lockers[nkey]; /* save packet metadata in locker */
	map->avail--;
	locker->pkt = pkt; /* pkt is saved, other params not yet saved. */
	locker->len = 0;
	locker->state = LOCKER_IS_BUSY; /* reserve this locker */
#if defined(DHD_PKTID_AUDIT_MAP)
	DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_ALLOC); /* Audit duplicate alloc */
#endif /* DHD_PKTID_AUDIT_MAP */
	ASSERT(nkey != DHD_PKTID_INVALID);
	return nkey; /* return locker's numbered key */
}
/**
 * dhd_pktid_map_reserve - locking wrapper around __dhd_pktid_map_reserve().
 * Reserves a unique numbered key; the locker is not yet populated. Call the
 * pktid save api afterwards to record the packet parameters in the locker.
 */
static INLINE uint32
dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt)
{
	dhd_pktid_map_t *pktid_map;
	uint32 lock_flags;
	uint32 nkey;

	ASSERT(handle != NULL);
	pktid_map = (dhd_pktid_map_t *)handle;

	lock_flags = DHD_PKTID_LOCK(pktid_map->pktid_lock);
	nkey = __dhd_pktid_map_reserve(dhd, handle, pkt);
	DHD_PKTID_UNLOCK(pktid_map->pktid_lock, lock_flags);

	return nkey;
}
/* __dhd_pktid_map_save - populate the locker of a previously reserved key
 * with the packet's DMA parameters and mark it busy.
 * Caller must hold the pktid lock (see the dhd_pktid_map_save wrapper).
 */
static INLINE void
__dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt,
	uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma,
	dhd_pkttype_t pkttype)
{
	dhd_pktid_map_t *map;
	dhd_pktid_item_t *locker;
	ASSERT(handle != NULL);
	map = (dhd_pktid_map_t *)handle;
	ASSERT((nkey != DHD_PKTID_INVALID) && (nkey <= DHD_PKIDMAP_ITEMS(map->items)));
	locker = &map->lockers[nkey];
	/* Key must be either busy with this same pkt (freshly reserved) or
	 * held reserved-empty (LOCKER_IS_RSVD with pkt == NULL).
	 */
	ASSERT(((locker->state == LOCKER_IS_BUSY) && (locker->pkt == pkt)) ||
		((locker->state == LOCKER_IS_RSVD) && (locker->pkt == NULL)));
#if defined(DHD_PKTID_AUDIT_MAP)
	DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_ALLOC); /* apriori, reservation */
#endif /* DHD_PKTID_AUDIT_MAP */
	/* store contents in locker */
	locker->dir = dir;
	locker->pa = pa;
	locker->len = (uint16)len; /* 16bit len */
	locker->dmah = dmah; /* DMA map handle */
	locker->secdma = secdma;
	locker->pkttype = pkttype;
	locker->pkt = pkt;
	locker->state = LOCKER_IS_BUSY; /* make this locker busy */
}
/**
 * dhd_pktid_map_save - locking wrapper around __dhd_pktid_map_save().
 * Records a packet's parameters in the locker that belongs to a previously
 * reserved unique numbered key.
 */
static INLINE void
dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt,
	uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma,
	dhd_pkttype_t pkttype)
{
	dhd_pktid_map_t *pktid_map;
	uint32 lock_flags;

	ASSERT(handle != NULL);
	pktid_map = (dhd_pktid_map_t *)handle;

	lock_flags = DHD_PKTID_LOCK(pktid_map->pktid_lock);
	__dhd_pktid_map_save(dhd, handle, pkt, nkey, pa, len,
		dir, dmah, secdma, pkttype);
	DHD_PKTID_UNLOCK(pktid_map->pktid_lock, lock_flags);
}
/**
 * dhd_pktid_map_alloc - reserve a unique numbered key and record the packet
 * parameters in its locker, under a single hold of the pktid lock.
 * Returns the numbered key, or DHD_PKTID_INVALID when the pool is depleted.
 */
static uint32 BCMFASTPATH
dhd_pktid_map_alloc(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt,
	dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma,
	dhd_pkttype_t pkttype)
{
	dhd_pktid_map_t *pktid_map;
	uint32 lock_flags;
	uint32 nkey;

	ASSERT(handle != NULL);
	pktid_map = (dhd_pktid_map_t *)handle;

	lock_flags = DHD_PKTID_LOCK(pktid_map->pktid_lock);

	nkey = __dhd_pktid_map_reserve(dhd, handle, pkt);
	if (nkey != DHD_PKTID_INVALID) {
		/* Reservation succeeded: fill in the locker contents */
		__dhd_pktid_map_save(dhd, handle, pkt, nkey, pa,
			len, dir, dmah, secdma, pkttype);
#if defined(DHD_PKTID_AUDIT_MAP)
		DHD_PKTID_AUDIT(dhd, pktid_map, nkey, DHD_TEST_IS_ALLOC); /* apriori, reservation */
#endif /* DHD_PKTID_AUDIT_MAP */
	}

	DHD_PKTID_UNLOCK(pktid_map->pktid_lock, lock_flags);

	return nkey;
}
/**
 * dhd_pktid_map_free - Given a numbered key, return the locker contents.
 * dhd_pktid_map_free() is not reentrant, and is the caller's responsibility.
 * Caller may not free a pktid value DHD_PKTID_INVALID or an arbitrary pktid
 * value. Only a previously allocated pktid may be freed.
 *
 * rsv_locker selects whether the locker is released (DHD_PKTID_FREE_LOCKER)
 * or kept reserved for pktid reuse (DHD_PKTID_RSV_LOCKER -> LOCKER_IS_RSVD).
 * Returns the saved native packet pointer and fills in the out parameters,
 * or NULL when the pktid/pkttype fails validation.
 */
static void * BCMFASTPATH
dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, uint32 nkey,
	dmaaddr_t *pa, uint32 *len, void **dmah, void **secdma,
	dhd_pkttype_t pkttype, bool rsv_locker)
{
	dhd_pktid_map_t *map;
	dhd_pktid_item_t *locker;
	void * pkt;
	uint32 flags;
	ASSERT(handle != NULL);
	map = (dhd_pktid_map_t *)handle;
	flags = DHD_PKTID_LOCK(map->pktid_lock);
	ASSERT((nkey != DHD_PKTID_INVALID) && (nkey <= DHD_PKIDMAP_ITEMS(map->items)));
	locker = &map->lockers[nkey];
#if defined(DHD_PKTID_AUDIT_MAP)
	DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* Audit duplicate FREE */
#endif /* DHD_PKTID_AUDIT_MAP */
	if (locker->state == LOCKER_IS_FREE) { /* Debug check for cloned numbered key */
		DHD_ERROR(("%s:%d: Error! freeing invalid pktid<%u>\n",
		           __FUNCTION__, __LINE__, nkey));
		ASSERT(locker->state != LOCKER_IS_FREE);
		DHD_PKTID_UNLOCK(map->pktid_lock, flags);
		return NULL;
	}
	/* Check for the colour of the buffer i.e The buffer posted for TX,
	 * should be freed for TX completion. Similarly the buffer posted for
	 * IOCTL should be freed for IOCT completion etc.
	 */
	if ((pkttype != PKTTYPE_NO_CHECK) && (locker->pkttype != pkttype)) {
		/* NOTE(review): the lock is dropped here before the ASSERT below
		 * re-reads locker->pkttype, while the error path above logs before
		 * unlocking. Confirm the intended ordering.
		 */
		DHD_PKTID_UNLOCK(map->pktid_lock, flags);
		DHD_ERROR(("%s:%d: Error! Invalid Buffer Free for pktid<%u> \n",
		           __FUNCTION__, __LINE__, nkey));
		ASSERT(locker->pkttype == pkttype);
		return NULL;
	}
	if (rsv_locker == DHD_PKTID_FREE_LOCKER) {
		map->avail++;
		map->keys[map->avail] = nkey; /* make this numbered key available */
		locker->state = LOCKER_IS_FREE; /* open and free Locker */
	} else {
		/* pktid will be reused, but the locker does not have a valid pkt */
		locker->state = LOCKER_IS_RSVD;
	}
#if defined(DHD_PKTID_AUDIT_MAP)
	DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
#endif /* DHD_PKTID_AUDIT_MAP */
	*pa = locker->pa; /* return contents of locker */
	*len = (uint32)locker->len;
	*dmah = locker->dmah;
	*secdma = locker->secdma;
	pkt = locker->pkt;
	locker->pkt = NULL; /* Clear pkt */
	locker->len = 0;
	DHD_PKTID_UNLOCK(map->pktid_lock, flags);
	return pkt;
}
#else /* ! DHD_PCIE_PKTID */
/* Without the pktid mapper, in-flight packets are tracked on per-type lists
 * and the 32bit native packet pointer itself serves as the pktid.
 */
typedef struct pktlist {
	PKT_LIST *tx_pkt_list;		/* list for tx packets */
	PKT_LIST *rx_pkt_list;		/* list for rx packets */
	PKT_LIST *ctrl_pkt_list;	/* list for ioctl/event buf post */
} pktlists_t;
/*
 * Given that each workitem only uses a 32bit pktid, only 32bit hosts may avail
 * of a one to one mapping 32bit pktptr and a 32bit pktid.
 *
 * - When PKTIDMAP is not used, DHD_NATIVE_TO_PKTID variants will never fail.
 * - Neither DHD_NATIVE_TO_PKTID nor DHD_PKTID_TO_NATIVE need to be protected by
 *   a lock.
 * - Hence DHD_PKTID_INVALID is not defined when DHD_PCIE_PKTID is undefined.
 */
#define DHD_PKTID32(pktptr32) ((uint32)(pktptr32))
#define DHD_PKTPTR32(pktid32) ((void *)(pktid32))
static INLINE uint32 dhd_native_to_pktid(dhd_pktid_map_handle_t *map, void *pktptr32,
	dmaaddr_t pa, uint32 dma_len, void *dmah, void *secdma,
	dhd_pkttype_t pkttype);
static INLINE void * dhd_pktid_to_native(dhd_pktid_map_handle_t *map, uint32 pktid32,
	dmaaddr_t *pa, uint32 *dma_len, void **dmah, void **secdma,
	dhd_pkttype_t pkttype);
/**
 * dhd_pktid_map_init - non-PKTID variant: allocate the three per-type packet
 * lists (tx, rx, ctrl). num_items and index are unused in this configuration
 * (the native packet pointer doubles as the pktid).
 * Returns an opaque handle, or NULL on allocation failure.
 */
static dhd_pktid_map_handle_t *
dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items, uint32 index)
{
	osl_t *osh = dhd->osh;
	pktlists_t *handle = NULL;
	/* sizeof yields size_t; cast to int for the %d format specifier */
	if ((handle = (pktlists_t *) MALLOCZ(osh, sizeof(pktlists_t))) == NULL) {
		DHD_ERROR(("%s:%d: MALLOC failed for lists allocation, size=%d\n",
		           __FUNCTION__, __LINE__, (int)sizeof(pktlists_t)));
		goto error_done;
	}
	if ((handle->tx_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
		DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
		           __FUNCTION__, __LINE__, (int)sizeof(PKT_LIST)));
		goto error;
	}
	if ((handle->rx_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
		DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
		           __FUNCTION__, __LINE__, (int)sizeof(PKT_LIST)));
		goto error;
	}
	if ((handle->ctrl_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
		DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
		           __FUNCTION__, __LINE__, (int)sizeof(PKT_LIST)));
		goto error;
	}
	PKTLIST_INIT(handle->tx_pkt_list);
	PKTLIST_INIT(handle->rx_pkt_list);
	PKTLIST_INIT(handle->ctrl_pkt_list);
	return (dhd_pktid_map_handle_t *) handle;
error:
	/* handle came from MALLOCZ, so unallocated list pointers are NULL */
	if (handle->ctrl_pkt_list) {
		MFREE(osh, handle->ctrl_pkt_list, sizeof(PKT_LIST));
	}
	if (handle->rx_pkt_list) {
		MFREE(osh, handle->rx_pkt_list, sizeof(PKT_LIST));
	}
	if (handle->tx_pkt_list) {
		MFREE(osh, handle->tx_pkt_list, sizeof(PKT_LIST));
	}
	if (handle) {
		MFREE(osh, handle, sizeof(pktlists_t));
	}
error_done:
	return (dhd_pktid_map_handle_t *)NULL;
}
/** Non-PKTID variant: tear down and release the three per-type packet lists. */
static void
dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map)
{
	osl_t *osh = dhd->osh;
	pktlists_t *lists = (pktlists_t *) map;

	ASSERT(lists != NULL);
	if (lists == (pktlists_t *)NULL) {
		return;
	}

	/* Finalize and free each list that was successfully allocated */
	if (lists->ctrl_pkt_list) {
		PKTLIST_FINI(lists->ctrl_pkt_list);
		MFREE(osh, lists->ctrl_pkt_list, sizeof(PKT_LIST));
	}
	if (lists->rx_pkt_list) {
		PKTLIST_FINI(lists->rx_pkt_list);
		MFREE(osh, lists->rx_pkt_list, sizeof(PKT_LIST));
	}
	if (lists->tx_pkt_list) {
		PKTLIST_FINI(lists->tx_pkt_list);
		MFREE(osh, lists->tx_pkt_list, sizeof(PKT_LIST));
	}

	MFREE(osh, lists, sizeof(pktlists_t));
}
/** Save dma parameters into the packet's pkttag and convert a pktptr to pktid */
static INLINE uint32
dhd_native_to_pktid(dhd_pktid_map_handle_t *map, void *pktptr32,
	dmaaddr_t pa, uint32 dma_len, void *dmah, void *secdma,
	dhd_pkttype_t pkttype)
{
	pktlists_t *lists = (pktlists_t *) map;

	ASSERT(pktptr32 != NULL);

	/* Stash the DMA parameters in the packet's tag area */
	DHD_PKT_SET_DMA_LEN(pktptr32, dma_len);
	DHD_PKT_SET_DMAH(pktptr32, dmah);
	DHD_PKT_SET_PA(pktptr32, pa);
	DHD_PKT_SET_SECDMA(pktptr32, secdma);

	/* Track the packet on the list matching its type */
	switch (pkttype) {
	case PKTTYPE_DATA_TX:
		PKTLIST_ENQ(lists->tx_pkt_list, pktptr32);
		break;
	case PKTTYPE_DATA_RX:
		PKTLIST_ENQ(lists->rx_pkt_list, pktptr32);
		break;
	default:
		PKTLIST_ENQ(lists->ctrl_pkt_list, pktptr32);
		break;
	}

	return DHD_PKTID32(pktptr32);
}
/** Convert a pktid to pktptr and retrieve saved dma parameters from packet */
static INLINE void *
dhd_pktid_to_native(dhd_pktid_map_handle_t *map, uint32 pktid32,
	dmaaddr_t *pa, uint32 *dma_len, void **dmah, void **secdma,
	dhd_pkttype_t pkttype)
{
	pktlists_t *lists = (pktlists_t *) map;
	void *pktptr32;

	ASSERT(pktid32 != 0U);
	pktptr32 = DHD_PKTPTR32(pktid32);

	/* Recover the DMA parameters stashed in the packet's tag area */
	*dma_len = DHD_PKT_GET_DMA_LEN(pktptr32);
	*dmah = DHD_PKT_GET_DMAH(pktptr32);
	*pa = DHD_PKT_GET_PA(pktptr32);
	*secdma = DHD_PKT_GET_SECDMA(pktptr32);

	/* Remove the packet from the list matching its type */
	switch (pkttype) {
	case PKTTYPE_DATA_TX:
		PKTLIST_UNLINK(lists->tx_pkt_list, pktptr32);
		break;
	case PKTTYPE_DATA_RX:
		PKTLIST_UNLINK(lists->rx_pkt_list, pktptr32);
		break;
	default:
		PKTLIST_UNLINK(lists->ctrl_pkt_list, pktptr32);
		break;
	}

	return pktptr32;
}
/* Non-PKTID variants of the conversion macros: the pktptr is the pktid, so
 * these reduce to casts plus list bookkeeping and can never fail.
 */
#define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt) DHD_PKTID32(pkt)
#define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dma_dir, dmah, secdma, pkttype) \
	({ BCM_REFERENCE(dhd); BCM_REFERENCE(nkey); BCM_REFERENCE(dma_dir); \
	dhd_native_to_pktid((dhd_pktid_map_handle_t *) map, (pkt), (pa), (len), \
	(dmah), (secdma), (dhd_pkttype_t)(pkttype)); \
	})
#define DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, dma_dir, dmah, secdma, pkttype) \
	({ BCM_REFERENCE(dhd); BCM_REFERENCE(dma_dir); \
	dhd_native_to_pktid((dhd_pktid_map_handle_t *) map, (pkt), (pa), (len), \
	(dmah), (secdma), (dhd_pkttype_t)(pkttype)); \
	})
#define DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
	({ BCM_REFERENCE(dhd); BCM_REFERENCE(pkttype); \
	dhd_pktid_to_native((dhd_pktid_map_handle_t *) map, (uint32)(pktid), \
	(dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
	(void **)&secdma, (dhd_pkttype_t)(pkttype)); \
	})
#define DHD_PKTID_AVAIL(map) (~0)
#endif /* ! DHD_PCIE_PKTID */
/**
 * The PCIE FD protocol layer is constructed in two phases:
 * Phase 1. dhd_prot_attach()
 * Phase 2. dhd_prot_init()
 *
 * dhd_prot_attach() - Allocates a dhd_prot_t object and resets all its fields.
 * All common rings are also attached (msgbuf_ring_t objects are allocated
 * with DMA-able buffers).
 * All dhd_dma_buf_t objects are also allocated here.
 *
 * As dhd_prot_attach is invoked prior to the pcie_shared object is read, any
 * initialization of objects that requires information advertized by the dongle
 * may not be performed here.
 * E.g. the number of TxPost flowrings is not known at this point, neither do
 * we know which form of D2H DMA sync mechanism is advertized by the dongle, or
 * whether the dongle supports DMA-ing of WR/RD indices for the H2D and/or D2H
 * rings (common + flow).
 *
 * dhd_prot_init() is invoked after the bus layer has fetched the information
 * advertized by the dongle in the pcie_shared_t.
 *
 * Returns BCME_OK on success, BCME_NOMEM on any allocation failure (after
 * releasing everything already allocated via dhd_prot_detach()).
 */
int
dhd_prot_attach(dhd_pub_t *dhd)
{
	osl_t *osh = dhd->osh;
	dhd_prot_t *prot;
	/* Allocate prot structure */
	if (!(prot = (dhd_prot_t *)DHD_OS_PREALLOC(dhd, DHD_PREALLOC_PROT,
		sizeof(dhd_prot_t)))) {
		DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
		goto fail;
	}
	memset(prot, 0, sizeof(*prot));
	prot->osh = osh;
	dhd->prot = prot;
	/* DMAing ring completes supported? FALSE by default */
	dhd->dma_d2h_ring_upd_support = FALSE;
	dhd->dma_h2d_ring_upd_support = FALSE;
	/* Common Ring Allocations */
	/* Ring 0: H2D Control Submission */
	if (dhd_prot_ring_attach(dhd, &prot->h2dring_ctrl_subn, "h2dctrl",
	        H2DRING_CTRL_SUB_MAX_ITEM, H2DRING_CTRL_SUB_ITEMSIZE,
	        BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT) != BCME_OK) {
		DHD_ERROR(("%s: dhd_prot_ring_attach H2D Ctrl Submission failed\n",
			__FUNCTION__));
		goto fail;
	}
	/* Ring 1: H2D Receive Buffer Post */
	if (dhd_prot_ring_attach(dhd, &prot->h2dring_rxp_subn, "h2drxp",
	        H2DRING_RXPOST_MAX_ITEM, H2DRING_RXPOST_ITEMSIZE,
	        BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT) != BCME_OK) {
		DHD_ERROR(("%s: dhd_prot_ring_attach H2D RxPost failed\n",
			__FUNCTION__));
		goto fail;
	}
	/* Ring 2: D2H Control Completion */
	if (dhd_prot_ring_attach(dhd, &prot->d2hring_ctrl_cpln, "d2hctrl",
	        D2HRING_CTRL_CMPLT_MAX_ITEM, D2HRING_CTRL_CMPLT_ITEMSIZE,
	        BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE) != BCME_OK) {
		DHD_ERROR(("%s: dhd_prot_ring_attach D2H Ctrl Completion failed\n",
			__FUNCTION__));
		goto fail;
	}
	/* Ring 3: D2H Transmit Complete */
	if (dhd_prot_ring_attach(dhd, &prot->d2hring_tx_cpln, "d2htxcpl",
	        D2HRING_TXCMPLT_MAX_ITEM, D2HRING_TXCMPLT_ITEMSIZE,
	        BCMPCIE_D2H_MSGRING_TX_COMPLETE) != BCME_OK) {
		DHD_ERROR(("%s: dhd_prot_ring_attach D2H Tx Completion failed\n",
			__FUNCTION__));
		goto fail;
	}
	/* Ring 4: D2H Receive Complete */
	if (dhd_prot_ring_attach(dhd, &prot->d2hring_rx_cpln, "d2hrxcpl",
	        D2HRING_RXCMPLT_MAX_ITEM, D2HRING_RXCMPLT_ITEMSIZE,
	        BCMPCIE_D2H_MSGRING_RX_COMPLETE) != BCME_OK) {
		DHD_ERROR(("%s: dhd_prot_ring_attach D2H Rx Completion failed\n",
			__FUNCTION__));
		goto fail;
	}
	/*
	 * Max number of flowrings is not yet known. msgbuf_ring_t with DMA-able
	 * buffers for flowrings will be instantiated, in dhd_prot_init() .
	 * See dhd_prot_flowrings_pool_attach()
	 */
	/* ioctl response buffer */
	if (dhd_dma_buf_alloc(dhd, &prot->retbuf, IOCT_RETBUF_SIZE)) {
		goto fail;
	}
	/* IOCTL request buffer */
	if (dhd_dma_buf_alloc(dhd, &prot->ioctbuf, IOCT_RETBUF_SIZE)) {
		goto fail;
	}
	/* Scratch buffer for dma rx offset */
#ifdef BCM_HOST_BUF
	if (dhd_dma_buf_alloc(dhd, &prot->d2h_dma_scratch_buf,
		ROUNDUP(DMA_D2H_SCRATCH_BUF_LEN, 16) + DMA_HOST_BUFFER_LEN)) {
#else
	if (dhd_dma_buf_alloc(dhd, &prot->d2h_dma_scratch_buf, DMA_D2H_SCRATCH_BUF_LEN)) {
#endif /* BCM_HOST_BUF */
		goto fail;
	}
	/* scratch buffer bus throughput measurement */
	if (dhd_dma_buf_alloc(dhd, &prot->host_bus_throughput_buf, DHD_BUS_TPUT_BUF_LEN)) {
		goto fail;
	}
#ifdef DHD_RX_CHAINING
	dhd_rxchain_reset(&prot->rxchain);
#endif
#if defined(DHD_LB)
	/* Initialize the work queues to be used by the Load Balancing logic */
#if defined(DHD_LB_TXC)
	{
		void *buffer;
		buffer = MALLOC(dhd->osh, sizeof(void*) * DHD_LB_WORKQ_SZ);
		if (buffer == NULL) {
			/* Fix: original passed an unchecked (possibly NULL) buffer
			 * to bcm_workq_init; fail the attach cleanly instead.
			 */
			DHD_ERROR(("%s: failed to allocate tx_compl_workq\n", __FUNCTION__));
			goto fail;
		}
		bcm_workq_init(&prot->tx_compl_prod, &prot->tx_compl_cons,
			buffer, DHD_LB_WORKQ_SZ);
		prot->tx_compl_prod_sync = 0;
		DHD_INFO(("%s: created tx_compl_workq <%p,%d>\n",
			__FUNCTION__, buffer, DHD_LB_WORKQ_SZ));
	}
#endif /* DHD_LB_TXC */
#if defined(DHD_LB_RXC)
	{
		void *buffer;
		buffer = MALLOC(dhd->osh, sizeof(uint32) * DHD_LB_WORKQ_SZ);
		if (buffer == NULL) {
			/* Fix: same unchecked-allocation hazard as the TXC queue. */
			DHD_ERROR(("%s: failed to allocate rx_compl_workq\n", __FUNCTION__));
			goto fail;
		}
		bcm_workq_init(&prot->rx_compl_prod, &prot->rx_compl_cons,
			buffer, DHD_LB_WORKQ_SZ);
		prot->rx_compl_prod_sync = 0;
		DHD_INFO(("%s: created rx_compl_workq <%p,%d>\n",
			__FUNCTION__, buffer, DHD_LB_WORKQ_SZ));
	}
#endif /* DHD_LB_RXC */
#endif /* DHD_LB */
	return BCME_OK;
fail:
#ifndef CONFIG_DHD_USE_STATIC_BUF
	if (prot != NULL) {
		dhd_prot_detach(dhd);
	}
#endif /* CONFIG_DHD_USE_STATIC_BUF */
	return BCME_NOMEM;
} /* dhd_prot_attach */
/**
 * dhd_prot_init - second stage of dhd_prot_attach. Now that the dongle has
 * completed it's initialization of the pcie_shared structure, we may now fetch
 * the dongle advertized features and adjust the protocol layer accordingly.
 *
 * dhd_prot_init() may be invoked again after a dhd_prot_reset().
 *
 * Returns BCME_OK on success; BCME_ERROR if a pktid map is already set up
 * (i.e. init without an intervening reset); BCME_NOMEM on allocation failure.
 */
int
dhd_prot_init(dhd_pub_t *dhd)
{
	sh_addr_t base_addr;
	dhd_prot_t *prot = dhd->prot;
	/* PKTID handle INIT: a non-NULL handle here means init was called twice
	 * without a dhd_prot_reset() in between.
	 */
	if (prot->pktid_map_handle != NULL) {
		DHD_ERROR(("%s: pktid_map_handle already set!\n", __FUNCTION__));
		ASSERT(0);
		return BCME_ERROR;
	}
#ifdef IOCTLRESP_USE_CONSTMEM
	if (prot->pktid_map_handle_ioctl != NULL) {
		DHD_ERROR(("%s: pktid_map_handle_ioctl already set!\n", __FUNCTION__));
		ASSERT(0);
		return BCME_ERROR;
	}
#endif /* IOCTLRESP_USE_CONSTMEM */
	prot->pktid_map_handle = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_PKTID_ITEMS, PKTID_MAP_HANDLE);
	if (prot->pktid_map_handle == NULL) {
		DHD_ERROR(("%s: Unable to map packet id's\n", __FUNCTION__));
		ASSERT(0);
		return BCME_NOMEM;
	}
#ifdef IOCTLRESP_USE_CONSTMEM
	/* Separate, smaller pktid map dedicated to ioctl response buffers. */
	prot->pktid_map_handle_ioctl = DHD_NATIVE_TO_PKTID_INIT(dhd,
		DHD_FLOWRING_MAX_IOCTLRESPBUF_POST, PKTID_MAP_HANDLE_IOCTL);
	if (prot->pktid_map_handle_ioctl == NULL) {
		DHD_ERROR(("%s: Unable to map ioctl response buffers\n", __FUNCTION__));
		ASSERT(0);
		return BCME_NOMEM;
	}
#endif /* IOCTLRESP_USE_CONSTMEM */
	/* Max pkts in ring */
	prot->max_tx_count = H2DRING_TXPOST_MAX_ITEM;
	DHD_INFO(("%s:%d: MAX_TX_COUNT = %d\n", __FUNCTION__, __LINE__, prot->max_tx_count));
	/* Read max rx packets supported by dongle */
	dhd_bus_cmn_readshared(dhd->bus, &prot->max_rxbufpost, MAX_HOST_RXBUFS, 0);
	if (prot->max_rxbufpost == 0) {
		/* This would happen if the dongle firmware is not */
		/* using the latest shared structure template */
		prot->max_rxbufpost = DEFAULT_RX_BUFFERS_TO_POST;
	}
	DHD_INFO(("%s:%d: MAX_RXBUFPOST = %d\n", __FUNCTION__, __LINE__, prot->max_rxbufpost));
	/* Initialize. bzero() would blow away the dma pointers. */
	prot->max_eventbufpost = DHD_FLOWRING_MAX_EVENTBUF_POST;
	prot->max_ioctlrespbufpost = DHD_FLOWRING_MAX_IOCTLRESPBUF_POST;
	prot->cur_ioctlresp_bufs_posted = 0;
	prot->active_tx_count = 0;
	prot->data_seq_no = 0;
	prot->ioctl_seq_no = 0;
	prot->rxbufpost = 0;
	prot->cur_event_bufs_posted = 0;
	prot->ioctl_state = 0;
	prot->curr_ioctl_cmd = 0;
	prot->ioctl_received = IOCTL_WAIT;
	prot->dmaxfer.srcmem.va = NULL;
	prot->dmaxfer.dstmem.va = NULL;
	prot->dmaxfer.in_progress = FALSE;
	prot->metadata_dbg = FALSE;
	prot->rx_metadata_offset = 0;
	prot->tx_metadata_offset = 0;
	prot->txp_threshold = TXP_FLUSH_MAX_ITEMS_FLUSH_CNT;
	prot->ioctl_trans_id = 0;
	prot->ioctl_state = 0;
	prot->ioctl_status = 0;
	prot->ioctl_resplen = 0;
	prot->ioctl_received = 0;
	/* Register the interrupt function upfront */
	/* remove corerev checks in data path */
	prot->mb_ring_fn = dhd_bus_get_mbintr_fn(dhd->bus);
	/* Initialize Common MsgBuf Rings (attach allocated them; init resets
	 * indices and publishes ring bases to the dongle).
	 */
	dhd_prot_ring_init(dhd, &prot->h2dring_ctrl_subn);
	dhd_prot_ring_init(dhd, &prot->h2dring_rxp_subn);
	dhd_prot_ring_init(dhd, &prot->d2hring_ctrl_cpln);
	dhd_prot_ring_init(dhd, &prot->d2hring_tx_cpln);
	dhd_prot_ring_init(dhd, &prot->d2hring_rx_cpln);
#if defined(PCIE_D2H_SYNC)
	dhd_prot_d2h_sync_init(dhd);
#endif /* PCIE_D2H_SYNC */
	dhd_prot_h2d_sync_init(dhd);
	/* init the scratch buffer */
	dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_scratch_buf.pa);
	dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
		D2H_DMA_SCRATCH_BUF, 0);
	dhd_bus_cmn_writeshared(dhd->bus, &prot->d2h_dma_scratch_buf.len,
		sizeof(prot->d2h_dma_scratch_buf.len), D2H_DMA_SCRATCH_BUF_LEN, 0);
	/* If supported by the host, indicate the memory block
	 * for completion writes / submission reads to shared space
	 */
	if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) {
		dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_indx_wr_buf.pa);
		dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
			D2H_DMA_INDX_WR_BUF, 0);
		dhd_base_addr_htolpa(&base_addr, prot->h2d_dma_indx_rd_buf.pa);
		dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
			H2D_DMA_INDX_RD_BUF, 0);
	}
	if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support)) {
		dhd_base_addr_htolpa(&base_addr, prot->h2d_dma_indx_wr_buf.pa);
		dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
			H2D_DMA_INDX_WR_BUF, 0);
		dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_indx_rd_buf.pa);
		dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
			D2H_DMA_INDX_RD_BUF, 0);
	}
	/*
	 * If the DMA-able buffers for flowring needs to come from a specific
	 * contiguous memory region, then setup prot->flowrings_dma_buf here.
	 * dhd_prot_flowrings_pool_attach() will carve out DMA-able buffers from
	 * this contiguous memory region, for each of the flowrings.
	 */
	/* Pre-allocate pool of msgbuf_ring for flowrings */
	if (dhd_prot_flowrings_pool_attach(dhd) != BCME_OK) {
		return BCME_ERROR;
	}
	/* Host should configure soft doorbells if needed ... here */
	/* Post to dongle host configured soft doorbells */
	dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd);
	/* Post buffers for packet reception and ioctl/event responses */
	dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */
	dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
	dhd_msgbuf_rxbuf_post_event_bufs(dhd);
	return BCME_OK;
} /* dhd_prot_init */
/**
 * dhd_prot_detach - PCIE FD protocol layer destructor.
 * Unlinks and frees all allocated protocol memory (including dhd_prot).
 * Safe to call with dhd->prot == NULL (no-op).
 */
void
dhd_prot_detach(dhd_pub_t *dhd)
{
	dhd_prot_t *prot = dhd->prot;
	/* Stop the protocol module */
	if (prot) {
		/* free up all DMA-able buffers allocated during prot attach/init */
		dhd_dma_buf_free(dhd, &prot->d2h_dma_scratch_buf);
		dhd_dma_buf_free(dhd, &prot->retbuf); /* ioctl return buffer */
		dhd_dma_buf_free(dhd, &prot->ioctbuf);
		dhd_dma_buf_free(dhd, &prot->host_bus_throughput_buf);
		/* DMA-able buffers for DMAing H2D/D2H WR/RD indices */
		dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_wr_buf);
		dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_rd_buf);
		dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_wr_buf);
		dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_rd_buf);
		/* Common MsgBuf Rings */
		dhd_prot_ring_detach(dhd, &prot->h2dring_ctrl_subn);
		dhd_prot_ring_detach(dhd, &prot->h2dring_rxp_subn);
		dhd_prot_ring_detach(dhd, &prot->d2hring_ctrl_cpln);
		dhd_prot_ring_detach(dhd, &prot->d2hring_tx_cpln);
		dhd_prot_ring_detach(dhd, &prot->d2hring_rx_cpln);
		/* Detach each DMA-able buffer and free the pool of msgbuf_ring_t */
		dhd_prot_flowrings_pool_detach(dhd);
		DHD_NATIVE_TO_PKTID_FINI(dhd, dhd->prot->pktid_map_handle);
#ifdef IOCTLRESP_USE_CONSTMEM
		/* Fix: also release the ioctl-response pktid map; previously it
		 * was only freed in dhd_prot_reset(), leaking on a pure detach.
		 */
		if (prot->pktid_map_handle_ioctl) {
			DHD_NATIVE_TO_PKTID_FINI_IOCTL(dhd, prot->pktid_map_handle_ioctl);
			prot->pktid_map_handle_ioctl = NULL;
		}
#endif /* IOCTLRESP_USE_CONSTMEM */
#if defined(DHD_LB)
		/* Fix: free the load-balancing work queue buffers BEFORE prot
		 * itself is freed below. The original freed prot first and then
		 * dereferenced prot->tx_compl_prod.buffer (use-after-free).
		 */
#if defined(DHD_LB_TXC)
		if (prot->tx_compl_prod.buffer) {
			MFREE(dhd->osh, prot->tx_compl_prod.buffer,
				sizeof(void*) * DHD_LB_WORKQ_SZ);
		}
#endif /* DHD_LB_TXC */
#if defined(DHD_LB_RXC)
		if (prot->rx_compl_prod.buffer) {
			/* Fix: the rx workq was allocated with sizeof(uint32)
			 * elements (see dhd_prot_attach); free with the same size.
			 */
			MFREE(dhd->osh, prot->rx_compl_prod.buffer,
				sizeof(uint32) * DHD_LB_WORKQ_SZ);
		}
#endif /* DHD_LB_RXC */
#endif /* DHD_LB */
#ifndef CONFIG_DHD_USE_STATIC_BUF
		MFREE(dhd->osh, dhd->prot, sizeof(dhd_prot_t));
#endif /* CONFIG_DHD_USE_STATIC_BUF */
		dhd->prot = NULL;
	}
} /* dhd_prot_detach */
/**
 * dhd_prot_reset - Reset the protocol layer without freeing any objects. This
 * may be invoked to soft reboot the dongle, without having to detach and attach
 * the entire protocol layer.
 *
 * After dhd_prot_reset(), dhd_prot_init() may be invoked without going through
 * a dhd_prot_attach() phase.
 */
void
dhd_prot_reset(dhd_pub_t *dhd)
{
	struct dhd_prot *prot = dhd->prot;
	DHD_TRACE(("%s\n", __FUNCTION__));
	if (prot == NULL) {
		return;
	}
	/* Reset flowring pool and all common rings back to pristine state;
	 * DMA-able buffers are kept allocated, only their contents/indices reset.
	 */
	dhd_prot_flowrings_pool_reset(dhd);
	dhd_prot_ring_reset(dhd, &prot->h2dring_ctrl_subn);
	dhd_prot_ring_reset(dhd, &prot->h2dring_rxp_subn);
	dhd_prot_ring_reset(dhd, &prot->d2hring_ctrl_cpln);
	dhd_prot_ring_reset(dhd, &prot->d2hring_tx_cpln);
	dhd_prot_ring_reset(dhd, &prot->d2hring_rx_cpln);
	dhd_dma_buf_reset(dhd, &prot->retbuf);
	dhd_dma_buf_reset(dhd, &prot->ioctbuf);
	dhd_dma_buf_reset(dhd, &prot->d2h_dma_scratch_buf);
	dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_rd_buf);
	dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_wr_buf);
	dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_rd_buf);
	dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_wr_buf);
	/* Re-zero the bookkeeping that dhd_prot_init() will re-derive. */
	prot->rx_metadata_offset = 0;
	prot->tx_metadata_offset = 0;
	prot->rxbufpost = 0;
	prot->cur_event_bufs_posted = 0;
	prot->cur_ioctlresp_bufs_posted = 0;
	prot->active_tx_count = 0;
	prot->data_seq_no = 0;
	prot->ioctl_seq_no = 0;
	prot->ioctl_state = 0;
	prot->curr_ioctl_cmd = 0;
	prot->ioctl_received = IOCTL_WAIT;
	prot->ioctl_trans_id = 0;
	/* dhd_flow_rings_init is located at dhd_bus_start,
	 * so when stopping bus, flowrings shall be deleted
	 */
	if (dhd->flow_rings_inited) {
		dhd_flow_rings_deinit(dhd);
	}
	/* Tear down the pktid maps; dhd_prot_init() re-creates them. */
	if (prot->pktid_map_handle) {
		DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_map_handle);
		prot->pktid_map_handle = NULL;
	}
#ifdef IOCTLRESP_USE_CONSTMEM
	if (prot->pktid_map_handle_ioctl) {
		DHD_NATIVE_TO_PKTID_FINI_IOCTL(dhd, prot->pktid_map_handle_ioctl);
		prot->pktid_map_handle_ioctl = NULL;
	}
#endif /* IOCTLRESP_USE_CONSTMEM */
} /* dhd_prot_reset */
/** Record the dongle-advertised rx data offset for later payload adjustment. */
void
dhd_prot_rx_dataoffset(dhd_pub_t *dhd, uint32 rx_offset)
{
	dhd->prot->rx_dataoffset = rx_offset;
}
/**
 * Initialize protocol: sync w/dongle state.
 * Sets dongle media info (iswl, drv_version, mac address).
 *
 * Returns 0 on success or the negative ioctl error from WLC_GET_REVINFO.
 */
int
dhd_sync_with_dongle(dhd_pub_t *dhd)
{
	int ret = 0;
	wlc_rev_info_t revinfo;
	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
	dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
#ifdef DHD_FW_COREDUMP
	/* For Android Builds check memdump capability */
	/* Check the memdump capability */
	dhd_get_memdump_info(dhd);
#endif /* DHD_FW_COREDUMP */
	/* Get the device rev info */
	memset(&revinfo, 0, sizeof(revinfo));
	ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_REVINFO, &revinfo, sizeof(revinfo), FALSE, 0);
	if (ret < 0) {
		DHD_ERROR(("%s: GET revinfo FAILED\n", __FUNCTION__));
		goto done;
	}
	DHD_ERROR(("%s: GET_REVINFO device 0x%x, vendor 0x%x, chipnum 0x%x\n", __FUNCTION__,
		revinfo.deviceid, revinfo.vendorid, revinfo.chipnum));
	/* Apply chip-id/mac based customization before and after preinit;
	 * NOTE(review): exact semantics of the TRUE/FALSE phases are defined
	 * by dhd_process_cid_mac() elsewhere in the driver.
	 */
	dhd_process_cid_mac(dhd, TRUE);
	ret = dhd_preinit_ioctls(dhd);
	if (!ret) {
		dhd_process_cid_mac(dhd, FALSE);
	}
	/* Always assumes wl for now */
	dhd->iswl = TRUE;
done:
	return ret;
} /* dhd_sync_with_dongle */
#if defined(DHD_LB)
/* DHD load balancing: deferral of work to another online CPU */
/* DHD_LB_TXC DHD_LB_RXC DHD_LB_RXP dispatchers, in dhd_linux.c */
/* Forward declarations of the per-feature dispatchers implemented in the
 * OS-specific layer (dhd_linux.c).
 */
extern void dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp);
extern void dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp);
extern void dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp);
extern void dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx);
/**
 * dhd_lb_dispatch - load balance by dispatch work to other CPU cores
 * Note: rx_compl_tasklet is dispatched explicitly.
 *
 * @dhdp     public dhd structure
 * @ring_idx the D2H completion ring that produced work (tx or rx complete)
 */
static INLINE void
dhd_lb_dispatch(dhd_pub_t *dhdp, uint16 ring_idx)
{
	switch (ring_idx) {
#if defined(DHD_LB_TXC)
	case BCMPCIE_D2H_MSGRING_TX_COMPLETE:
		bcm_workq_prod_sync(&dhdp->prot->tx_compl_prod); /* flush WR index */
		dhd_lb_tx_compl_dispatch(dhdp); /* dispatch tx_compl_tasklet */
		break;
#endif /* DHD_LB_TXC */
	case BCMPCIE_D2H_MSGRING_RX_COMPLETE:
	{
#if defined(DHD_LB_RXC)
		dhd_prot_t *prot = dhdp->prot;
		/* Schedule the tasklet only if we have to, i.e. when enough rx
		 * buffers have been consumed that a re-post is worthwhile.
		 */
		if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD)) {
			/* flush WR index */
			bcm_workq_prod_sync(&dhdp->prot->rx_compl_prod);
			dhd_lb_rx_compl_dispatch(dhdp); /* dispatch rx_compl_tasklet */
		}
#endif /* DHD_LB_RXC */
#if defined(DHD_LB_RXP)
		dhd_lb_rx_napi_dispatch(dhdp); /* dispatch rx_process_napi */
#endif /* DHD_LB_RXP */
		break;
	}
	default:
		break;
	}
}
#if defined(DHD_LB_TXC)
/**
 * DHD load balanced tx completion tasklet handler, that will perform the
 * freeing of packets on the selected CPU. Packet pointers are delivered to
 * this tasklet via the tx complete workq.
 *
 * @data tasklet argument, actually a dhd_pub_t pointer
 */
void
dhd_lb_tx_compl_handler(unsigned long data)
{
	int elem_ix;
	void *pkt, **elem;
	dmaaddr_t pa;
	uint32 pa_len;
	dhd_pub_t *dhd = (dhd_pub_t *)data;
	dhd_prot_t *prot = dhd->prot;
	bcm_workq_t *workq = &prot->tx_compl_cons;
	uint32 count = 0;
	DHD_LB_STATS_TXC_PERCPU_CNT_INCR(dhd);
	/* Drain every packet pointer queued by the producer. */
	while (1) {
		elem_ix = bcm_ring_cons(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
		if (elem_ix == BCM_RING_EMPTY) {
			break;
		}
		elem = WORKQ_ELEMENT(void *, workq, elem_ix);
		pkt = *elem;
		DHD_INFO(("%s: tx_compl_cons pkt<%p>\n", __FUNCTION__, pkt));
		OSL_PREFETCH(PKTTAG(pkt));
		OSL_PREFETCH(pkt);
		/* Unmap using the dma address/length stashed in the pkttag. */
		pa = DHD_PKTTAG_PA((dhd_pkttag_fr_t *)PKTTAG(pkt));
		pa_len = DHD_PKTTAG_PA_LEN((dhd_pkttag_fr_t *)PKTTAG(pkt));
		DMA_UNMAP(dhd->osh, pa, pa_len, DMA_RX, 0, 0);
#if defined(BCMPCIE)
		dhd_txcomplete(dhd, pkt, true);
#endif
		PKTFREE(dhd->osh, pkt, TRUE);
		count++;
	}
	/* smp_wmb(); */
	bcm_workq_cons_sync(workq);
	DHD_LB_STATS_UPDATE_TXC_HISTO(dhd, count);
}
#endif /* DHD_LB_TXC */
#if defined(DHD_LB_RXC)
/**
 * Load-balanced rx completion tasklet handler: re-posts rx buffers on the
 * bound CPU, re-using the pktids queued on the rx completion work queue.
 *
 * @data tasklet argument, actually a dhd_pub_t pointer
 */
void
dhd_lb_rx_compl_handler(unsigned long data)
{
	dhd_pub_t *dhdp = (dhd_pub_t *)data;

	DHD_LB_STATS_RXC_PERCPU_CNT_INCR(dhdp);

	/* Re-post rx buffers, re-using pktids from the work queue. */
	dhd_msgbuf_rxbuf_post(dhdp, TRUE);

	/* Publish the consumer index after draining. */
	bcm_workq_cons_sync(&dhdp->prot->rx_compl_cons);
}
#endif /* DHD_LB_RXC */
#endif /* DHD_LB */
#define DHD_DBG_SHOW_METADATA 0
#if DHD_DBG_SHOW_METADATA
/**
 * Debug helper: walk the TLV list in a D2H metadata buffer and pretty-print
 * each known TLV (tx status, rssi, timestamps, ...). Compiled only when
 * DHD_DBG_SHOW_METADATA is non-zero.
 *
 * @dhd public dhd structure (unused here, kept for call-site symmetry)
 * @ptr start of the metadata buffer (header included)
 * @len total metadata length in bytes
 */
static void BCMFASTPATH
dhd_prot_print_metadata(dhd_pub_t *dhd, void *ptr, int len)
{
	uint8 tlv_t;
	uint8 tlv_l;
	uint8 *tlv_v = (uint8 *)ptr;
	if (len <= BCMPCIE_D2H_METADATA_HDRLEN)
		return;
	/* Skip the fixed metadata header before TLV parsing. */
	len -= BCMPCIE_D2H_METADATA_HDRLEN;
	tlv_v += BCMPCIE_D2H_METADATA_HDRLEN;
	while (len > TLV_HDR_LEN) {
		tlv_t = tlv_v[TLV_TAG_OFF];
		tlv_l = tlv_v[TLV_LEN_OFF];
		len -= TLV_HDR_LEN;
		tlv_v += TLV_HDR_LEN;
		if (len < tlv_l)
			break;
		if ((tlv_t == 0) || (tlv_t == WLFC_CTL_TYPE_FILLER))
			break;
		switch (tlv_t) {
		case WLFC_CTL_TYPE_TXSTATUS: {
			uint32 txs;
			memcpy(&txs, tlv_v, sizeof(uint32));
			if (tlv_l < (sizeof(wl_txstatus_additional_info_t) + sizeof(uint32))) {
				printf("METADATA TX_STATUS: %08x\n", txs);
			} else {
				wl_txstatus_additional_info_t tx_add_info;
				memcpy(&tx_add_info, tlv_v + sizeof(uint32),
					sizeof(wl_txstatus_additional_info_t));
				printf("METADATA TX_STATUS: %08x WLFCTS[%04x | %08x - %08x - %08x]"
					" rate = %08x tries = %d - %d\n", txs,
					tx_add_info.seq, tx_add_info.entry_ts,
					tx_add_info.enq_ts, tx_add_info.last_ts,
					tx_add_info.rspec, tx_add_info.rts_cnt,
					tx_add_info.tx_cnt);
			}
		} break;
		case WLFC_CTL_TYPE_RSSI: {
			if (tlv_l == 1)
				printf("METADATA RX_RSSI: rssi = %d\n", *tlv_v);
			else
				printf("METADATA RX_RSSI[%04x]: rssi = %d snr = %d\n",
					(*(tlv_v + 3) << 8) | *(tlv_v + 2),
					(int8)(*tlv_v), *(tlv_v + 1));
		} break;
		case WLFC_CTL_TYPE_FIFO_CREDITBACK:
			bcm_print_bytes("METADATA FIFO_CREDITBACK", tlv_v, tlv_l);
			break;
		case WLFC_CTL_TYPE_TX_ENTRY_STAMP:
			bcm_print_bytes("METADATA TX_ENTRY", tlv_v, tlv_l);
			break;
		case WLFC_CTL_TYPE_RX_STAMP: {
			struct {
				uint32 rspec;
				uint32 bus_time;
				uint32 wlan_time;
			} rx_tmstamp;
			memcpy(&rx_tmstamp, tlv_v, sizeof(rx_tmstamp));
			/* Fix: corrected misspelled "TIMESTMAP" in the debug output. */
			printf("METADATA RX TIMESTAMP: WLFCTS[%08x - %08x] rate = %08x\n",
				rx_tmstamp.wlan_time, rx_tmstamp.bus_time, rx_tmstamp.rspec);
		} break;
		case WLFC_CTL_TYPE_TRANS_ID:
			bcm_print_bytes("METADATA TRANS_ID", tlv_v, tlv_l);
			break;
		case WLFC_CTL_TYPE_COMP_TXSTATUS:
			bcm_print_bytes("METADATA COMP_TXSTATUS", tlv_v, tlv_l);
			break;
		default:
			bcm_print_bytes("METADATA UNKNOWN", tlv_v, tlv_l);
			break;
		}
		len -= tlv_l;
		tlv_v += tlv_l;
	}
}
#endif /* DHD_DBG_SHOW_METADATA */
/**
 * Free a packet previously obtained from the pktid map. Control-path packets
 * (ioctl/event rx) may come from the static control buffer pool when
 * DHD_USE_STATIC_CTRLBUF is enabled; everything else is a normal PKTFREE.
 */
static INLINE void BCMFASTPATH
dhd_prot_packet_free(dhd_pub_t *dhd, void *pkt, uint8 pkttype, bool send)
{
	if (pkt == NULL) {
		return;
	}
	switch (pkttype) {
	case PKTTYPE_IOCTL_RX:
	case PKTTYPE_EVENT_RX:
#ifdef DHD_USE_STATIC_CTRLBUF
		/* Control buffers came from the static pool. */
		PKTFREE_STATIC(dhd->osh, pkt, send);
		break;
#endif /* DHD_USE_STATIC_CTRLBUF */
		/* Without the static pool, control packets fall through
		 * to the ordinary free below.
		 */
	default:
		PKTFREE(dhd->osh, pkt, send);
		break;
	}
}
/**
 * Look up (and normally release) a pktid, returning the native packet after
 * unmapping its DMA mapping. When DHD_PCIE_PKTID is enabled and free_pktid is
 * FALSE, the locker is only reserved (RSV), not freed, so the id can be
 * re-used by the caller.
 */
static INLINE void * BCMFASTPATH
dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid, uint8 pkttype, bool free_pktid)
{
	void *PKTBUF;
	dmaaddr_t pa;
	uint32 len;
	void *dmah;
	void *secdma;
#ifdef DHD_PCIE_PKTID
	if (free_pktid) {
		PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle,
			pktid, pa, len, dmah, secdma, pkttype);
	} else {
		PKTBUF = DHD_PKTID_TO_NATIVE_RSV(dhd, dhd->prot->pktid_map_handle,
			pktid, pa, len, dmah, secdma, pkttype);
	}
#else
	PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle, pktid, pa,
		len, dmah, secdma, pkttype);
#endif /* DHD_PCIE_PKTID */
	if (PKTBUF) {
		{
			/* Undo the DMA mapping created when the buffer was posted. */
			if (SECURE_DMA_ENAB(dhd->osh)) {
				SECURE_DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah,
					secdma, 0);
			} else {
				DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
			}
		}
	}
	return PKTBUF;
}
#ifdef IOCTLRESP_USE_CONSTMEM
/**
 * Recover the ioctl response dma buffer registered under 'pktid' in the
 * dedicated ioctl pktid map, filling in the caller-provided retbuf.
 * Note: the DHD_PKTID_TO_NATIVE macro writes pa/len/dmah/secdma through
 * the struct fields passed here.
 */
static INLINE void BCMFASTPATH
dhd_prot_ioctl_ret_buffer_get(dhd_pub_t *dhd, uint32 pktid, dhd_dma_buf_t *retbuf)
{
	memset(retbuf, 0, sizeof(dhd_dma_buf_t));
	retbuf->va = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle_ioctl, pktid,
		retbuf->pa, retbuf->len, retbuf->dmah, retbuf->secdma, PKTTYPE_IOCTL_RX);
	return;
}
#endif /* IOCTLRESP_USE_CONSTMEM */
/**
 * Keep the dongle's rx post ring topped up: repeatedly post batches of rx
 * buffers until the ring is within RX_BUF_BURST of full, the retry budget
 * (256 iterations) runs out, or a post attempt fails.
 *
 * @use_rsv_pktid when TRUE (DHD_LB_RXC path) re-use pktids queued by the
 *                rx completion consumer instead of allocating fresh ones.
 */
static void BCMFASTPATH
dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd, bool use_rsv_pktid)
{
	dhd_prot_t *prot = dhd->prot;
	int16 fillbufs;
	uint16 cnt = 256;
	int retcount = 0;
	fillbufs = prot->max_rxbufpost - prot->rxbufpost;
	while (fillbufs >= RX_BUF_BURST) {
		cnt--;
		if (cnt == 0) {
			/* find a better way to reschedule rx buf post if space not available */
			DHD_ERROR(("h2d rx post ring not available to post host buffers \n"));
			DHD_ERROR(("Current posted host buf count %d \n", prot->rxbufpost));
			break;
		}
		/* Post at most RX_BUF_BURST buffers in one batch */
		fillbufs = MIN(fillbufs, RX_BUF_BURST);
		/* Post buffers */
		retcount = dhd_prot_rxbuf_post(dhd, fillbufs, use_rsv_pktid);
		if (retcount >= 0) {
			prot->rxbufpost += (uint16)retcount;
#ifdef DHD_LB_RXC
			/* dhd_prot_rxbuf_post returns the number of buffers posted */
			DHD_LB_STATS_UPDATE_RXC_HISTO(dhd, retcount);
#endif /* DHD_LB_RXC */
			/* how many more to post */
			fillbufs = prot->max_rxbufpost - prot->rxbufpost;
		} else {
			/* Make sure we don't run loop any further */
			fillbufs = 0;
		}
	}
}
/** Post 'count' no of rx buffers to dongle.
 *
 * Claims ring space for 'count' messages up front, then for each slot
 * allocates a packet, DMA-maps it, registers it in the pktid map, and fills
 * in the rxbuf_post work item. On partial failure the ring WR index is
 * rolled back to cover only the slots actually filled.
 *
 * Returns the number of buffers actually posted, or -1 when no ring space
 * was available at all.
 */
static int BCMFASTPATH
dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid)
{
	void *p;
	uint16 pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
	uint8 *rxbuf_post_tmp;
	host_rxbuf_post_t *rxbuf_post;
	void *msg_start;
	dmaaddr_t pa;
	uint32 pktlen;
	uint8 i = 0;
	uint16 alloced = 0;
	unsigned long flags;
	uint32 pktid;
	dhd_prot_t *prot = dhd->prot;
	msgbuf_ring_t *ring = &prot->h2dring_rxp_subn;
	DHD_GENERAL_LOCK(dhd, flags);
	/* Claim space for exactly 'count' no of messages, for mitigation purpose */
	msg_start = (void *)
		dhd_prot_alloc_ring_space(dhd, ring, count, &alloced, TRUE);
	DHD_GENERAL_UNLOCK(dhd, flags);
	if (msg_start == NULL) {
		DHD_INFO(("%s:%d: Rxbufpost Msgbuf Not available\n", __FUNCTION__, __LINE__));
		return -1;
	}
	/* if msg_start != NULL, we should have alloced space for atleast 1 item */
	ASSERT(alloced > 0);
	rxbuf_post_tmp = (uint8*)msg_start;
	/* loop through each allocated message in the rxbuf post msgbuf_ring */
	for (i = 0; i < alloced; i++) {
		rxbuf_post = (host_rxbuf_post_t *)rxbuf_post_tmp;
		/* Create a rx buffer */
		if ((p = PKTGET(dhd->osh, pktsz, FALSE)) == NULL) {
			DHD_ERROR(("%s:%d: PKTGET for rxbuf failed\n", __FUNCTION__, __LINE__));
			dhd->rx_pktgetfail++;
			break;
		}
		pktlen = PKTLEN(dhd->osh, p);
		/* Map the packet for device DMA (secure path needs the lock). */
		if (SECURE_DMA_ENAB(dhd->osh)) {
			DHD_GENERAL_LOCK(dhd, flags);
			pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen,
				DMA_RX, p, 0, ring->dma_buf.secdma, 0);
			DHD_GENERAL_UNLOCK(dhd, flags);
		} else {
			pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0);
		}
		if (PHYSADDRISZERO(pa)) {
			PKTFREE(dhd->osh, p, FALSE);
			DHD_ERROR(("Invalid phyaddr 0\n"));
			ASSERT(0);
			break;
		}
		/* Reserve headroom for metadata; dongle writes payload after it. */
		PKTPULL(dhd->osh, p, prot->rx_metadata_offset);
		pktlen = PKTLEN(dhd->osh, p);
		/* Common msg header */
		rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_RXBUF_POST;
		rxbuf_post->cmn_hdr.if_id = 0;
		rxbuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
		ring->seqnum++;
#if defined(DHD_LB_RXC)
		if (use_rsv_pktid == TRUE) {
			/* Load-balancing path: recycle a pktid queued by the
			 * rx completion consumer instead of allocating anew.
			 */
			bcm_workq_t *workq = &prot->rx_compl_cons;
			int elem_ix = bcm_ring_cons(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
			if (elem_ix == BCM_RING_EMPTY) {
				DHD_ERROR(("%s rx_compl_cons ring is empty\n", __FUNCTION__));
				pktid = DHD_PKTID_INVALID;
				goto alloc_pkt_id;
			} else {
				uint32 *elem = WORKQ_ELEMENT(uint32, workq, elem_ix);
				pktid = *elem;
			}
			/* Now populate the previous locker with valid information */
			if (pktid != DHD_PKTID_INVALID) {
				rxbuf_post->cmn_hdr.request_id = htol32(pktid);
				DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_map_handle, p, pktid,
					pa, pktlen, DMA_RX, NULL, ring->dma_buf.secdma,
					PKTTYPE_DATA_RX);
			}
		} else
#endif /* DHD_LB_RXC */
		{
#if defined(DHD_LB_RXC)
alloc_pkt_id:
#endif
#if defined(DHD_PCIE_PKTID)
			/* get the lock before calling DHD_NATIVE_TO_PKTID */
			DHD_GENERAL_LOCK(dhd, flags);
#endif
			pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_map_handle, p, pa,
				pktlen, DMA_RX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_RX);
#if defined(DHD_PCIE_PKTID)
			/* free lock */
			DHD_GENERAL_UNLOCK(dhd, flags);
			if (pktid == DHD_PKTID_INVALID) {
				/* Pool exhausted: undo the mapping and stop posting. */
				if (SECURE_DMA_ENAB(dhd->osh)) {
					DHD_GENERAL_LOCK(dhd, flags);
					SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
						ring->dma_buf.secdma, 0);
					DHD_GENERAL_UNLOCK(dhd, flags);
				} else {
					DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
				}
				PKTFREE(dhd->osh, p, FALSE);
				DHD_ERROR(("Pktid pool depleted.\n"));
				break;
			}
#endif /* DHD_PCIE_PKTID */
		}
		rxbuf_post->data_buf_len = htol16((uint16)pktlen);
		rxbuf_post->data_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
		rxbuf_post->data_buf_addr.low_addr =
			htol32(PHYSADDRLO(pa) + prot->rx_metadata_offset);
		if (prot->rx_metadata_offset) {
			/* Metadata lives at the start of the mapped buffer. */
			rxbuf_post->metadata_buf_len = prot->rx_metadata_offset;
			rxbuf_post->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
			rxbuf_post->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(pa));
		} else {
			rxbuf_post->metadata_buf_len = 0;
			rxbuf_post->metadata_buf_addr.high_addr = 0;
			rxbuf_post->metadata_buf_addr.low_addr = 0;
		}
#if defined(DHD_PKTID_AUDIT_RING)
		DHD_PKTID_AUDIT(dhd, prot->pktid_map_handle, pktid, DHD_DUPLICATE_ALLOC);
#endif /* DHD_PKTID_AUDIT_RING */
		rxbuf_post->cmn_hdr.request_id = htol32(pktid);
		/* Move rxbuf_post_tmp to next item */
		rxbuf_post_tmp = rxbuf_post_tmp + ring->item_len;
	}
	if (i < alloced) {
		/* Not all claimed slots were filled: roll the WR index back
		 * (with wrap handling) so the dongle never sees stale items.
		 */
		if (ring->wr < (alloced - i)) {
			ring->wr = ring->max_items - (alloced - i);
		} else {
			ring->wr -= (alloced - i);
		}
		alloced = i;
	}
	/* Update ring's WR index and ring doorbell to dongle */
	if (alloced > 0) {
		dhd_prot_ring_write_complete(dhd, ring, msg_start, alloced);
	}
	return alloced;
} /* dhd_prot_rxbuf_post */
#ifdef IOCTLRESP_USE_CONSTMEM
/**
 * Allocate a fresh DMA-able buffer for an ioctl response.
 * Returns BCME_OK on success, BCME_NOMEM on allocation failure.
 */
static int
alloc_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf)
{
	int ret;

	memset(retbuf, 0, sizeof(*retbuf));

	ret = dhd_dma_buf_alloc(dhd, retbuf, IOCT_RETBUF_SIZE);
	if (ret != BCME_OK) {
		DHD_ERROR(("%s: dhd_dma_buf_alloc err %d\n", __FUNCTION__, ret));
		ASSERT(0);
		return BCME_NOMEM;
	}

	return BCME_OK;
}
/**
 * Release an ioctl return buffer previously obtained via
 * alloc_ioctl_return_buffer() / dhd_prot_ioctl_ret_buffer_get().
 */
static void
free_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf)
{
	/* retbuf (declared on stack) not fully populated ... */
	if (retbuf->va) {
		uint32 dma_pad;
		/* Re-derive len/_alloced the same way the allocator padded them. */
		dma_pad = (IOCT_RETBUF_SIZE % DHD_DMA_PAD) ? DHD_DMA_PAD : 0;
		retbuf->len = IOCT_RETBUF_SIZE;
		retbuf->_alloced = retbuf->len + dma_pad;
		/* JIRA:SWWLAN-70021 The pa value would be overwritten by the dongle.
		 * Need to reassign before free to pass the check in dhd_dma_buf_audit().
		 */
		retbuf->pa = DMA_MAP(dhd->osh, retbuf->va, retbuf->len, DMA_RX, NULL, NULL);
	}
	dhd_dma_buf_free(dhd, retbuf);
	return;
}
#endif /* IOCTLRESP_USE_CONSTMEM */
/**
 * Post one control-path rx buffer to the dongle: an event buffer when
 * event_buf is TRUE, otherwise an ioctl response buffer. Allocates the
 * packet (or a const-mem dma buffer when IOCTLRESP_USE_CONSTMEM), maps it,
 * registers a pktid, fills a work item in the H2D control submission ring
 * and rings the doorbell.
 *
 * Returns 1 when one buffer was posted, -1 on any failure (all resources
 * acquired along the way are released on the error paths).
 */
static int
dhd_prot_rxbufpost_ctrl(dhd_pub_t *dhd, bool event_buf)
{
	void *p;
	uint16 pktsz;
	ioctl_resp_evt_buf_post_msg_t *rxbuf_post;
	dmaaddr_t pa;
	uint32 pktlen;
	dhd_prot_t *prot = dhd->prot;
	uint16 alloced = 0;
	unsigned long flags;
	dhd_dma_buf_t retbuf;
	void *dmah = NULL;
	uint32 pktid;
	void *map_handle;
	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
	if (dhd->busstate == DHD_BUS_DOWN) {
		DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__));
		return -1;
	}
	memset(&retbuf, 0, sizeof(dhd_dma_buf_t));
	if (event_buf) {
		/* Allocate packet for event buffer post */
		pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
	} else {
		/* Allocate packet for ctrl/ioctl buffer post */
		pktsz = DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ;
	}
#ifdef IOCTLRESP_USE_CONSTMEM
	if (!event_buf) {
		/* ioctl responses land in a dedicated const-mem dma buffer
		 * rather than a network packet.
		 */
		if (alloc_ioctl_return_buffer(dhd, &retbuf) != BCME_OK) {
			DHD_ERROR(("Could not allocate IOCTL response buffer\n"));
			return -1;
		}
		ASSERT(retbuf.len == IOCT_RETBUF_SIZE);
		p = retbuf.va;
		pktlen = retbuf.len;
		pa = retbuf.pa;
		dmah = retbuf.dmah;
	} else
#endif /* IOCTLRESP_USE_CONSTMEM */
	{
#ifdef DHD_USE_STATIC_CTRLBUF
		p = PKTGET_STATIC(dhd->osh, pktsz, FALSE);
#else
		p = PKTGET(dhd->osh, pktsz, FALSE);
#endif /* DHD_USE_STATIC_CTRLBUF */
		if (p == NULL) {
			DHD_ERROR(("%s:%d: PKTGET for %s buf failed\n",
				__FUNCTION__, __LINE__, event_buf ?
				"EVENT" : "IOCTL RESP"));
			dhd->rx_pktgetfail++;
			return -1;
		}
		pktlen = PKTLEN(dhd->osh, p);
		/* Map the packet for device DMA (secure path needs the lock). */
		if (SECURE_DMA_ENAB(dhd->osh)) {
			DHD_GENERAL_LOCK(dhd, flags);
			pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen,
				DMA_RX, p, 0, ring->dma_buf.secdma, 0);
			DHD_GENERAL_UNLOCK(dhd, flags);
		} else {
			pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0);
		}
		if (PHYSADDRISZERO(pa)) {
			DHD_ERROR(("Invalid physaddr 0\n"));
			ASSERT(0);
			goto free_pkt_return;
		}
	}
	DHD_GENERAL_LOCK(dhd, flags);
	/* Claim one work item in the H2D control submission ring. */
	rxbuf_post = (ioctl_resp_evt_buf_post_msg_t *)
		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
	if (rxbuf_post == NULL) {
		DHD_GENERAL_UNLOCK(dhd, flags);
		DHD_ERROR(("%s:%d: Ctrl submit Msgbuf Not available to post buffer \n",
			__FUNCTION__, __LINE__));
#ifdef IOCTLRESP_USE_CONSTMEM
		if (event_buf)
#endif /* IOCTLRESP_USE_CONSTMEM */
		{
			/* Unmap only packet-backed buffers; const-mem buffers are
			 * torn down by free_ioctl_return_buffer() below.
			 */
			if (SECURE_DMA_ENAB(dhd->osh)) {
				DHD_GENERAL_LOCK(dhd, flags);
				SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
					ring->dma_buf.secdma, 0);
				DHD_GENERAL_UNLOCK(dhd, flags);
			} else {
				DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
			}
		}
		goto free_pkt_return;
	}
	/* CMN msg header */
	if (event_buf) {
		rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_EVENT_BUF_POST;
	} else {
		rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_IOCTLRESP_BUF_POST;
	}
#ifdef IOCTLRESP_USE_CONSTMEM
	if (!event_buf) {
		/* ioctl responses use their own (smaller) pktid map. */
		map_handle = dhd->prot->pktid_map_handle_ioctl;
		pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle, p, pa, pktlen,
			DMA_RX, dmah, ring->dma_buf.secdma, PKTTYPE_IOCTL_RX);
	} else
#endif /* IOCTLRESP_USE_CONSTMEM */
	{
		map_handle = dhd->prot->pktid_map_handle;
		pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle,
			p, pa, pktlen, DMA_RX, dmah, ring->dma_buf.secdma,
			event_buf ? PKTTYPE_EVENT_RX : PKTTYPE_IOCTL_RX);
	}
	if (pktid == DHD_PKTID_INVALID) {
		/* Roll back the claimed ring slot (with wrap handling). */
		if (ring->wr == 0) {
			ring->wr = ring->max_items - 1;
		} else {
			ring->wr--;
		}
		DHD_GENERAL_UNLOCK(dhd, flags);
		DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
		goto free_pkt_return;
	}
#if defined(DHD_PKTID_AUDIT_RING)
	DHD_PKTID_AUDIT(dhd, map_handle, pktid, DHD_DUPLICATE_ALLOC);
#endif /* DHD_PKTID_AUDIT_RING */
	rxbuf_post->cmn_hdr.request_id = htol32(pktid);
	rxbuf_post->cmn_hdr.if_id = 0;
	rxbuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
	ring->seqnum++;
#if defined(DHD_PCIE_PKTID)
	if (rxbuf_post->cmn_hdr.request_id == DHD_PKTID_INVALID) {
		/* Roll back the ring slot and release the mapping/packet. */
		if (ring->wr == 0) {
			ring->wr = ring->max_items - 1;
		} else {
			ring->wr--;
		}
		DHD_GENERAL_UNLOCK(dhd, flags);
#ifdef IOCTLRESP_USE_CONSTMEM
		if (event_buf)
#endif /* IOCTLRESP_USE_CONSTMEM */
		{
			if (SECURE_DMA_ENAB(dhd->osh)) {
				DHD_GENERAL_LOCK(dhd, flags);
				SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
					ring->dma_buf.secdma, 0);
				DHD_GENERAL_UNLOCK(dhd, flags);
			} else {
				DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
			}
		}
		goto free_pkt_return;
	}
#endif /* DHD_PCIE_PKTID */
	rxbuf_post->cmn_hdr.flags = 0;
#ifndef IOCTLRESP_USE_CONSTMEM
	rxbuf_post->host_buf_len = htol16((uint16)PKTLEN(dhd->osh, p));
#else
	rxbuf_post->host_buf_len = htol16((uint16)pktlen);
#endif /* IOCTLRESP_USE_CONSTMEM */
	rxbuf_post->host_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
	rxbuf_post->host_buf_addr.low_addr = htol32(PHYSADDRLO(pa));
	/* update ring's WR index and ring doorbell to dongle */
	dhd_prot_ring_write_complete(dhd, ring, rxbuf_post, 1);
	DHD_GENERAL_UNLOCK(dhd, flags);
	return 1;
free_pkt_return:
#ifdef IOCTLRESP_USE_CONSTMEM
	if (!event_buf) {
		free_ioctl_return_buffer(dhd, &retbuf);
	} else
#endif /* IOCTLRESP_USE_CONSTMEM */
	{
		dhd_prot_packet_free(dhd, p,
			event_buf ? PKTTYPE_EVENT_RX : PKTTYPE_IOCTL_RX,
			FALSE);
	}
	return -1;
} /* dhd_prot_rxbufpost_ctrl */
/**
 * Post up to @max_to_post control-path rx buffers (event or ioctl-response,
 * selected by @event_buf) to the dongle. Stops early if the bus is down or a
 * single post fails.
 *
 * @return number of buffers actually posted.
 */
static uint16
dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, bool event_buf, uint32 max_to_post)
{
	uint32 posted;

	DHD_INFO(("max to post %d, event %d \n", max_to_post, event_buf));

	if (dhd->busstate == DHD_BUS_DOWN) {
		DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__));
		return 0;
	}

	for (posted = 0; posted < max_to_post; posted++) {
		if (dhd_prot_rxbufpost_ctrl(dhd, event_buf) < 0) {
			break;
		}
	}

	DHD_INFO(("posted %d buffers to event_pool/ioctl_resp_pool %d\n", posted, event_buf));
	return (uint16)posted;
}
/** Top up the dongle's pool of posted ioctl-response rx buffers to the max. */
static void
dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *dhd)
{
	dhd_prot_t *prot = dhd->prot;
	int shortfall;

	DHD_INFO(("ioctl resp buf post\n"));

	shortfall = prot->max_ioctlrespbufpost - prot->cur_ioctlresp_bufs_posted;
	if (shortfall <= 0) {
		DHD_INFO(("%s: Cannot post more than max IOCTL resp buffers\n",
			__FUNCTION__));
		return;
	}

	prot->cur_ioctlresp_bufs_posted +=
		dhd_msgbuf_rxbuf_post_ctrlpath(dhd, FALSE, shortfall);
}
/** Top up the dongle's pool of posted event rx buffers to the max. */
static void
dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *dhd)
{
	dhd_prot_t *prot = dhd->prot;
	int shortfall;

	shortfall = prot->max_eventbufpost - prot->cur_event_bufs_posted;
	if (shortfall <= 0) {
		DHD_INFO(("%s: Cannot post more than max event buffers\n",
			__FUNCTION__));
		return;
	}

	prot->cur_event_bufs_posted +=
		dhd_msgbuf_rxbuf_post_ctrlpath(dhd, TRUE, shortfall);
}
/**
 * Called when DHD needs to check for 'receive complete' messages from the dongle.
 *
 * Drains the D2H rx-completion ring: each batch of work items is dispatched
 * through dhd_prot_process_msgtype() and the ring read index is pushed back
 * to the dongle. Stops when the ring is empty, the device was removed, a hang
 * was signalled, or @bound items were consumed in this pass.
 *
 * @param bound  maximum number of ring items to process per invocation
 * @return TRUE if more work may remain (caller should poll again),
 *         FALSE when the ring drained or processing had to stop.
 */
bool BCMFASTPATH
dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound)
{
	bool more = TRUE;
	uint n = 0;
	msgbuf_ring_t *ring = &dhd->prot->d2hring_rx_cpln;

	/* Process all the messages - DTOH direction */
	while (!dhd_is_device_removed(dhd)) {
		uint8 *msg_addr;
		uint32 msg_len;

		if (dhd->hang_was_sent) {
			more = FALSE;
			break;
		}

		/* Get the address of the next message to be read from ring */
		msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
		if (msg_addr == NULL) {
			/* ring is empty */
			more = FALSE;
			break;
		}

		/* Prefetch data to populate the cache */
		OSL_PREFETCH(msg_addr);

		if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
			DHD_ERROR(("%s: process %s msg addr %p len %d\n",
				__FUNCTION__, ring->name, msg_addr, msg_len));
		}

		/* Update read pointer */
		dhd_prot_upd_read_idx(dhd, ring);

		/* After batch processing, check RX bound */
		n += msg_len / ring->item_len;
		if (n >= bound) {
			break;
		}
	}

	return more;
}
/**
 * Hands transmit packets (with a caller provided flow_id) over to dongle territory (the flow ring)
 *
 * Refreshes the flow ring's read index (from the DMA'd index area when the
 * dongle supports D2H index DMA) and schedules the bus layer to move queued
 * packets from the flow queue into the flow ring.
 */
void
dhd_prot_update_txflowring(dhd_pub_t *dhd, uint16 flowid, void *msgring)
{
	msgbuf_ring_t *ring = (msgbuf_ring_t *)msgring;

	/* Update read pointer */
	if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) {
		/* dongle DMAs its read index into host memory; pick it up there */
		ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
	}

	DHD_TRACE(("ringid %d flowid %d write %d read %d \n\n",
		ring->idx, flowid, ring->wr, ring->rd));

	/* Need more logic here, but for now use it directly */
	dhd_bus_schedule_queue(dhd->bus, flowid, TRUE); /* from queue to flowring */
}
/**
 * Called when DHD needs to check for 'transmit complete' messages from the dongle.
 *
 * Drains the D2H tx-completion ring, dispatching each batch of work items via
 * dhd_prot_process_msgtype() and publishing the updated read index. Stops on
 * empty ring, device removal, hang, or after @bound items.
 *
 * @param bound  maximum number of ring items to process per invocation
 * @return TRUE if more work may remain, FALSE otherwise.
 */
bool BCMFASTPATH
dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd, uint bound)
{
	bool more = TRUE;
	uint n = 0;
	msgbuf_ring_t *ring = &dhd->prot->d2hring_tx_cpln;

	/* Process all the messages - DTOH direction */
	while (!dhd_is_device_removed(dhd)) {
		uint8 *msg_addr;
		uint32 msg_len;

		if (dhd->hang_was_sent) {
			more = FALSE;
			break;
		}

		/* Get the address of the next message to be read from ring */
		msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
		if (msg_addr == NULL) {
			/* ring is empty */
			more = FALSE;
			break;
		}

		/* Prefetch data to populate the cache */
		OSL_PREFETCH(msg_addr);

		if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
			DHD_ERROR(("%s: process %s msg addr %p len %d\n",
				__FUNCTION__, ring->name, msg_addr, msg_len));
		}

		/* Write to dngl rd ptr */
		dhd_prot_upd_read_idx(dhd, ring);

		/* After batch processing, check bound */
		n += msg_len / ring->item_len;
		if (n >= bound) {
			break;
		}
	}

	return more;
}
/**
 * Called when DHD needs to check for 'ioctl complete' messages from the dongle.
 *
 * Drains the D2H control-completion ring until it is empty, the device was
 * removed, or a hang was signalled. Unlike the rx/tx variants, there is no
 * per-pass bound on this ring.
 *
 * @return always 0.
 */
int BCMFASTPATH
dhd_prot_process_ctrlbuf(dhd_pub_t *dhd)
{
	dhd_prot_t *prot = dhd->prot;
	msgbuf_ring_t *ring = &prot->d2hring_ctrl_cpln;

	/* Process all the messages - DTOH direction */
	while (!dhd_is_device_removed(dhd)) {
		uint8 *msg_addr;
		uint32 msg_len;

		if (dhd->hang_was_sent) {
			break;
		}

		/* Get the address of the next message to be read from ring */
		msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
		if (msg_addr == NULL) {
			/* ring is empty */
			break;
		}

		/* Prefetch data to populate the cache */
		OSL_PREFETCH(msg_addr);

		if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
			DHD_ERROR(("%s: process %s msg addr %p len %d\n",
				__FUNCTION__, ring->name, msg_addr, msg_len));
		}

		/* Write to dngl rd ptr */
		dhd_prot_upd_read_idx(dhd, ring);
	}

	return 0;
}
/**
 * Consume messages out of the D2H ring. Ensure that the message's DMA to host
 * memory has completed, before invoking the message handler via a table lookup
 * of the cmn_msg_hdr::msg_type.
 *
 * @param ring  D2H ring the items were fetched from (item_len must be set)
 * @param buf   start of one or more fetched work items
 * @param len   total length in bytes of the fetched items
 * @return BCME_OK, or BCME_ERROR on hang, invalid msg_type, zero item_len,
 *         or a truncated trailing item.
 */
static int BCMFASTPATH
dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 len)
{
	int buf_len = len;
	uint16 item_len;
	uint8 msg_type;
	cmn_msg_hdr_t *msg = NULL;
	int ret = BCME_OK;

	ASSERT(ring);
	item_len = ring->item_len;
	if (item_len == 0) {
		/* would otherwise divide/iterate forever below */
		DHD_ERROR(("%s: ringidx %d item_len %d buf_len %d\n",
			__FUNCTION__, ring->idx, item_len, buf_len));
		return BCME_ERROR;
	}

	while (buf_len > 0) {
		if (dhd->hang_was_sent) {
			ret = BCME_ERROR;
			goto done;
		}

		msg = (cmn_msg_hdr_t *)buf;

		/*
		 * Update the curr_rd to the current index in the ring, from where
		 * the work item is fetched. This way if the fetched work item
		 * fails in LIVELOCK, we can print the exact read index in the ring
		 * that shows up the corrupted work item.
		 */
		if ((ring->curr_rd + 1) >= ring->max_items) {
			ring->curr_rd = 0;
		} else {
			ring->curr_rd += 1;
		}

#if defined(PCIE_D2H_SYNC)
		/* Wait until DMA completes, then fetch msg_type */
		msg_type = dhd->prot->d2h_sync_cb(dhd, ring, msg, item_len);
#else
		msg_type = msg->msg_type;
#endif /* !PCIE_D2H_SYNC */

		/* Prefetch data to populate the cache */
		OSL_PREFETCH(buf + item_len);

		DHD_INFO(("msg_type %d item_len %d buf_len %d\n",
			msg_type, item_len, buf_len));

		if (msg_type == MSG_TYPE_LOOPBACK) {
			bcm_print_bytes("LPBK RESP: ", (uint8 *)msg, item_len);
			DHD_ERROR((" MSG_TYPE_LOOPBACK, len %d\n", item_len));
		}

		ASSERT(msg_type < DHD_PROT_FUNCS);
		if (msg_type >= DHD_PROT_FUNCS) {
			/* out-of-range type would index past the handler table */
			DHD_ERROR(("%s: msg_type %d item_len %d buf_len %d\n",
				__FUNCTION__, msg_type, item_len, buf_len));
			ret = BCME_ERROR;
			goto done;
		}

		/* dispatch to the registered per-message-type handler, if any */
		if (table_lookup[msg_type]) {
			table_lookup[msg_type](dhd, buf);
		}

		/* guard against a truncated final item */
		if (buf_len < item_len) {
			ret = BCME_ERROR;
			goto done;
		}
		buf_len = buf_len - item_len;
		buf = buf + item_len;
	}

done:
#ifdef DHD_RX_CHAINING
	dhd_rxchain_commit(dhd);
#endif
#if defined(DHD_LB)
	dhd_lb_dispatch(dhd, ring->idx);
#endif

	return ret;
} /* dhd_prot_process_msgtype */
/** Placeholder handler: intentionally ignores the message. */
static void
dhd_prot_noop(dhd_pub_t *dhd, void *msg)
{
	BCM_REFERENCE(dhd);
	BCM_REFERENCE(msg);
}
/** Called on MSG_TYPE_RING_STATUS message received from dongle.
 * Log-only for now: the status is not yet paired with an outstanding request.
 */
static void
dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg)
{
	pcie_ring_status_t *status = (pcie_ring_status_t *)msg;

	DHD_ERROR(("ring status: request_id %d, status 0x%04x, flow ring %d, write_idx %d \n",
		status->cmn_hdr.request_id, status->compl_hdr.status,
		status->compl_hdr.flow_ring_id, status->write_idx));
}
/** Called on MSG_TYPE_GEN_STATUS ('general status') message received from dongle.
 * Log-only for now: the status is not yet paired with an outstanding request.
 */
static void
dhd_prot_genstatus_process(dhd_pub_t *dhd, void *msg)
{
	pcie_gen_status_t *gstatus = (pcie_gen_status_t *)msg;

	DHD_ERROR(("ERROR: gen status: request_id %d, STATUS 0x%04x, flow ring %d \n",
		gstatus->cmn_hdr.request_id, gstatus->compl_hdr.status,
		gstatus->compl_hdr.flow_ring_id));
}
/**
 * Called on MSG_TYPE_IOCTLPTR_REQ_ACK ('ioctl ack') message received from dongle, meaning that the
 * dongle received the ioctl message in dongle memory.
 *
 * Clears the ACK-pending bit of the ioctl state machine; the actual response
 * (MSG_TYPE_IOCTL_CMPLT) is still expected afterwards.
 */
static void
dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg)
{
	uint32 pktid;
	ioctl_req_ack_msg_t *ioct_ack = (ioctl_req_ack_msg_t *)msg;
	unsigned long flags;

	pktid = ltoh32(ioct_ack->cmn_hdr.request_id);

#if defined(DHD_PKTID_AUDIT_RING)
	/* Skip DHD_IOCTL_REQ_PKTID = 0xFFFE */
	if (pktid != DHD_IOCTL_REQ_PKTID) {
		DHD_PKTID_AUDIT(dhd, dhd->prot->pktid_map_handle, pktid,
			DHD_TEST_IS_ALLOC);
	}
#endif /* DHD_PKTID_AUDIT_RING */

	/* ioctl_state is shared with the ioctl issue/response paths: take the lock */
	DHD_GENERAL_LOCK(dhd, flags);
	if ((dhd->prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) &&
		(dhd->prot->ioctl_state & MSGBUF_IOCTL_RESP_PENDING)) {
		dhd->prot->ioctl_state &= ~MSGBUF_IOCTL_ACK_PENDING;
	} else {
		/* unexpected ack: no transaction is in the ack-pending state */
		DHD_ERROR(("%s: received ioctl ACK with state %02x trans_id = %d\n",
			__FUNCTION__, dhd->prot->ioctl_state, dhd->prot->ioctl_trans_id));
	}
	DHD_GENERAL_UNLOCK(dhd, flags);

	DHD_CTL(("ioctl req ack: request_id %d, status 0x%04x, flow ring %d \n",
		ioct_ack->cmn_hdr.request_id, ioct_ack->compl_hdr.status,
		ioct_ack->compl_hdr.flow_ring_id));
	if (ioct_ack->compl_hdr.status != 0) {
		DHD_ERROR(("got an error status for the ioctl request...need to handle that\n"));
	}
}
/**
 * Called on MSG_TYPE_IOCTL_CMPLT message received from dongle.
 *
 * Validates the ioctl state machine and transaction id, retrieves the host
 * response buffer by pktid, copies the response payload into prot->retbuf,
 * and wakes the thread blocked in dhd_os_ioctl_resp_wait().
 */
static void
dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg)
{
	dhd_prot_t *prot = dhd->prot;
	uint32 pkt_id, xt_id;
	ioctl_comp_resp_msg_t *ioct_resp = (ioctl_comp_resp_msg_t *)msg;
	void *pkt;
	unsigned long flags;
	dhd_dma_buf_t retbuf;

	memset(&retbuf, 0, sizeof(dhd_dma_buf_t));

	pkt_id = ltoh32(ioct_resp->cmn_hdr.request_id);

#if defined(DHD_PKTID_AUDIT_RING)
#ifndef IOCTLRESP_USE_CONSTMEM
	DHD_PKTID_AUDIT(dhd, prot->pktid_map_handle, pkt_id, DHD_DUPLICATE_FREE);
#else
	DHD_PKTID_AUDIT(dhd, prot->pktid_map_handle_ioctl, pkt_id, DHD_DUPLICATE_FREE);
#endif /* !IOCTLRESP_USE_CONSTMEM */
#endif /* DHD_PKTID_AUDIT_RING */

	DHD_GENERAL_LOCK(dhd, flags);
	/* a response is only valid after the ack phase, while RESP_PENDING is set */
	if ((prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) ||
		!(prot->ioctl_state & MSGBUF_IOCTL_RESP_PENDING)) {
		DHD_ERROR(("%s: received ioctl response with state %02x trans_id = %d\n",
			__FUNCTION__, dhd->prot->ioctl_state, dhd->prot->ioctl_trans_id));
		/* reset ioctl state */
		prot->ioctl_state = 0;
		DHD_GENERAL_UNLOCK(dhd, flags);
		return;
	}
#ifndef IOCTLRESP_USE_CONSTMEM
	pkt = dhd_prot_packet_get(dhd, pkt_id, PKTTYPE_IOCTL_RX, TRUE);
#else
	dhd_prot_ioctl_ret_buffer_get(dhd, pkt_id, &retbuf);
	pkt = retbuf.va;
#endif /* !IOCTLRESP_USE_CONSTMEM */
	if (!pkt) {
		prot->ioctl_state = 0;
		DHD_GENERAL_UNLOCK(dhd, flags);
		DHD_ERROR(("%s: received ioctl response with NULL pkt\n", __FUNCTION__));
		return;
	}
	DHD_GENERAL_UNLOCK(dhd, flags);

	prot->ioctl_resplen = ltoh16(ioct_resp->resp_len);
	prot->ioctl_status = ltoh16(ioct_resp->compl_hdr.status);
	xt_id = ltoh16(ioct_resp->trans_id);
	if (xt_id != prot->ioctl_trans_id) {
		/* transaction id mismatch: drop the response without waking the waiter */
		ASSERT(0);
		goto exit;
	}

	DHD_CTL(("IOCTL_COMPLETE: req_id %x transid %d status %x resplen %d\n",
		pkt_id, xt_id, prot->ioctl_status, prot->ioctl_resplen));

	if (prot->ioctl_resplen > 0) {
#ifndef IOCTLRESP_USE_CONSTMEM
		bcopy(PKTDATA(dhd->osh, pkt), prot->retbuf.va, prot->ioctl_resplen);
#else
		bcopy(pkt, prot->retbuf.va, prot->ioctl_resplen);
#endif /* !IOCTLRESP_USE_CONSTMEM */
	}

	/* wake up any dhd_os_ioctl_resp_wait() */
	dhd_wakeup_ioctl_event(dhd, IOCTL_RETURN_ON_SUCCESS);

exit:
	/* response buffer is always released, even on trans-id mismatch */
#ifndef IOCTLRESP_USE_CONSTMEM
	dhd_prot_packet_free(dhd, pkt,
		PKTTYPE_IOCTL_RX, FALSE);
#else
	free_ioctl_return_buffer(dhd, &retbuf);
#endif /* !IOCTLRESP_USE_CONSTMEM */
}
/**
 * Called on MSG_TYPE_TX_STATUS message received from dongle.
 *
 * Reclaims the host tx packet identified by the completion's pktid: unmaps
 * its DMA mapping and frees it, or — with DHD_LB_TXC — hands it to the
 * tx-completion work queue for the tasklet to consume. Also maintains
 * active_tx_count and the HOSTAP wake lock held while tx is outstanding.
 */
static void BCMFASTPATH
dhd_prot_txstatus_process(dhd_pub_t *dhd, void *msg)
{
	dhd_prot_t *prot = dhd->prot;
	host_txbuf_cmpl_t * txstatus;
	unsigned long flags;
	uint32 pktid;
	void *pkt = NULL;
	dmaaddr_t pa;
	uint32 len;
	void *dmah;
	void *secdma;
	bool pkt_fate;

	/* locks required to protect circular buffer accesses */
	DHD_GENERAL_LOCK(dhd, flags);

	txstatus = (host_txbuf_cmpl_t *)msg;
	pktid = ltoh32(txstatus->cmn_hdr.request_id);
	pkt_fate = TRUE;

#if defined(DHD_PKTID_AUDIT_RING)
	DHD_PKTID_AUDIT(dhd, dhd->prot->pktid_map_handle, pktid,
		DHD_DUPLICATE_FREE);
#endif /* DHD_PKTID_AUDIT_RING */

	DHD_INFO(("txstatus for pktid 0x%04x\n", pktid));
	if (prot->active_tx_count) {
		prot->active_tx_count--;

		/* Release the Lock when no more tx packets are pending */
		if (dhd->op_mode == DHD_FLAG_HOSTAP_MODE && prot->active_tx_count == 0)
			DHD_OS_WAKE_UNLOCK(dhd);
	} else {
		/* more completions than posted packets: accounting error */
		DHD_ERROR(("Extra packets are freed\n"));
	}

	ASSERT(pktid != 0);

#if defined(DHD_LB_TXC) && !defined(BCM_SECURE_DMA)
	{
		int elem_ix;
		void **elem;
		bcm_workq_t *workq;

		pkt = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle,
			pktid, pa, len, dmah, secdma, PKTTYPE_DATA_TX);

		workq = &prot->tx_compl_prod;
		/*
		 * Produce the packet into the tx_compl workq for the tx compl tasklet
		 * to consume.
		 */
		OSL_PREFETCH(PKTTAG(pkt));

		/* fetch next available slot in workq */
		elem_ix = bcm_ring_prod(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);

		/* stash pa/len in the pkttag so the tasklet can unmap later */
		DHD_PKTTAG_SET_PA((dhd_pkttag_fr_t *)PKTTAG(pkt), pa);
		DHD_PKTTAG_SET_PA_LEN((dhd_pkttag_fr_t *)PKTTAG(pkt), len);

		if (elem_ix == BCM_RING_FULL) {
			/* fall back to the inline free path below */
			DHD_ERROR(("tx_compl_prod BCM_RING_FULL\n"));
			goto workq_ring_full;
		}

		elem = WORKQ_ELEMENT(void *, &prot->tx_compl_prod, elem_ix);
		*elem = pkt;
		smp_wmb();

		/* Sync WR index to consumer if the SYNC threshold has been reached */
		if (++prot->tx_compl_prod_sync >= DHD_LB_WORKQ_SYNC) {
			bcm_workq_prod_sync(workq);
			prot->tx_compl_prod_sync = 0;
		}

		DHD_INFO(("%s: tx_compl_prod pkt<%p> sync<%d>\n",
			__FUNCTION__, pkt, prot->tx_compl_prod_sync));

		DHD_GENERAL_UNLOCK(dhd, flags);
		return;
	}

workq_ring_full:

#endif /* !DHD_LB_TXC */

	/*
	 * We can come here if no DHD_LB_TXC is enabled and in case where DHD_LB_TXC is
	 * defined but the tx_compl queue is full.
	 */
	if (pkt == NULL) {
		pkt = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle,
			pktid, pa, len, dmah, secdma, PKTTYPE_DATA_TX);
	}

	if (pkt) {
#ifdef DBG_PKT_MON
		/*
		 * tx_status in TX completion message cannot be used. As a WAR,
		 * send d11 tx_status through unused status field of PCIe
		 * completion header.
		 */
		if (dhd->d11_tx_status) {
			uint16 tx_status;

			tx_status = ltoh16(txstatus->compl_hdr.status);
			pkt_fate = (tx_status == WLFC_CTL_PKTFLAG_DISCARD) ? TRUE : FALSE;
			DHD_DBG_PKT_MON_TX_STATUS(dhd, pkt, pktid, tx_status);
		}
#endif /* DBG_PKT_MON */
		if (SECURE_DMA_ENAB(dhd->osh)) {
			int offset = 0;
			BCM_REFERENCE(offset);

			if (dhd->prot->tx_metadata_offset)
				offset = dhd->prot->tx_metadata_offset + ETHER_HDR_LEN;
			SECURE_DMA_UNMAP(dhd->osh, (uint) pa,
				(uint) dhd->prot->tx_metadata_offset, DMA_RX, 0, dmah,
				secdma, offset);
		} else {
			DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
		}
#if defined(BCMPCIE)
		dhd_txcomplete(dhd, pkt, pkt_fate);
#endif

#if DHD_DBG_SHOW_METADATA
		if (dhd->prot->metadata_dbg &&
			dhd->prot->tx_metadata_offset && txstatus->metadata_len) {
			uchar *ptr;
			/* The Ethernet header of TX frame was copied and removed.
			 * Here, move the data pointer forward by Ethernet header size.
			 */
			PKTPULL(dhd->osh, pkt, ETHER_HDR_LEN);
			ptr = PKTDATA(dhd->osh, pkt) - (dhd->prot->tx_metadata_offset);
			bcm_print_bytes("txmetadata", ptr, txstatus->metadata_len);
			dhd_prot_print_metadata(dhd, ptr, txstatus->metadata_len);
		}
#endif /* DHD_DBG_SHOW_METADATA */
		PKTFREE(dhd->osh, pkt, TRUE);
		DHD_FLOWRING_TXSTATUS_CNT_UPDATE(dhd->bus, txstatus->compl_hdr.flow_ring_id,
			txstatus->tx_status);
	}

	DHD_GENERAL_UNLOCK(dhd, flags);

	return;
} /* dhd_prot_txstatus_process */
/**
 * Called on MSG_TYPE_WL_EVENT message received from dongle.
 *
 * Looks up the host event buffer by pktid, replenishes the dongle's event
 * buffer pool, trims the packet to the reported event length and forwards it
 * up via dhd_bus_rx_frame().
 */
static void
dhd_prot_event_process(dhd_pub_t *dhd, void *msg)
{
	wlevent_req_msg_t *evnt;
	uint32 bufid;
	uint16 buflen;
	int ifidx = 0;
	void* pkt;
	unsigned long flags;
	dhd_prot_t *prot = dhd->prot;

	/* Event complete header */
	evnt = (wlevent_req_msg_t *)msg;
	bufid = ltoh32(evnt->cmn_hdr.request_id);

#if defined(DHD_PKTID_AUDIT_RING)
	DHD_PKTID_AUDIT(dhd, dhd->prot->pktid_map_handle, bufid,
		DHD_DUPLICATE_FREE);
#endif /* DHD_PKTID_AUDIT_RING */

	buflen = ltoh16(evnt->event_data_len);

	ifidx = BCMMSGBUF_API_IFIDX(&evnt->cmn_hdr);

	/* Post another rxbuf to the device */
	if (prot->cur_event_bufs_posted) {
		prot->cur_event_bufs_posted--;
	}
	dhd_msgbuf_rxbuf_post_event_bufs(dhd);

	/* locks required to protect pktid_map */
	DHD_GENERAL_LOCK(dhd, flags);
	pkt = dhd_prot_packet_get(dhd, bufid, PKTTYPE_EVENT_RX, TRUE);
	DHD_GENERAL_UNLOCK(dhd, flags);

	if (!pkt) {
		return;
	}

	/* DMA RX offset updated through shared area */
	if (dhd->prot->rx_dataoffset) {
		PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset);
	}

	/* shrink packet to the event payload length reported by the dongle */
	PKTSETLEN(dhd->osh, pkt, buflen);

	dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1);
}
/**
 * Called on MSG_TYPE_RX_CMPLT message received from dongle.
 *
 * Retrieves the host rx buffer by pktid, replenishes posted rx buffers,
 * strips the data offset, sets the real packet length and forwards the frame
 * upward — directly, via rx chaining, or via the load-balancing rx queue,
 * depending on build options.
 */
static void BCMFASTPATH
dhd_prot_rxcmplt_process(dhd_pub_t *dhd, void *msg)
{
	host_rxbuf_cmpl_t *rxcmplt_h;
	uint16 data_offset;             /* offset at which data starts */
	void *pkt;
	unsigned long flags;
	uint ifidx;
	uint32 pktid;
#if defined(DHD_LB_RXC)
	/* with rx-completion load balancing the pktid is recycled, not freed */
	const bool free_pktid = FALSE;
#else
	const bool free_pktid = TRUE;
#endif /* DHD_LB_RXC */

	/* RXCMPLT HDR */
	rxcmplt_h = (host_rxbuf_cmpl_t *)msg;

	/* offset from which data starts is populated in rxstatus0 */
	data_offset = ltoh16(rxcmplt_h->data_offset);
	pktid = ltoh32(rxcmplt_h->cmn_hdr.request_id);

#if defined(DHD_PKTID_AUDIT_RING)
	DHD_PKTID_AUDIT(dhd, dhd->prot->pktid_map_handle, pktid,
		DHD_DUPLICATE_FREE);
#endif /* DHD_PKTID_AUDIT_RING */

	DHD_GENERAL_LOCK(dhd, flags);
	pkt = dhd_prot_packet_get(dhd, pktid, PKTTYPE_DATA_RX, free_pktid);
	DHD_GENERAL_UNLOCK(dhd, flags);

	if (!pkt) {
		return;
	}

	/* Post another set of rxbufs to the device */
	dhd_prot_return_rxbuf(dhd, pktid, 1);

	DHD_INFO(("id 0x%04x, offset %d, len %d, idx %d, phase 0x%02x, pktdata %p, metalen %d\n",
		ltoh32(rxcmplt_h->cmn_hdr.request_id), data_offset, ltoh16(rxcmplt_h->data_len),
		rxcmplt_h->cmn_hdr.if_id, rxcmplt_h->cmn_hdr.flags, PKTDATA(dhd->osh, pkt),
		ltoh16(rxcmplt_h->metadata_len)));

#if DHD_DBG_SHOW_METADATA
	if (dhd->prot->metadata_dbg &&
		dhd->prot->rx_metadata_offset && rxcmplt_h->metadata_len) {
		uchar *ptr;
		ptr = PKTDATA(dhd->osh, pkt) - (dhd->prot->rx_metadata_offset);
		/* header followed by data */
		bcm_print_bytes("rxmetadata", ptr, rxcmplt_h->metadata_len);
		dhd_prot_print_metadata(dhd, ptr, rxcmplt_h->metadata_len);
	}
#endif /* DHD_DBG_SHOW_METADATA */

	if (rxcmplt_h->flags & BCMPCIE_PKT_FLAGS_FRAME_802_11) {
		DHD_INFO(("D11 frame rxed \n"));
	}

	/* data_offset from buf start */
	if (data_offset) {
		/* data offset given from dongle after split rx */
		PKTPULL(dhd->osh, pkt, data_offset); /* data offset */
	} else {
		/* DMA RX offset updated through shared area */
		if (dhd->prot->rx_dataoffset) {
			PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset);
		}
	}
	/* Actual length of the packet */
	PKTSETLEN(dhd->osh, pkt, ltoh16(rxcmplt_h->data_len));

	ifidx = rxcmplt_h->cmn_hdr.if_id;

#if defined(DHD_LB_RXP)
	dhd_lb_rx_pkt_enqueue(dhd, pkt, ifidx);
#else  /* ! DHD_LB_RXP */
#ifdef DHD_RX_CHAINING
	/* Chain the packets */
	dhd_rxchain_frame(dhd, pkt, ifidx);
#else /* ! DHD_RX_CHAINING */
	/* offset from which data starts is populated in rxstatus0 */
	dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1);
#endif /* ! DHD_RX_CHAINING */
#endif /* ! DHD_LB_RXP */
} /* dhd_prot_rxcmplt_process */
/** Stop protocol: sync w/dongle state. Currently only traces entry. */
void dhd_prot_stop(dhd_pub_t *dhd)
{
	ASSERT(dhd);
	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
}
/* Add any protocol-specific data header.
 * Caller must reserve prot_hdrlen prepend space.
 * For the msgbuf protocol the header lives in the ring work item, so no
 * per-packet header is prepended here (intentional no-op).
 */
void BCMFASTPATH
dhd_prot_hdrpush(dhd_pub_t *dhd, int ifidx, void *PKTBUF)
{
	return;
}
/** Length of the per-packet protocol header; always 0 for msgbuf. */
uint
dhd_prot_hdrlen(dhd_pub_t *dhd, void *PKTBUF)
{
	return 0;
}
#define PKTBUF pktbuf
/**
 * Called when a tx ethernet packet has been dequeued from a flow queue, and has to be inserted in
 * the corresponding flow ring.
 *
 * Allocates a pktid and a tx descriptor slot, DMA-maps the payload (the
 * ethernet header is copied into the descriptor and stripped from the mapped
 * region), optionally maps a metadata region in the packet headroom, then
 * publishes the descriptor to the flow ring (batched when TXP_FLUSH_NITEMS
 * is defined).
 *
 * @return BCME_OK on success; BCME_NORESOURCE when the flow ring table,
 *         pktid pool or ring space is exhausted (caller requeues the packet).
 */
int BCMFASTPATH
dhd_prot_txdata(dhd_pub_t *dhd, void *PKTBUF, uint8 ifidx)
{
	unsigned long flags;
	dhd_prot_t *prot = dhd->prot;
	host_txbuf_post_t *txdesc = NULL;
	dmaaddr_t pa, meta_pa;
	uint8 *pktdata;
	uint32 pktlen;
	uint32 pktid;
	uint8 prio;
	uint16 flowid = 0;
	uint16 alloced = 0;
	uint16 headroom;
	msgbuf_ring_t *ring;
	flow_ring_table_t *flow_ring_table;
	flow_ring_node_t *flow_ring_node;

	if (dhd->flow_ring_table == NULL) {
		return BCME_NORESOURCE;
	}

	/* flowid was stamped on the packet by the flow-queue layer */
	flowid = DHD_PKT_GET_FLOWID(PKTBUF);

	flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
	flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid];

	ring = (msgbuf_ring_t *)flow_ring_node->prot_info;

	DHD_GENERAL_LOCK(dhd, flags);

	/* Create a unique 32-bit packet id */
	pktid = DHD_NATIVE_TO_PKTID_RSV(dhd, dhd->prot->pktid_map_handle, PKTBUF);
#if defined(DHD_PCIE_PKTID)
	if (pktid == DHD_PKTID_INVALID) {
		DHD_ERROR(("Pktid pool depleted.\n"));
		/*
		 * If we return error here, the caller would queue the packet
		 * again. So we'll just free the skb allocated in DMA Zone.
		 * Since we have not freed the original SKB yet the caller would
		 * requeue the same.
		 */
		goto err_no_res_pktfree;
	}
#endif /* DHD_PCIE_PKTID */

	/* Reserve space in the circular buffer */
	txdesc = (host_txbuf_post_t *)
		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
	if (txdesc == NULL) {
#if defined(DHD_PCIE_PKTID)
		void *dmah;
		void *secdma;
		/* Free up the PKTID. physaddr and pktlen will be garbage. */
		DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle, pktid,
			pa, pktlen, dmah, secdma, PKTTYPE_NO_CHECK);
#endif /* DHD_PCIE_PKTID */
		DHD_INFO(("%s:%d: HTOD Msgbuf Not available TxCount = %d\n",
			__FUNCTION__, __LINE__, prot->active_tx_count));
		goto err_no_res_pktfree;
	}

#ifdef DBG_PKT_MON
	DHD_DBG_PKT_MON_TX(dhd, PKTBUF, pktid);
#endif /* DBG_PKT_MON */

	/* Extract the data pointer and length information */
	pktdata = PKTDATA(dhd->osh, PKTBUF);
	pktlen = PKTLEN(dhd->osh, PKTBUF);

	/* Ethernet header: Copy before we cache flush packet using DMA_MAP */
	bcopy(pktdata, txdesc->txhdr, ETHER_HDR_LEN);

	/* Extract the ethernet header and adjust the data pointer and length */
	pktdata = PKTPULL(dhd->osh, PKTBUF, ETHER_HDR_LEN);
	pktlen -= ETHER_HDR_LEN;

	/* Map the data pointer to a DMA-able address */
	if (SECURE_DMA_ENAB(dhd->osh)) {
		int offset = 0;
		BCM_REFERENCE(offset);

		if (prot->tx_metadata_offset) {
			offset = prot->tx_metadata_offset + ETHER_HDR_LEN;
		}

		pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen,
			DMA_TX, PKTBUF, 0, ring->dma_buf.secdma, offset);
	} else {
		pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen, DMA_TX, PKTBUF, 0);
	}

	if (PHYSADDRISZERO(pa)) {
		DHD_ERROR(("Something really bad, unless 0 is a valid phyaddr\n"));
		ASSERT(0);
	}

	/* No need to lock. Save the rest of the packet's metadata */
	DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_map_handle, PKTBUF, pktid,
		pa, pktlen, DMA_TX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_TX);

#ifdef TXP_FLUSH_NITEMS
	/* remember the start of the batch so the flush can publish it in one go */
	if (ring->pend_items_count == 0) {
		ring->start_addr = (void *)txdesc;
	}
	ring->pend_items_count++;
#endif

	/* Form the Tx descriptor message buffer */

	/* Common message hdr */
	txdesc->cmn_hdr.msg_type = MSG_TYPE_TX_POST;
	txdesc->cmn_hdr.if_id = ifidx;

	txdesc->flags = BCMPCIE_PKT_FLAGS_FRAME_802_3;
	prio = (uint8)PKTPRIO(PKTBUF);

	txdesc->flags |= (prio & 0x7) << BCMPCIE_PKT_FLAGS_PRIO_SHIFT;
	txdesc->seg_cnt = 1;

	txdesc->data_len = htol16((uint16) pktlen);
	txdesc->data_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
	txdesc->data_buf_addr.low_addr = htol32(PHYSADDRLO(pa));

	/* Move data pointer to keep ether header in local PKTBUF for later reference */
	PKTPUSH(dhd->osh, PKTBUF, ETHER_HDR_LEN);

	/* Handle Tx metadata */
	headroom = (uint16)PKTHEADROOM(dhd->osh, PKTBUF);
	if (prot->tx_metadata_offset && (headroom < prot->tx_metadata_offset)) {
		DHD_ERROR(("No headroom for Metadata tx %d %d\n",
			prot->tx_metadata_offset, headroom));
	}

	if (prot->tx_metadata_offset && (headroom >= prot->tx_metadata_offset)) {
		DHD_TRACE(("Metadata in tx %d\n", prot->tx_metadata_offset));

		/* Adjust the data pointer to account for meta data in DMA_MAP */
		PKTPUSH(dhd->osh, PKTBUF, prot->tx_metadata_offset);

		if (SECURE_DMA_ENAB(dhd->osh)) {
			meta_pa = SECURE_DMA_MAP_TXMETA(dhd->osh, PKTDATA(dhd->osh, PKTBUF),
				prot->tx_metadata_offset + ETHER_HDR_LEN, DMA_RX, PKTBUF,
				0, ring->dma_buf.secdma);
		} else {
			meta_pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF),
				prot->tx_metadata_offset, DMA_RX, PKTBUF, 0);
		}

		if (PHYSADDRISZERO(meta_pa)) {
			DHD_ERROR(("Something really bad, unless 0 is a valid phyaddr\n"));
			ASSERT(0);
		}

		/* Adjust the data pointer back to original value */
		PKTPULL(dhd->osh, PKTBUF, prot->tx_metadata_offset);

		txdesc->metadata_buf_len = prot->tx_metadata_offset;
		txdesc->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(meta_pa));
		txdesc->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(meta_pa));
	} else {
		txdesc->metadata_buf_len = htol16(0);
		txdesc->metadata_buf_addr.high_addr = 0;
		txdesc->metadata_buf_addr.low_addr = 0;
	}

#if defined(DHD_PKTID_AUDIT_RING)
	DHD_PKTID_AUDIT(dhd, prot->pktid_map_handle, pktid,
		DHD_DUPLICATE_ALLOC);
#endif /* DHD_PKTID_AUDIT_RING */

	txdesc->cmn_hdr.request_id = htol32(pktid);

	DHD_TRACE(("txpost: data_len %d, pktid 0x%04x\n", txdesc->data_len,
		txdesc->cmn_hdr.request_id));

	/* Update the write pointer in TCM & ring bell */
#ifdef TXP_FLUSH_NITEMS
	/* Flush if we have either hit the txp_threshold or if this msg is */
	/* occupying the last slot in the flow_ring - before wrap around. */
	if ((ring->pend_items_count == prot->txp_threshold) ||
		((uint8 *) txdesc == (uint8 *) DHD_RING_END_VA(ring))) {
		dhd_prot_txdata_write_flush(dhd, flowid, TRUE);
	}
#else
	/* update ring's WR index and ring doorbell to dongle */
	dhd_prot_ring_write_complete(dhd, ring, txdesc, 1);
#endif

	prot->active_tx_count++;

	/*
	 * Take a wake lock, do not sleep if we have atleast one packet
	 * to finish.
	 */
	if (dhd->op_mode == DHD_FLAG_HOSTAP_MODE && prot->active_tx_count == 1)
		DHD_OS_WAKE_LOCK(dhd);

	DHD_GENERAL_UNLOCK(dhd, flags);

	return BCME_OK;

err_no_res_pktfree:

	DHD_GENERAL_UNLOCK(dhd, flags);

	return BCME_NORESOURCE;
} /* dhd_prot_txdata */
/* called with a lock */
/** Optimization to write "n" tx items at a time to ring.
 *
 * Publishes any tx descriptors batched on the flow ring (TXP_FLUSH_NITEMS):
 * updates the write index for all pending items and rings the dongle
 * doorbell once. No-op when nothing is pending or batching is compiled out.
 *
 * @param in_lock  TRUE if the caller already holds the general lock.
 */
void BCMFASTPATH
dhd_prot_txdata_write_flush(dhd_pub_t *dhd, uint16 flowid, bool in_lock)
{
#ifdef TXP_FLUSH_NITEMS
	unsigned long flags = 0;
	flow_ring_table_t *flow_ring_table;
	flow_ring_node_t *flow_ring_node;
	msgbuf_ring_t *ring;

	if (dhd->flow_ring_table == NULL) {
		return;
	}

	if (!in_lock) {
		DHD_GENERAL_LOCK(dhd, flags);
	}

	flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
	flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid];
	ring = (msgbuf_ring_t *)flow_ring_node->prot_info;

	if (ring->pend_items_count) {
		/* update ring's WR index and ring doorbell to dongle */
		dhd_prot_ring_write_complete(dhd, ring, ring->start_addr,
			ring->pend_items_count);
		ring->pend_items_count = 0;
		ring->start_addr = NULL;
	}

	if (!in_lock) {
		DHD_GENERAL_UNLOCK(dhd, flags);
	}
#endif /* TXP_FLUSH_NITEMS */
}
#undef PKTBUF /* Only defined in the above routine */
/** Strip the per-packet protocol header on rx; nothing to do for msgbuf. */
int BCMFASTPATH
dhd_prot_hdrpull(dhd_pub_t *dhd, int *ifidx, void *pkt, uchar *buf, uint *len)
{
	return 0;
}
/**
 * Post a set of receive buffers to the dongle.
 *
 * Accounts for @rxcnt consumed rx buffers and replenishes the pool once the
 * outstanding count drops below the repost threshold. With DHD_LB_RXC the
 * freed pktid is instead produced into the rx-completion work queue for
 * recycling by the consumer.
 */
static void BCMFASTPATH
dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint32 pktid, uint32 rxcnt)
{
	dhd_prot_t *prot = dhd->prot;
#if defined(DHD_LB_RXC)
	int elem_ix;
	uint32 *elem;
	bcm_workq_t *workq;

	workq = &prot->rx_compl_prod;

	/* Produce the work item */
	elem_ix = bcm_ring_prod(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
	if (elem_ix == BCM_RING_FULL) {
		DHD_ERROR(("%s LB RxCompl workQ is full\n", __FUNCTION__));
		ASSERT(0);
		return;
	}

	elem = WORKQ_ELEMENT(uint32, workq, elem_ix);
	*elem = pktid;

	smp_wmb();

	/* Sync WR index to consumer if the SYNC threshold has been reached */
	if (++prot->rx_compl_prod_sync >= DHD_LB_WORKQ_SYNC) {
		bcm_workq_prod_sync(workq);
		prot->rx_compl_prod_sync = 0;
	}

	DHD_INFO(("%s: rx_compl_prod pktid<%u> sync<%d>\n",
		__FUNCTION__, pktid, prot->rx_compl_prod_sync));
#endif /* DHD_LB_RXC */

	if (prot->rxbufpost >= rxcnt) {
		prot->rxbufpost -= rxcnt;
	} else {
		/* ASSERT(0); */ /* would underflow; clamp to zero instead */
		prot->rxbufpost = 0;
	}

#if !defined(DHD_LB_RXC)
	/* refill once posted count falls below the repost threshold */
	if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD)) {
		dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */
	}
#endif /* !DHD_LB_RXC */
}
/**
 * Called before an ioctl is sent to the dongle.
 *
 * For the "pcie_bus_tput" iovar, patch the parameter block with the host-side
 * DMA buffer address and length so the dongle can run the bus-throughput test
 * against host memory.
 */
static void
dhd_prot_wlioctl_intercept(dhd_pub_t *dhd, wl_ioctl_t * ioc, void * buf)
{
	dhd_prot_t *prot = dhd->prot;
	pcie_bus_tput_params_t *tput_params;

	if (ioc->cmd != WLC_SET_VAR || buf == NULL || strcmp(buf, "pcie_bus_tput") != 0) {
		return;
	}

	/* parameters follow the NUL-terminated iovar name */
	tput_params = (pcie_bus_tput_params_t *)((char *)buf + strlen("pcie_bus_tput") + 1);
	bcopy(&prot->host_bus_throughput_buf.pa, &tput_params->host_buf_addr,
		sizeof(tput_params->host_buf_addr));
	tput_params->host_buf_len = DHD_BUS_TPUT_BUF_LEN;
}
#if defined(CUSTOMER_HW4) && defined(CONFIG_CONTROL_PM)
extern bool g_pm_control;
#endif /* CUSTOMER_HW4 & CONFIG_CONTROL_PM */
/** Use protocol to issue ioctl to dongle. Only one ioctl may be in transit.
 *
 * Dispatches a SET to dhd_msgbuf_set_ioctl() and a query to
 * dhd_msgbuf_query_ioctl(); positive results are normalized to 0 and
 * failures recorded in dhd->dongle_error. Also intercepts outgoing
 * "pcie_bus_tput" and snoops successful "wme_dp" sets into dhd->wme_dp.
 *
 * @return 0 on success, negative error code otherwise.
 */
int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len)
{
	int ret = -1;
	uint8 action;

	if ((dhd->busstate == DHD_BUS_DOWN) || dhd->hang_was_sent) {
		DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
		goto done;
	}

	if (dhd->busstate == DHD_BUS_SUSPEND) {
		DHD_ERROR(("%s : bus is suspended\n", __FUNCTION__));
		goto done;
	}

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

#ifdef CUSTOMER_HW4
	if (ioc->cmd == WLC_SET_PM) {
#ifdef CONFIG_CONTROL_PM
		/* while PM is host-controlled, ignore dongle PM change requests */
		if (g_pm_control == TRUE) {
			DHD_ERROR(("%s: SET PM ignored!(Requested:%d)\n",
				__FUNCTION__, *(char *)buf));
			goto done;
		}
#endif /* CONFIG_CONTROL_PM */
		DHD_ERROR(("%s: SET PM to %d\n", __FUNCTION__, *(char *)buf));
	}
#endif /* CUSTOMER_HW4 */

	ASSERT(len <= WLC_IOCTL_MAXLEN);

	if (len > WLC_IOCTL_MAXLEN) {
		goto done;
	}

	action = ioc->set;

	dhd_prot_wlioctl_intercept(dhd, ioc, buf);

	if (action & WL_IOCTL_ACTION_SET) {
		ret = dhd_msgbuf_set_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
	} else {
		ret = dhd_msgbuf_query_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
		if (ret > 0) {
			/* positive query result is the response length */
			ioc->used = ret;
		}
	}

	/* Too many programs assume ioctl() returns 0 on success */
	if (ret >= 0) {
		ret = 0;
	} else {
		DHD_ERROR(("%s: status ret value is %d \n", __FUNCTION__, ret));
		dhd->dongle_error = ret;
	}

	if (!ret && ioc->cmd == WLC_SET_VAR && buf != NULL) {
		/* Intercept the wme_dp ioctl here */
		if (!strcmp(buf, "wme_dp")) {
			int slen, val = 0;

			slen = strlen("wme_dp") + 1;
			if (len >= (int)(slen + sizeof(int))) {
				bcopy(((char *)buf + slen), &val, sizeof(int));
			}
			dhd->wme_dp = (uint8) ltoh32(val);
		}
	}

done:
	return ret;
} /* dhd_prot_ioctl */
/** test / loopback
 *
 * Builds a MSG_TYPE_LOOPBACK request on the H2D control ring — the allocated
 * slot is filled with a repeating 0..255 byte pattern — and rings the
 * doorbell so the dongle echoes it back as a loopback response.
 *
 * @param len  requested payload length (clamped to the ring item size)
 * @return always 0, including when no ring space was available.
 */
int
dhdmsgbuf_lpbk_req(dhd_pub_t *dhd, uint len)
{
	unsigned long flags;
	dhd_prot_t *prot = dhd->prot;
	uint16 alloced = 0;

	ioct_reqst_hdr_t *ioct_rqst;

	uint16 hdrlen = sizeof(ioct_reqst_hdr_t);
	uint16 msglen = len + hdrlen;
	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;

	msglen = ALIGN_SIZE(msglen, DMA_ALIGN_LEN);
	msglen = LIMIT_TO_MAX(msglen, MSGBUF_MAX_MSG_SIZE);

	DHD_GENERAL_LOCK(dhd, flags);

	ioct_rqst = (ioct_reqst_hdr_t *)
		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);

	if (ioct_rqst == NULL) {
		DHD_GENERAL_UNLOCK(dhd, flags);
		return 0;
	}

	{
		uint8 *ptr;
		uint16 i;

		/* fill the whole request with a recognizable pattern */
		ptr = (uint8 *)ioct_rqst;
		for (i = 0; i < msglen; i++) {
			ptr[i] = i % 256;
		}
	}

	/* Common msg buf hdr */
	ioct_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
	ring->seqnum++;

	ioct_rqst->msg.msg_type = MSG_TYPE_LOOPBACK;
	ioct_rqst->msg.if_id = 0;

	bcm_print_bytes("LPBK REQ: ", (uint8 *)ioct_rqst, msglen);

	/* update ring's WR index and ring doorbell to dongle */
	dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1);
	DHD_GENERAL_UNLOCK(dhd, flags);

	return 0;
}
/** test / loopback: release the source and destination DMA buffers. */
void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dmaxfer)
{
	if (dmaxfer != NULL) {
		dhd_dma_buf_free(dhd, &dmaxfer->srcmem);
		dhd_dma_buf_free(dhd, &dmaxfer->dstmem);
	}
}
/**
 * test / loopback: (re)allocate source/destination DMA buffers and seed
 * the source with a known byte pattern for later comparison.
 */
int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len,
	uint srcdelay, uint destdelay, dhd_dmaxfer_t *dmaxfer)
{
	uint idx;

	if (dmaxfer == NULL) {
		return BCME_ERROR;
	}

	/* Drop any buffers left over from a previous run */
	dmaxfer_free_dmaaddr(dhd, dmaxfer);

	if (dhd_dma_buf_alloc(dhd, &dmaxfer->srcmem, len)) {
		return BCME_NOMEM;
	}

	/* destination is slightly larger to absorb dongle-side padding */
	if (dhd_dma_buf_alloc(dhd, &dmaxfer->dstmem, len + 8)) {
		dhd_dma_buf_free(dhd, &dmaxfer->srcmem);
		return BCME_NOMEM;
	}

	dmaxfer->len = len;

	/* Seed the source buffer with a repeating 0..255 pattern */
	for (idx = 0; idx < dmaxfer->len; idx++) {
		((uint8 *)dmaxfer->srcmem.va)[idx] = idx % 256;
	}
	OSL_CACHE_FLUSH(dmaxfer->srcmem.va, dmaxfer->len);

	dmaxfer->srcdelay = srcdelay;
	dmaxfer->destdelay = destdelay;

	return BCME_OK;
} /* dmaxfer_prepare_dmaaddr */
static void
dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg)
{
	dhd_prot_t *prot = dhd->prot;
	dhd_dmaxfer_t *xfer = &prot->dmaxfer;

	/* make the dongle-written destination data visible to the host CPU */
	OSL_CACHE_INV(xfer->dstmem.va, xfer->len);

	if ((xfer->srcmem.va != NULL) && (xfer->dstmem.va != NULL)) {
		if (memcmp(xfer->srcmem.va, xfer->dstmem.va, xfer->len) != 0) {
			/* mismatch: dump both buffers for offline comparison */
			bcm_print_bytes("XFER SRC: ", xfer->srcmem.va, xfer->len);
			bcm_print_bytes("XFER DST: ", xfer->dstmem.va, xfer->len);
		} else {
			DHD_INFO(("DMA successful\n"));
		}
	}

	dmaxfer_free_dmaaddr(dhd, xfer);
	dhd->prot->dmaxfer.in_progress = FALSE;
}
/**
 * Test functionality.
 * Transfers bytes from host to dongle and to host again using DMA
 * This function is not reentrant, as prot->dmaxfer.in_progress is not protected
 * by a spinlock.
 *
 * @param len       requested transfer length, clamped to DMA_XFER_LEN_LIMIT
 * @param srcdelay  delay parameter passed to the dongle for the source side
 * @param destdelay delay parameter passed to the dongle for the dest side
 * @return BCME_OK on submission (also when a transfer is already running),
 *         BCME_NOMEM when buffers or ring space cannot be obtained
 */
int
dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd, uint len, uint srcdelay, uint destdelay)
{
	unsigned long flags;
	int ret = BCME_OK;
	dhd_prot_t *prot = dhd->prot;
	pcie_dma_xfer_params_t *dmap;
	uint32 xferlen = LIMIT_TO_MAX(len, DMA_XFER_LEN_LIMIT);
	uint16 alloced = 0;
	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;

	/* only one loopback transfer at a time (flag is not lock-protected) */
	if (prot->dmaxfer.in_progress) {
		DHD_ERROR(("DMA is in progress...\n"));
		return ret;
	}

	prot->dmaxfer.in_progress = TRUE;
	if ((ret = dmaxfer_prepare_dmaaddr(dhd, xferlen, srcdelay, destdelay,
		&prot->dmaxfer)) != BCME_OK) {
		prot->dmaxfer.in_progress = FALSE;
		return ret;
	}

	DHD_GENERAL_LOCK(dhd, flags);

	/* reserve one work item on the H2D control submission ring */
	dmap = (pcie_dma_xfer_params_t *)
		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);

	if (dmap == NULL) {
		dmaxfer_free_dmaaddr(dhd, &prot->dmaxfer);
		prot->dmaxfer.in_progress = FALSE;
		DHD_GENERAL_UNLOCK(dhd, flags);
		return BCME_NOMEM;
	}

	/* Common msg buf hdr */
	dmap->cmn_hdr.msg_type = MSG_TYPE_LPBK_DMAXFER;
	dmap->cmn_hdr.request_id = htol32(DHD_FAKE_PKTID);
	dmap->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
	ring->seqnum++;

	/* physical addresses of the host src/dst buffers, little-endian */
	dmap->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.srcmem.pa));
	dmap->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.srcmem.pa));
	dmap->host_ouput_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.dstmem.pa));
	dmap->host_ouput_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.dstmem.pa));
	dmap->xfer_len = htol32(prot->dmaxfer.len);
	dmap->srcdelay = htol32(prot->dmaxfer.srcdelay);
	dmap->destdelay = htol32(prot->dmaxfer.destdelay);

	/* update ring's WR index and ring doorbell to dongle */
	dhd_prot_ring_write_complete(dhd, ring, dmap, 1);
	DHD_GENERAL_UNLOCK(dhd, flags);

	DHD_ERROR(("DMA Started...\n"));

	return BCME_OK;
} /* dhdmsgbuf_dmaxfer_req */
/**
 * Called in the process of submitting a GET ioctl to the dongle.
 * "bcmerror"/"bcmerrorstr" queries are answered from the locally cached
 * dongle_error without going to the dongle.
 *
 * @param buf  in/out: iovar name on entry, response data on return
 * @param len  caller's buffer length (also the max response length)
 * @return 0 on success, negative BCME_/errno-style code on failure
 */
static int
dhd_msgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
{
	int ret = 0;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	/* Respond "bcmerror" and "bcmerrorstr" with local cache */
	if (cmd == WLC_GET_VAR && buf)
	{
		if (!strcmp((char *)buf, "bcmerrorstr"))
		{
			strncpy((char *)buf, bcmerrorstr(dhd->dongle_error), BCME_STRLEN);
			/* strncpy() does not guarantee NUL termination (CERT STR32-C) */
			((char *)buf)[BCME_STRLEN - 1] = '\0';
			goto done;
		}
		else if (!strcmp((char *)buf, "bcmerror"))
		{
			*(int *)buf = dhd->dongle_error;
			goto done;
		}
	}

	ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx);
	if (ret < 0) {
		/* Request was never posted (e.g. BCME_BUSY): do not wait for a
		 * completion that will never arrive, which previously forced a
		 * full ioctl-response timeout.
		 */
		DHD_ERROR(("%s: dhd_fillup_ioct_reqst failed %d\n", __FUNCTION__, ret));
		goto done;
	}

	DHD_CTL(("query_ioctl: ACTION %d ifdix %d cmd %d len %d \n",
		action, ifidx, cmd, len));

	/* wait for IOCTL completion message from dongle and get first fragment */
	ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf);

done:
	return ret;
}
/**
 * Waits for IOCTL completion message from the dongle, copies this into caller
 * provided parameter 'buf'.
 *
 * On timeout: bumps rxcnt_timeout, optionally triggers a socram dump, and
 * returns -ETIMEDOUT. On dongle trap: returns -EREMOTEIO. In every exit path
 * the ioctl bookkeeping state (state/resplen/received/curr cmd) is cleared
 * under the general lock so a subsequent ioctl can proceed.
 *
 * @param len  caller's buffer length; the response is truncated to it
 * @param buf  may be NULL when the caller does not want the payload
 * @return ioctl status from the dongle, or a negative errno on failure
 */
static int
dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t *dhd, uint32 len, void *buf)
{
	dhd_prot_t *prot = dhd->prot;
	int timeleft;
	unsigned long flags;
	int ret = 0;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	if (dhd->dongle_reset) {
		ret = -EIO;
		goto out;
	}

	/* one posted ioctl-response buffer is consumed by this exchange */
	if (prot->cur_ioctlresp_bufs_posted) {
		prot->cur_ioctlresp_bufs_posted--;
	}

	/* replenish ioctl response buffers before blocking */
	dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);

	timeleft = dhd_os_ioctl_resp_wait(dhd, &prot->ioctl_received);
	if (timeleft == 0) {
		dhd->rxcnt_timeout++;
		dhd->rx_ctlerrs++;
		DHD_ERROR(("%s: resumed on timeout rxcnt_timeout %d ioctl_cmd %d"
			"trans_id %d state %d busstate=%d ioctl_received=%d\n",
			__FUNCTION__, dhd->rxcnt_timeout, prot->curr_ioctl_cmd,
			prot->ioctl_trans_id, prot->ioctl_state & ~MSGBUF_IOCTL_RESP_PENDING,
			dhd->busstate, prot->ioctl_received));

		dhd_prot_debug_info_print(dhd);

#if defined(DHD_FW_COREDUMP)
		/* Collect socram dump for CUSTOMER_HW4 OR Brix Android */
		/* As soon as FW TRAP occurs, FW dump will be collected from dhdpcie_checkdied */
		if (dhd->memdump_enabled && !dhd->dongle_trap_occured) {
			/* collect core dump */
			dhd->memdump_type = DUMP_TYPE_RESUMED_ON_TIMEOUT;
			dhd_bus_mem_dump(dhd);
		}
#endif /* DHD_FW_COREDUMP && OEM_ANDROID */
		if (dhd->rxcnt_timeout >= MAX_CNTL_RX_TIMEOUT) {
#ifdef SUPPORT_LINKDOWN_RECOVERY
#ifdef CONFIG_ARCH_MSM
			/* force a full config restore on the next link recovery */
			dhd->bus->no_cfg_restore = 1;
#endif /* CONFIG_ARCH_MSM */
#endif /* SUPPORT_LINKDOWN_RECOVERY */
			DHD_ERROR(("%s: timeout > MAX_CNTL_RX_TIMEOUT\n", __FUNCTION__));
		}
		ret = -ETIMEDOUT;
		goto out;
	} else {
		if (prot->ioctl_received != IOCTL_RETURN_ON_SUCCESS) {
			DHD_ERROR(("%s: IOCTL failure due to ioctl_received = %d\n",
				__FUNCTION__, prot->ioctl_received));
			ret = -ECONNABORTED;
			goto out;
		}
		/* successful response: reset the consecutive-timeout counter */
		dhd->rxcnt_timeout = 0;
		dhd->rx_ctlpkts++;
		DHD_CTL(("%s: ioctl resp resumed, got %d\n",
			__FUNCTION__, prot->ioctl_resplen));
	}

	if (dhd->dongle_trap_occured) {
#ifdef SUPPORT_LINKDOWN_RECOVERY
#ifdef CONFIG_ARCH_MSM
		dhd->bus->no_cfg_restore = 1;
#endif /* CONFIG_ARCH_MSM */
#endif /* SUPPORT_LINKDOWN_RECOVERY */
		DHD_ERROR(("%s: TRAP occurred!!\n", __FUNCTION__));
		ret = -EREMOTEIO;
		goto out;
	}

	/* truncate the response to the caller's buffer before copying */
	if (dhd->prot->ioctl_resplen > len) {
		dhd->prot->ioctl_resplen = (uint16)len;
	}
	if (buf) {
		bcopy(dhd->prot->retbuf.va, buf, dhd->prot->ioctl_resplen);
	}

	ret = (int)(dhd->prot->ioctl_status);

out:
	/* clear ioctl exchange state so the next ioctl may be submitted */
	DHD_GENERAL_LOCK(dhd, flags);
	dhd->prot->ioctl_state &= ~MSGBUF_IOCTL_RESP_PENDING;
	dhd->prot->ioctl_resplen = 0;
	dhd->prot->ioctl_received = IOCTL_WAIT;
	dhd->prot->curr_ioctl_cmd = 0;
	DHD_GENERAL_UNLOCK(dhd, flags);

	return ret;
} /* dhd_msgbuf_wait_ioctl_cmplt */
/**
 * Submit a SET ioctl to the dongle and wait for its completion.
 *
 * @return 0 on success, negative BCME_/errno-style code on failure
 */
static int
dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
{
	int ret = 0;

	DHD_TRACE(("%s: Enter \n", __FUNCTION__));

	if (dhd->busstate == DHD_BUS_DOWN) {
		DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
		return -EIO;
	}

	/* don't talk to the dongle if fw is about to be reloaded */
	if (dhd->hang_was_sent) {
		DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n",
			__FUNCTION__));
		return -EIO;
	}

	/* Fill up msgbuf for ioctl req */
	ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx);
	if (ret < 0) {
		/* Request was never posted (e.g. BCME_BUSY): bail out instead
		 * of waiting for a completion that cannot arrive (previously
		 * the return value was silently discarded).
		 */
		DHD_ERROR(("%s: dhd_fillup_ioct_reqst failed %d\n", __FUNCTION__, ret));
		return ret;
	}

	DHD_CTL(("ACTION %d ifdix %d cmd %d len %d \n",
		action, ifidx, cmd, len));

	ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf);

	return ret;
}
/** Called by upper DHD layer. Handles a protocol control response asynchronously. */
int dhd_prot_ctl_complete(dhd_pub_t *dhd)
{
	/* No-op for the msgbuf protocol; always reports success. */
	return 0;
}
/** Called by upper DHD layer. Check for and handle local prot-specific iovar commands */
int dhd_prot_iovar_op(dhd_pub_t *dhd, const char *name,
	void *params, int plen, void *arg, int len, bool set)
{
	/* The msgbuf protocol defines no local iovars. */
	return BCME_UNSUPPORTED;
}
/** Add prot dump output to a buffer */
void dhd_prot_dump(dhd_pub_t *dhd, struct bcmstrbuf *b)
{
#if defined(PCIE_D2H_SYNC)
	/* report which D2H sync mechanism is active, plus wait statistics */
	const char *sync_tag;

	if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM) {
		sync_tag = "\nd2h_sync: SEQNUM:";
	} else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM) {
		sync_tag = "\nd2h_sync: XORCSUM:";
	} else {
		sync_tag = "\nd2h_sync: NONE:";
	}

	bcm_bprintf(b, "%s", sync_tag);
	bcm_bprintf(b, " d2h_sync_wait max<%lu> tot<%lu>\n",
		dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot);
#endif /* PCIE_D2H_SYNC */

	bcm_bprintf(b, "\nDongle DMA Indices: h2d %d d2h %d index size %d bytes\n",
		DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support),
		DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support),
		dhd->prot->rw_index_sz);
}
/* Update local copy of dongle statistics */
void dhd_prot_dstats(dhd_pub_t *dhd)
{
	/* No-op: the msgbuf protocol does not cache dongle statistics. */
	return;
}
/** Called by upper DHD layer */
int dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf,
	uint reorder_info_len, void **pkt, uint32 *free_buf_count)
{
	/* Packet reordering is not handled by the msgbuf protocol layer. */
	return 0;
}
/** Debug related, post a dummy message to interrupt dongle. Used to process cons commands.
 *
 * @return 0 on success, -1 when no space is available on the control ring
 */
int
dhd_post_dummy_msg(dhd_pub_t *dhd)
{
	unsigned long flags;
	hostevent_hdr_t *hevent = NULL;
	uint16 alloced = 0;

	dhd_prot_t *prot = dhd->prot;
	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;

	DHD_GENERAL_LOCK(dhd, flags);

	/* reserve one work item on the H2D control submission ring */
	hevent = (hostevent_hdr_t *)
		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);

	if (hevent == NULL) {
		DHD_GENERAL_UNLOCK(dhd, flags);
		return -1;
	}

	/* CMN msg header */
	hevent->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
	ring->seqnum++;
	hevent->msg.msg_type = MSG_TYPE_HOST_EVNT;
	hevent->msg.if_id = 0;

	/* Event payload */
	hevent->evnt_pyld = htol32(HOST_EVENT_CONS_CMD);

	/* Since, we are filling the data directly into the bufptr obtained
	 * from the msgbuf, we can directly call the write_complete
	 */
	dhd_prot_ring_write_complete(dhd, ring, hevent, 1);
	DHD_GENERAL_UNLOCK(dhd, flags);

	return 0;
}
/**
 * Reserve space for 'nitems' work items on a H2D ring.
 * If exactly_nitems is true, this function will allocate space for nitems or fail.
 * If exactly_nitems is false, this function will allocate space for nitems or less.
 */
static void * BCMFASTPATH
dhd_prot_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring,
	uint16 nitems, uint16 * alloced, bool exactly_nitems)
{
	void *space;

	/* First attempt, using the cached read pointer */
	space = dhd_prot_get_ring_space(ring, nitems, alloced, exactly_nitems);
	if (space != NULL) {
		return space;
	}

	/* Ring looked full: refresh the cached RD index from its
	 * authoritative location (host DMA index array, or dongle memory)
	 * and retry once.
	 */
	if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) {
		ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
	} else {
		dhd_bus_cmn_readshared(dhd->bus, &(ring->rd), RING_RD_UPD, ring->idx);
	}

	space = dhd_prot_get_ring_space(ring, nitems, alloced, exactly_nitems);
	if (space == NULL) {
		DHD_INFO(("%s: Ring space not available \n", ring->name));
	}

	return space;
}
/**
 * Non inline ioct request.
 * Form a ioctl request first as per ioctptr_reqst_hdr_t header in the circular buffer
 * Form a separate request buffer where a 4 byte cmn header is added in the front
 * buf contents from parent function is copied to remaining section of this buffer
 *
 * Marks the ioctl exchange as in-flight (ACK + RESP pending) under the
 * general lock; returns BCME_BUSY if another ioctl is already pending.
 *
 * @return 0 on success, BCME_BUSY if an ioctl is in flight, -1 on no ring space
 */
static int
dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd, void* buf, int ifidx)
{
	dhd_prot_t *prot = dhd->prot;
	ioctl_req_msg_t *ioct_rqst;
	void * ioct_buf;	/* For ioctl payload */
	uint16 rqstlen, resplen;
	unsigned long flags;
	uint16 alloced = 0;
	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;

	rqstlen = len;
	resplen = len;

	/* Limit ioct request to MSGBUF_MAX_MSG_SIZE bytes including hdrs */
	/* 8K allocation of dongle buffer fails */
	/* dhd doesnt give separate input & output buf lens */
	/* so making the assumption that input length can never be more than 1.5k */
	rqstlen = MIN(rqstlen, MSGBUF_MAX_MSG_SIZE);

	DHD_GENERAL_LOCK(dhd, flags);

	/* only one ioctl exchange may be in flight at a time */
	if (prot->ioctl_state) {
		DHD_CTL(("pending ioctl %02x\n", prot->ioctl_state));
		DHD_GENERAL_UNLOCK(dhd, flags);
		return BCME_BUSY;
	} else {
		prot->ioctl_state = MSGBUF_IOCTL_ACK_PENDING | MSGBUF_IOCTL_RESP_PENDING;
	}

	/* Request for cbuf space */
	ioct_rqst = (ioctl_req_msg_t*)
		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
	if (ioct_rqst == NULL) {
		DHD_ERROR(("couldn't allocate space on msgring to send ioctl request\n"));
		/* roll back the in-flight state set above */
		prot->ioctl_state = 0;
		prot->curr_ioctl_cmd = 0;
		prot->ioctl_received = IOCTL_WAIT;
		DHD_GENERAL_UNLOCK(dhd, flags);
		return -1;
	}

	/* Common msg buf hdr */
	ioct_rqst->cmn_hdr.msg_type = MSG_TYPE_IOCTLPTR_REQ;
	ioct_rqst->cmn_hdr.if_id = (uint8)ifidx;
	ioct_rqst->cmn_hdr.flags = 0;
	ioct_rqst->cmn_hdr.request_id = htol32(DHD_IOCTL_REQ_PKTID);
	ioct_rqst->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
	ring->seqnum++;

	ioct_rqst->cmd = htol32(cmd);
	prot->curr_ioctl_cmd = cmd;
	ioct_rqst->output_buf_len = htol16(resplen);
	prot->ioctl_trans_id++;
	/* trans_id lets the completion handler match the response to this request */
	ioct_rqst->trans_id = prot->ioctl_trans_id;

	/* populate ioctl buffer info */
	ioct_rqst->input_buf_len = htol16(rqstlen);
	ioct_rqst->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->ioctbuf.pa));
	ioct_rqst->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->ioctbuf.pa));
	/* copy ioct payload */
	ioct_buf = (void *) prot->ioctbuf.va;

	if (buf) {
		memcpy(ioct_buf, buf, len);
	}

	/* flush so the dongle's DMA read sees the payload */
	OSL_CACHE_FLUSH((void *) prot->ioctbuf.va, len);

	if (!ISALIGNED(ioct_buf, DMA_ALIGN_LEN)) {
		DHD_ERROR(("host ioct address unaligned !!!!! \n"));
	}

	DHD_CTL(("submitted IOCTL request request_id %d, cmd %d, output_buf_len %d, tx_id %d\n",
		ioct_rqst->cmn_hdr.request_id, cmd, ioct_rqst->output_buf_len,
		ioct_rqst->trans_id));

	/* update ring's WR index and ring doorbell to dongle */
	dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1);
	DHD_GENERAL_UNLOCK(dhd, flags);

	return 0;
} /* dhd_fillup_ioct_reqst */
/**
 * dhd_prot_ring_attach - Initialize the msgbuf_ring object and attach a
 * DMA-able buffer to it. The ring is NOT tagged as inited until all the ring
 * information is posted to the dongle.
 *
 * Invoked in dhd_prot_attach for the common rings, and in dhd_prot_init for
 * each flowring in pool of flowrings.
 *
 * The DMA-able buffer is either carved out of the pre-reserved contiguous
 * flowring region (for flowrings, when present) or allocated individually.
 *
 * returns BCME_OK=0 on success
 * returns non-zero negative error value on failure.
 */
static int
dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring, const char *name,
	uint16 max_items, uint16 item_len, uint16 ringid)
{
	int dma_buf_alloced = BCME_NOMEM;
	uint32 dma_buf_len = max_items * item_len;
	dhd_prot_t *prot = dhd->prot;

	ASSERT(ring);
	ASSERT(name);
	ASSERT((max_items < 0xFFFF) && (item_len < 0xFFFF) && (ringid < 0xFFFF));

	/* Init name */
	strncpy(ring->name, name, RING_NAME_MAX_LENGTH);
	ring->name[RING_NAME_MAX_LENGTH - 1] = '\0';	/* strncpy may not terminate */

	ring->idx = ringid;

	ring->max_items = max_items;
	ring->item_len = item_len;

	/* A contiguous space may be reserved for all flowrings */
	if (DHD_IS_FLOWRING(ringid) && (prot->flowrings_dma_buf.va)) {
		/* Carve out from the contiguous DMA-able flowring buffer */
		uint16 flowid;
		uint32 base_offset;

		dhd_dma_buf_t *dma_buf = &ring->dma_buf;
		dhd_dma_buf_t *rsv_buf = &prot->flowrings_dma_buf;

		flowid = DHD_RINGID_TO_FLOWID(ringid);
		base_offset = (flowid - BCMPCIE_H2D_COMMON_MSGRINGS) * dma_buf_len;

		ASSERT(base_offset + dma_buf_len <= rsv_buf->len);

		dma_buf->len = dma_buf_len;
		dma_buf->va = (void *)((uintptr)rsv_buf->va + base_offset);
		PHYSADDRHISET(dma_buf->pa, PHYSADDRHI(rsv_buf->pa));
		PHYSADDRLOSET(dma_buf->pa, PHYSADDRLO(rsv_buf->pa) + base_offset);

		/* On 64bit, contiguous space may not span across 0x00000000FFFFFFFF */
		ASSERT(PHYSADDRLO(dma_buf->pa) >= PHYSADDRLO(rsv_buf->pa));

		dma_buf->dmah = rsv_buf->dmah;
		dma_buf->secdma = rsv_buf->secdma;

		(void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
	} else {
		/* Allocate a dhd_dma_buf */
		dma_buf_alloced = dhd_dma_buf_alloc(dhd, &ring->dma_buf, dma_buf_len);
		if (dma_buf_alloced != BCME_OK) {
			return BCME_NOMEM;
		}
	}

	/* CAUTION: Save ring::base_addr in little endian format! */
	dhd_base_addr_htolpa(&ring->base_addr, ring->dma_buf.pa);

#ifdef BCM_SECURE_DMA
	if (SECURE_DMA_ENAB(prot->osh)) {
		ring->dma_buf.secdma = MALLOCZ(prot->osh, sizeof(sec_cma_info_t));
		if (ring->dma_buf.secdma == NULL) {
			goto free_dma_buf;
		}
	}
#endif /* BCM_SECURE_DMA */

	DHD_INFO(("RING_ATTACH : %s Max item %d len item %d total size %d "
		"ring start %p buf phys addr %x:%x \n",
		ring->name, ring->max_items, ring->item_len,
		dma_buf_len, ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
		ltoh32(ring->base_addr.low_addr)));

	return BCME_OK;

#ifdef BCM_SECURE_DMA
free_dma_buf:
	/* only free when this function allocated the buffer (not carved) */
	if (dma_buf_alloced == BCME_OK) {
		dhd_dma_buf_free(dhd, &ring->dma_buf);
	}
#endif /* BCM_SECURE_DMA */

	return BCME_NOMEM;

} /* dhd_prot_ring_attach */
/**
 * dhd_prot_ring_init - Post the common ring information to dongle.
 *
 * Used only for common rings.
 *
 * The flowrings information is passed via the create flowring control message
 * (tx_flowring_create_request_t) sent over the H2D control submission common
 * ring.
 */
static void
dhd_prot_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring)
{
	ring->wr = 0;
	ring->rd = 0;
	ring->curr_rd = 0;

	/* CAUTION: ring::base_addr already in Little Endian */
	dhd_bus_cmn_writeshared(dhd->bus, &ring->base_addr,
		sizeof(sh_addr_t), RING_BUF_ADDR, ring->idx);
	dhd_bus_cmn_writeshared(dhd->bus, &ring->max_items,
		sizeof(uint16), RING_MAX_ITEMS, ring->idx);
	dhd_bus_cmn_writeshared(dhd->bus, &ring->item_len,
		sizeof(uint16), RING_ITEM_LEN, ring->idx);

	/* publish the initial (zero) WR/RD state to the dongle */
	dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr),
		sizeof(uint16), RING_WR_UPD, ring->idx);
	dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd),
		sizeof(uint16), RING_RD_UPD, ring->idx);

	/* ring inited */
	ring->inited = TRUE;

} /* dhd_prot_ring_init */
/**
 * dhd_prot_ring_reset - zero a ring's DMA-able buffer (with cache
 * maintenance) and reset its WR/RD indices to 0.
 */
static void
dhd_prot_ring_reset(dhd_pub_t *dhd, msgbuf_ring_t *ring)
{
	DHD_TRACE(("%s\n", __FUNCTION__));

	/* clear the DMA-able backing store */
	dhd_dma_buf_reset(dhd, &ring->dma_buf);

	ring->rd = 0;
	ring->wr = 0;
	ring->curr_rd = 0;
}
/**
 * dhd_prot_ring_detach - Detach the DMA-able buffer and any other objects
 * hanging off the msgbuf_ring.
 */
static void
dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring)
{
	dhd_prot_t *prot = dhd->prot;
	ASSERT(ring);

	ring->inited = FALSE;
	/* rd = ~0, wr = ring->rd - 1, max_items = 0, len_item = ~0 */

#ifdef BCM_SECURE_DMA
	/* release the secure-DMA mapping info allocated in ring_attach */
	if (SECURE_DMA_ENAB(prot->osh)) {
		if (ring->dma_buf.secdma) {
			SECURE_DMA_UNMAP_ALL(prot->osh, ring->dma_buf.secdma);
			MFREE(prot->osh, ring->dma_buf.secdma, sizeof(sec_cma_info_t));
			ring->dma_buf.secdma = NULL;
		}
	}
#endif /* BCM_SECURE_DMA */

	/* If the DMA-able buffer was carved out of a pre-reserved contiguous
	 * memory, then simply stop using it.
	 */
	if (DHD_IS_FLOWRING(ring->idx) && (prot->flowrings_dma_buf.va)) {
		(void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
		memset(&ring->dma_buf, 0, sizeof(dhd_dma_buf_t));
	} else {
		dhd_dma_buf_free(dhd, &ring->dma_buf);
	}

} /* dhd_prot_ring_detach */
/*
* +----------------------------------------------------------------------------
* Flowring Pool
*
* Unlike common rings, which are attached very early on (dhd_prot_attach),
* flowrings are dynamically instantiated. Moreover, flowrings may require a
* larger DMA-able buffer. To avoid issues with fragmented cache coherent
* DMA-able memory, a pre-allocated pool of msgbuf_ring_t is allocated once.
* The DMA-able buffers are attached to these pre-allocated msgbuf_ring.
*
* Each DMA-able buffer may be allocated independently, or may be carved out
* of a single large contiguous region that is registered with the protocol
* layer into flowrings_dma_buf. On a 64bit platform, this contiguous region
* may not span 0x00000000FFFFFFFF (avoid dongle side 64bit ptr arithmetic).
*
* No flowring pool action is performed in dhd_prot_attach(), as the number
* of h2d rings is not yet known.
*
* In dhd_prot_init(), the dongle advertized number of h2d rings is used to
* determine the number of flowrings required, and a pool of msgbuf_rings are
* allocated and a DMA-able buffer (carved or allocated) is attached.
* See: dhd_prot_flowrings_pool_attach()
*
* A flowring msgbuf_ring object may be fetched from this pool during flowring
* creation, using the flowid. Likewise, flowrings may be freed back into the
* pool on flowring deletion.
* See: dhd_prot_flowrings_pool_fetch(), dhd_prot_flowrings_pool_release()
*
* In dhd_prot_detach(), the flowring pool is detached. The DMA-able buffers
* are detached (returned back to the carved region or freed), and the pool of
* msgbuf_ring and any objects allocated against it are freed.
* See: dhd_prot_flowrings_pool_detach()
*
* In dhd_prot_reset(), the flowring pool is simply reset by returning it to a
* state as-if upon an attach. All DMA-able buffers are retained.
* Following a dhd_prot_reset(), in a subsequent dhd_prot_init(), the flowring
* pool attach will notice that the pool persists and continue to use it. This
* will avoid the case of a fragmented DMA-able region.
*
* +----------------------------------------------------------------------------
*/
/* Fetch number of H2D flowrings given the total number of h2d rings
 * (the total advertized by the dongle includes the H2D common rings)
 */
#define DHD_FLOWRINGS_POOL_TOTAL(h2d_rings_total) \
	((h2d_rings_total) - BCMPCIE_H2D_COMMON_MSGRINGS)

/* Conversion of a flowid to a flowring pool index; flowids below
 * BCMPCIE_H2D_COMMON_MSGRINGS belong to common rings, not the pool
 */
#define DHD_FLOWRINGS_POOL_OFFSET(flowid) \
	((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS)

/* Fetch the msgbuf_ring_t from the flowring pool given a flowid */
#define DHD_RING_IN_FLOWRINGS_POOL(prot, flowid) \
	(msgbuf_ring_t*)((prot)->h2d_flowrings_pool) + DHD_FLOWRINGS_POOL_OFFSET(flowid)

/* Traverse each flowring in the flowring pool, assigning ring and flowid */
#define FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid) \
	for ((flowid) = DHD_FLOWRING_START_FLOWID, \
		(ring) = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid); \
		(flowid) < (prot)->h2d_rings_total; \
		(flowid)++, (ring)++)
/**
 * dhd_prot_flowrings_pool_attach - Initialize a pool of flowring msgbuf_ring_t.
 *
 * Allocate a pool of msgbuf_ring along with DMA-able buffers for flowrings.
 * Dongle includes common rings when it advertizes the number of H2D rings.
 * Allocates a pool of msgbuf_ring_t and invokes dhd_prot_ring_attach to
 * allocate the DMA-able buffer and initialize each msgbuf_ring_t object.
 *
 * dhd_prot_ring_attach is invoked to perform the actual initialization and
 * attaching the DMA-able buffer.
 *
 * Later dhd_prot_flowrings_pool_fetch() may be used to fetch a preallocated and
 * initialized msgbuf_ring_t object.
 *
 * returns BCME_OK=0 on success
 * returns non-zero negative error value on failure.
 */
static int
dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd)
{
	uint16 flowid;
	msgbuf_ring_t *ring;
	uint16 h2d_flowrings_total; /* exclude H2D common rings */
	dhd_prot_t *prot = dhd->prot;
	char ring_name[RING_NAME_MAX_LENGTH];

	if (prot->h2d_flowrings_pool != NULL) {
		return BCME_OK; /* dhd_prot_init rentry after a dhd_prot_reset */
	}

	ASSERT(prot->h2d_rings_total == 0);

	/* h2d_rings_total includes H2D common rings: ctrl and rxbuf subn */
	prot->h2d_rings_total = (uint16)dhd_bus_max_h2d_queues(dhd->bus);

	if (prot->h2d_rings_total < BCMPCIE_H2D_COMMON_MSGRINGS) {
		DHD_ERROR(("%s: h2d_rings_total advertized as %u\n",
			__FUNCTION__, prot->h2d_rings_total));
		return BCME_ERROR;
	}

	/* Subtract number of H2D common rings, to determine number of flowrings */
	h2d_flowrings_total = DHD_FLOWRINGS_POOL_TOTAL(prot->h2d_rings_total);

	DHD_ERROR(("Attach flowrings pool for %d rings\n", h2d_flowrings_total));

	/* Allocate pool of msgbuf_ring_t objects for all flowrings */
	prot->h2d_flowrings_pool = (msgbuf_ring_t *)MALLOCZ(prot->osh,
		(h2d_flowrings_total * sizeof(msgbuf_ring_t)));

	if (prot->h2d_flowrings_pool == NULL) {
		DHD_ERROR(("%s: flowrings pool for %d flowrings, alloc failure\n",
			__FUNCTION__, h2d_flowrings_total));
		goto fail;
	}

	/* Setup & Attach a DMA-able buffer to each flowring in the flowring pool */
	FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid) {
		snprintf(ring_name, sizeof(ring_name), "h2dflr_%03u", flowid);
		ring_name[RING_NAME_MAX_LENGTH - 1] = '\0';
		if (dhd_prot_ring_attach(dhd, ring, ring_name,
			H2DRING_TXPOST_MAX_ITEM, H2DRING_TXPOST_ITEMSIZE,
			DHD_FLOWID_TO_RINGID(flowid)) != BCME_OK) {
			goto attach_fail;
		}
	}

	return BCME_OK;

attach_fail:
	/* unwind: rings attached so far are torn down by the pool detach */
	dhd_prot_flowrings_pool_detach(dhd); /* Free entire pool of flowrings */

fail:
	prot->h2d_rings_total = 0;
	return BCME_NOMEM;

} /* dhd_prot_flowrings_pool_attach */
/**
 * dhd_prot_flowrings_pool_reset - Reset all msgbuf_ring_t objects in the pool.
 * Invokes dhd_prot_ring_reset to perform the actual reset.
 *
 * The DMA-able buffers are retained and neither is the pool itself freed,
 * so a subsequent dhd_prot_init() after dhd_prot_reset() can reuse the
 * existing pool and avoid fragmenting DMA-able memory across dongle
 * reboots.
 */
static void
dhd_prot_flowrings_pool_reset(dhd_pub_t *dhd)
{
	dhd_prot_t *prot = dhd->prot;
	msgbuf_ring_t *flow_ring;
	uint16 flowid;

	if (prot->h2d_flowrings_pool == NULL) {
		/* pool was never attached (or already detached) */
		ASSERT(prot->h2d_rings_total == 0);
		return;
	}

	/* Return every flowring to its just-attached state */
	FOREACH_RING_IN_FLOWRINGS_POOL(prot, flow_ring, flowid) {
		dhd_prot_ring_reset(dhd, flow_ring);
		flow_ring->inited = FALSE;
	}

	/* Flowring pool state must be as-if dhd_prot_flowrings_pool_attach */
}
/**
 * dhd_prot_flowrings_pool_detach - Free pool of msgbuf_ring along with
 * DMA-able buffers for flowrings.
 * dhd_prot_ring_detach is invoked to free the DMA-able buffer and perform any
 * de-initialization of each msgbuf_ring_t.
 */
static void
dhd_prot_flowrings_pool_detach(dhd_pub_t *dhd)
{
	/* uint16 (not int) to match the sibling pool functions and avoid a
	 * signed/unsigned comparison against prot->h2d_rings_total inside
	 * FOREACH_RING_IN_FLOWRINGS_POOL.
	 */
	uint16 flowid;
	msgbuf_ring_t *ring;
	uint16 h2d_flowrings_total; /* exclude H2D common rings */
	dhd_prot_t *prot = dhd->prot;

	if (prot->h2d_flowrings_pool == NULL) {
		ASSERT(prot->h2d_rings_total == 0);
		return;
	}

	/* Detach the DMA-able buffer for each flowring in the flowring pool */
	FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid) {
		dhd_prot_ring_detach(dhd, ring);
	}

	h2d_flowrings_total = DHD_FLOWRINGS_POOL_TOTAL(prot->h2d_rings_total);

	MFREE(prot->osh, prot->h2d_flowrings_pool,
		(h2d_flowrings_total * sizeof(msgbuf_ring_t)));

	prot->h2d_flowrings_pool = (msgbuf_ring_t*)NULL;
	prot->h2d_rings_total = 0;

} /* dhd_prot_flowrings_pool_detach */
/**
 * dhd_prot_flowrings_pool_fetch - Fetch a preallocated and initialized
 * msgbuf_ring from the flowring pool, and assign it.
 *
 * Unlike common rings (see dhd_prot_ring_init), a flowring's geometry is
 * communicated to the dongle via a flowring-create control message, so only
 * the ring state (WR, RD) indices are initialized here.
 */
static msgbuf_ring_t *
dhd_prot_flowrings_pool_fetch(dhd_pub_t *dhd, uint16 flowid)
{
	dhd_prot_t *prot = dhd->prot;
	msgbuf_ring_t *flow_ring;

	ASSERT(flowid >= DHD_FLOWRING_START_FLOWID);
	ASSERT(flowid < prot->h2d_rings_total);
	ASSERT(prot->h2d_flowrings_pool != NULL);

	flow_ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid);

	/* ASSERT flow_ring->inited == FALSE */

	flow_ring->rd = 0;
	flow_ring->wr = 0;
	flow_ring->curr_rd = 0;
	flow_ring->inited = TRUE;

	return flow_ring;
}
/**
 * dhd_prot_flowrings_pool_release - release a previously fetched flowring's
 * msgbuf_ring back to the flow_ring pool.
 */
void
dhd_prot_flowrings_pool_release(dhd_pub_t *dhd, uint16 flowid, void *flow_ring)
{
	dhd_prot_t *prot = dhd->prot;
	msgbuf_ring_t *pool_ring;

	ASSERT(flowid >= DHD_FLOWRING_START_FLOWID);
	ASSERT(flowid < prot->h2d_rings_total);
	ASSERT(prot->h2d_flowrings_pool != NULL);

	pool_ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid);

	/* the caller must hand back the very ring it fetched */
	ASSERT(pool_ring == (msgbuf_ring_t*)flow_ring);
	/* ASSERT flow_ring->inited == TRUE */

	(void)dhd_dma_buf_audit(dhd, &pool_ring->dma_buf);

	pool_ring->wr = 0;
	pool_ring->rd = 0;
	pool_ring->inited = FALSE;
	pool_ring->curr_rd = 0;
}
/* Assumes only one index is updated at a time */
/* If exactly_nitems is true, this function will allocate space for nitems or fail */
/* Exception: when wrap around is encountered, to prevent hangup (last nitems of ring buffer) */
/* If exactly_nitems is false, this function will allocate space for nitems or less */
static void *BCMFASTPATH
dhd_prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems, uint16 * alloced,
	bool exactly_nitems)
{
	void *ret_ptr = NULL;
	uint16 ring_avail_cnt;

	ASSERT(nitems <= ring->max_items);

	ring_avail_cnt = CHECK_WRITE_SPACE(ring->rd, ring->wr, ring->max_items);

	/* Fail when the ring is full, or when an exact-size request cannot
	 * be satisfied without wrapping (the wrap-around case at the ring
	 * tail is allowed through to avoid a hang — see comment above).
	 */
	if ((ring_avail_cnt == 0) ||
	       (exactly_nitems && (ring_avail_cnt < nitems) &&
	       ((ring->max_items - ring->wr) >= nitems))) {
		DHD_INFO(("Space not available: ring %s items %d write %d read %d\n",
			ring->name, nitems, ring->wr, ring->rd));
		return NULL;
	}
	*alloced = MIN(nitems, ring_avail_cnt);

	/* Return next available space */
	ret_ptr = (char *)DHD_RING_BGN_VA(ring) + (ring->wr * ring->item_len);

	/* Update write index */
	if ((ring->wr + *alloced) == ring->max_items) {
		ring->wr = 0;	/* exact fit: wrap WR back to the start */
	} else if ((ring->wr + *alloced) < ring->max_items) {
		ring->wr += *alloced;
	} else {
		/* Should never hit this */
		ASSERT(0);
		return NULL;
	}

	return ret_ptr;

} /* dhd_prot_get_ring_space */
/**
 * dhd_prot_ring_write_complete - Host updates the new WR index on producing
 * new messages in a H2D ring. The messages are flushed from cache prior to
 * posting the new WR index. The new WR index will be updated in the DMA index
 * array or directly in the dongle's ring state memory.
 * A PCIE doorbell will be generated to wake up the dongle.
 *
 * @param p       start of the newly written work item(s), used for the flush
 * @param nitems  number of work items written
 */
static void BCMFASTPATH
dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p,
	uint16 nitems)
{
	dhd_prot_t *prot = dhd->prot;

	/* cache flush: the dongle must see the items before the WR update */
	OSL_CACHE_FLUSH(p, ring->item_len * nitems);

	if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support)) {
		dhd_prot_dma_indx_set(dhd, ring->wr,
			H2D_DMA_INDX_WR_UPD, ring->idx);
	} else {
		dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr),
			sizeof(uint16), RING_WR_UPD, ring->idx);
	}

	/* raise h2d interrupt */
	prot->mb_ring_fn(dhd->bus, ring->wr);
}
/**
 * dhd_prot_upd_read_idx - Host updates the new RD index on consuming messages
 * from a D2H ring. The new RD index will be updated in the DMA Index array or
 * directly in dongle's ring state memory.
 */
static void
dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring)
{
	/* Publish the new RD index either via the host-resident DMA index
	 * array (which the dongle DMAs in) or directly into dongle memory.
	 */
	if (!DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support)) {
		dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd),
			sizeof(uint16), RING_RD_UPD, ring->idx);
	} else {
		dhd_prot_dma_indx_set(dhd, ring->rd,
			D2H_DMA_INDX_RD_UPD, ring->idx);
	}
}
/**
 * dhd_prot_dma_indx_set - set a new WR or RD index in the DMA index array.
 * Dongle will DMA the entire array (if DMA_INDX feature is enabled).
 * See dhd_prot_dma_indx_init()
 */
static void
dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type, uint16 ringid)
{
	dhd_prot_t *prot = dhd->prot;
	uint8 *slot;
	uint16 slot_offset;

	/* select the index array and per-ring offset for this update type */
	if (type == H2D_DMA_INDX_WR_UPD) {
		slot = (uint8 *)(prot->h2d_dma_indx_wr_buf.va);
		slot_offset = DHD_H2D_RING_OFFSET(ringid);
	} else if (type == D2H_DMA_INDX_RD_UPD) {
		slot = (uint8 *)(prot->d2h_dma_indx_rd_buf.va);
		slot_offset = DHD_D2H_RING_OFFSET(ringid);
	} else {
		DHD_ERROR(("%s: Invalid option for DMAing read/write index\n",
			__FUNCTION__));
		return;
	}

	ASSERT(prot->rw_index_sz != 0);
	slot += slot_offset * prot->rw_index_sz;

	/* store little-endian and flush so the dongle's DMA sees the update */
	*(uint16*)slot = htol16(new_index);

	OSL_CACHE_FLUSH((void *)slot, prot->rw_index_sz);

	DHD_TRACE(("%s: data %d type %d ringid %d ptr 0x%p offset %d\n",
		__FUNCTION__, new_index, type, ringid, slot, slot_offset));

} /* dhd_prot_dma_indx_set */
/**
 * dhd_prot_dma_indx_get - Fetch a WR or RD index from the dongle DMA-ed index
 * array.
 * Dongle DMAes an entire array to host memory (if the feature is enabled).
 * See dhd_prot_dma_indx_init()
 *
 * @return the (host-endian) index value, or 0 for an unknown 'type'
 */
static uint16
dhd_prot_dma_indx_get(dhd_pub_t *dhd, uint8 type, uint16 ringid)
{
	uint8 *ptr;
	uint16 data;
	uint16 offset;
	dhd_prot_t *prot = dhd->prot;

	/* select the index array and per-ring offset for this fetch type */
	switch (type) {
		case H2D_DMA_INDX_WR_UPD:
			ptr = (uint8 *)(prot->h2d_dma_indx_wr_buf.va);
			offset = DHD_H2D_RING_OFFSET(ringid);
			break;

		case H2D_DMA_INDX_RD_UPD:
			ptr = (uint8 *)(prot->h2d_dma_indx_rd_buf.va);
			offset = DHD_H2D_RING_OFFSET(ringid);
			break;

		case D2H_DMA_INDX_WR_UPD:
			ptr = (uint8 *)(prot->d2h_dma_indx_wr_buf.va);
			offset = DHD_D2H_RING_OFFSET(ringid);
			break;

		case D2H_DMA_INDX_RD_UPD:
			ptr = (uint8 *)(prot->d2h_dma_indx_rd_buf.va);
			offset = DHD_D2H_RING_OFFSET(ringid);
			break;

		default:
			DHD_ERROR(("%s: Invalid option for DMAing read/write index\n",
				__FUNCTION__));
			return 0;
	}

	ASSERT(prot->rw_index_sz != 0);
	ptr += offset * prot->rw_index_sz;

	/* invalidate first so the read sees the dongle's latest DMA write */
	OSL_CACHE_INV((void *)ptr, prot->rw_index_sz);

	data = LTOH16(*((uint16*)ptr));

	DHD_TRACE(("%s: data %d type %d ringid %d ptr 0x%p offset %d\n",
		__FUNCTION__, data, type, ringid, ptr, offset));

	return (data);

} /* dhd_prot_dma_indx_get */
/**
 * dhd_prot_dma_indx_alloc - lazily back one DMA index array with a DMA-able
 * host buffer. A no-op if the buffer is already sized or already allocated.
 * See dhd_prot_dma_indx_init() for when and why these arrays exist.
 *
 * @param dhd     public DHD structure
 * @param type    array selector (unused here; kept for the caller's contract)
 * @param dma_buf buffer descriptor to populate
 * @param bufsz   requested size in bytes
 * @return        BCME_OK, or the dhd_dma_buf_alloc() error code
 */
static INLINE int
dhd_prot_dma_indx_alloc(dhd_pub_t *dhd, uint8 type,
	dhd_dma_buf_t *dma_buf, uint32 bufsz)
{
	/* Already backed by memory, or already at the requested size:
	 * nothing to do.
	 */
	if ((dma_buf->va != NULL) || (dma_buf->len == bufsz))
		return BCME_OK;

	return dhd_dma_buf_alloc(dhd, dma_buf, bufsz);
}
/**
 * dhd_prot_dma_indx_init - reserve and initialize one of the four DMA index
 * arrays (H2D/D2H x RD/WR) whose physical address is later handed to the
 * dongle so it can learn host ring state by DMA.
 *
 * Refactor: the four switch arms previously duplicated the allocate-and-log
 * sequence; they now only pick the target buffer (and a log tag) and share
 * one allocation path. Log output is byte-identical to the original.
 *
 * @param dhd         public DHD structure
 * @param rw_index_sz per-slot size advertised by the dongle (2 or 4 bytes)
 * @param type        which of the four index arrays to set up
 * @param length      number of slots in the array
 * @return BCME_OK on success, BCME_ERROR if prot is not initialized,
 *         BCME_BADOPTION for an unknown type, BCME_NOMEM on allocation
 *         failure.
 */
int
dhd_prot_dma_indx_init(dhd_pub_t *dhd, uint32 rw_index_sz, uint8 type, uint32 length)
{
	uint32 bufsz;
	dhd_prot_t *prot = dhd->prot;
	dhd_dma_buf_t *dma_buf;
	const char *name; /* tag used in the success log, one per array type */

	if (prot == NULL) {
		DHD_ERROR(("prot is not inited\n"));
		return BCME_ERROR;
	}

	/* Dongle advertizes 2B or 4B RW index size */
	ASSERT(rw_index_sz != 0);
	prot->rw_index_sz = rw_index_sz;

	bufsz = rw_index_sz * length;

	/* Map the array type to its dhd_prot buffer; allocation and logging
	 * are identical for all four cases and performed once below.
	 */
	switch (type) {
		case H2D_DMA_INDX_WR_BUF:
			dma_buf = &prot->h2d_dma_indx_wr_buf;
			name = "H2D DMA WR INDX";
			break;

		case H2D_DMA_INDX_RD_BUF:
			dma_buf = &prot->h2d_dma_indx_rd_buf;
			name = "H2D DMA RD INDX";
			break;

		case D2H_DMA_INDX_WR_BUF:
			dma_buf = &prot->d2h_dma_indx_wr_buf;
			name = "D2H DMA WR INDX";
			break;

		case D2H_DMA_INDX_RD_BUF:
			dma_buf = &prot->d2h_dma_indx_rd_buf;
			name = "D2H DMA RD INDX";
			break;

		default:
			DHD_ERROR(("%s: Unexpected option\n", __FUNCTION__));
			return BCME_BADOPTION;
	}

	if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) {
		DHD_ERROR(("%s: dhd_prot_dma_indx_alloc type %d buf_sz %d failure\n",
			__FUNCTION__, type, bufsz));
		return BCME_NOMEM;
	}

	DHD_ERROR(("%s : array size %d = %d * %d\n",
		name, dma_buf->len, rw_index_sz, length));

	return BCME_OK;

} /* dhd_prot_dma_indx_init */
/**
 * Called on checking for 'completion' messages from the dongle. Returns next host buffer to read
 * from, or NULL if there are no more messages to read.
 *
 * On success, *available_len is set to the number of readable bytes and
 * ring->rd is advanced past them; ring->curr_rd keeps the pre-advance value
 * for diagnostics.
 */
static uint8*
dhd_prot_get_read_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint32 *available_len)
{
	uint16 wr;
	uint16 rd;
	uint16 depth;
	uint16 items;
	void *read_addr = NULL; /* address of next msg to be read in ring */
	uint16 d2h_wr = 0;
	DHD_TRACE(("%s: d2h_dma_indx_rd_buf %p, d2h_dma_indx_wr_buf %p\n",
		__FUNCTION__, (uint32 *)(dhd->prot->d2h_dma_indx_rd_buf.va),
		(uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va)));
	/* Remember the read index in a variable.
	 * This is becuase ring->rd gets updated in the end of this function
	 * So if we have to print the exact read index from which the
	 * message is read its not possible.
	 */
	ring->curr_rd = ring->rd;
	/* update write pointer */
	if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) {
		/* DMAing write/read indices supported */
		d2h_wr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
		ring->wr = d2h_wr;
	} else {
		dhd_bus_cmn_readshared(dhd->bus, &(ring->wr), RING_WR_UPD, ring->idx);
	}
	wr = ring->wr;
	rd = ring->rd;
	depth = ring->max_items;
	/* check for avail space, in number of ring items */
	/* NOTE(review): presumably READ_AVAIL_SPACE yields only the
	 * contiguous run up to the ring end (the wrap below resets rd to 0)
	 * -- confirm against the macro's definition.
	 */
	items = READ_AVAIL_SPACE(wr, rd, depth);
	if (items == 0) {
		return NULL;
	}
	ASSERT(items < ring->max_items);
	/*
	 * Note that there are builds where Assert translates to just printk
	 * so, even if we had hit this condition we would never halt. Now
	 * dhd_prot_process_msgtype can get into an big loop if this
	 * happens.
	 */
	if (items >= ring->max_items) {
		DHD_ERROR(("\r\n======================= \r\n"));
		DHD_ERROR(("%s(): ring %p, ring->name %s, ring->max_items %d, items %d \r\n",
			__FUNCTION__, ring, ring->name, ring->max_items, items));
		DHD_ERROR(("wr: %d,  rd: %d,  depth: %d  \r\n", wr, rd, depth));
		DHD_ERROR(("dhd->busstate %d bus->suspended %d bus->wait_for_d3_ack %d \r\n",
			dhd->busstate, dhd->bus->suspended, dhd->bus->wait_for_d3_ack));
		DHD_ERROR(("\r\n======================= \r\n"));
		*available_len = 0;
		return NULL;
	}
	/* if space is available, calculate address to be read */
	read_addr = (char*)ring->dma_buf.va + (rd * ring->item_len);
	/* update read pointer */
	if ((ring->rd + items) >= ring->max_items) {
		ring->rd = 0;
	} else {
		ring->rd += items;
	}
	ASSERT(ring->rd < ring->max_items);
	/* convert items to bytes : available_len must be 32bits */
	*available_len = (uint32)(items * ring->item_len);
	/* Invalidate before the CPU reads what the dongle DMA-wrote. */
	OSL_CACHE_INV(read_addr, *available_len);
	/* return read address */
	return read_addr;
} /* dhd_prot_get_read_addr */
/** Creates a flow ring and informs dongle of this event
 *
 * Fetches a pre-initialized msgbuf ring from the flowring pool, then posts a
 * MSG_TYPE_FLOW_RING_CREATE request on the H2D control submission ring while
 * holding the general lock. The dongle answers asynchronously (see
 * dhd_prot_flow_ring_create_response_process()).
 *
 * @return BCME_OK on success, BCME_NOMEM if no pool ring or no control-ring
 *         space is available.
 */
int
dhd_prot_flow_ring_create(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
{
	tx_flowring_create_request_t *flow_create_rqst;
	msgbuf_ring_t *flow_ring;
	dhd_prot_t *prot = dhd->prot;
	unsigned long flags;
	uint16 alloced = 0;
	msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
	/* Fetch a pre-initialized msgbuf_ring from the flowring pool */
	flow_ring = dhd_prot_flowrings_pool_fetch(dhd, flow_ring_node->flowid);
	if (flow_ring == NULL) {
		DHD_ERROR(("%s: dhd_prot_flowrings_pool_fetch TX Flowid %d failed\n",
			__FUNCTION__, flow_ring_node->flowid));
		return BCME_NOMEM;
	}
	DHD_GENERAL_LOCK(dhd, flags);
	/* Request for ctrl_ring buffer space */
	flow_create_rqst = (tx_flowring_create_request_t *)
		dhd_prot_alloc_ring_space(dhd, ctrl_ring, 1, &alloced, FALSE);
	if (flow_create_rqst == NULL) {
		/* Return the fetched ring to the pool before bailing out. */
		dhd_prot_flowrings_pool_release(dhd, flow_ring_node->flowid, flow_ring);
		DHD_ERROR(("%s: Flow Create Req flowid %d - failure ring space\n",
			__FUNCTION__, flow_ring_node->flowid));
		DHD_GENERAL_UNLOCK(dhd, flags);
		return BCME_NOMEM;
	}
	flow_ring_node->prot_info = (void *)flow_ring;
	/* Common msg buf hdr */
	flow_create_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_CREATE;
	flow_create_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
	flow_create_rqst->msg.request_id = htol32(0); /* TBD */
	flow_create_rqst->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
	ctrl_ring->seqnum++;
	/* Update flow create message */
	flow_create_rqst->tid = flow_ring_node->flow_info.tid;
	flow_create_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
	memcpy(flow_create_rqst->sa, flow_ring_node->flow_info.sa, sizeof(flow_create_rqst->sa));
	memcpy(flow_create_rqst->da, flow_ring_node->flow_info.da, sizeof(flow_create_rqst->da));
	/* CAUTION: ring::base_addr already in Little Endian */
	flow_create_rqst->flow_ring_ptr.low_addr = flow_ring->base_addr.low_addr;
	flow_create_rqst->flow_ring_ptr.high_addr = flow_ring->base_addr.high_addr;
	flow_create_rqst->max_items = htol16(H2DRING_TXPOST_MAX_ITEM);
	flow_create_rqst->len_item = htol16(H2DRING_TXPOST_ITEMSIZE);
	DHD_ERROR(("%s: Send Flow Create Req flow ID %d for peer " MACDBG
		" prio %d ifindex %d\n", __FUNCTION__, flow_ring_node->flowid,
		MAC2STRDBG(flow_ring_node->flow_info.da), flow_ring_node->flow_info.tid,
		flow_ring_node->flow_info.ifindex));
	/* Update the flow_ring's WRITE index */
	if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support)) {
		dhd_prot_dma_indx_set(dhd, flow_ring->wr,
			H2D_DMA_INDX_WR_UPD, flow_ring->idx);
	} else {
		dhd_bus_cmn_writeshared(dhd->bus, &(flow_ring->wr),
			sizeof(uint16), RING_WR_UPD, flow_ring->idx);
	}
	/* update control subn ring's WR index and ring doorbell to dongle */
	dhd_prot_ring_write_complete(dhd, ctrl_ring, flow_create_rqst, 1);
	DHD_GENERAL_UNLOCK(dhd, flags);
	return BCME_OK;
} /* dhd_prot_flow_ring_create */
/** called on receiving MSG_TYPE_FLOW_RING_CREATE_CMPLT message from dongle.
 * Decodes the little-endian completion fields and forwards them to the bus
 * flow-ring layer.
 */
static void
dhd_prot_flow_ring_create_response_process(dhd_pub_t *dhd, void *msg)
{
	tx_flowring_create_response_t *resp = (tx_flowring_create_response_t *)msg;
	uint16 status = ltoh16(resp->cmplt.status);
	uint16 flowid = ltoh16(resp->cmplt.flow_ring_id);

	DHD_ERROR(("%s: Flow Create Response status = %d Flow %d\n", __FUNCTION__,
		status, flowid));

	dhd_bus_flow_ring_create_response(dhd->bus, flowid, status);
}
/** called on e.g. flow ring delete: tears down the msgbuf ring backing a
 * flow ring.
 */
void dhd_prot_clean_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info)
{
	msgbuf_ring_t *ring = (msgbuf_ring_t *)msgbuf_flow_info;

	dhd_prot_ring_detach(dhd, ring);
	DHD_INFO(("%s Cleaning up Flow \n", __FUNCTION__));
}
/* Dump one ring's RD/WR indices (read back from dongle shared memory),
 * base addresses and size into strbuf. A NULL fmt selects the default
 * layout; a caller-supplied fmt must accept the same six arguments.
 */
void dhd_prot_print_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info,
	struct bcmstrbuf *strbuf, const char * fmt)
{
	static const char default_fmt[] =
		"RD %d WR %d BASE(VA) %p BASE(PA) %x:%x SIZE %d\n";
	msgbuf_ring_t *ring = (msgbuf_ring_t *)msgbuf_flow_info;
	uint16 rd, wr;
	uint32 dma_buf_len = ring->max_items * ring->item_len;

	if (fmt == NULL)
		fmt = default_fmt;

	/* Fetch the dongle's current view of the indices. */
	dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
	dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);

	bcm_bprintf(strbuf, fmt, rd, wr, ring->dma_buf.va,
		ltoh32(ring->base_addr.high_addr),
		ltoh32(ring->base_addr.low_addr), dma_buf_len);
}
/* Dump the state of all common (non-flow) rings plus tx/pktid counters into
 * strbuf. Used by debug/iovar paths.
 */
void dhd_prot_print_info(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
{
	dhd_prot_t *prot = dhd->prot;
	bcm_bprintf(strbuf, "CtrlPost: ");
	dhd_prot_print_flow_ring(dhd, &prot->h2dring_ctrl_subn, strbuf, NULL);
	bcm_bprintf(strbuf, "CtrlCpl: ");
	dhd_prot_print_flow_ring(dhd, &prot->d2hring_ctrl_cpln, strbuf, NULL);
	bcm_bprintf(strbuf, "RxPost: ");
	bcm_bprintf(strbuf, "RBP %d ", prot->rxbufpost);
	dhd_prot_print_flow_ring(dhd, &prot->h2dring_rxp_subn, strbuf, NULL);
	bcm_bprintf(strbuf, "RxCpl: ");
	dhd_prot_print_flow_ring(dhd, &prot->d2hring_rx_cpln, strbuf, NULL);
	bcm_bprintf(strbuf, "TxCpl: ");
	dhd_prot_print_flow_ring(dhd, &prot->d2hring_tx_cpln, strbuf, NULL);
	/* In-flight tx count and remaining packet-id map capacity. */
	bcm_bprintf(strbuf, "active_tx_count %d	 pktidmap_avail %d\n",
		dhd->prot->active_tx_count,
		DHD_PKTID_AVAIL(dhd->prot->pktid_map_handle));
}
/* Post a MSG_TYPE_FLOW_RING_DELETE request for the given flow ring on the
 * H2D control submission ring. The dongle replies asynchronously (see
 * dhd_prot_flow_ring_delete_response_process()).
 *
 * @return BCME_OK, or BCME_NOMEM if no control-ring space is available.
 */
int
dhd_prot_flow_ring_delete(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
{
	tx_flowring_delete_request_t *flow_delete_rqst;
	dhd_prot_t *prot = dhd->prot;
	unsigned long flags;
	uint16 alloced = 0;
	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
	DHD_GENERAL_LOCK(dhd, flags);
	/* Request for ring buffer space */
	flow_delete_rqst = (tx_flowring_delete_request_t *)
		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
	if (flow_delete_rqst == NULL) {
		DHD_GENERAL_UNLOCK(dhd, flags);
		DHD_ERROR(("%s: Flow Delete Req - failure ring space\n", __FUNCTION__));
		return BCME_NOMEM;
	}
	/* Common msg buf hdr */
	flow_delete_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_DELETE;
	flow_delete_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
	flow_delete_rqst->msg.request_id = htol32(0); /* TBD */
	flow_delete_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
	ring->seqnum++;
	/* Update Delete info */
	flow_delete_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
	flow_delete_rqst->reason = htol16(BCME_OK);
	DHD_ERROR(("%s: Send Flow Delete Req RING ID %d for peer " MACDBG
		" prio %d ifindex %d\n", __FUNCTION__, flow_ring_node->flowid,
		MAC2STRDBG(flow_ring_node->flow_info.da), flow_ring_node->flow_info.tid,
		flow_ring_node->flow_info.ifindex));
	/* update ring's WR index and ring doorbell to dongle */
	dhd_prot_ring_write_complete(dhd, ring, flow_delete_rqst, 1);
	DHD_GENERAL_UNLOCK(dhd, flags);
	return BCME_OK;
}
/* Handle the dongle's reply to a MSG_TYPE_FLOW_RING_DELETE request and
 * forward it to the bus flow-ring layer.
 *
 * Fix: convert the little-endian wire fields with ltoh16() before use, as
 * dhd_prot_flow_ring_create_response_process() already does; the previous
 * code passed the raw values, which is wrong on big-endian hosts.
 */
static void
dhd_prot_flow_ring_delete_response_process(dhd_pub_t *dhd, void *msg)
{
	tx_flowring_delete_response_t *flow_delete_resp = (tx_flowring_delete_response_t *)msg;
	DHD_INFO(("%s: Flow Delete Response status = %d \n", __FUNCTION__,
		ltoh16(flow_delete_resp->cmplt.status)));
	dhd_bus_flow_ring_delete_response(dhd->bus, ltoh16(flow_delete_resp->cmplt.flow_ring_id),
		ltoh16(flow_delete_resp->cmplt.status));
}
/* Post a MSG_TYPE_FLOW_RING_FLUSH request for the given flow ring on the
 * H2D control submission ring. The dongle replies asynchronously (see
 * dhd_prot_flow_ring_flush_response_process()).
 *
 * @return BCME_OK, or BCME_NOMEM if no control-ring space is available.
 */
int
dhd_prot_flow_ring_flush(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
{
	tx_flowring_flush_request_t *flow_flush_rqst;
	dhd_prot_t *prot = dhd->prot;
	unsigned long flags;
	uint16 alloced = 0;
	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
	DHD_GENERAL_LOCK(dhd, flags);
	/* Request for ring buffer space */
	flow_flush_rqst = (tx_flowring_flush_request_t *)
		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
	if (flow_flush_rqst == NULL) {
		DHD_GENERAL_UNLOCK(dhd, flags);
		DHD_ERROR(("%s: Flow Flush Req - failure ring space\n", __FUNCTION__));
		return BCME_NOMEM;
	}
	/* Common msg buf hdr */
	flow_flush_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_FLUSH;
	flow_flush_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
	flow_flush_rqst->msg.request_id = htol32(0); /* TBD */
	flow_flush_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
	ring->seqnum++;
	flow_flush_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
	flow_flush_rqst->reason = htol16(BCME_OK);
	DHD_INFO(("%s: Send Flow Flush Req\n", __FUNCTION__));
	/* update ring's WR index and ring doorbell to dongle */
	dhd_prot_ring_write_complete(dhd, ring, flow_flush_rqst, 1);
	DHD_GENERAL_UNLOCK(dhd, flags);
	return BCME_OK;
} /* dhd_prot_flow_ring_flush */
/* Handle the dongle's reply to a MSG_TYPE_FLOW_RING_FLUSH request and
 * forward it to the bus flow-ring layer.
 *
 * Fix: convert the little-endian wire fields with ltoh16() before use, as
 * dhd_prot_flow_ring_create_response_process() already does; the previous
 * code passed the raw values, which is wrong on big-endian hosts.
 */
static void
dhd_prot_flow_ring_flush_response_process(dhd_pub_t *dhd, void *msg)
{
	tx_flowring_flush_response_t *flow_flush_resp = (tx_flowring_flush_response_t *)msg;
	DHD_INFO(("%s: Flow Flush Response status = %d\n", __FUNCTION__,
		ltoh16(flow_flush_resp->cmplt.status)));
	dhd_bus_flow_ring_flush_response(dhd->bus, ltoh16(flow_flush_resp->cmplt.flow_ring_id),
		ltoh16(flow_flush_resp->cmplt.status));
}
/**
 * Request dongle to configure soft doorbells for D2H rings. Host populated soft
 * doorbell information is transferred to dongle via the d2h ring config control
 * message.
 *
 * One ring_config_req_t is posted per D2H common ring, all claimed from the
 * control submission ring in a single allocation and submitted with one
 * doorbell. Compiled out unless DHD_D2H_SOFT_DOORBELL_SUPPORT is defined.
 */
void
dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd_pub_t *dhd)
{
#if defined(DHD_D2H_SOFT_DOORBELL_SUPPORT)
	uint16 ring_idx;
	uint8 *msg_next;
	void *msg_start;
	uint16 alloced = 0;
	unsigned long flags;
	dhd_prot_t *prot = dhd->prot;
	ring_config_req_t *ring_config_req;
	bcmpcie_soft_doorbell_t *soft_doorbell;
	msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
	const uint16 d2h_rings = BCMPCIE_D2H_COMMON_MSGRINGS;
	/* Claim space for d2h_ring number of d2h_ring_config_req_t messages */
	DHD_GENERAL_LOCK(dhd, flags);
	msg_start = dhd_prot_alloc_ring_space(dhd, ctrl_ring, d2h_rings, &alloced, TRUE);
	if (msg_start == NULL) {
		DHD_ERROR(("%s Msgbuf no space for %d D2H ring config soft doorbells\n",
			__FUNCTION__, d2h_rings));
		DHD_GENERAL_UNLOCK(dhd, flags);
		return;
	}
	msg_next = (uint8*)msg_start;
	for (ring_idx = 0; ring_idx < d2h_rings; ring_idx++) {
		/* position the ring_config_req into the ctrl subm ring */
		ring_config_req = (ring_config_req_t *)msg_next;
		/* Common msg header */
		ring_config_req->msg.msg_type = MSG_TYPE_D2H_RING_CONFIG;
		ring_config_req->msg.if_id = 0;
		ring_config_req->msg.flags = 0;
		ring_config_req->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
		ctrl_ring->seqnum++;
		ring_config_req->msg.request_id = htol32(DHD_FAKE_PKTID); /* unused */
		/* Ring Config subtype and d2h ring_id */
		ring_config_req->subtype = htol16(D2H_RING_CONFIG_SUBTYPE_SOFT_DOORBELL);
		ring_config_req->ring_id = htol16(DHD_D2H_RINGID(ring_idx));
		/* Host soft doorbell configuration */
		soft_doorbell = &prot->soft_doorbell[ring_idx];
		ring_config_req->soft_doorbell.value = htol32(soft_doorbell->value);
		ring_config_req->soft_doorbell.haddr.high =
			htol32(soft_doorbell->haddr.high);
		ring_config_req->soft_doorbell.haddr.low =
			htol32(soft_doorbell->haddr.low);
		ring_config_req->soft_doorbell.items = htol16(soft_doorbell->items);
		ring_config_req->soft_doorbell.msecs = htol16(soft_doorbell->msecs);
		DHD_INFO(("%s: Soft doorbell haddr 0x%08x 0x%08x value 0x%08x\n",
			__FUNCTION__, ring_config_req->soft_doorbell.haddr.high,
			ring_config_req->soft_doorbell.haddr.low,
			ring_config_req->soft_doorbell.value));
		msg_next = msg_next + ctrl_ring->item_len;
	}
	/* update control subn ring's WR index and ring doorbell to dongle */
	dhd_prot_ring_write_complete(dhd, ctrl_ring, msg_start, d2h_rings);
	DHD_GENERAL_UNLOCK(dhd, flags);
#endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */
}
/* Completion handler for a D2H ring-config request: only logs the status
 * and ring id reported by the dongle.
 */
static void
dhd_prot_d2h_ring_config_cmplt_process(dhd_pub_t *dhd, void *msg)
{
	ring_config_resp_t *resp = (ring_config_resp_t *)msg;

	DHD_INFO(("%s: Ring Config Response - status %d ringid %d\n",
		__FUNCTION__, ltoh16(resp->compl_hdr.status),
		ltoh16(resp->compl_hdr.flow_ring_id)));
}
/* Dump control-ring RD/WR pointers (host copy and dongle shared memory) plus
 * PCIe mailbox interrupt status/mask registers to the error log. Debug aid
 * called on error/timeout paths.
 *
 * Fix: the interrupt-status log format string ended in "\n," which printed a
 * stray comma at the start of the following line; the comma is removed.
 *
 * @return always 0
 */
static int
dhd_prot_debug_info_print(dhd_pub_t *dhd)
{
	dhd_prot_t *prot = dhd->prot;
	msgbuf_ring_t *flow_ring;
	uint16 rd, wr;
	uint32 intstatus = 0;
	uint32 intmask = 0;
	uint32 mbintstatus = 0;
	uint32 d2h_mb_data = 0;

	DHD_ERROR(("\n ------- DUMPING IOCTL RING RD WR Pointers ------- \r\n"));

	/* Control submission ring: host view vs dongle shared memory. */
	flow_ring = &prot->h2dring_ctrl_subn;
	DHD_ERROR(("CtrlPost: From Host mem: RD: %d WR %d \r\n", flow_ring->rd, flow_ring->wr));
	dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx);
	dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx);
	DHD_ERROR(("CtrlPost: From Shared Mem: RD: %d WR %d \r\n", rd, wr));

	/* Control completion ring: host view vs dongle shared memory. */
	flow_ring = &prot->d2hring_ctrl_cpln;
	DHD_ERROR(("CtrlCpl: From Host mem: RD: %d WR %d \r\n", flow_ring->rd, flow_ring->wr));
	dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx);
	dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx);
	DHD_ERROR(("CtrlCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));

	/* PCIe mailbox interrupt registers and D2H mailbox data. */
	intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxInt, 0, 0);
	intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxMask, 0, 0);
	mbintstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
		PCID2H_MailBox, 0, 0);
	dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0);

	DHD_ERROR(("\n ------- DUMPING INTR Status and Masks ------- \r\n"));
	DHD_ERROR(("intstatus=0x%x intmask=0x%x mbintstatus=0x%x\n",
		intstatus, intmask, mbintstatus));
	DHD_ERROR(("d2h_mb_data=0x%x def_intmask=0x%x \r\n", d2h_mb_data, dhd->bus->def_intmask));

	return 0;
}
/* Dump the host-resident DMA index arrays (D2H write pointers and H2D read
 * pointers) into strbuf b, walking common rings first and then flow rings.
 *
 * NOTE(review): the "0x%04x" conversions below print pointer values with an
 * int-sized specifier -- on 64-bit hosts this truncates; confirm whether %p
 * was intended.
 *
 * @return always 0
 */
int
dhd_prot_ringupd_dump(dhd_pub_t *dhd, struct bcmstrbuf *b)
{
	uint32 *ptr;
	uint32 value;
	uint32 i;
	uint32 max_h2d_queues = dhd_bus_max_h2d_queues(dhd->bus);
	/* Dongle DMA-writes these arrays; invalidate before reading. */
	OSL_CACHE_INV((void *)dhd->prot->d2h_dma_indx_wr_buf.va,
		dhd->prot->d2h_dma_indx_wr_buf.len);
	ptr = (uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va);
	bcm_bprintf(b, "\n max_tx_queues %d\n", max_h2d_queues);
	bcm_bprintf(b, "\nRPTR block H2D common rings, 0x%04x\n", ptr);
	value = ltoh32(*ptr);
	bcm_bprintf(b, "\tH2D CTRL: value 0x%04x\n", value);
	ptr++;
	value = ltoh32(*ptr);
	bcm_bprintf(b, "\tH2D RXPOST: value 0x%04x\n", value);
	ptr++;
	bcm_bprintf(b, "RPTR block Flow rings , 0x%04x\n", ptr);
	for (i = BCMPCIE_H2D_COMMON_MSGRINGS; i < max_h2d_queues; i++) {
		value = ltoh32(*ptr);
		bcm_bprintf(b, "\tflowring ID %d: value 0x%04x\n", i, value);
		ptr++;
	}
	OSL_CACHE_INV((void *)dhd->prot->h2d_dma_indx_rd_buf.va,
		dhd->prot->h2d_dma_indx_rd_buf.len);
	ptr = (uint32 *)(dhd->prot->h2d_dma_indx_rd_buf.va);
	bcm_bprintf(b, "\nWPTR block D2H common rings, 0x%04x\n", ptr);
	value = ltoh32(*ptr);
	bcm_bprintf(b, "\tD2H CTRLCPLT: value 0x%04x\n", value);
	ptr++;
	value = ltoh32(*ptr);
	bcm_bprintf(b, "\tD2H TXCPLT: value 0x%04x\n", value);
	ptr++;
	value = ltoh32(*ptr);
	bcm_bprintf(b, "\tD2H RXCPLT: value 0x%04x\n", value);
	return 0;
}
/* Enable/disable metadata debug dumping. The assignment is compiled out
 * unless DHD_DBG_SHOW_METADATA is set; the resulting flag is returned.
 */
uint32
dhd_prot_metadata_dbg_set(dhd_pub_t *dhd, bool val)
{
#if DHD_DBG_SHOW_METADATA
	dhd->prot->metadata_dbg = val;
#endif
	return (uint32)dhd->prot->metadata_dbg;
}
/* Return the current metadata debug flag. */
uint32
dhd_prot_metadata_dbg_get(dhd_pub_t *dhd)
{
	return (uint32)dhd->prot->metadata_dbg;
}
/* Program the rx or tx metadata offset (truncated to 16 bits) and echo the
 * newly stored value back via dhd_prot_metadatalen_get().
 */
uint32
dhd_prot_metadatalen_set(dhd_pub_t *dhd, uint32 val, bool rx)
{
	dhd_prot_t *prot = dhd->prot;
	uint16 *slot = rx ? &prot->rx_metadata_offset : &prot->tx_metadata_offset;

	*slot = (uint16)val;
	return dhd_prot_metadatalen_get(dhd, rx);
}
/* Return the configured rx or tx metadata offset. */
uint32
dhd_prot_metadatalen_get(dhd_pub_t *dhd, bool rx)
{
	dhd_prot_t *prot = dhd->prot;

	return rx ? prot->rx_metadata_offset : prot->tx_metadata_offset;
}
/** optimization to write "n" tx items at a time to ring.
 * Optionally updates the batching threshold (when set is TRUE) and always
 * returns the current value.
 */
uint32
dhd_prot_txp_threshold(dhd_pub_t *dhd, bool set, uint32 val)
{
	dhd_prot_t *prot = dhd->prot;

	if (set)
		prot->txp_threshold = (uint16)val;

	return prot->txp_threshold;
}
#ifdef DHD_RX_CHAINING
/* Mark the rx chain empty; chained packet state is re-seeded by the next
 * call to dhd_rxchain_frame().
 */
static INLINE void BCMFASTPATH
dhd_rxchain_reset(rxchain_info_t *rxchain)
{
	rxchain->pkt_count = 0;
}
/* Accumulate received packets of the same flow (same SA/DA/prio/ifidx) into
 * a chain so they can be handed to the upper layer in one call. A packet
 * belonging to a different flow, a non-IP/IPv6 or multicast frame, or a full
 * chain triggers dhd_rxchain_commit().
 */
static void BCMFASTPATH
dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt, uint ifidx)
{
	uint8 *eh;
	uint8 prio;
	dhd_prot_t *prot = dhd->prot;
	rxchain_info_t *rxchain = &prot->rxchain;
	/* Incoming packet must not already be part of a chain. */
	ASSERT(!PKTISCHAINED(pkt));
	ASSERT(PKTCLINK(pkt) == NULL);
	ASSERT(PKTCGETATTR(pkt) == 0);
	eh = PKTDATA(dhd->osh, pkt);
	/* Derive priority from the IP TOS/traffic-class precedence bits. */
	prio = IP_TOS46(eh + ETHER_HDR_LEN) >> IPV4_TOS_PREC_SHIFT;
	if (rxchain->pkt_count && !(PKT_CTF_CHAINABLE(dhd, ifidx, eh, prio, rxchain->h_sa,
		rxchain->h_da, rxchain->h_prio))) {
		/* Different flow - First release the existing chain */
		dhd_rxchain_commit(dhd);
	}
	/* For routers, with HNDCTF, link the packets using PKTSETCLINK, */
	/* so that the chain can be handed off to CTF bridge as is. */
	if (rxchain->pkt_count == 0) {
		/* First packet in chain */
		rxchain->pkthead = rxchain->pkttail = pkt;
		/* Keep a copy of ptr to ether_da, ether_sa and prio */
		rxchain->h_da = ((struct ether_header *)eh)->ether_dhost;
		rxchain->h_sa = ((struct ether_header *)eh)->ether_shost;
		rxchain->h_prio = prio;
		rxchain->ifidx = ifidx;
		rxchain->pkt_count++;
	} else {
		/* Same flow - keep chaining */
		PKTSETCLINK(rxchain->pkttail, pkt);
		rxchain->pkttail = pkt;
		rxchain->pkt_count++;
	}
	/* Only unicast IPv4/IPv6 frames stay chained; anything else flushes
	 * the chain immediately.
	 */
	if ((!ETHER_ISMULTI(rxchain->h_da)) &&
		((((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IP)) ||
		(((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IPV6)))) {
		PKTSETCHAINED(dhd->osh, pkt);
		PKTCINCRCNT(rxchain->pkthead);
		PKTCADDLEN(rxchain->pkthead, PKTLEN(dhd->osh, pkt));
	} else {
		dhd_rxchain_commit(dhd);
		return;
	}
	/* If we have hit the max chain length, dispatch the chain and reset */
	if (rxchain->pkt_count >= DHD_PKT_CTF_MAX_CHAIN_LEN) {
		dhd_rxchain_commit(dhd);
	}
}
/* Hand the accumulated rx chain (if any) to the upper layer, then reset the
 * chain state for the next flow.
 */
static void BCMFASTPATH
dhd_rxchain_commit(dhd_pub_t *dhd)
{
	rxchain_info_t *rxchain = &dhd->prot->rxchain;

	if (rxchain->pkt_count == 0)
		return;

	/* Release the packets to dhd_linux */
	dhd_bus_rx_frame(dhd->bus, rxchain->pkthead, rxchain->ifidx, rxchain->pkt_count);

	dhd_rxchain_reset(rxchain);
}
#endif /* DHD_RX_CHAINING */
| gpl-2.0 |
rimistri/mediatek | mt6732/mediatek/kernel/drivers/trustzone/tz_mem.c | 8746 |
/* Release secure chunk memory for normal world usage.
 *
 * For better utilization, part of the secure chunk memory (of a pre-defined size) can be used
 * by the normal world through the memory TA.
 * After release, the pre-defined secure memory can be read/written by the normal world through
 * the memory TA, and can not be used by the secure world. After append, it can be used by the
 * secure world again.
 * For easy usage at user level, a block device can be registered; it accesses the released
 * secure chunk memory through the memory TA.
 *
 * How to use secure chunk memory at user level:
 * 1) Create a block device node, ex: /dev/tzmem
 * 2) Release secure chunk memory by UREE_ReleaseSecurechunkmem.
 *    After releasing, the pre-defined chunk memory will be used by the normal world only.
 * 3) Open /dev/tzmem for read/write
 * 4) When finished using it, close it.
 * 5) Append secure chunk memory back to secure world usage by UREE_AppendSecurechunkmem.
 *    After appending, the pre-defined chunk memory will not be used by the normal world.
 *
 * Or simply, using the APIs:
 * 1) UREE_ReleaseTzmem to release secure chunk memory for normal world usage.
 * 2) UREE_AppendTzmem to append secure chunk memory back to the secure world.
 *
 */
/* ----------------------------------------------------------------------------- */
/* Include files */
/* ----------------------------------------------------------------------------- */
#include <trustzone/kree/mem.h>
#include "trustzone/kree/system.h"
#include <tz_cross/ta_mem.h>
#include <linux/xlog.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include "trustzone/kree/tz_mem.h"
/* Use this define to enable module for TZMEM
*/
#define MTEE_TZMEM_ENABLE
/* enable debug logs
*/
#define MTEE_TZMEM_DBG
#define KREE_RELEASECM_MAX_SIZE 4096 /* bytes */
/* Per-device bookkeeping for a released secure-chunk-memory region that is
 * exposed to the normal world as a block device.
 */
typedef struct {
	uint32_t control;	/* 0 = not released, 1 = released */
	uint32_t size;		/* real released pool size in bytes */
	uint32_t pool_size;	/* pool size reported by KREE_GetSecurechunkReleaseSize() */
	struct gendisk *disk;	/* backing gendisk; NULL until first probe */
	KREE_SESSION_HANDLE session;	/* open session to the memory TA */
	KREE_RELEASECM_HANDLE handle;	/* handle of the released chunk memory */
} tzmem_diskinfo_t;
#ifdef MTEE_TZMEM_ENABLE
static uint32_t tzmem_poolIndex; /* currently, always 0. for future extension... */
/* One diskinfo slot per minor number of the tzmem block region. */
static tzmem_diskinfo_t _tzmem_diskInfo[IO_NODE_NUMBER_TZMEM];
/* Serializes first-time disk creation in tzmem_blk_probe(). */
static DEFINE_MUTEX(tzmem_probe_mutex);
/* Request-queue lock handed to blk_init_queue(). */
static DEFINE_SPINLOCK(tzmem_blk_lock);
/* Query the memory TA for the size (in bytes) of the releasable secure chunk
 * pool. Opens a short-lived session to the TA just for this query.
 *
 * @param size out: pool size in bytes (valid only on TZ_RESULT_SUCCESS)
 * @return TZ_RESULT_SUCCESS, or the first failing KREE call's error code
 */
static TZ_RESULT _tzmem_get_poolsize(uint32_t *size)
{
	KREE_SESSION_HANDLE session;
	int ret = TZ_RESULT_SUCCESS;
	ret = KREE_CreateSession(TZ_TA_MEM_UUID, &session);
	if (ret != TZ_RESULT_SUCCESS) {
		xlog_printk(ANDROID_LOG_ERROR, MTEE_TZMEM_TAG,
			    "[%s] _tzmem_get_poolsize: KREE_CreateSession Error = 0x%x\n",
			    MODULE_NAME, ret);
		return ret;
	}
	/* get ta preset tzmem size */
	ret = KREE_GetSecurechunkReleaseSize(session, size);
	if (ret != TZ_RESULT_SUCCESS) {
		xlog_printk(ANDROID_LOG_ERROR, MTEE_TZMEM_TAG,
			    "[%s] _tzmem_get_poolsize: KREE_GetSecurechunkReleaseSize Error = 0x%x\n",
			    MODULE_NAME, ret);
		/* Best effort close; the query error is what gets reported. */
		KREE_CloseSession(session);
		return ret;
	}
	ret = KREE_CloseSession(session);
	if (ret != TZ_RESULT_SUCCESS) {
		xlog_printk(ANDROID_LOG_ERROR, MTEE_TZMEM_TAG,
			    "[%s] _tzmem_get_poolsize: KREE_CloseSession Error = 0x%x\n",
			    MODULE_NAME, ret);
		return ret;
	}
	return ret;
}
/* Generic ioctl dispatcher for the tzmem block device. No commands are
 * implemented yet, so every request is rejected with -EINVAL.
 */
static long tzmem_gen_ioctl(dev_t dev, unsigned int cmd, unsigned long arg)
{
	long ret;
#ifdef MTEE_TZMEM_DBG
	pr_info("====> tzmem_gen_ioctl\n");
#endif
	switch (cmd) {
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
/* Block request handler: services queued READ/WRITE requests against the
 * released secure chunk memory via the memory-TA read/write calls, in
 * KREE_RELEASECM_MAX_SIZE-sized pieces. Runs with the request-queue lock
 * held (standard blk_init_queue() contract).
 */
static void do_tzmem_blk_request(struct request_queue *q)
{
	struct request *req;
	uint32_t i;
#ifdef MTEE_TZMEM_DBG
	pr_info("====> do_tzmem_blk_request\n");
#endif
	req = blk_fetch_request(q);
	while (req) {
		/* Byte offset and length of the current segment. */
		unsigned long start = blk_rq_pos(req) << 9;
		unsigned long len = blk_rq_cur_bytes(req);
		int err = 0;
		struct gendisk *disk = req->rq_disk;
		tzmem_diskinfo_t *diskInfo = (tzmem_diskinfo_t *) disk->private_data;
		KREE_SESSION_HANDLE session;
		session = diskInfo->session;
#ifdef MTEE_TZMEM_DBG
		pr_info("====> 0x%x 0x%x\n", (uint32_t) session, diskInfo->size);
#endif
		/* Reject any access beyond the released region. */
		if ((start + len > diskInfo->size) || (start > diskInfo->size)
		    || (len > diskInfo->size)) {
			err = -EIO;
			goto done;
		}
		if (rq_data_dir(req) == READ) {
#ifdef MTEE_TZMEM_DBG
			pr_info("====> do_tzmem_blk_request: read = 0x%x, 0x%x\n", (uint32_t) start,
				(uint32_t) len);
#endif
			/* Copy out in max-sized pieces, then the remainder. */
			for (i = 0; i < len / KREE_RELEASECM_MAX_SIZE; i++) {
				KREE_ReadSecurechunkmem((KREE_SESSION_HANDLE) session,
							start + i * KREE_RELEASECM_MAX_SIZE,
							KREE_RELEASECM_MAX_SIZE,
							req->buffer + i * KREE_RELEASECM_MAX_SIZE);
			}
			if (len % KREE_RELEASECM_MAX_SIZE) {
				KREE_ReadSecurechunkmem((KREE_SESSION_HANDLE) session,
							start + i * KREE_RELEASECM_MAX_SIZE,
							len % KREE_RELEASECM_MAX_SIZE,
							req->buffer + i * KREE_RELEASECM_MAX_SIZE);
			}
		} else {
#ifdef MTEE_TZMEM_DBG
			pr_info("====> do_tzmem_blk_request: write = 0x%x, 0x%x\n",
				(uint32_t) start, (uint32_t) len);
#endif
			/* Copy in, mirroring the read path above. */
			for (i = 0; i < len / KREE_RELEASECM_MAX_SIZE; i++) {
				KREE_WriteSecurechunkmem((KREE_SESSION_HANDLE) session,
							 start + i * KREE_RELEASECM_MAX_SIZE,
							 KREE_RELEASECM_MAX_SIZE,
							 req->buffer + i * KREE_RELEASECM_MAX_SIZE);
			}
			if (len % KREE_RELEASECM_MAX_SIZE) {
				KREE_WriteSecurechunkmem((KREE_SESSION_HANDLE) session,
							 start + i * KREE_RELEASECM_MAX_SIZE,
							 len % KREE_RELEASECM_MAX_SIZE,
							 req->buffer + i * KREE_RELEASECM_MAX_SIZE);
			}
		}
done:
		/* Complete the current segment; fetch the next request when
		 * this one is fully finished.
		 */
		if (!__blk_end_request_cur(req, err)) {
			req = blk_fetch_request(q);
		}
	}
}
/* block_device_operations.ioctl hook: forwards to the generic dispatcher. */
static int tzmem_blk_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, unsigned long arg)
{
	return tzmem_gen_ioctl(bdev->bd_dev, cmd, arg);
}
/* block device module info
 * Only ioctl is provided; open/release and the rest use block-layer defaults.
 */
static const struct block_device_operations tzmem_blk_fops = {
	.owner = THIS_MODULE,
	.ioctl = tzmem_blk_ioctl,
};
/* block device probe function
 */
/* Create disk on demand. So we won't create lots of disk for un-used devices. */
/* On first probe: allocates the gendisk and request queue, sizes the disk
 * from the memory TA's pool size, registers it, and opens a persistent TA
 * session stored in the diskinfo slot. Subsequent probes just return the
 * cached disk. Serialized by tzmem_probe_mutex.
 */
static struct kobject *tzmem_blk_probe(dev_t dev, int *part, void *data)
{
	uint32_t len;
	struct gendisk *disk;
	struct kobject *kobj;
	struct request_queue *queue;
	tzmem_diskinfo_t *diskInfo;
	int ret;
	KREE_SESSION_HANDLE session;
#ifdef MTEE_TZMEM_DBG
	pr_info("====> tzmem_blk_probe\n");
#endif
	mutex_lock(&tzmem_probe_mutex);
	diskInfo = (tzmem_diskinfo_t *) &_tzmem_diskInfo[tzmem_poolIndex];
	if (diskInfo->disk == NULL) {
		disk = alloc_disk(1);
		if (!disk) {
			goto out_info;
		}
		queue = blk_init_queue(do_tzmem_blk_request, &tzmem_blk_lock);
		if (!queue) {
			goto out_queue;
		}
		blk_queue_max_hw_sectors(queue, 1024);
		blk_queue_bounce_limit(queue, BLK_BOUNCE_ANY);
		if (_tzmem_get_poolsize(&len)) {
			goto out_init;
		}
		disk->major = IO_NODE_MAJOR_TZMEM;
		disk->first_minor = MINOR(dev);
		disk->fops = &tzmem_blk_fops;
		/* NOTE(review): this stores the address of the diskinfo
		 * array, not &_tzmem_diskInfo[tzmem_poolIndex]; identical
		 * while tzmem_poolIndex == 0, but would break for other
		 * indices -- confirm intent.
		 */
		disk->private_data = &_tzmem_diskInfo;
		snprintf(disk->disk_name, sizeof(disk->disk_name), "tzmem%d", MINOR(dev));
		disk->queue = queue;
		/* Capacity is in 512-byte sectors. */
		set_capacity(disk, len / 512);
		add_disk(disk);
		ret = KREE_CreateSession(TZ_TA_MEM_UUID, &session);
		if (ret != TZ_RESULT_SUCCESS) {
			xlog_printk(ANDROID_LOG_ERROR, MTEE_TZMEM_TAG,
				    "[%s] _tzmem_get_poolsize: KREE_CreateSession Error = 0x%x\n",
				    MODULE_NAME, ret);
			/* NOTE(review): this error path runs after add_disk()
			 * but never calls del_gendisk(); confirm cleanup.
			 */
			goto out_init;
		}
		diskInfo->session = session;
		diskInfo->pool_size = len;
		diskInfo->disk = disk;
		diskInfo->size = len;
	}
	*part = 0;
	kobj = diskInfo ? get_disk(diskInfo->disk) : ERR_PTR(-ENOMEM);
	mutex_unlock(&tzmem_probe_mutex);
	return kobj;
out_init:
	blk_cleanup_queue(queue);
out_queue:
	put_disk(disk);
out_info:
	mutex_unlock(&tzmem_probe_mutex);
	return ERR_PTR(-ENOMEM);
}
/*
 * tzmem block device module init
 * Registers the tzmem major number and the minor-number region; disks are
 * created lazily in tzmem_blk_probe() on first access.
 */
/* static struct class* pTzClass = NULL; */
/* static struct device* pTzDevice = NULL; */
static dev_t tz_client_dev;	/* dev_t of the registered tzmem region */
static int __init tzmem_blkdev_init(void)
{
#ifdef MTEE_TZMEM_DBG
	pr_info("====> tzmem_blkdev_init\n");
#endif
	if (register_blkdev(IO_NODE_MAJOR_TZMEM, DEV_TZMEM)) {
		xlog_printk(ANDROID_LOG_ERROR, MTEE_TZMEM_TAG,
			    "[%s] tzmem_blkdev_init: register_blkdev error\n", MODULE_NAME);
		return -EFAULT;
	}
	tz_client_dev = MKDEV(IO_NODE_MAJOR_TZMEM, IO_NODE_MINOR_TZMEM);
	/* Lazy disk creation: probe callback builds the gendisk on demand. */
	blk_register_region(tz_client_dev, IO_NODE_NUMBER_TZMEM,
			    THIS_MODULE, tzmem_blk_probe, NULL, NULL);
#if 0
	/* create /dev/tzmem automaticly */
	pTzClass = class_create(THIS_MODULE, DEV_TZMEM);
	if (IS_ERR(pTzClass)) {
		int ret = PTR_ERR(pTzClass);
		xlog_printk(ANDROID_LOG_ERROR, MTEE_TZMEM_TAG , "[%s] could not create class for the device, ret:%d\n", MODULE_NAME, ret);
		return ret;
	}
	pTzDevice = device_create(pTzClass, NULL, tz_client_dev, NULL, DEV_TZMEM);
#endif
	return 0;
}
module_init(tzmem_blkdev_init);
#endif
| gpl-2.0 |
johnparker007/mame | src/devices/bus/ti8x/ti8x.cpp | 14113 | // license:BSD-3-Clause
// copyright-holders:Vas Crabb
#include "emu.h"
#include "ti8x.h"
#define LOG_GENERAL (1U << 0)
#define LOG_BITPROTO (1U << 1)
#define LOG_BYTEPROTO (1U << 2)
//#define VERBOSE (LOG_GENERAL | LOG_BITPROTO | LOG_BYTEPROTO)
#define LOG_OUTPUT_FUNC device().logerror
#include "logmacro.h"
#define LOGBITPROTO(...) LOGMASKED(LOG_BITPROTO, __VA_ARGS__)
#define LOGBYTEPROTO(...) LOGMASKED(LOG_BYTEPROTO, __VA_ARGS__)
DEFINE_DEVICE_TYPE(TI8X_LINK_PORT, ti8x_link_port_device, "ti8x_link_port", "TI-8x Link Port")
// Public constructor: delegates to the protected constructor with the
// concrete TI8X_LINK_PORT device type.
ti8x_link_port_device::ti8x_link_port_device(
		machine_config const &mconfig,
		char const *tag,
		device_t *owner,
		uint32_t clock)
	: ti8x_link_port_device(mconfig, TI8X_LINK_PORT, tag, owner, clock)
{
}
// Protected constructor used by derived port types.  Both the tip and
// ring line states (input and output) start out high/released.
ti8x_link_port_device::ti8x_link_port_device(
		machine_config const &mconfig,
		device_type type,
		char const *tag,
		device_t *owner,
		uint32_t clock)
	: device_t(mconfig, type, tag, owner, clock)
	, device_single_card_slot_interface<device_ti8x_link_port_interface>(mconfig, *this)
	, m_tip_handler(*this)
	, m_ring_handler(*this)
	, m_dev(nullptr)
	, m_tip_in(true)
	, m_tip_out(true)
	, m_ring_in(true)
	, m_ring_out(true)
{
}
// Host writes the tip line; forward the new level to the plugged-in
// card only when it actually changes.
WRITE_LINE_MEMBER(ti8x_link_port_device::tip_w)
{
	bool const level(state != 0);
	if (level == m_tip_out)
		return;

	m_tip_out = level;
	if (m_dev)
		m_dev->input_tip(level ? 1 : 0);
}
// Host writes the ring line; forward the new level to the plugged-in
// card only when it actually changes.
WRITE_LINE_MEMBER(ti8x_link_port_device::ring_w)
{
	bool const level(state != 0);
	if (level == m_ring_out)
		return;

	m_ring_out = level;
	if (m_dev)
		m_dev->input_ring(level ? 1 : 0);
}
// Device lifecycle: resolve output callbacks, register line states for
// save states, and force both lines to the released (high) level.
void ti8x_link_port_device::device_start()
{
	m_tip_handler.resolve_safe();
	m_ring_handler.resolve_safe();

	save_item(NAME(m_tip_in));
	save_item(NAME(m_tip_out));
	save_item(NAME(m_ring_in));
	save_item(NAME(m_ring_out));

	m_tip_in = m_tip_out = true;
	m_ring_in = m_ring_out = true;
}
// Cache the inserted card device (may be nullptr when the slot is empty).
void ti8x_link_port_device::device_config_complete()
{
	m_dev = get_card_device();
}
// Base card interface: remember the port we are plugged into (the
// owner of the card device is the slot itself).
device_ti8x_link_port_interface::device_ti8x_link_port_interface(
		machine_config const &mconfig,
		device_t &device)
	: device_interface(device, "ti8xlink")
	, m_port(dynamic_cast<ti8x_link_port_device *>(device.owner()))
{
}
// Bit-level protocol helper: starts idle with an empty transmit buffer
// and both input lines released (high).
device_ti8x_link_port_bit_interface::device_ti8x_link_port_bit_interface(
		machine_config const &mconfig,
		device_t &device)
	: device_ti8x_link_port_interface(mconfig, device)
	, m_error_timer(nullptr)
	, m_bit_phase(IDLE)
	, m_tx_bit_buffer(EMPTY)
	, m_tip_in(true)
	, m_ring_in(true)
{
}
// Allocate the protocol timeout timer (once) and reset protocol state.
void device_ti8x_link_port_bit_interface::interface_pre_start()
{
	device_ti8x_link_port_interface::interface_pre_start();

	if (!m_error_timer)
		m_error_timer = device().machine().scheduler().timer_alloc(timer_expired_delegate(FUNC(device_ti8x_link_port_bit_interface::bit_timeout), this));

	m_bit_phase = IDLE;
	m_tx_bit_buffer = EMPTY;
	m_tip_in = m_ring_in = true;
}
// Register bit-protocol state for save states.
void device_ti8x_link_port_bit_interface::interface_post_start()
{
	device_ti8x_link_port_interface::interface_post_start();

	device().save_item(NAME(m_bit_phase));
	device().save_item(NAME(m_tx_bit_buffer));
	device().save_item(NAME(m_tip_in));
	device().save_item(NAME(m_ring_in));
}
// On reset, drop any pending bit, release both lines, and go idle —
// or wait for the bus to become idle if a peer is still pulling a line low.
void device_ti8x_link_port_bit_interface::interface_pre_reset()
{
	device_ti8x_link_port_interface::interface_pre_reset();

	m_error_timer->reset();
	m_bit_phase = (m_tip_in && m_ring_in) ? IDLE : WAIT_IDLE;
	m_tx_bit_buffer = EMPTY;

	output_tip(1);
	output_ring(1);
}
// Queue a single bit for transmission.  If the bus is idle the bit goes
// out immediately; if we are waiting for the bus, (re)arm the timeout.
// Queueing over an un-sent bit logs a warning and overwrites it.
void device_ti8x_link_port_bit_interface::send_bit(bool data)
{
	LOGBITPROTO("queue %d bit\n", data ? 1 : 0);

	if (m_tx_bit_buffer != EMPTY)
		device().logerror("device_ti8x_link_port_bit_interface: warning: transmit buffer overrun\n");

	m_tx_bit_buffer = data ? PENDING_1 : PENDING_0;

	switch (m_bit_phase)
	{
	case IDLE:
		check_tx_bit_buffer();
		break;
	case WAIT_IDLE:
		m_error_timer->reset(attotime(1, 0)); // TODO: configurable timeout
		break;
	default:
		break;
	}
}
// Called by the derived device to consume the bit currently being held
// (HOLD_0/HOLD_1).  Releases our acknowledgement line; if the peer has
// not released its line in turn, we treat it as a collision and wait
// for the bus to go idle.  Calling this in any non-holding phase is a
// programming error and aborts emulation.
void device_ti8x_link_port_bit_interface::accept_bit()
{
	switch (m_bit_phase)
	{
	// can't accept a bit that isn't being held
	case IDLE:
	case WAIT_ACK_0:
	case WAIT_ACK_1:
	case WAIT_REL_0:
	case WAIT_REL_1:
	case ACK_0:
	case ACK_1:
	case WAIT_IDLE:
		fatalerror("device_ti8x_link_port_bit_interface: attempt to accept bit when not holding");
		break;

	// release the acknowledgement - if the ring doesn't rise we've lost sync
	case HOLD_0:
		assert(m_tip_in);

		output_ring(1);
		if (m_ring_in)
		{
			LOGBITPROTO("accepted 0 bit\n");
			check_tx_bit_buffer();
		}
		else
		{
			LOGBITPROTO("accepted 0 bit, ring low (collision) - waiting for bus idle\n");
			m_error_timer->reset((EMPTY == m_tx_bit_buffer) ? attotime::never : attotime(1, 0)); // TODO: configurable timeout
			m_bit_phase = WAIT_IDLE;
			bit_collision();
		}
		break;

	// release the acknowledgement - if the tip doesn't rise we've lost sync
	case HOLD_1:
		assert(m_ring_in);

		output_tip(1);
		if (m_tip_in)
		{
			LOGBITPROTO("accepted 1 bit\n");
			check_tx_bit_buffer();
		}
		else
		{
			LOGBITPROTO("accepted 1 bit, tip low (collision) - waiting for bus idle\n");
			m_error_timer->reset((EMPTY == m_tx_bit_buffer) ? attotime::never : attotime(1, 0)); // TODO: configurable timeout
			m_bit_phase = WAIT_IDLE;
			bit_collision();
		}
		break;

	// something very bad happened (heap smash?)
	default:
		throw false;
	}
}
// Edge handler for the tip line driven by the peer.  A falling tip edge
// while idle starts an incoming 0 bit; the remaining cases advance the
// handshake for whichever bit transfer is in flight, or flag a lost-sync
// collision when the peer drives a line at the wrong time.
WRITE_LINE_MEMBER(device_ti8x_link_port_bit_interface::input_tip)
{
	m_tip_in = bool(state);
	switch (m_bit_phase)
	{
	// if tip falls while idle, it's the beginning of an incoming 0
	case IDLE:
		if (!m_tip_in)
		{
			LOGBITPROTO("falling edge on tip, acknowledging 0 bit\n");
			m_error_timer->reset(attotime(1, 0)); // TODO: configurable timeout
			m_bit_phase = ACK_0;
			output_ring(0);
		}
		break;

	// we're driving tip low in this state, ignore it
	case WAIT_ACK_0:
	case ACK_1:
	case HOLD_1:
		break;

	// tip must fall to acknowledge outgoing 1
	case WAIT_ACK_1:
		if (!m_tip_in)
		{
			LOGBITPROTO("falling edge on tip, 1 bit acknowledged, confirming\n");
			m_error_timer->reset(attotime(1, 0)); // TODO: configurable timeout
			m_bit_phase = WAIT_REL_1;
			output_ring(1);
		}
		break;

	// if tip falls now, we've lost sync
	case WAIT_REL_0:
	case HOLD_0:
		if (!m_tip_in)
		{
			LOGBITPROTO("falling edge on tip, lost sync, waiting for bus idle\n");
			m_error_timer->reset((EMPTY == m_tx_bit_buffer) ? attotime::never : attotime(1, 0)); // TODO: configurable timeout
			m_bit_phase = WAIT_IDLE;
			output_ring(1);
			bit_collision();
		}
		break;

	// tip must rise to complete outgoing 1 sequence
	case WAIT_REL_1:
		if (m_tip_in)
		{
			assert(!m_ring_in);

			LOGBITPROTO("rising edge on tip, 1 bit sent\n");
			check_tx_bit_buffer();
			bit_sent();
		}
		break;

	// tip must rise to accept our acknowledgement
	case ACK_0:
		if (m_tip_in)
		{
			LOGBITPROTO("rising edge on tip, 0 bit acknowledge confirmed, holding\n");
			m_error_timer->reset();
			m_bit_phase = HOLD_0;
			bit_received(false);
		}
		break;

	// if the bus is available, check for bit to send
	case WAIT_IDLE:
		if (m_tip_in && m_ring_in)
		{
			LOGBITPROTO("rising edge on tip, bus idle detected\n");
			check_tx_bit_buffer();
		}
		break;

	// something very bad happened (heap smash?)
	default:
		throw false;
	}
}
// Edge handler for the ring line driven by the peer.  Mirror image of
// input_tip: a falling ring edge while idle starts an incoming 1 bit;
// the remaining cases advance the handshake or flag lost sync.
WRITE_LINE_MEMBER(device_ti8x_link_port_bit_interface::input_ring)
{
	m_ring_in = bool(state);
	switch (m_bit_phase)
	{
	// if ring falls while idle, it's the beginning of an incoming 1
	case IDLE:
		if (!m_ring_in)
		{
			LOGBITPROTO("falling edge on ring, acknowledging 1 bit\n");
			m_error_timer->reset(attotime(1, 0)); // TODO: configurable timeout
			m_bit_phase = ACK_1;
			output_tip(0);
		}
		break;

	// ring must fall to acknowledge outgoing 0
	case WAIT_ACK_0:
		if (!m_ring_in)
		{
			LOGBITPROTO("falling edge on ring, 0 bit acknowledged, confirming\n");
			m_error_timer->reset(attotime(1, 0)); // TODO: configurable timeout
			m_bit_phase = WAIT_REL_0;
			output_tip(1);
		}
		break;

	// we're driving ring low in this state, ignore it
	case WAIT_ACK_1:
	case ACK_0:
	case HOLD_0:
		break;

	// ring must rise to complete outgoing 0 sequence
	case WAIT_REL_0:
		if (m_ring_in)
		{
			assert(!m_tip_in);

			LOGBITPROTO("rising edge on ring, 0 bit sent\n");
			check_tx_bit_buffer();
			bit_sent();
		}
		break;

	// if ring falls now, we've lost sync
	case WAIT_REL_1:
	case HOLD_1:
		if (!m_ring_in)
		{
			LOGBITPROTO("falling edge on ring, lost sync, waiting for bus idle\n");
			m_error_timer->reset((EMPTY == m_tx_bit_buffer) ? attotime::never : attotime(1, 0)); // TODO: configurable timeout
			m_bit_phase = WAIT_IDLE;
			output_tip(1);
			bit_collision();
		}
		break;

	// ring must rise to accept our acknowledgement
	case ACK_1:
		if (m_ring_in)
		{
			LOGBITPROTO("rising edge on ring, 1 bit acknowledge confirmed, holding\n");
			m_error_timer->reset();
			m_bit_phase = HOLD_1;
			bit_received(true);
		}
		break;

	// if the bus is available, check for bit to send
	case WAIT_IDLE:
		if (m_tip_in && m_ring_in)
		{
			// Fixed copy/paste error: this handler reacts to ring edges,
			// but the log message previously said "tip".
			LOGBITPROTO("rising edge on ring, bus idle detected\n");
			check_tx_bit_buffer();
		}
		break;

	// something very bad happened (heap smash?)
	default:
		throw false;
	}
}
// Timer callback fired when a handshake phase exceeds its timeout.
// Receive-side timeouts (ACK_0/ACK_1) release both lines and notify the
// derived device; send-side timeouts drop the pending bit and notify.
// The holding/idle phases never arm the timer, so hitting them here
// indicates state corruption.
TIMER_CALLBACK_MEMBER(device_ti8x_link_port_bit_interface::bit_timeout)
{
	switch (m_bit_phase)
	{
	// something very bad happened (heap smash?)
	case IDLE:
	case HOLD_0:
	case HOLD_1:
	default:
		throw false;

	// receive timeout
	case ACK_0:
	case ACK_1:
		LOGBITPROTO("timeout acknowledging %d bit\n", (ACK_0 == m_bit_phase) ? 0 : 1);
		output_tip(1);
		output_ring(1);
		if (m_tip_in && m_ring_in)
		{
			check_tx_bit_buffer();
		}
		else
		{
			LOGBITPROTO("waiting for bus idle\n");
			m_error_timer->reset((EMPTY == m_tx_bit_buffer) ? attotime::never : attotime(1, 0)); // TODO: configurable timeout
			m_bit_phase = WAIT_IDLE;
		}
		bit_receive_timeout();
		break;

	// send timeout:
	case WAIT_IDLE:
		assert(EMPTY != m_tx_bit_buffer);
		[[fallthrough]];
	case WAIT_ACK_0:
	case WAIT_ACK_1:
	case WAIT_REL_0:
	case WAIT_REL_1:
		LOGBITPROTO("timeout sending bit\n");
		m_error_timer->reset();
		m_bit_phase = (m_tip_in && m_ring_in) ? IDLE : WAIT_IDLE;
		m_tx_bit_buffer = EMPTY;
		output_tip(1);
		output_ring(1);
		bit_send_timeout();
		break;
	}
}
// With the bus idle (both lines high — asserted on entry), either go to
// the IDLE phase if nothing is queued, or start transmitting the queued
// bit: pull tip low for a 0, ring low for a 1, and arm the ack timeout.
void device_ti8x_link_port_bit_interface::check_tx_bit_buffer()
{
	assert(m_tip_in);
	assert(m_ring_in);

	switch (m_tx_bit_buffer)
	{
	// nothing to do
	case EMPTY:
		LOGBITPROTO("no pending bit, entering idle state\n");
		m_error_timer->reset();
		m_bit_phase = IDLE;
		break;

	// pull tip low and wait for acknowledgement
	case PENDING_0:
		LOGBITPROTO("sending 0 bit, pulling tip low\n");
		m_error_timer->reset(attotime(1, 0)); // TODO: configurable timeout
		m_bit_phase = WAIT_ACK_0;
		m_tx_bit_buffer = EMPTY;
		output_tip(0);
		break;

	// pull ring low and wait for acknowledgement
	case PENDING_1:
		LOGBITPROTO("sending 1 bit, pulling ring low\n");
		m_error_timer->reset(attotime(1, 0)); // TODO: configurable timeout
		m_bit_phase = WAIT_ACK_1;
		m_tx_bit_buffer = EMPTY;
		output_ring(0);
		break;

	// something very bad happened (heap smash?)
	default:
		throw false;
	}
}
// Byte-level protocol helper layered on the bit interface.  Zero byte
// buffers mean "no transfer in progress".
device_ti8x_link_port_byte_interface::device_ti8x_link_port_byte_interface(
		machine_config const &mconfig,
		device_t &device)
	: device_ti8x_link_port_bit_interface(mconfig, device)
	, m_tx_byte_buffer(0U)
	, m_rx_byte_buffer(0U)
{
}
// Clear any in-flight byte transfer before the device starts.
void device_ti8x_link_port_byte_interface::interface_pre_start()
{
	device_ti8x_link_port_bit_interface::interface_pre_start();

	m_tx_byte_buffer = m_rx_byte_buffer = 0U;
}
// Register byte shift registers for save states.
void device_ti8x_link_port_byte_interface::interface_post_start()
{
	device_ti8x_link_port_bit_interface::interface_post_start();

	device().save_item(NAME(m_tx_byte_buffer));
	device().save_item(NAME(m_rx_byte_buffer));
}
// Drop any in-flight byte transfer on reset.
void device_ti8x_link_port_byte_interface::interface_pre_reset()
{
	device_ti8x_link_port_bit_interface::interface_pre_reset();

	m_tx_byte_buffer = m_rx_byte_buffer = 0U;
}
// Queue a byte for transmission, LSB first.  The upper seven bits go
// into the shift register together with a marker at bit 7 so bit_sent()
// can detect when the whole byte has gone out.
void device_ti8x_link_port_byte_interface::send_byte(u8 data)
{
	if (m_tx_byte_buffer != 0U)
		device().logerror("device_ti8x_link_port_byte_interface: warning: transmit buffer overrun\n");

	LOGBYTEPROTO("sending byte 0x%02X\n", data);

	u16 const remaining(u16(data) >> 1);
	m_tx_byte_buffer = u16(0x0080) | remaining;
	send_bit(BIT(data, 0));
}
// Consume a completely received byte (marker bit 8 must be set) and
// release the final held bit so the peer can continue.
void device_ti8x_link_port_byte_interface::accept_byte()
{
	assert(BIT(m_rx_byte_buffer, 8));

	LOGBYTEPROTO("accepting final bit of byte\n");
	m_rx_byte_buffer = 0U;
	accept_bit();
}
// Bit-layer collision aborts any in-flight byte in both directions.
// (Also fixes the log message typo: "collection" -> "collision".)
void device_ti8x_link_port_byte_interface::bit_collision()
{
	LOGBYTEPROTO("bit collision, clearing byte buffers\n");
	m_tx_byte_buffer = m_rx_byte_buffer = 0U;
	byte_collision();
}
// Bit-layer send timeout aborts the outgoing byte.
void device_ti8x_link_port_byte_interface::bit_send_timeout()
{
	LOGBYTEPROTO("bit send timeout, clearing send byte buffer\n");
	m_tx_byte_buffer = 0U;
	byte_send_timeout();
}
// Bit-layer receive timeout aborts the incoming byte.
void device_ti8x_link_port_byte_interface::bit_receive_timeout()
{
	LOGBYTEPROTO("bit receive timeout, clearing receive byte buffer\n");
	m_rx_byte_buffer = 0U;
	byte_receive_timeout();
}
// Bit-layer confirmation that the previous bit went out.  Shift the next
// bit out of the transmit register; the marker bit planted by send_byte
// guarantees the register only becomes zero after the final (eighth) bit.
void device_ti8x_link_port_byte_interface::bit_sent()
{
	assert(m_tx_byte_buffer);

	bool const next(BIT(m_tx_byte_buffer, 0));
	m_tx_byte_buffer >>= 1;
	if (!m_tx_byte_buffer)
	{
		assert(next); // the last bit shifted out must be the marker
		LOGBYTEPROTO("final bit of byte sent\n");
		byte_sent();
	}
	else
	{
		LOGBYTEPROTO("bit sent, sending next bit of byte\n");
		send_bit(next);
	}
}
// Bit-layer delivery of an incoming bit.  Bits arrive LSB first and are
// shifted down into bits 7..0; a marker seeded at bit 15 on the first
// bit reaches bit 8 exactly when eight data bits have been collected.
void device_ti8x_link_port_byte_interface::bit_received(bool data)
{
	assert(!BIT(m_rx_byte_buffer, 8));

	m_rx_byte_buffer = (!m_rx_byte_buffer ? 0x8000 : (m_rx_byte_buffer >> 1)) | (data ? 0x0080U : 0x0000U);
	if (BIT(m_rx_byte_buffer, 8))
	{
		LOGBYTEPROTO("received final bit of byte 0x%02X\n", u8(m_rx_byte_buffer));
		byte_received(u8(m_rx_byte_buffer));
	}
	else
	{
		LOGBYTEPROTO("bit received, accepting\n");
		accept_bit();
	}
}
#include "bitsocket.h"
#include "graphlinkhle.h"
#include "teeconn.h"
#include "tispeaker.h"
// Standard set of options for a TI-8x link port slot.
void default_ti8x_link_devices(device_slot_interface &device)
{
	device.option_add("bitsock", TI8X_BIT_SOCKET);
	device.option_add("glinkhle", TI8X_GRAPH_LINK_HLE);
	device.option_add("tee", TI8X_TEE_CONNECTOR);
	device.option_add("monospkr", TI8X_SPEAKER_MONO);
	device.option_add("stereospkr", TI8X_SPEAKER_STEREO);
}
| gpl-2.0 |
golismero/golismero | tools/sqlmap/tamper/apostrophenullencode.py | 503 | #!/usr/bin/env python
"""
Copyright (c) 2006-2013 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
from lib.core.enums import PRIORITY
__priority__ = PRIORITY.LOWEST
def dependencies():
    """Declare tamper-script prerequisites (none for this script)."""
    pass
def tamper(payload, **kwargs):
    """
    Replaces apostrophe character with its illegal double unicode counterpart

    >>> tamper("1 AND '1'='1")
    '1 AND %00%271%00%27=%00%271'
    """

    # Empty/None payloads pass through untouched.
    if not payload:
        return payload

    # Rebuild the payload with every apostrophe replaced by "%00%27".
    return "%00%27".join(payload.split("'"))
| gpl-2.0 |
siteslab/profile | sites/all/libraries/tcpdf/examples/barcodes/example_2d_pdf417_png.php | 1979 | <?php
//============================================================+
// File name : example_2d_png.php
// Version : 1.0.000
// Begin : 2011-07-21
// Last Update : 2013-03-17
// Author : Nicola Asuni - Tecnick.com LTD - www.tecnick.com - [email protected]
// License : GNU-LGPL v3 (http://www.gnu.org/copyleft/lesser.html)
// -------------------------------------------------------------------
// Copyright (C) 2009-2013 Nicola Asuni - Tecnick.com LTD
//
// This file is part of TCPDF software library.
//
// TCPDF is free software: you can redistribute it and/or modify it
// under the terms of the GNU Lesser General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// TCPDF is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
// See the GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with TCPDF. If not, see <http://www.gnu.org/licenses/>.
//
// See LICENSE.TXT file for more information.
// -------------------------------------------------------------------
//
// Description : Example for tcpdf_barcodes_2d.php class
//
//============================================================+
/**
* @file
* Example for tcpdf_barcodes_2d.php class
* @package com.tecnick.tcpdf
* @author Nicola Asuni
* @version 1.0.009
*/
// include 2D barcode class
require_once(dirname(__FILE__).'/../../tcpdf_barcodes_2d.php');

// set the barcode content (a URL) and type (PDF417 stacked 2D symbology)
$barcodeobj = new TCPDF2DBarcode('http://www.tcpdf.org', 'PDF417');

// output the barcode as PNG image (module width 4px, height 4px, black)
$barcodeobj->getBarcodePNG(4, 4, array(0,0,0));
//============================================================+
// END OF FILE
//============================================================+
| gpl-2.0 |
shyamalschandra/rtt | rtt/ServiceRequester.hpp | 7747 | /***************************************************************************
tag: The SourceWorks Tue Sep 7 00:55:18 CEST 2010 ServiceRequester.hpp
ServiceRequester.hpp - description
-------------------
begin : Tue September 07 2010
copyright : (C) 2010 The SourceWorks
email : [email protected]
***************************************************************************
* This library is free software; you can redistribute it and/or *
* modify it under the terms of the GNU General Public *
* License as published by the Free Software Foundation; *
* version 2 of the License. *
* *
* As a special exception, you may use this file as part of a free *
* software library without restriction. Specifically, if other files *
* instantiate templates or use macros or inline functions from this *
* file, or you compile this file and link it with other files to *
* produce an executable, this file does not by itself cause the *
* resulting executable to be covered by the GNU General Public *
* License. This exception does not however invalidate any other *
* reasons why the executable file might be covered by the GNU General *
* Public License. *
* *
* This library is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU *
* Lesser General Public License for more details. *
* *
* You should have received a copy of the GNU General Public *
* License along with this library; if not, write to the Free Software *
* Foundation, Inc., 59 Temple Place, *
* Suite 330, Boston, MA 02111-1307 USA *
* *
***************************************************************************/
#ifndef ORO_SERVICEREQUESTER_HPP_
#define ORO_SERVICEREQUESTER_HPP_
#include "rtt-config.h"
#include "rtt-fwd.hpp"
#include "base/OperationCallerBaseInvoker.hpp"
#include "Service.hpp"
#include <map>
#include <vector>
#include <string>
#include <boost/enable_shared_from_this.hpp>
#if BOOST_VERSION >= 104000 && BOOST_VERSION < 105300
#include <boost/smart_ptr/enable_shared_from_this2.hpp>
#endif
#if BOOST_VERSION >= 105300
#include <boost/smart_ptr/enable_shared_from_raw.hpp>
#endif
namespace RTT
{
/**
* An object that expresses you wish to use a service.
* The ServiceRequester is symmetrical to the Service.
* Where a Service registers operations that a component can
* execute ('provides'), the ServiceRequester registers the methods that a caller
* wishes to call ('requires'). One method in a ServiceRequester maps
* to one operation in a Service.
*
* Typical use is to inherit from ServiceRequester and add named OperationCaller objects
* to it using addOperationCaller. @see RTT::Scripting for an example.
* @ingroup Services
*/
    class RTT_API ServiceRequester :
#if BOOST_VERSION >= 104000
#if BOOST_VERSION < 105300
        public boost::enable_shared_from_this2<ServiceRequester>
#else
        public boost::enable_shared_from_raw
#endif
#else
        public boost::enable_shared_from_this<ServiceRequester>
#endif
    {
    public:
        typedef std::vector<std::string> RequesterNames;
        typedef std::vector<std::string> OperationCallerNames;
        typedef boost::shared_ptr<ServiceRequester> shared_ptr;
        typedef boost::shared_ptr<const ServiceRequester> shared_constptr;

#if BOOST_VERSION >= 105300
        // Boost >= 1.53 replaced enable_shared_from_this2 with
        // enable_shared_from_raw, so provide shared_from_this() manually.
        ServiceRequester::shared_ptr shared_from_this() { return boost::shared_from_raw(this); }
        ServiceRequester::shared_constptr shared_from_this() const { return boost::shared_from_raw(this); }
#endif

        ServiceRequester(const std::string& name, TaskContext* owner = 0);
        virtual ~ServiceRequester();

        /**
         * Returns the name of this service requester.
         */
        const std::string& getRequestName() const { return mrname; }

        /**
         * Returns the names of all sub-service requesters registered here.
         */
        RequesterNames getRequesterNames() const;

        /**
         * The owner is the top-level TaskContext owning this service
         * (indirectly).
         */
        TaskContext* getServiceOwner() const { return mrowner; }

        /**
         * Sets the owning TaskContext that is considered as the
         * caller of requested operations.
         */
        void setOwner(TaskContext* new_owner);

        /**
         * Returns the service we're referencing.
         * In case you used connectTo to more than one service,
         * this returns the service which was used when connectTo
         * first returned true.
         */
        Service::shared_ptr getReferencedService() { return mprovider; }

        /**
         * Registers a method (OperationCaller) this requester wishes to call.
         */
        bool addOperationCaller( base::OperationCallerBaseInvoker& mbi);

        /**
         * Returns the names of all registered methods.
         */
        OperationCallerNames getOperationCallerNames() const;

        /**
         * Looks up a registered method by name; returns null when absent.
         */
        base::OperationCallerBaseInvoker* getOperationCaller(const std::string& name);

        /**
         * Returns this service requester (for fluent navigation).
         */
        ServiceRequester::shared_ptr requires();

        /**
         * Returns the named sub-service requester.
         */
        ServiceRequester::shared_ptr requires(const std::string& service_name);

        /**
         * Add a new ServiceRequester to this TaskContext.
         *
         * @param obj This object becomes owned by this TaskContext.
         *
         * @return true if it could be added, false if such
         * service requester already exists.
         */
        bool addServiceRequester(shared_ptr obj);

        /**
         * Query if this service requires certain sub-services.
         * @param service_name
         * @return
         */
        bool requiresService(const std::string& service_name) {
            return mrequests.find(service_name) != mrequests.end();
        }

        /**
         * Connects this service's methods to the operations provided by op.
         * This method tries to match as many as possible method-operation pairs.
         *
         * You may call this function with different instances of sp to 'resolve'
         * missing functions, only the non-connected methods will be further filled in.
         * @param sp An interface-compatible Service.
         *
         * @return true if all methods of that are required are provided, false
         * if not all methods could yet be matched.
         */
        virtual bool connectTo(Service::shared_ptr sp);

        /**
         * Returns true when all methods were resolved.
         * @return
         */
        virtual bool ready() const;

        /**
         * Disconnects all methods from their implementation.
         */
        virtual void disconnect();

        /**
         * Remove all operation callers from this service requester.
         */
        virtual void clear();

    protected:
        typedef std::map< std::string, ServiceRequester::shared_ptr > Requests;
        /// the services we implement.
        Requests mrequests;

        /// Our methods
        typedef std::map<std::string, base::OperationCallerBaseInvoker*> OperationCallers;
        OperationCallers mmethods;

        // Name of this requester.
        std::string mrname;
        // TaskContext considered the caller of requested operations.
        TaskContext* mrowner;
        // Service resolved by the first successful connectTo().
        Service::shared_ptr mprovider;
    };
}
#endif /* ORO_SERVICEREQUESTER_HPP_ */
| gpl-2.0 |
tkaniowski/bj25 | components/com_users/controllers/registration.php | 6815 | <?php
/**
* @package Joomla.Site
* @subpackage com_users
*
* @copyright Copyright (C) 2005 - 2018 Open Source Matters, Inc. All rights reserved.
* @license GNU General Public License version 2 or later; see LICENSE.txt
*/
defined('_JEXEC') or die;
JLoader::register('UsersController', JPATH_COMPONENT . '/controller.php');
/**
* Registration controller class for Users.
*
* @since 1.6
*/
class UsersControllerRegistration extends UsersController
{
	/**
	 * Method to activate a user.
	 *
	 * Expects an alphanumeric 32-character activation token in the request.
	 * Depending on the 'useractivation' component parameter (0 = none,
	 * 1 = self-activation, 2 = admin activation) the user is redirected to
	 * the appropriate screen after activation.
	 *
	 * @return  boolean  True on success, false on failure.
	 *
	 * @since   1.6
	 */
	public function activate()
	{
		$user = JFactory::getUser();
		$input = JFactory::getApplication()->input;
		$uParams = JComponentHelper::getParams('com_users');

		// Check for admin activation. Don't allow non-super-admin to delete a super admin
		if ($uParams->get('useractivation') != 2 && $user->get('id'))
		{
			$this->setRedirect('index.php');

			return true;
		}

		// If user registration or account activation is disabled, throw a 403.
		if ($uParams->get('useractivation') == 0 || $uParams->get('allowUserRegistration') == 0)
		{
			JError::raiseError(403, JText::_('JLIB_APPLICATION_ERROR_ACCESS_FORBIDDEN'));

			return false;
		}

		$model = $this->getModel('Registration', 'UsersModel');
		$token = $input->getAlnum('token');

		// Check that the token is in a valid format (exactly 32 alphanumeric chars).
		if ($token === null || strlen($token) !== 32)
		{
			JError::raiseError(403, JText::_('JINVALID_TOKEN'));

			return false;
		}

		// Get the User ID
		$userIdToActivate = $model->getUserIdFromToken($token);

		if (!$userIdToActivate)
		{
			JError::raiseError(403, JText::_('COM_USERS_ACTIVATION_TOKEN_NOT_FOUND'));

			return false;
		}

		// Get the user we want to activate
		$userToActivate = JFactory::getUser($userIdToActivate);

		// Admin activation is on and admin is activating the account
		if (($uParams->get('useractivation') == 2) && $userToActivate->getParam('activate', 0))
		{
			// If a user admin is not logged in, redirect them to the login page with a error message
			if (!$user->authorise('core.create', 'com_users'))
			{
				// Preserve the activation URL so the admin lands back here after login.
				$activationUrl = 'index.php?option=com_users&task=registration.activate&token=' . $token;
				$loginUrl = 'index.php?option=com_users&view=login&return=' . base64_encode($activationUrl);

				// In case we still run into this in the second step the user does not have the right permissions
				$message = JText::_('COM_USERS_REGISTRATION_ACL_ADMIN_ACTIVATION_PERMISSIONS');

				// When we are not logged in we should login
				if ($user->guest)
				{
					$message = JText::_('COM_USERS_REGISTRATION_ACL_ADMIN_ACTIVATION');
				}

				$this->setMessage($message);
				$this->setRedirect(JRoute::_($loginUrl, false));

				return false;
			}
		}

		// Attempt to activate the user.
		$return = $model->activate($token);

		// Check for errors.
		if ($return === false)
		{
			// Redirect back to the home page.
			$this->setMessage(JText::sprintf('COM_USERS_REGISTRATION_SAVE_FAILED', $model->getError()), 'error');
			$this->setRedirect('index.php');

			return false;
		}

		$useractivation = $uParams->get('useractivation');

		// Redirect to the login screen.
		if ($useractivation == 0)
		{
			$this->setMessage(JText::_('COM_USERS_REGISTRATION_SAVE_SUCCESS'));
			$this->setRedirect(JRoute::_('index.php?option=com_users&view=login', false));
		}
		elseif ($useractivation == 1)
		{
			$this->setMessage(JText::_('COM_USERS_REGISTRATION_ACTIVATE_SUCCESS'));
			$this->setRedirect(JRoute::_('index.php?option=com_users&view=login', false));
		}
		elseif ($return->getParam('activate'))
		{
			// Admin activation still pending for this account.
			$this->setMessage(JText::_('COM_USERS_REGISTRATION_VERIFY_SUCCESS'));
			$this->setRedirect(JRoute::_('index.php?option=com_users&view=registration&layout=complete', false));
		}
		else
		{
			$this->setMessage(JText::_('COM_USERS_REGISTRATION_ADMINACTIVATE_SUCCESS'));
			$this->setRedirect(JRoute::_('index.php?option=com_users&view=registration&layout=complete', false));
		}

		return true;
	}

	/**
	 * Method to register a user.
	 *
	 * Validates the posted 'jform' data, saves it through the registration
	 * model and redirects according to the model's result ('useractivate',
	 * 'adminactivate', or plain success).
	 *
	 * @return  boolean  True on success, false on failure.
	 *
	 * @since   1.6
	 */
	public function register()
	{
		// Check for request forgeries.
		$this->checkToken();

		// If registration is disabled - Redirect to login page.
		if (JComponentHelper::getParams('com_users')->get('allowUserRegistration') == 0)
		{
			$this->setRedirect(JRoute::_('index.php?option=com_users&view=login', false));

			return false;
		}

		$app = JFactory::getApplication();
		$model = $this->getModel('Registration', 'UsersModel');

		// Get the user data.
		$requestData = $this->input->post->get('jform', array(), 'array');

		// Validate the posted data.
		$form = $model->getForm();

		if (!$form)
		{
			JError::raiseError(500, $model->getError());

			return false;
		}

		$data = $model->validate($form, $requestData);

		// Check for validation errors.
		if ($data === false)
		{
			// Get the validation messages.
			$errors = $model->getErrors();

			// Push up to three validation messages out to the user.
			for ($i = 0, $n = count($errors); $i < $n && $i < 3; $i++)
			{
				if ($errors[$i] instanceof Exception)
				{
					$app->enqueueMessage($errors[$i]->getMessage(), 'error');
				}
				else
				{
					$app->enqueueMessage($errors[$i], 'error');
				}
			}

			// Save the data in the session.
			$app->setUserState('com_users.registration.data', $requestData);

			// Redirect back to the registration screen.
			$this->setRedirect(JRoute::_('index.php?option=com_users&view=registration', false));

			return false;
		}

		// Attempt to save the data.
		$return = $model->register($data);

		// Check for errors.
		if ($return === false)
		{
			// Save the data in the session.
			$app->setUserState('com_users.registration.data', $data);

			// Redirect back to the edit screen.
			$this->setMessage($model->getError(), 'error');
			$this->setRedirect(JRoute::_('index.php?option=com_users&view=registration', false));

			return false;
		}

		// Flush the data from the session.
		$app->setUserState('com_users.registration.data', null);

		// Redirect to the profile screen. $return is an activation-mode
		// string on success (see the comparisons below).
		if ($return === 'adminactivate')
		{
			$this->setMessage(JText::_('COM_USERS_REGISTRATION_COMPLETE_VERIFY'));
			$this->setRedirect(JRoute::_('index.php?option=com_users&view=registration&layout=complete', false));
		}
		elseif ($return === 'useractivate')
		{
			$this->setMessage(JText::_('COM_USERS_REGISTRATION_COMPLETE_ACTIVATE'));
			$this->setRedirect(JRoute::_('index.php?option=com_users&view=registration&layout=complete', false));
		}
		else
		{
			$this->setMessage(JText::_('COM_USERS_REGISTRATION_SAVE_SUCCESS'));
			$this->setRedirect(JRoute::_('index.php?option=com_users&view=login', false));
		}

		return true;
	}
}
| gpl-2.0 |
qgis/QGIS-Django | qgis-app/plugins/tests/HelloWorld/2.3-full-changed-repository/HelloWorld/__init__.py | 183 | # -*- coding: utf-8 -*-
"""
This script initializes the plugin, making it known to QGIS.
"""
def classFactory(iface):
    """Entry point called by QGIS to instantiate the plugin.

    :param iface: QgisInterface instance supplied by QGIS at load time.
    :returns: the HelloWorld plugin object bound to ``iface``.
    """
    # Import lazily so QGIS only loads the plugin module when activated.
    from HelloWorld import HelloWorld
    return HelloWorld(iface)
| gpl-2.0 |
wjflyhigh/glibc-2.21 | sysdeps/i386/dl-machine.h | 23316 | /* Machine-dependent ELF dynamic relocation inline functions. i386 version.
Copyright (C) 1995-2015 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
#ifndef dl_machine_h
#define dl_machine_h
#define ELF_MACHINE_NAME "i386"
#include <sys/param.h>
#include <sysdep.h>
#include <tls.h>
#include <dl-tlsdesc.h>
/* Return nonzero iff ELF header is compatible with the running host. */
static inline int __attribute__ ((unused))
elf_machine_matches_host (const Elf32_Ehdr *ehdr)
{
  /* Only objects built for IA-32 (EM_386) can be loaded here.  */
  if (ehdr->e_machine != EM_386)
    return 0;
  return 1;
}
/* Return the link-time address of _DYNAMIC. Conveniently, this is the
first element of the GOT, a special entry that is never relocated. */
static inline Elf32_Addr __attribute__ ((unused, const))
elf_machine_dynamic (void)
{
  /* This produces a GOTOFF reloc that resolves to zero at link time, so in
     fact just loads from the GOT register directly.  By doing it without
     an asm we can let the compiler choose any register.  GOT entry 0 is
     the special never-relocated slot holding the link-time address of
     _DYNAMIC (see the comment above this function).  */
  extern const Elf32_Addr _GLOBAL_OFFSET_TABLE_[] attribute_hidden;
  return _GLOBAL_OFFSET_TABLE_[0];
}
/* Return the run-time load address of the shared object. */
static inline Elf32_Addr __attribute__ ((unused))
elf_machine_load_address (void)
{
  /* Compute the difference between the runtime address of _DYNAMIC as seen
     by a GOTOFF reference, and the link-time address found in the special
     unrelocated first GOT entry.  That difference is exactly the load
     bias of this object.  */
  extern Elf32_Dyn bygotoff[] asm ("_DYNAMIC") attribute_hidden;
  return (Elf32_Addr) &bygotoff - elf_machine_dynamic ();
}
/* Set up the loaded object described by L so its unrelocated PLT
entries will jump to the on-demand fixup code in dl-runtime.c. */
/* Prime L's PLT GOT slots for lazy binding (or profiling) and return
   the possibly-adjusted LAZY flag unchanged.  */
static inline int __attribute__ ((unused, always_inline))
elf_machine_runtime_setup (struct link_map *l, int lazy, int profile)
{
  Elf32_Addr *got;
  extern void _dl_runtime_resolve (Elf32_Word) attribute_hidden;
  extern void _dl_runtime_profile (Elf32_Word) attribute_hidden;

  if (l->l_info[DT_JMPREL] && lazy)
    {
      /* The GOT entries for functions in the PLT have not yet been filled
	 in.  Their initial contents will arrange when called to push an
	 offset into the .rel.plt section, push _GLOBAL_OFFSET_TABLE_[1],
	 and then jump to _GLOBAL_OFFSET_TABLE[2].  */
      got = (Elf32_Addr *) D_PTR (l, l_info[DT_PLTGOT]);
      /* If a library is prelinked but we have to relocate anyway,
	 we have to be able to undo the prelinking of .got.plt.
	 The prelinker saved us here address of .plt + 0x16.  */
      if (got[1])
	{
	  l->l_mach.plt = got[1] + l->l_addr;
	  l->l_mach.gotplt = (Elf32_Addr) &got[3];
	}
      got[1] = (Elf32_Addr) l;	/* Identify this shared object.  */

      /* The got[2] entry contains the address of a function which gets
	 called to get the address of a so far unresolved function and
	 jump to it.  The profiling extension of the dynamic linker allows
	 to intercept the calls to collect information.  In this case we
	 don't store the address in the GOT so that all future calls also
	 end in this function.  */
      if (__glibc_unlikely (profile))
	{
	  got[2] = (Elf32_Addr) &_dl_runtime_profile;

	  if (GLRO(dl_profile) != NULL
	      && _dl_name_match_p (GLRO(dl_profile), l))
	    /* This is the object we are looking for.  Say that we really
	       want profiling and the timers are started.  */
	    GL(dl_profile_map) = l;
	}
      else
	/* This function will get called to fix up the GOT entry indicated by
	   the offset on the stack, and then jump to the resolved address.  */
	got[2] = (Elf32_Addr) &_dl_runtime_resolve;
    }

  return lazy;
}
#ifdef IN_DL_RUNTIME
# ifndef PROF
/* We add a declaration of this function here so that in dl-runtime.c
the ELF_MACHINE_RUNTIME_TRAMPOLINE macro really can pass the parameters
in registers.
We cannot use this scheme for profiling because the _mcount call
destroys the passed register information. */
#define ARCH_FIXUP_ATTRIBUTE __attribute__ ((regparm (3), stdcall, unused))
extern ElfW(Addr) _dl_fixup (struct link_map *l,
ElfW(Word) reloc_offset)
ARCH_FIXUP_ATTRIBUTE;
extern ElfW(Addr) _dl_profile_fixup (struct link_map *l,
ElfW(Word) reloc_offset,
ElfW(Addr) retaddr, void *regs,
long int *framesizep)
ARCH_FIXUP_ATTRIBUTE;
# endif
#endif
/* Mask identifying addresses reserved for the user program,
where the dynamic linker should not map anything. */
#define ELF_MACHINE_USER_ADDRESS_MASK 0xf8000000UL
/* Initial entry point code for the dynamic linker.
The C function `_dl_start' is the real entry point;
its return value is the user program's entry point. */
#define RTLD_START asm ("\n\
.text\n\
.align 16\n\
0: movl (%esp), %ebx\n\
ret\n\
.align 16\n\
.globl _start\n\
.globl _dl_start_user\n\
_start:\n\
# Note that _dl_start gets the parameter in %eax.\n\
movl %esp, %eax\n\
call _dl_start\n\
_dl_start_user:\n\
# Save the user entry point address in %edi.\n\
movl %eax, %edi\n\
# Point %ebx at the GOT.\n\
call 0b\n\
addl $_GLOBAL_OFFSET_TABLE_, %ebx\n\
# See if we were run as a command with the executable file\n\
# name as an extra leading argument.\n\
movl _dl_skip_args@GOTOFF(%ebx), %eax\n\
# Pop the original argument count.\n\
popl %edx\n\
# Adjust the stack pointer to skip _dl_skip_args words.\n\
leal (%esp,%eax,4), %esp\n\
# Subtract _dl_skip_args from argc.\n\
subl %eax, %edx\n\
# Push argc back on the stack.\n\
push %edx\n\
# The special initializer gets called with the stack just\n\
# as the application's entry point will see it; it can\n\
# switch stacks if it moves these contents over.\n\
" RTLD_START_SPECIAL_INIT "\n\
# Load the parameters again.\n\
# (eax, edx, ecx, *--esp) = (_dl_loaded, argc, argv, envp)\n\
movl _rtld_local@GOTOFF(%ebx), %eax\n\
leal 8(%esp,%edx,4), %esi\n\
leal 4(%esp), %ecx\n\
movl %esp, %ebp\n\
# Make sure _dl_init is run with 16 byte aligned stack.\n\
andl $-16, %esp\n\
pushl %eax\n\
pushl %eax\n\
pushl %ebp\n\
pushl %esi\n\
# Clear %ebp, so that even constructors have terminated backchain.\n\
xorl %ebp, %ebp\n\
# Call the function to run the initializers.\n\
call _dl_init\n\
# Pass our finalizer function to the user in %edx, as per ELF ABI.\n\
leal _dl_fini@GOTOFF(%ebx), %edx\n\
# Restore %esp _start expects.\n\
movl (%esp), %esp\n\
# Jump to the user's entry point.\n\
jmp *%edi\n\
.previous\n\
");
#ifndef RTLD_START_SPECIAL_INIT
# define RTLD_START_SPECIAL_INIT /* nothing */
#endif
/* ELF_RTYPE_CLASS_PLT iff TYPE describes relocation of a PLT entry or
TLS variable, so undefined references should not be allowed to
define the value.
ELF_RTYPE_CLASS_NOCOPY iff TYPE should not be allowed to resolve to one
of the main executable's symbols, as for a COPY reloc. */
# define elf_machine_type_class(type) \
((((type) == R_386_JMP_SLOT || (type) == R_386_TLS_DTPMOD32 \
|| (type) == R_386_TLS_DTPOFF32 || (type) == R_386_TLS_TPOFF32 \
|| (type) == R_386_TLS_TPOFF || (type) == R_386_TLS_DESC) \
* ELF_RTYPE_CLASS_PLT) \
| (((type) == R_386_COPY) * ELF_RTYPE_CLASS_COPY))
/* A reloc type used for ld.so cmdline arg lookups to reject PLT entries. */
#define ELF_MACHINE_JMP_SLOT R_386_JMP_SLOT
/* The i386 never uses Elf32_Rela relocations for the dynamic linker.
Prelinked libraries may use Elf32_Rela though. */
#define ELF_MACHINE_PLT_REL 1
/* We define an initialization functions. This is called very early in
_dl_sysdep_start. */
#define DL_PLATFORM_INIT dl_platform_init ()
static inline void __attribute__ ((unused))
dl_platform_init (void)
{
if (GLRO(dl_platform) != NULL && *GLRO(dl_platform) == '\0')
/* Avoid an empty string which would disturb us. */
GLRO(dl_platform) = NULL;
}
/* Store the freshly resolved VALUE into the GOT slot belonging to a
   PLT relocation and hand the value back to the caller.  */
static inline Elf32_Addr
elf_machine_fixup_plt (struct link_map *map, lookup_t t,
		       const Elf32_Rel *reloc,
		       Elf32_Addr *reloc_addr, Elf32_Addr value)
{
  *reloc_addr = value;
  return value;
}
/* Return the final value of a plt relocation.  On i386 the relocation
   is REL-style (no addend), so the looked-up VALUE already is the
   final PLT target and is returned unmodified.  */
static inline Elf32_Addr
elf_machine_plt_value (struct link_map *map, const Elf32_Rel *reloc,
		       Elf32_Addr value)
{
  return value;
}
/* Names of the architecture-specific auditing callback functions. */
#define ARCH_LA_PLTENTER i86_gnu_pltenter
#define ARCH_LA_PLTEXIT i86_gnu_pltexit
#endif /* !dl_machine_h */
/* The i386 never uses Elf32_Rela relocations for the dynamic linker.
Prelinked libraries may use Elf32_Rela though. */
#define ELF_MACHINE_NO_RELA defined RTLD_BOOTSTRAP
#define ELF_MACHINE_NO_REL 0
#ifdef RESOLVE_MAP
/* Perform the relocation specified by RELOC and SYM (which is fully resolved).
   MAP is the object containing the reloc.  */

/* RELOC_ADDR_ARG points at the (already load-biased) location to patch.
   SKIP_IFUNC suppresses running STT_GNU_IFUNC resolver functions (used
   e.g. when merely tracing).  Note the heavy conditional compilation:
   RTLD_BOOTSTRAP selects the minimal variant used while relocating the
   dynamic linker itself, before anything else is usable.  */
auto inline void
__attribute ((always_inline))
elf_machine_rel (struct link_map *map, const Elf32_Rel *reloc,
		 const Elf32_Sym *sym, const struct r_found_version *version,
		 void *const reloc_addr_arg, int skip_ifunc)
{
  Elf32_Addr *const reloc_addr = reloc_addr_arg;
  const unsigned int r_type = ELF32_R_TYPE (reloc->r_info);

# if !defined RTLD_BOOTSTRAP || !defined HAVE_Z_COMBRELOC
  if (__glibc_unlikely (r_type == R_386_RELATIVE))
    {
# if !defined RTLD_BOOTSTRAP && !defined HAVE_Z_COMBRELOC
      /* This is defined in rtld.c, but nowhere in the static libc.a;
	 make the reference weak so static programs can still link.
	 This declaration cannot be done when compiling rtld.c
	 (i.e. #ifdef RTLD_BOOTSTRAP) because rtld.c contains the
	 common defn for _dl_rtld_map, which is incompatible with a
	 weak decl in the same file.  */
#  ifndef SHARED
      weak_extern (_dl_rtld_map);
#  endif
      if (map != &GL(dl_rtld_map)) /* Already done in rtld itself.  */
# endif
	*reloc_addr += map->l_addr;
    }
# ifndef RTLD_BOOTSTRAP
  else if (__glibc_unlikely (r_type == R_386_NONE))
    return;
# endif
  else
# endif	/* !RTLD_BOOTSTRAP and have no -z combreloc */
    {
# ifndef RTLD_BOOTSTRAP
      /* Keep the referencing symbol around; R_386_COPY below needs it
	 to compare sizes after SYM has been re-resolved.  */
      const Elf32_Sym *const refsym = sym;
# endif
      struct link_map *sym_map = RESOLVE_MAP (&sym, version, r_type);
      Elf32_Addr value = sym_map == NULL ? 0 : sym_map->l_addr + sym->st_value;

      /* For an IFUNC symbol the "value" is a resolver function; call it
	 to obtain the real target address (unless told not to).  */
      if (sym != NULL
	  && __builtin_expect (ELFW(ST_TYPE) (sym->st_info) == STT_GNU_IFUNC,
			       0)
	  && __builtin_expect (sym->st_shndx != SHN_UNDEF, 1)
	  && __builtin_expect (!skip_ifunc, 1))
	value = ((Elf32_Addr (*) (void)) value) ();

      switch (r_type)
	{
# ifndef RTLD_BOOTSTRAP
	case R_386_SIZE32:
	  /* Set to symbol size plus addend.  */
	  *reloc_addr += sym->st_size;
	  break;
# endif
	case R_386_GLOB_DAT:
	case R_386_JMP_SLOT:
	  *reloc_addr = value;
	  break;

	case R_386_TLS_DTPMOD32:
# ifdef RTLD_BOOTSTRAP
	  /* During startup the dynamic linker is always the module
	     with index 1.
	     XXX If this relocation is necessary move before RESOLVE
	     call.  */
	  *reloc_addr = 1;
# else
	  /* Get the information from the link map returned by the
	     resolv function.  */
	  if (sym_map != NULL)
	    *reloc_addr = sym_map->l_tls_modid;
# endif
	  break;
	case R_386_TLS_DTPOFF32:
# ifndef RTLD_BOOTSTRAP
	  /* During relocation all TLS symbols are defined and used.
	     Therefore the offset is already correct.  */
	  if (sym != NULL)
	    *reloc_addr = sym->st_value;
# endif
	  break;
	case R_386_TLS_DESC:
	  {
	    struct tlsdesc volatile *td =
	      (struct tlsdesc volatile *)reloc_addr;

# ifndef RTLD_BOOTSTRAP
	    if (! sym)
	      td->entry = _dl_tlsdesc_undefweak;
	    else
# endif
	      {
# ifndef RTLD_BOOTSTRAP
#  ifndef SHARED
		CHECK_STATIC_TLS (map, sym_map);
#  else
		if (!TRY_STATIC_TLS (map, sym_map))
		  {
		    td->arg = _dl_make_tlsdesc_dynamic
		      (sym_map, sym->st_value + (ElfW(Word))td->arg);
		    td->entry = _dl_tlsdesc_dynamic;
		  }
		else
#  endif
# endif
		  {
		    td->arg = (void*)(sym->st_value - sym_map->l_tls_offset
				      + (ElfW(Word))td->arg);
		    td->entry = _dl_tlsdesc_return;
		  }
	      }
	    break;
	  }
	case R_386_TLS_TPOFF32:
	  /* The offset is positive, backward from the thread pointer.  */
# ifdef RTLD_BOOTSTRAP
	  *reloc_addr += map->l_tls_offset - sym->st_value;
# else
	  /* We know the offset of object the symbol is contained in.
	     It is a positive value which will be subtracted from the
	     thread pointer.  To get the variable position in the TLS
	     block we subtract the offset from that of the TLS block.  */
	  if (sym != NULL)
	    {
	      CHECK_STATIC_TLS (map, sym_map);
	      *reloc_addr += sym_map->l_tls_offset - sym->st_value;
	    }
# endif
	  break;
	case R_386_TLS_TPOFF:
	  /* The offset is negative, forward from the thread pointer.  */
# ifdef RTLD_BOOTSTRAP
	  *reloc_addr += sym->st_value - map->l_tls_offset;
# else
	  /* We know the offset of object the symbol is contained in.
	     It is a negative value which will be added to the
	     thread pointer.  */
	  if (sym != NULL)
	    {
	      CHECK_STATIC_TLS (map, sym_map);
	      *reloc_addr += sym->st_value - sym_map->l_tls_offset;
	    }
# endif
	  break;

# ifndef RTLD_BOOTSTRAP
	case R_386_32:
	  *reloc_addr += value;
	  break;
	case R_386_PC32:
	  *reloc_addr += (value - (Elf32_Addr) reloc_addr);
	  break;
	case R_386_COPY:
	  if (sym == NULL)
	    /* This can happen in trace mode if an object could not be
	       found.  */
	    break;
	  if (__builtin_expect (sym->st_size > refsym->st_size, 0)
	      || (__builtin_expect (sym->st_size < refsym->st_size, 0)
		  && GLRO(dl_verbose)))
	    {
	      const char *strtab;

	      strtab = (const char *) D_PTR (map, l_info[DT_STRTAB]);
	      _dl_error_printf ("\
%s: Symbol `%s' has different size in shared object, consider re-linking\n",
				RTLD_PROGNAME, strtab + refsym->st_name);
	    }
	  /* Copy at most the smaller of the two sizes so we never read
	     or write past either object.  */
	  memcpy (reloc_addr_arg, (void *) value,
		  MIN (sym->st_size, refsym->st_size));
	  break;
	case R_386_IRELATIVE:
	  value = map->l_addr + *reloc_addr;
	  value = ((Elf32_Addr (*) (void)) value) ();
	  *reloc_addr = value;
	  break;
	default:
	  _dl_reloc_bad_type (map, r_type, 0);
	  break;
# endif	/* !RTLD_BOOTSTRAP */
	}
    }
}
# ifndef RTLD_BOOTSTRAP
/* Perform the RELA-style relocation specified by RELOC and SYM (which
   is fully resolved).  MAP is the object containing the reloc.  On
   i386 only prelinked objects carry Elf32_Rela relocations, and ld.so
   itself never does -- hence the !RTLD_BOOTSTRAP guard.  */
auto inline void
__attribute__ ((always_inline))
elf_machine_rela (struct link_map *map, const Elf32_Rela *reloc,
		  const Elf32_Sym *sym, const struct r_found_version *version,
		  void *const reloc_addr_arg, int skip_ifunc)
{
  Elf32_Addr *const reloc_addr = reloc_addr_arg;
  const unsigned int r_type = ELF32_R_TYPE (reloc->r_info);

  if (ELF32_R_TYPE (reloc->r_info) == R_386_RELATIVE)
    *reloc_addr = map->l_addr + reloc->r_addend;
  else if (r_type != R_386_NONE)
    {
#  ifndef RESOLVE_CONFLICT_FIND_MAP
      /* Kept for the size comparison in R_386_COPY below.  */
      const Elf32_Sym *const refsym = sym;
#  endif
      struct link_map *sym_map = RESOLVE_MAP (&sym, version, r_type);
      Elf32_Addr value = sym == NULL ? 0 : sym_map->l_addr + sym->st_value;

      /* IFUNC symbols resolve through a resolver function; run it to
	 obtain the real target (unless told not to).  */
      if (sym != NULL
	  && __builtin_expect (sym->st_shndx != SHN_UNDEF, 1)
	  && __builtin_expect (ELFW(ST_TYPE) (sym->st_info) == STT_GNU_IFUNC, 0)
	  && __builtin_expect (!skip_ifunc, 1))
	value = ((Elf32_Addr (*) (void)) value) ();

      switch (ELF32_R_TYPE (reloc->r_info))
	{
	case R_386_SIZE32:
	  /* Set to symbol size plus addend.  */
	  value = sym->st_size;
	  /* Fall through -- the store below applies the addend.  */
	case R_386_GLOB_DAT:
	case R_386_JMP_SLOT:
	case R_386_32:
	  *reloc_addr = value + reloc->r_addend;
	  break;
#  ifndef RESOLVE_CONFLICT_FIND_MAP
	  /* Not needed for dl-conflict.c.  */
	case R_386_PC32:
	  *reloc_addr = (value + reloc->r_addend - (Elf32_Addr) reloc_addr);
	  break;
	case R_386_TLS_DTPMOD32:
	  /* Get the information from the link map returned by the
	     resolv function.  */
	  if (sym_map != NULL)
	    *reloc_addr = sym_map->l_tls_modid;
	  break;
	case R_386_TLS_DTPOFF32:
	  /* During relocation all TLS symbols are defined and used.
	     Therefore the offset is already correct.  */
	  *reloc_addr = (sym == NULL ? 0 : sym->st_value) + reloc->r_addend;
	  break;
	case R_386_TLS_DESC:
	  {
	    struct tlsdesc volatile *td =
	      (struct tlsdesc volatile *)reloc_addr;

#  ifndef RTLD_BOOTSTRAP
	    if (!sym)
	      {
		td->arg = (void*)reloc->r_addend;
		td->entry = _dl_tlsdesc_undefweak;
	      }
	    else
#  endif
	      {
#  ifndef RTLD_BOOTSTRAP
#   ifndef SHARED
		CHECK_STATIC_TLS (map, sym_map);
#   else
		if (!TRY_STATIC_TLS (map, sym_map))
		  {
		    td->arg = _dl_make_tlsdesc_dynamic
		      (sym_map, sym->st_value + reloc->r_addend);
		    td->entry = _dl_tlsdesc_dynamic;
		  }
		else
#   endif
#  endif
		  {
		    td->arg = (void*)(sym->st_value - sym_map->l_tls_offset
				      + reloc->r_addend);
		    td->entry = _dl_tlsdesc_return;
		  }
	      }
	  }
	  break;
	case R_386_TLS_TPOFF32:
	  /* The offset is positive, backward from the thread pointer.  */
	  /* We know the offset of object the symbol is contained in.
	     It is a positive value which will be subtracted from the
	     thread pointer.  To get the variable position in the TLS
	     block we subtract the offset from that of the TLS block.  */
	  if (sym != NULL)
	    {
	      CHECK_STATIC_TLS (map, sym_map);
	      *reloc_addr = sym_map->l_tls_offset - sym->st_value
			    + reloc->r_addend;
	    }
	  break;
	case R_386_TLS_TPOFF:
	  /* The offset is negative, forward from the thread pointer.  */
	  /* We know the offset of object the symbol is contained in.
	     It is a negative value which will be added to the
	     thread pointer.  */
	  if (sym != NULL)
	    {
	      CHECK_STATIC_TLS (map, sym_map);
	      *reloc_addr = sym->st_value - sym_map->l_tls_offset
			    + reloc->r_addend;
	    }
	  break;
	case R_386_COPY:
	  if (sym == NULL)
	    /* This can happen in trace mode if an object could not be
	       found.  */
	    break;
	  if (__builtin_expect (sym->st_size > refsym->st_size, 0)
	      || (__builtin_expect (sym->st_size < refsym->st_size, 0)
		  && GLRO(dl_verbose)))
	    {
	      const char *strtab;

	      strtab = (const char *) D_PTR (map, l_info[DT_STRTAB]);
	      _dl_error_printf ("\
%s: Symbol `%s' has different size in shared object, consider re-linking\n",
				RTLD_PROGNAME, strtab + refsym->st_name);
	    }
	  /* Copy only the smaller of the two sizes to stay in bounds.  */
	  memcpy (reloc_addr_arg, (void *) value,
		  MIN (sym->st_size, refsym->st_size));
	  break;
#  endif /* !RESOLVE_CONFLICT_FIND_MAP */
	case R_386_IRELATIVE:
	  value = map->l_addr + reloc->r_addend;
	  value = ((Elf32_Addr (*) (void)) value) ();
	  *reloc_addr = value;
	  break;
	default:
	  /* We add these checks in the version to relocate ld.so only
	     if we are still debugging.  */
	  _dl_reloc_bad_type (map, r_type, 0);
	  break;
	}
    }
}
# endif	/* !RTLD_BOOTSTRAP */
/* Apply a REL-style R_386_RELATIVE relocation: the implicit addend is
   already stored in place, so the fixup is just to bias the stored
   value by the object's load address L_ADDR.  */
auto inline void
__attribute ((always_inline))
elf_machine_rel_relative (Elf32_Addr l_addr, const Elf32_Rel *reloc,
			  void *const reloc_addr_arg)
{
  Elf32_Addr *const where = reloc_addr_arg;

  assert (ELF32_R_TYPE (reloc->r_info) == R_386_RELATIVE);
  *where += l_addr;
}
# ifndef RTLD_BOOTSTRAP
/* Apply a RELA-style R_386_RELATIVE relocation: the explicit addend
   from the reloc plus the load address L_ADDR replaces the target.  */
auto inline void
__attribute__ ((always_inline))
elf_machine_rela_relative (Elf32_Addr l_addr, const Elf32_Rela *reloc,
			   void *const reloc_addr_arg)
{
  Elf32_Addr *const where = reloc_addr_arg;

  *where = l_addr + reloc->r_addend;
}
# endif	/* !RTLD_BOOTSTRAP */
/* Prepare the REL-style lazy relocation RELOC in MAP (loaded at
   L_ADDR) so it can be resolved on first use: JMP_SLOT entries are
   biased for lazy binding, TLS descriptors get a deferred-resolution
   entry point, and IRELATIVE entries are resolved eagerly since their
   resolver lives in the object itself.  */
auto inline void
__attribute__ ((always_inline))
elf_machine_lazy_rel (struct link_map *map,
		      Elf32_Addr l_addr, const Elf32_Rel *reloc,
		      int skip_ifunc)
{
  Elf32_Addr *const reloc_addr = (void *) (l_addr + reloc->r_offset);
  const unsigned int r_type = ELF32_R_TYPE (reloc->r_info);
  /* Check for unexpected PLT reloc type.  */
  if (__glibc_likely (r_type == R_386_JMP_SLOT))
    {
      if (__builtin_expect (map->l_mach.plt, 0) == 0)
	*reloc_addr += l_addr;
      else
	/* Undo prelinking: recompute the slot's target from the saved
	   .plt address (set up in elf_machine_runtime_setup).  */
	*reloc_addr = (map->l_mach.plt
		       + (((Elf32_Addr) reloc_addr) - map->l_mach.gotplt) * 4);
    }
  else if (__glibc_likely (r_type == R_386_TLS_DESC))
    {
      struct tlsdesc volatile * __attribute__((__unused__)) td =
	(struct tlsdesc volatile *)reloc_addr;

      /* Handle relocations that reference the local *ABS* in a simple
	 way, so as to preserve a potential addend.  */
      if (ELF32_R_SYM (reloc->r_info) == 0)
	td->entry = _dl_tlsdesc_resolve_abs_plus_addend;
      /* Given a known-zero addend, we can store a pointer to the
	 reloc in the arg position.  */
      else if (td->arg == 0)
	{
	  td->arg = (void*)reloc;
	  td->entry = _dl_tlsdesc_resolve_rel;
	}
      else
	{
	  /* We could handle non-*ABS* relocations with non-zero addends
	     by allocating dynamically an arg to hold a pointer to the
	     reloc, but that sounds pointless.  */
	  const Elf32_Rel *const r = reloc;
	  /* The code below was borrowed from elf_dynamic_do_rel().  */
	  const ElfW(Sym) *const symtab =
	    (const void *) D_PTR (map, l_info[DT_SYMTAB]);

# ifdef RTLD_BOOTSTRAP
	  /* The dynamic linker always uses versioning.  */
	  assert (map->l_info[VERSYMIDX (DT_VERSYM)] != NULL);
# else
	  if (map->l_info[VERSYMIDX (DT_VERSYM)])
# endif
	    {
	      const ElfW(Half) *const version =
		(const void *) D_PTR (map, l_info[VERSYMIDX (DT_VERSYM)]);
	      ElfW(Half) ndx = version[ELFW(R_SYM) (r->r_info)] & 0x7fff;
	      /* Resolve this descriptor eagerly instead of lazily.  */
	      elf_machine_rel (map, r, &symtab[ELFW(R_SYM) (r->r_info)],
			       &map->l_versions[ndx],
			       (void *) (l_addr + r->r_offset), skip_ifunc);
	    }
# ifndef RTLD_BOOTSTRAP
	  else
	    elf_machine_rel (map, r, &symtab[ELFW(R_SYM) (r->r_info)], NULL,
			     (void *) (l_addr + r->r_offset), skip_ifunc);
# endif
	}
    }
  else if (__glibc_unlikely (r_type == R_386_IRELATIVE))
    {
      Elf32_Addr value = map->l_addr + *reloc_addr;
      if (__glibc_likely (!skip_ifunc))
	value = ((Elf32_Addr (*) (void)) value) ();
      *reloc_addr = value;
    }
  else
    _dl_reloc_bad_type (map, r_type, 1);
}
# ifndef RTLD_BOOTSTRAP
/* Prepare the RELA-style lazy relocation RELOC for later on-demand
   resolution.  JMP_SLOT entries need no eager work -- the PLT/GOT was
   already armed by elf_machine_runtime_setup -- while TLS descriptors
   are pointed at the deferred resolver and IRELATIVE entries are
   resolved immediately.  */
auto inline void
__attribute__ ((always_inline))
elf_machine_lazy_rela (struct link_map *map,
		       Elf32_Addr l_addr, const Elf32_Rela *reloc,
		       int skip_ifunc)
{
  Elf32_Addr *const reloc_addr = (void *) (l_addr + reloc->r_offset);
  const unsigned int r_type = ELF32_R_TYPE (reloc->r_info);
  if (__glibc_likely (r_type == R_386_JMP_SLOT))
    /* Nothing to do; resolution happens on first call.  */
    ;
  else if (__glibc_likely (r_type == R_386_TLS_DESC))
    {
      struct tlsdesc volatile * __attribute__((__unused__)) td =
	(struct tlsdesc volatile *)reloc_addr;

      /* Stash the reloc so the resolver can find the addend later.  */
      td->arg = (void*)reloc;
      td->entry = _dl_tlsdesc_resolve_rela;
    }
  else if (__glibc_unlikely (r_type == R_386_IRELATIVE))
    {
      Elf32_Addr value = map->l_addr + reloc->r_addend;
      if (__glibc_likely (!skip_ifunc))
	value = ((Elf32_Addr (*) (void)) value) ();
      *reloc_addr = value;
    }
  else
    _dl_reloc_bad_type (map, r_type, 1);
}
# endif	/* !RTLD_BOOTSTRAP */
#endif /* RESOLVE_MAP */
| gpl-2.0 |
evaautomation/linux | drivers/net/bonding/bond_main.c | 136682 | /*
* originally based on the dummy device.
*
* Copyright 1999, Thomas Davis, [email protected].
* Licensed under the GPL. Based on dummy.c, and eql.c devices.
*
* bonding.c: an Ethernet Bonding driver
*
* This is useful to talk to a Cisco EtherChannel compatible equipment:
* Cisco 5500
* Sun Trunking (Solaris)
* Alteon AceDirector Trunks
* Linux Bonding
* and probably many L2 switches ...
*
* How it works:
* ifconfig bond0 ipaddress netmask up
* will setup a network device, with an ip address. No mac address
* will be assigned at this time. The hw mac address will come from
* the first slave bonded to the channel. All slaves will then use
* this hw mac address.
*
* ifconfig bond0 down
* will release all slaves, marking them as down.
*
* ifenslave bond0 eth0
* will attach eth0 to bond0 as a slave. eth0 hw mac address will either
* a: be used as initial mac address
* b: if a hw mac address already is there, eth0's hw mac address
* will then be set from bond0.
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <net/ip.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/socket.h>
#include <linux/ctype.h>
#include <linux/inet.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <asm/dma.h>
#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/smp.h>
#include <linux/if_ether.h>
#include <net/arp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/if_bonding.h>
#include <linux/jiffies.h>
#include <linux/preempt.h>
#include <net/route.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/pkt_sched.h>
#include <linux/rculist.h>
#include <net/flow_dissector.h>
#include <net/switchdev.h>
#include <net/bonding.h>
#include <net/bond_3ad.h>
#include <net/bond_alb.h>
#include "bonding_priv.h"
/*---------------------------- Module parameters ----------------------------*/
/* monitor all links that often (in milliseconds). <=0 disables monitoring */
static int max_bonds = BOND_DEFAULT_MAX_BONDS;
static int tx_queues = BOND_DEFAULT_TX_QUEUES;
static int num_peer_notif = 1;
static int miimon;
static int updelay;
static int downdelay;
static int use_carrier = 1;
static char *mode;
static char *primary;
static char *primary_reselect;
static char *lacp_rate;
static int min_links;
static char *ad_select;
static char *xmit_hash_policy;
static int arp_interval;
static char *arp_ip_target[BOND_MAX_ARP_TARGETS];
static char *arp_validate;
static char *arp_all_targets;
static char *fail_over_mac;
static int all_slaves_active;
static struct bond_params bonding_defaults;
static int resend_igmp = BOND_DEFAULT_RESEND_IGMP;
static int packets_per_slave = 1;
static int lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
module_param(max_bonds, int, 0);
MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
module_param(tx_queues, int, 0);
MODULE_PARM_DESC(tx_queues, "Max number of transmit queues (default = 16)");
module_param_named(num_grat_arp, num_peer_notif, int, 0644);
MODULE_PARM_DESC(num_grat_arp, "Number of peer notifications to send on "
"failover event (alias of num_unsol_na)");
module_param_named(num_unsol_na, num_peer_notif, int, 0644);
MODULE_PARM_DESC(num_unsol_na, "Number of peer notifications to send on "
"failover event (alias of num_grat_arp)");
module_param(miimon, int, 0);
MODULE_PARM_DESC(miimon, "Link check interval in milliseconds");
module_param(updelay, int, 0);
MODULE_PARM_DESC(updelay, "Delay before considering link up, in milliseconds");
module_param(downdelay, int, 0);
MODULE_PARM_DESC(downdelay, "Delay before considering link down, "
"in milliseconds");
module_param(use_carrier, int, 0);
MODULE_PARM_DESC(use_carrier, "Use netif_carrier_ok (vs MII ioctls) in miimon; "
"0 for off, 1 for on (default)");
module_param(mode, charp, 0);
MODULE_PARM_DESC(mode, "Mode of operation; 0 for balance-rr, "
"1 for active-backup, 2 for balance-xor, "
"3 for broadcast, 4 for 802.3ad, 5 for balance-tlb, "
"6 for balance-alb");
module_param(primary, charp, 0);
MODULE_PARM_DESC(primary, "Primary network device to use");
module_param(primary_reselect, charp, 0);
MODULE_PARM_DESC(primary_reselect, "Reselect primary slave "
"once it comes up; "
"0 for always (default), "
"1 for only if speed of primary is "
"better, "
"2 for only on active slave "
"failure");
module_param(lacp_rate, charp, 0);
MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner; "
"0 for slow, 1 for fast");
module_param(ad_select, charp, 0);
MODULE_PARM_DESC(ad_select, "802.3ad aggregation selection logic; "
"0 for stable (default), 1 for bandwidth, "
"2 for count");
module_param(min_links, int, 0);
MODULE_PARM_DESC(min_links, "Minimum number of available links before turning on carrier");
module_param(xmit_hash_policy, charp, 0);
MODULE_PARM_DESC(xmit_hash_policy, "balance-xor and 802.3ad hashing method; "
"0 for layer 2 (default), 1 for layer 3+4, "
"2 for layer 2+3, 3 for encap layer 2+3, "
"4 for encap layer 3+4");
module_param(arp_interval, int, 0);
MODULE_PARM_DESC(arp_interval, "arp interval in milliseconds");
module_param_array(arp_ip_target, charp, NULL, 0);
MODULE_PARM_DESC(arp_ip_target, "arp targets in n.n.n.n form");
module_param(arp_validate, charp, 0);
MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes; "
"0 for none (default), 1 for active, "
"2 for backup, 3 for all");
module_param(arp_all_targets, charp, 0);
MODULE_PARM_DESC(arp_all_targets, "fail on any/all arp targets timeout; 0 for any (default), 1 for all");
module_param(fail_over_mac, charp, 0);
MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to "
"the same MAC; 0 for none (default), "
"1 for active, 2 for follow");
module_param(all_slaves_active, int, 0);
MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface "
"by setting active flag for all slaves; "
"0 for never (default), 1 for always.");
module_param(resend_igmp, int, 0);
MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on "
"link failure");
module_param(packets_per_slave, int, 0);
MODULE_PARM_DESC(packets_per_slave, "Packets to send per slave in balance-rr "
"mode; 0 for a random slave, 1 packet per "
"slave (default), >1 packets per slave.");
module_param(lp_interval, uint, 0);
MODULE_PARM_DESC(lp_interval, "The number of seconds between instances where "
"the bonding driver sends learning packets to "
"each slaves peer switch. The default is 1.");
/*----------------------------- Global variables ----------------------------*/
#ifdef CONFIG_NET_POLL_CONTROLLER
atomic_t netpoll_block_tx = ATOMIC_INIT(0);
#endif
unsigned int bond_net_id __read_mostly;
/*-------------------------- Forward declarations ---------------------------*/
static int bond_init(struct net_device *bond_dev);
static void bond_uninit(struct net_device *bond_dev);
static void bond_get_stats(struct net_device *bond_dev,
struct rtnl_link_stats64 *stats);
static void bond_slave_arr_handler(struct work_struct *work);
static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
int mod);
/*---------------------------- General routines -----------------------------*/
/* Return a human-readable description of the bonding MODE, or
 * "unknown" when MODE lies outside the defined range.
 */
const char *bond_mode_name(int mode)
{
	static const char *const mode_names[] = {
		[BOND_MODE_ROUNDROBIN] = "load balancing (round-robin)",
		[BOND_MODE_ACTIVEBACKUP] = "fault-tolerance (active-backup)",
		[BOND_MODE_XOR] = "load balancing (xor)",
		[BOND_MODE_BROADCAST] = "fault-tolerance (broadcast)",
		[BOND_MODE_8023AD] = "IEEE 802.3ad Dynamic link aggregation",
		[BOND_MODE_TLB] = "transmit load balancing",
		[BOND_MODE_ALB] = "adaptive load balancing",
	};

	if (mode >= BOND_MODE_ROUNDROBIN && mode <= BOND_MODE_ALB)
		return mode_names[mode];

	return "unknown";
}
/*---------------------------------- VLAN -----------------------------------*/
/**
 * bond_dev_queue_xmit - Prepare skb for xmit.
 *
 * @bond: bond device that got this skb for tx.
 * @skb: hw accel VLAN tagged skb to transmit
 * @slave_dev: slave that is supposed to xmit this skbuff
 */
void bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
			struct net_device *slave_dev)
{
	skb->dev = slave_dev;

	/* Restore the tx queue chosen for the bond device from the qdisc
	 * control block so the slave transmits on the matching queue.
	 * NOTE(review): presumably stashed there when the bond selected
	 * its queue -- confirm against the driver's select_queue path.
	 */
	BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
		     sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
	skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping;

	/* netpoll traffic (e.g. netconsole) must bypass the regular
	 * qdisc transmit path.
	 */
	if (unlikely(netpoll_tx_running(bond->dev)))
		bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
	else
		dev_queue_xmit(skb);
}
/* In the following 2 functions, bond_vlan_rx_add_vid and bond_vlan_rx_kill_vid,
* We don't protect the slave list iteration with a lock because:
* a. This operation is performed in IOCTL context,
* b. The operation is protected by the RTNL semaphore in the 8021q code,
* c. Holding a lock with BH disabled while directly calling a base driver
* entry point is generally a BAD idea.
*
* The design of synchronization/protection for this operation in the 8021q
* module is good for one or more VLAN devices over a single physical device
* and cannot be extended for a teaming solution like bonding, so there is a
* potential race condition here where a net device from the vlan group might
* be referenced (either by a base driver or the 8021q code) while it is being
* removed from the system. However, it turns out we're not making matters
* worse, and if it works for regular VLAN usage it will work here too.
*/
/**
 * bond_vlan_rx_add_vid - propagate a newly added VLAN id to all slaves
 * @bond_dev: bonding net device that got called
 * @proto: VLAN protocol of the id being added
 * @vid: vlan id being added
 *
 * On failure, the id is removed again from every slave that had
 * already accepted it, and the error is returned.
 */
static int bond_vlan_rx_add_vid(struct net_device *bond_dev,
				__be16 proto, u16 vid)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct slave *failed_slave, *undo;
	struct list_head *iter;
	int err;

	bond_for_each_slave(bond, failed_slave, iter) {
		err = vlan_vid_add(failed_slave->dev, proto, vid);
		if (err)
			goto rollback;
	}

	return 0;

rollback:
	/* Undo the add on every slave preceding the one that failed. */
	bond_for_each_slave(bond, undo, iter) {
		if (undo == failed_slave)
			break;

		vlan_vid_del(undo->dev, proto, vid);
	}

	return err;
}
/**
 * bond_vlan_rx_kill_vid - propagate removal of a VLAN id to all slaves
 * @bond_dev: bonding net device that got called
 * @proto: VLAN protocol of the id being removed
 * @vid: vlan id being removed
 */
static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
				 __be16 proto, u16 vid)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct list_head *iter;
	struct slave *cur;

	bond_for_each_slave(bond, cur, iter) {
		vlan_vid_del(cur->dev, proto, vid);
	}

	/* Let the TLB/ALB code drop any state it keeps for this id. */
	if (bond_is_lb(bond))
		bond_alb_clear_vlan(bond, vid);

	return 0;
}
/*------------------------------- Link status -------------------------------*/
/* Set the carrier state for the master according to the state of its
* slaves. If any slaves are up, the master is up. In 802.3ad mode,
* do special 802.3ad magic.
*
* Returns zero if carrier state does not change, nonzero if it does.
*/
int bond_set_carrier(struct bonding *bond)
{
struct list_head *iter;
struct slave *slave;
if (!bond_has_slaves(bond))
goto down;
if (BOND_MODE(bond) == BOND_MODE_8023AD)
return bond_3ad_set_carrier(bond);
bond_for_each_slave(bond, slave, iter) {
if (slave->link == BOND_LINK_UP) {
if (!netif_carrier_ok(bond->dev)) {
netif_carrier_on(bond->dev);
return 1;
}
return 0;
}
}
down:
if (netif_carrier_ok(bond->dev)) {
netif_carrier_off(bond->dev);
return 1;
}
return 0;
}
/* Get link speed and duplex from the slave's base driver
* using ethtool. If for some reason the call fails or the
* values are invalid, set speed and duplex to -1,
* and return. Return 1 if speed or duplex settings are
* UNKNOWN; 0 otherwise.
*/
static int bond_update_speed_duplex(struct slave *slave)
{
struct net_device *slave_dev = slave->dev;
struct ethtool_link_ksettings ecmd;
int res;
slave->speed = SPEED_UNKNOWN;
slave->duplex = DUPLEX_UNKNOWN;
res = __ethtool_get_link_ksettings(slave_dev, &ecmd);
if (res < 0)
return 1;
if (ecmd.base.speed == 0 || ecmd.base.speed == ((__u32)-1))
return 1;
switch (ecmd.base.duplex) {
case DUPLEX_FULL:
case DUPLEX_HALF:
break;
default:
return 1;
}
slave->speed = ecmd.base.speed;
slave->duplex = ecmd.base.duplex;
return 0;
}
/* Translate a slave's BOND_LINK_* state into a short description. */
const char *bond_slave_link_status(s8 link)
{
	if (link == BOND_LINK_UP)
		return "up";
	if (link == BOND_LINK_FAIL)
		return "going down";
	if (link == BOND_LINK_DOWN)
		return "down";
	if (link == BOND_LINK_BACK)
		return "going back";
	return "unknown";
}
/* if <dev> supports MII link status reporting, check its link status.
 *
 * We either do MII/ETHTOOL ioctls, or check netif_carrier_ok(),
 * depending upon the setting of the use_carrier parameter.
 *
 * Return either BMSR_LSTATUS, meaning that the link is up (or we
 * can't tell and just pretend it is), or 0, meaning that the link is
 * down.
 *
 * If reporting is non-zero, instead of faking link up, return -1 if
 * both ETHTOOL and MII ioctls fail (meaning the device does not
 * support them). If use_carrier is set, return whatever it says.
 * It'd be nice if there was a good way to tell if a driver supports
 * netif_carrier, but there really isn't.
 */
static int bond_check_dev_link(struct bonding *bond,
			       struct net_device *slave_dev, int reporting)
{
	const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
	int (*ioctl)(struct net_device *, struct ifreq *, int);
	struct ifreq ifr;
	struct mii_ioctl_data *mii;

	/* A slave that is not even running cannot have link (unless the
	 * caller asked for strict reporting).
	 */
	if (!reporting && !netif_running(slave_dev))
		return 0;

	if (bond->params.use_carrier)
		return netif_carrier_ok(slave_dev) ? BMSR_LSTATUS : 0;

	/* Try to get link status using Ethtool first. */
	if (slave_dev->ethtool_ops->get_link)
		return slave_dev->ethtool_ops->get_link(slave_dev) ?
			BMSR_LSTATUS : 0;

	/* Ethtool can't be used, fallback to MII ioctls. */
	ioctl = slave_ops->ndo_do_ioctl;
	if (ioctl) {
		/* TODO: set pointer to correct ioctl on a per team member
		 *       bases to make this more efficient. that is, once
		 *       we determine the correct ioctl, we will always
		 *       call it and not the others for that team
		 *       member.
		 */

		/* We cannot assume that SIOCGMIIPHY will also read a
		 * register; not all network drivers (e.g., e100)
		 * support that.
		 */

		/* Yes, the mii is overlaid on the ifreq.ifr_ifru */
		/* NOTE(review): relies on slave_dev->name fitting in
		 * ifr_name (both presumably IFNAMSIZ) -- strncpy would
		 * not NUL-terminate a name of exactly IFNAMSIZ bytes.
		 */
		strncpy(ifr.ifr_name, slave_dev->name, IFNAMSIZ);
		mii = if_mii(&ifr);

		if (ioctl(slave_dev, &ifr, SIOCGMIIPHY) == 0) {
			mii->reg_num = MII_BMSR;
			if (ioctl(slave_dev, &ifr, SIOCGMIIREG) == 0)
				return mii->val_out & BMSR_LSTATUS;
		}
	}

	/* If reporting, report that either there's no dev->do_ioctl,
	 * or both SIOCGMIIREG and get_link failed (meaning that we
	 * cannot report link status). If not reporting, pretend
	 * we're ok.
	 */
	return reporting ? -1 : BMSR_LSTATUS;
}
/*----------------------------- Multicast list ------------------------------*/
/* Push the promiscuity flag down to appropriate slaves */
static int bond_set_promiscuity(struct bonding *bond, int inc)
{
	struct list_head *iter;
	struct slave *s;
	int rc = 0;

	if (bond_uses_primary(bond)) {
		/* In primary-based modes only the currently active
		 * slave carries traffic, so only it gets the flag.
		 */
		s = rtnl_dereference(bond->curr_active_slave);
		if (!s)
			return 0;
		return dev_set_promiscuity(s->dev, inc);
	}

	bond_for_each_slave(bond, s, iter) {
		rc = dev_set_promiscuity(s->dev, inc);
		if (rc)
			break;
	}
	return rc;
}
/* Propagate an allmulti count change (@inc) to the relevant slave(s).
 * Mirrors bond_set_promiscuity(): primary-based modes only touch the
 * active slave, other modes touch everyone.  Returns first error.
 */
static int bond_set_allmulti(struct bonding *bond, int inc)
{
	struct list_head *iter;
	int err = 0;

	if (!bond_uses_primary(bond)) {
		struct slave *slave;

		bond_for_each_slave(bond, slave, iter) {
			err = dev_set_allmulti(slave->dev, inc);
			if (err)
				return err;
		}
	} else {
		struct slave *curr_active =
			rtnl_dereference(bond->curr_active_slave);

		if (curr_active)
			err = dev_set_allmulti(curr_active->dev, inc);
	}
	return err;
}
/* Retrieve the list of registered multicast addresses for the bonding
 * device and retransmit an IGMP JOIN request to the current active
 * slave.
 */
static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
{
	struct bonding *bond = container_of(work, struct bonding,
					    mcast_work.work);
	/* RTNL is contended: back off and retry on the next jiffy */
	if (!rtnl_trylock()) {
		queue_delayed_work(bond->wq, &bond->mcast_work, 1);
		return;
	}
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, bond->dev);
	/* Re-arm until the configured number of retransmissions is done,
	 * spaced 200ms (HZ/5) apart.
	 */
	if (bond->igmp_retrans > 1) {
		bond->igmp_retrans--;
		queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
	}
	rtnl_unlock();
}
/* Flush bond's hardware addresses from slave: remove all unicast and
 * multicast addresses that were synced from the bond, plus the LACPDU
 * multicast address in 802.3ad mode.
 */
static void bond_hw_addr_flush(struct net_device *bond_dev,
			       struct net_device *slave_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	dev_uc_unsync(slave_dev, bond_dev);
	dev_mc_unsync(slave_dev, bond_dev);
	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
		/* del lacpdu mc addr from mc list */
		u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
		dev_mc_del(slave_dev, lacpdu_multicast);
	}
}
/*--------------------------- Active slave change ---------------------------*/
/* Update the hardware address list and promisc/allmulti for the new and
 * old active slaves (if any). Modes that are not using primary keep all
 * slaves up date at all times; only the modes that use primary need to call
 * this function to swap these settings during a failover.
 */
static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
			      struct slave *old_active)
{
	if (old_active) {
		/* Drop the reference counts the old active was carrying */
		if (bond->dev->flags & IFF_PROMISC)
			dev_set_promiscuity(old_active->dev, -1);
		if (bond->dev->flags & IFF_ALLMULTI)
			dev_set_allmulti(old_active->dev, -1);
		bond_hw_addr_flush(bond->dev, old_active->dev);
	}
	if (new_active) {
		/* FIXME: Signal errors upstream. */
		if (bond->dev->flags & IFF_PROMISC)
			dev_set_promiscuity(new_active->dev, 1);
		if (bond->dev->flags & IFF_ALLMULTI)
			dev_set_allmulti(new_active->dev, 1);
		/* Sync the bond's uc/mc lists onto the new active slave;
		 * addr lock keeps the lists stable during the sync.
		 */
		netif_addr_lock_bh(bond->dev);
		dev_uc_sync(new_active->dev, bond->dev);
		dev_mc_sync(new_active->dev, bond->dev);
		netif_addr_unlock_bh(bond->dev);
	}
}
/**
 * bond_set_dev_addr - clone slave's address to bond
 * @bond_dev: bond net device
 * @slave_dev: slave net device
 *
 * Copies the slave's MAC onto the bond, marks the address as stolen,
 * and notifies listeners of the address change.
 *
 * Should be called with RTNL held.
 */
static void bond_set_dev_addr(struct net_device *bond_dev,
			      struct net_device *slave_dev)
{
	netdev_dbg(bond_dev, "bond_dev=%p slave_dev=%p slave_dev->name=%s slave_dev->addr_len=%d\n",
		   bond_dev, slave_dev, slave_dev->name, slave_dev->addr_len);
	memcpy(bond_dev->dev_addr, slave_dev->dev_addr, slave_dev->addr_len);
	bond_dev->addr_assign_type = NET_ADDR_STOLEN;
	call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
}
/* Find the slave (other than @new_active) that currently carries the
 * bond's MAC address, or NULL if no such slave exists.
 */
static struct slave *bond_get_old_active(struct bonding *bond,
					 struct slave *new_active)
{
	struct list_head *iter;
	struct slave *slave;

	bond_for_each_slave(bond, slave, iter) {
		if (slave != new_active &&
		    ether_addr_equal(bond->dev->dev_addr,
				     slave->dev->dev_addr))
			return slave;
	}
	return NULL;
}
/* bond_do_fail_over_mac
 *
 * Perform special MAC address swapping for fail_over_mac settings
 *
 * Called with RTNL
 */
static void bond_do_fail_over_mac(struct bonding *bond,
				  struct slave *new_active,
				  struct slave *old_active)
{
	u8 tmp_mac[MAX_ADDR_LEN];
	struct sockaddr_storage ss;
	int rv;
	switch (bond->params.fail_over_mac) {
	case BOND_FOM_ACTIVE:
		/* The bond tracks the MAC of whichever slave is active */
		if (new_active)
			bond_set_dev_addr(bond->dev, new_active->dev);
		break;
	case BOND_FOM_FOLLOW:
		/* if new_active && old_active, swap them
		 * if just old_active, do nothing (going to no active slave)
		 * if just new_active, set new_active to bond's MAC
		 */
		if (!new_active)
			return;
		if (!old_active)
			old_active = bond_get_old_active(bond, new_active);
		if (old_active) {
			/* Save new_active's MAC, then hand it old_active's */
			bond_hw_addr_copy(tmp_mac, new_active->dev->dev_addr,
					  new_active->dev->addr_len);
			bond_hw_addr_copy(ss.__data,
					  old_active->dev->dev_addr,
					  old_active->dev->addr_len);
			ss.ss_family = new_active->dev->type;
		} else {
			/* No old owner of the bond MAC: new_active takes it */
			bond_hw_addr_copy(ss.__data, bond->dev->dev_addr,
					  bond->dev->addr_len);
			ss.ss_family = bond->dev->type;
		}
		rv = dev_set_mac_address(new_active->dev,
					 (struct sockaddr *)&ss);
		if (rv) {
			netdev_err(bond->dev, "Error %d setting MAC of slave %s\n",
				   -rv, new_active->dev->name);
			goto out;
		}
		if (!old_active)
			goto out;
		/* Complete the swap: old_active gets new_active's saved MAC */
		bond_hw_addr_copy(ss.__data, tmp_mac,
				  new_active->dev->addr_len);
		ss.ss_family = old_active->dev->type;
		rv = dev_set_mac_address(old_active->dev,
					 (struct sockaddr *)&ss);
		if (rv)
			netdev_err(bond->dev, "Error %d setting MAC of slave %s\n",
				   -rv, new_active->dev->name);
out:
		break;
	default:
		netdev_err(bond->dev, "bond_do_fail_over_mac impossible: bad policy %d\n",
			   bond->params.fail_over_mac);
		break;
	}
}
/* Pick between the configured primary slave and the current active slave
 * based on their link state and the primary_reselect policy.  Returns the
 * chosen slave, or NULL when neither has link.  Caller holds RTNL.
 */
static struct slave *bond_choose_primary_or_current(struct bonding *bond)
{
	struct slave *prim = rtnl_dereference(bond->primary_slave);
	struct slave *curr = rtnl_dereference(bond->curr_active_slave);
	/* No usable primary: fall back to current active (if it has link) */
	if (!prim || prim->link != BOND_LINK_UP) {
		if (!curr || curr->link != BOND_LINK_UP)
			return NULL;
		return curr;
	}
	/* A forced primary (e.g. just configured) wins exactly once */
	if (bond->force_primary) {
		bond->force_primary = false;
		return prim;
	}
	if (!curr || curr->link != BOND_LINK_UP)
		return prim;
	/* At this point, prim and curr are both up */
	switch (bond->params.primary_reselect) {
	case BOND_PRI_RESELECT_ALWAYS:
		return prim;
	case BOND_PRI_RESELECT_BETTER:
		/* Keep curr unless the primary is at least as fast/duplexed */
		if (prim->speed < curr->speed)
			return curr;
		if (prim->speed == curr->speed && prim->duplex <= curr->duplex)
			return curr;
		return prim;
	case BOND_PRI_RESELECT_FAILURE:
		return curr;
	default:
		netdev_err(bond->dev, "impossible primary_reselect %d\n",
			   bond->params.primary_reselect);
		return curr;
	}
}
/**
 * bond_find_best_slave - select the best available slave to be the active one
 * @bond: our bonding struct
 */
static struct slave *bond_find_best_slave(struct bonding *bond)
{
	int shortest_delay = bond->params.updelay;
	struct slave *candidate;
	struct list_head *iter;
	struct slave *slave;

	/* A usable primary/current slave wins outright. */
	candidate = bond_choose_primary_or_current(bond);
	if (candidate)
		return candidate;

	candidate = NULL;
	bond_for_each_slave(bond, slave, iter) {
		/* Any slave with link beats all still-delayed candidates. */
		if (slave->link == BOND_LINK_UP)
			return slave;
		/* Otherwise remember the BACK slave closest to coming up. */
		if (slave->link == BOND_LINK_BACK && bond_slave_is_up(slave) &&
		    slave->delay < shortest_delay) {
			shortest_delay = slave->delay;
			candidate = slave;
		}
	}
	return candidate;
}
/* Decide whether unsolicited peer notifications (gratuitous ARP/NA) should
 * be sent after a failover.  True only when there is an active slave, a
 * notification budget remains, the bond has carrier, and the slave has no
 * pending linkwatch event.
 */
static bool bond_should_notify_peers(struct bonding *bond)
{
	struct slave *slave;
	rcu_read_lock();
	slave = rcu_dereference(bond->curr_active_slave);
	rcu_read_unlock();
	/* NOTE(review): slave is dereferenced after rcu_read_unlock();
	 * presumably safe because callers hold RTNL, which blocks slave
	 * teardown -- confirm against callers.
	 */
	netdev_dbg(bond->dev, "bond_should_notify_peers: slave %s\n",
		   slave ? slave->dev->name : "NULL");
	if (!slave || !bond->send_peer_notif ||
	    !netif_carrier_ok(bond->dev) ||
	    test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
		return false;
	return true;
}
/**
 * change_active_interface - change the active slave into the specified one
 * @bond: our bonding struct
 * @new: the new slave to make the active one
 *
 * Set the new slave to the bond's settings and unset them on the old
 * curr_active_slave.
 * Setting include flags, mc-list, promiscuity, allmulti, etc.
 *
 * If @new's link state is %BOND_LINK_BACK we'll set it to %BOND_LINK_UP,
 * because it is apparently the best available slave we have, even though its
 * updelay hasn't timed out yet.
 *
 * Caller must hold RTNL.
 */
void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
{
	struct slave *old_active;
	ASSERT_RTNL();
	old_active = rtnl_dereference(bond->curr_active_slave);
	/* Nothing to do if the active slave isn't actually changing */
	if (old_active == new_active)
		return;
	if (new_active) {
		new_active->last_link_up = jiffies;
		if (new_active->link == BOND_LINK_BACK) {
			/* Promote a slave still in updelay straight to UP */
			if (bond_uses_primary(bond)) {
				netdev_info(bond->dev, "making interface %s the new active one %d ms earlier\n",
					    new_active->dev->name,
					    (bond->params.updelay - new_active->delay) * bond->params.miimon);
			}
			new_active->delay = 0;
			bond_set_slave_link_state(new_active, BOND_LINK_UP,
						  BOND_SLAVE_NOTIFY_NOW);
			/* Let mode-specific machinery see the link-up event */
			if (BOND_MODE(bond) == BOND_MODE_8023AD)
				bond_3ad_handle_link_change(new_active, BOND_LINK_UP);
			if (bond_is_lb(bond))
				bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP);
		} else {
			if (bond_uses_primary(bond)) {
				netdev_info(bond->dev, "making interface %s the new active one\n",
					    new_active->dev->name);
			}
		}
	}
	/* Primary-based modes move promisc/allmulti/addr lists across */
	if (bond_uses_primary(bond))
		bond_hw_addr_swap(bond, new_active, old_active);
	if (bond_is_lb(bond)) {
		/* ALB/TLB updates curr_active_slave itself here */
		bond_alb_handle_active_change(bond, new_active);
		if (old_active)
			bond_set_slave_inactive_flags(old_active,
						      BOND_SLAVE_NOTIFY_NOW);
		if (new_active)
			bond_set_slave_active_flags(new_active,
						    BOND_SLAVE_NOTIFY_NOW);
	} else {
		rcu_assign_pointer(bond->curr_active_slave, new_active);
	}
	if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) {
		if (old_active)
			bond_set_slave_inactive_flags(old_active,
						      BOND_SLAVE_NOTIFY_NOW);
		if (new_active) {
			bool should_notify_peers = false;
			bond_set_slave_active_flags(new_active,
						    BOND_SLAVE_NOTIFY_NOW);
			if (bond->params.fail_over_mac)
				bond_do_fail_over_mac(bond, new_active,
						      old_active);
			/* Arm gratuitous ARP/NA notifications for peers */
			if (netif_running(bond->dev)) {
				bond->send_peer_notif =
					bond->params.num_peer_notif;
				should_notify_peers =
					bond_should_notify_peers(bond);
			}
			call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, bond->dev);
			if (should_notify_peers)
				call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
							 bond->dev);
		}
	}
	/* resend IGMP joins since active slave has changed or
	 * all were sent on curr_active_slave.
	 * resend only if bond is brought up with the affected
	 * bonding modes and the retransmission is enabled
	 */
	if (netif_running(bond->dev) && (bond->params.resend_igmp > 0) &&
	    ((bond_uses_primary(bond) && new_active) ||
	     BOND_MODE(bond) == BOND_MODE_ROUNDROBIN)) {
		bond->igmp_retrans = bond->params.resend_igmp;
		queue_delayed_work(bond->wq, &bond->mcast_work, 1);
	}
}
/**
 * bond_select_active_slave - select a new active slave, if needed
 * @bond: our bonding struct
 *
 * This functions should be called when one of the following occurs:
 * - The old curr_active_slave has been released or lost its link.
 * - The primary_slave has got its link back.
 * - A slave has got its link back and there's no old curr_active_slave.
 *
 * Caller must hold RTNL.
 */
void bond_select_active_slave(struct bonding *bond)
{
	struct slave *best_slave;
	int rv;
	ASSERT_RTNL();
	best_slave = bond_find_best_slave(bond);
	if (best_slave != rtnl_dereference(bond->curr_active_slave)) {
		bond_change_active_slave(bond, best_slave);
		/* bond_set_carrier() returns non-zero only when the bond's
		 * carrier state actually changed; log that transition.
		 */
		rv = bond_set_carrier(bond);
		if (!rv)
			return;
		if (netif_carrier_ok(bond->dev))
			netdev_info(bond->dev, "first active interface up!\n");
		else
			netdev_info(bond->dev, "now running without any active interface!\n");
	}
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Allocate and attach a netpoll instance to @slave.
 * Returns 0 on success, -ENOMEM or __netpoll_setup()'s error on failure.
 */
static inline int slave_enable_netpoll(struct slave *slave)
{
	struct netpoll *np = kzalloc(sizeof(*np), GFP_KERNEL);
	int err;

	if (!np)
		return -ENOMEM;

	err = __netpoll_setup(np, slave->dev);
	if (err) {
		kfree(np);
		return err;
	}

	slave->np = np;
	return 0;
}
/* Detach and asynchronously free @slave's netpoll instance, if any. */
static inline void slave_disable_netpoll(struct slave *slave)
{
	struct netpoll *np = slave->np;

	if (np) {
		slave->np = NULL;
		__netpoll_free_async(np);
	}
}
/* Netpoll controller: poll every eligible slave so netconsole/kgdboe can
 * make progress without interrupts.  In 802.3ad mode only slaves in the
 * active aggregator are polled.
 */
static void bond_poll_controller(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct slave *slave = NULL;
	struct list_head *iter;
	struct ad_info ad_info;
	struct netpoll_info *ni;
	const struct net_device_ops *ops;
	if (BOND_MODE(bond) == BOND_MODE_8023AD)
		if (bond_3ad_get_active_agg_info(bond, &ad_info))
			return;
	bond_for_each_slave_rcu(bond, slave, iter) {
		ops = slave->dev->netdev_ops;
		if (!bond_slave_is_up(slave) || !ops->ndo_poll_controller)
			continue;
		if (BOND_MODE(bond) == BOND_MODE_8023AD) {
			struct aggregator *agg =
			    SLAVE_AD_INFO(slave)->port.aggregator;
			/* skip slaves outside the active aggregator */
			if (agg &&
			    agg->aggregator_identifier != ad_info.aggregator_id)
				continue;
		}
		/* assumes npinfo is non-NULL for enslaved up devices when
		 * netpoll is active on the bond -- TODO confirm
		 */
		ni = rcu_dereference_bh(slave->dev->npinfo);
		if (down_trylock(&ni->dev_lock))
			continue;
		ops->ndo_poll_controller(slave->dev);
		up(&ni->dev_lock);
	}
}
static void bond_netpoll_cleanup(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
struct list_head *iter;
struct slave *slave;
bond_for_each_slave(bond, slave, iter)
if (bond_slave_is_up(slave))
slave_disable_netpoll(slave);
}
/* Enable netpoll on every slave; on the first failure, roll back the
 * slaves already set up and return the error.
 */
static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
{
	struct bonding *bond = netdev_priv(dev);
	struct list_head *iter;
	struct slave *slave;
	int res = 0;

	bond_for_each_slave(bond, slave, iter) {
		res = slave_enable_netpoll(slave);
		if (!res)
			continue;
		bond_netpoll_cleanup(dev);
		break;
	}
	return res;
}
#else
static inline int slave_enable_netpoll(struct slave *slave)
{
return 0;
}
/* Netpoll disabled in this config: nothing to tear down */
static inline void slave_disable_netpoll(struct slave *slave)
{
}
/* Netpoll disabled in this config: nothing to clean up */
static void bond_netpoll_cleanup(struct net_device *bond_dev)
{
}
#endif
/*---------------------------------- IOCTL ----------------------------------*/
/* ndo_fix_features: compute the feature set the bond can offer as the
 * intersection/union (per feature class) of all slaves' features, bounded
 * by the requested @features mask.
 */
static netdev_features_t bond_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	struct bonding *bond = netdev_priv(dev);
	netdev_features_t mask = features;
	struct list_head *iter;
	struct slave *slave;

	/* Start from the "every slave must support it" baseline */
	features &= ~NETIF_F_ONE_FOR_ALL;
	features |= NETIF_F_ALL_FOR_ALL;

	bond_for_each_slave(bond, slave, iter)
		features = netdev_increment_features(features,
						     slave->dev->features,
						     mask);

	return netdev_add_tso_features(features, mask);
}
#define BOND_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
NETIF_F_HIGHDMA | NETIF_F_LRO)
#define BOND_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
NETIF_F_RXCSUM | NETIF_F_ALL_TSO)
/* Recompute the bond device's derived attributes (vlan features, encap
 * features, header length, GSO limits, xmit dst-release flag) from the
 * current slave set.  Called whenever slaves are added or removed.
 */
static void bond_compute_features(struct bonding *bond)
{
	unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
					IFF_XMIT_DST_RELEASE_PERM;
	netdev_features_t vlan_features = BOND_VLAN_FEATURES;
	netdev_features_t enc_features = BOND_ENC_FEATURES;
	struct net_device *bond_dev = bond->dev;
	struct list_head *iter;
	struct slave *slave;
	unsigned short max_hard_header_len = ETH_HLEN;
	unsigned int gso_max_size = GSO_MAX_SIZE;
	u16 gso_max_segs = GSO_MAX_SEGS;
	/* With no slaves, fall through to the defaults above */
	if (!bond_has_slaves(bond))
		goto done;
	vlan_features &= NETIF_F_ALL_FOR_ALL;
	bond_for_each_slave(bond, slave, iter) {
		vlan_features = netdev_increment_features(vlan_features,
			slave->dev->vlan_features, BOND_VLAN_FEATURES);
		enc_features = netdev_increment_features(enc_features,
							 slave->dev->hw_enc_features,
							 BOND_ENC_FEATURES);
		dst_release_flag &= slave->dev->priv_flags;
		/* Header length and GSO limits are bounded by the weakest slave */
		if (slave->dev->hard_header_len > max_hard_header_len)
			max_hard_header_len = slave->dev->hard_header_len;
		gso_max_size = min(gso_max_size, slave->dev->gso_max_size);
		gso_max_segs = min(gso_max_segs, slave->dev->gso_max_segs);
	}
	bond_dev->hard_header_len = max_hard_header_len;
done:
	bond_dev->vlan_features = vlan_features;
	bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL;
	bond_dev->gso_max_segs = gso_max_segs;
	netif_set_gso_max_size(bond_dev, gso_max_size);
	/* xmit dst-release is only kept when every slave permits it */
	bond_dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
	if ((bond_dev->priv_flags & IFF_XMIT_DST_RELEASE_PERM) &&
	    dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
		bond_dev->priv_flags |= IFF_XMIT_DST_RELEASE;
	netdev_change_features(bond_dev);
}
/* Mirror the first slave's link-layer identity onto the bond device.
 * Used when the slave is not ARPHRD_ETHER (e.g. InfiniBand).
 */
static void bond_setup_by_slave(struct net_device *bond_dev,
				struct net_device *slave_dev)
{
	bond_dev->type = slave_dev->type;
	bond_dev->header_ops = slave_dev->header_ops;
	bond_dev->hard_header_len = slave_dev->hard_header_len;
	bond_dev->addr_len = slave_dev->addr_len;
	memcpy(bond_dev->broadcast, slave_dev->broadcast, slave_dev->addr_len);
}
/* On bonding slaves other than the currently active slave, suppress
 * duplicates except for alb non-mcast/bcast.
 */
static bool bond_should_deliver_exact_match(struct sk_buff *skb,
					    struct slave *slave,
					    struct bonding *bond)
{
	/* Active slaves never suppress delivery */
	if (!bond_is_slave_inactive(slave))
		return false;

	/* ALB still delivers unicast frames on inactive slaves */
	if (BOND_MODE(bond) == BOND_MODE_ALB &&
	    skb->pkt_type != PACKET_BROADCAST &&
	    skb->pkt_type != PACKET_MULTICAST)
		return false;

	return true;
}
/* rx_handler for enslaved devices: run mode-specific receive probes
 * (ARP monitor, LACP, ALB), then hand the frame to the bond device by
 * rewriting skb->dev, or pass/suppress it as appropriate.
 */
static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct slave *slave;
	struct bonding *bond;
	int (*recv_probe)(const struct sk_buff *, struct bonding *,
			  struct slave *);
	int ret = RX_HANDLER_ANOTHER;
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return RX_HANDLER_CONSUMED;
	*pskb = skb;
	slave = bond_slave_get_rcu(skb->dev);
	bond = slave->bond;
	recv_probe = ACCESS_ONCE(bond->recv_probe);
	if (recv_probe) {
		ret = recv_probe(skb, bond, slave);
		if (ret == RX_HANDLER_CONSUMED) {
			consume_skb(skb);
			return ret;
		}
	}
	/* don't change skb->dev for link-local packets */
	if (is_link_local_ether_addr(eth_hdr(skb)->h_dest))
		return RX_HANDLER_PASS;
	if (bond_should_deliver_exact_match(skb, slave, bond))
		return RX_HANDLER_EXACT;
	skb->dev = bond->dev;
	/* When the ALB bond is a bridge port, rewrite the destination MAC
	 * (slaves answer with their own MACs) so the bridge sees the bond's;
	 * the header may be shared, hence skb_cow_head() first.
	 */
	if (BOND_MODE(bond) == BOND_MODE_ALB &&
	    bond->dev->priv_flags & IFF_BRIDGE_PORT &&
	    skb->pkt_type == PACKET_HOST) {
		if (unlikely(skb_cow_head(skb,
					  skb->data - skb_mac_header(skb)))) {
			kfree_skb(skb);
			return RX_HANDLER_CONSUMED;
		}
		bond_hw_addr_copy(eth_hdr(skb)->h_dest, bond->dev->dev_addr,
				  bond->dev->addr_len);
	}
	return ret;
}
/* Map the bonding mode onto the generic LAG tx-type reported upstream. */
static enum netdev_lag_tx_type bond_lag_tx_type(struct bonding *bond)
{
	int mode = BOND_MODE(bond);

	if (mode == BOND_MODE_ROUNDROBIN)
		return NETDEV_LAG_TX_TYPE_ROUNDROBIN;
	if (mode == BOND_MODE_ACTIVEBACKUP)
		return NETDEV_LAG_TX_TYPE_ACTIVEBACKUP;
	if (mode == BOND_MODE_BROADCAST)
		return NETDEV_LAG_TX_TYPE_BROADCAST;
	if (mode == BOND_MODE_XOR || mode == BOND_MODE_8023AD)
		return NETDEV_LAG_TX_TYPE_HASH;
	return NETDEV_LAG_TX_TYPE_UNKNOWN;
}
static int bond_master_upper_dev_link(struct bonding *bond, struct slave *slave)
{
struct netdev_lag_upper_info lag_upper_info;
int err;
lag_upper_info.tx_type = bond_lag_tx_type(bond);
err = netdev_master_upper_dev_link(slave->dev, bond->dev, slave,
&lag_upper_info);
if (err)
return err;
rtmsg_ifinfo(RTM_NEWLINK, slave->dev, IFF_SLAVE, GFP_KERNEL);
return 0;
}
/* Undo bond_master_upper_dev_link(): detach the slave from the bond,
 * clear its IFF_SLAVE flag, and announce the change to userspace.
 */
static void bond_upper_dev_unlink(struct bonding *bond, struct slave *slave)
{
	netdev_upper_dev_unlink(slave->dev, bond->dev);
	slave->dev->flags &= ~IFF_SLAVE;
	rtmsg_ifinfo(RTM_NEWLINK, slave->dev, IFF_SLAVE, GFP_KERNEL);
}
static struct slave *bond_alloc_slave(struct bonding *bond)
{
struct slave *slave = NULL;
slave = kzalloc(sizeof(*slave), GFP_KERNEL);
if (!slave)
return NULL;
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
SLAVE_AD_INFO(slave) = kzalloc(sizeof(struct ad_slave_info),
GFP_KERNEL);
if (!SLAVE_AD_INFO(slave)) {
kfree(slave);
return NULL;
}
}
return slave;
}
/* Free a struct slave allocated by bond_alloc_slave(), including the
 * 802.3ad per-slave info when present.
 */
static void bond_free_slave(struct slave *slave)
{
	struct bonding *bond = bond_get_bond_by_slave(slave);
	if (BOND_MODE(bond) == BOND_MODE_8023AD)
		kfree(SLAVE_AD_INFO(slave));
	kfree(slave);
}
/* Populate a userspace-visible struct ifbond snapshot from @bond. */
static void bond_fill_ifbond(struct bonding *bond, struct ifbond *info)
{
	info->bond_mode = BOND_MODE(bond);
	info->miimon = bond->params.miimon;
	info->num_slaves = bond->slave_cnt;
}
/* Populate a userspace-visible struct ifslave snapshot from @slave.
 * NOTE(review): strcpy assumes slave_name is at least IFNAMSIZ bytes,
 * matching dev->name's bound -- confirm against struct ifslave.
 */
static void bond_fill_ifslave(struct slave *slave, struct ifslave *info)
{
	strcpy(info->slave_name, slave->dev->name);
	info->link = slave->link;
	info->state = bond_slave_state(slave);
	info->link_failure_count = slave->link_failure_count;
}
/* Deliver a bonding-info change notification for @dev under RTNL. */
static void bond_netdev_notify(struct net_device *dev,
			       struct netdev_bonding_info *info)
{
	rtnl_lock();
	netdev_bonding_info_change(dev, info);
	rtnl_unlock();
}
/* Workqueue handler for bond_queue_slave_event(): send the queued bonding
 * info notification, then drop the device reference and free the work item.
 * NOTE(review): takes rtnl_lock() unconditionally from a workqueue;
 * presumably safe here, but verify against paths that flush/cancel this
 * work while holding RTNL (rtnl_trylock + requeue would be the defensive
 * pattern).
 */
static void bond_netdev_notify_work(struct work_struct *_work)
{
	struct netdev_notify_work *w =
		container_of(_work, struct netdev_notify_work, work.work);
	bond_netdev_notify(w->dev, &w->bonding_info);
	dev_put(w->dev);
	kfree(w);
}
/* Queue an asynchronous bonding-info notification for @slave.  Best
 * effort: the event is silently dropped if the atomic allocation fails.
 */
void bond_queue_slave_event(struct slave *slave)
{
	struct bonding *bond = slave->bond;
	struct netdev_notify_work *nnw = kzalloc(sizeof(*nnw), GFP_ATOMIC);
	if (!nnw)
		return;
	/* Hold the slave device until bond_netdev_notify_work() drops it */
	dev_hold(slave->dev);
	nnw->dev = slave->dev;
	bond_fill_ifslave(slave, &nnw->bonding_info.slave);
	bond_fill_ifbond(bond, &nnw->bonding_info.master);
	INIT_DELAYED_WORK(&nnw->work, bond_netdev_notify_work);
	queue_delayed_work(slave->bond->wq, &nnw->work, 0);
}
void bond_lower_state_changed(struct slave *slave)
{
struct netdev_lag_lower_state_info info;
info.link_up = slave->link == BOND_LINK_UP ||
slave->link == BOND_LINK_FAIL;
info.tx_enabled = bond_is_active_slave(slave);
netdev_lower_state_changed(slave->dev, &info);
}
/* enslave device <slave> to bond device <master> */
int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
struct slave *new_slave = NULL, *prev_slave;
struct sockaddr_storage ss;
int link_reporting;
int res = 0, i;
if (!bond->params.use_carrier &&
slave_dev->ethtool_ops->get_link == NULL &&
slave_ops->ndo_do_ioctl == NULL) {
netdev_warn(bond_dev, "no link monitoring support for %s\n",
slave_dev->name);
}
/* already in-use? */
if (netdev_is_rx_handler_busy(slave_dev)) {
netdev_err(bond_dev,
"Error: Device is in use and cannot be enslaved\n");
return -EBUSY;
}
if (bond_dev == slave_dev) {
netdev_err(bond_dev, "cannot enslave bond to itself.\n");
return -EPERM;
}
/* vlan challenged mutual exclusion */
/* no need to lock since we're protected by rtnl_lock */
if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) {
netdev_dbg(bond_dev, "%s is NETIF_F_VLAN_CHALLENGED\n",
slave_dev->name);
if (vlan_uses_dev(bond_dev)) {
netdev_err(bond_dev, "Error: cannot enslave VLAN challenged slave %s on VLAN enabled bond %s\n",
slave_dev->name, bond_dev->name);
return -EPERM;
} else {
netdev_warn(bond_dev, "enslaved VLAN challenged slave %s. Adding VLANs will be blocked as long as %s is part of bond %s\n",
slave_dev->name, slave_dev->name,
bond_dev->name);
}
} else {
netdev_dbg(bond_dev, "%s is !NETIF_F_VLAN_CHALLENGED\n",
slave_dev->name);
}
/* Old ifenslave binaries are no longer supported. These can
* be identified with moderate accuracy by the state of the slave:
* the current ifenslave will set the interface down prior to
* enslaving it; the old ifenslave will not.
*/
if (slave_dev->flags & IFF_UP) {
netdev_err(bond_dev, "%s is up - this may be due to an out of date ifenslave\n",
slave_dev->name);
return -EPERM;
}
/* set bonding device ether type by slave - bonding netdevices are
* created with ether_setup, so when the slave type is not ARPHRD_ETHER
* there is a need to override some of the type dependent attribs/funcs.
*
* bond ether type mutual exclusion - don't allow slaves of dissimilar
* ether type (eg ARPHRD_ETHER and ARPHRD_INFINIBAND) share the same bond
*/
if (!bond_has_slaves(bond)) {
if (bond_dev->type != slave_dev->type) {
netdev_dbg(bond_dev, "change device type from %d to %d\n",
bond_dev->type, slave_dev->type);
res = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE,
bond_dev);
res = notifier_to_errno(res);
if (res) {
netdev_err(bond_dev, "refused to change device type\n");
return -EBUSY;
}
/* Flush unicast and multicast addresses */
dev_uc_flush(bond_dev);
dev_mc_flush(bond_dev);
if (slave_dev->type != ARPHRD_ETHER)
bond_setup_by_slave(bond_dev, slave_dev);
else {
ether_setup(bond_dev);
bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
}
call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE,
bond_dev);
}
} else if (bond_dev->type != slave_dev->type) {
netdev_err(bond_dev, "%s ether type (%d) is different from other slaves (%d), can not enslave it\n",
slave_dev->name, slave_dev->type, bond_dev->type);
return -EINVAL;
}
if (slave_dev->type == ARPHRD_INFINIBAND &&
BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
netdev_warn(bond_dev, "Type (%d) supports only active-backup mode\n",
slave_dev->type);
res = -EOPNOTSUPP;
goto err_undo_flags;
}
if (!slave_ops->ndo_set_mac_address ||
slave_dev->type == ARPHRD_INFINIBAND) {
netdev_warn(bond_dev, "The slave device specified does not support setting the MAC address\n");
if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP &&
bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
if (!bond_has_slaves(bond)) {
bond->params.fail_over_mac = BOND_FOM_ACTIVE;
netdev_warn(bond_dev, "Setting fail_over_mac to active for active-backup mode\n");
} else {
netdev_err(bond_dev, "The slave device specified does not support setting the MAC address, but fail_over_mac is not set to active\n");
res = -EOPNOTSUPP;
goto err_undo_flags;
}
}
}
call_netdevice_notifiers(NETDEV_JOIN, slave_dev);
/* If this is the first slave, then we need to set the master's hardware
* address to be the same as the slave's.
*/
if (!bond_has_slaves(bond) &&
bond->dev->addr_assign_type == NET_ADDR_RANDOM)
bond_set_dev_addr(bond->dev, slave_dev);
new_slave = bond_alloc_slave(bond);
if (!new_slave) {
res = -ENOMEM;
goto err_undo_flags;
}
new_slave->bond = bond;
new_slave->dev = slave_dev;
/* Set the new_slave's queue_id to be zero. Queue ID mapping
* is set via sysfs or module option if desired.
*/
new_slave->queue_id = 0;
/* Save slave's original mtu and then set it to match the bond */
new_slave->original_mtu = slave_dev->mtu;
res = dev_set_mtu(slave_dev, bond->dev->mtu);
if (res) {
netdev_dbg(bond_dev, "Error %d calling dev_set_mtu\n", res);
goto err_free;
}
/* Save slave's original ("permanent") mac address for modes
* that need it, and for restoring it upon release, and then
* set it to the master's address
*/
bond_hw_addr_copy(new_slave->perm_hwaddr, slave_dev->dev_addr,
slave_dev->addr_len);
if (!bond->params.fail_over_mac ||
BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
/* Set slave to master's mac address. The application already
* set the master's mac address to that of the first slave
*/
memcpy(ss.__data, bond_dev->dev_addr, bond_dev->addr_len);
ss.ss_family = slave_dev->type;
res = dev_set_mac_address(slave_dev, (struct sockaddr *)&ss);
if (res) {
netdev_dbg(bond_dev, "Error %d calling set_mac_address\n", res);
goto err_restore_mtu;
}
}
/* set slave flag before open to prevent IPv6 addrconf */
slave_dev->flags |= IFF_SLAVE;
/* open the slave since the application closed it */
res = dev_open(slave_dev);
if (res) {
netdev_dbg(bond_dev, "Opening slave %s failed\n", slave_dev->name);
goto err_restore_mac;
}
slave_dev->priv_flags |= IFF_BONDING;
/* initialize slave stats */
dev_get_stats(new_slave->dev, &new_slave->slave_stats);
if (bond_is_lb(bond)) {
/* bond_alb_init_slave() must be called before all other stages since
* it might fail and we do not want to have to undo everything
*/
res = bond_alb_init_slave(bond, new_slave);
if (res)
goto err_close;
}
/* If the mode uses primary, then the following is handled by
* bond_change_active_slave().
*/
if (!bond_uses_primary(bond)) {
/* set promiscuity level to new slave */
if (bond_dev->flags & IFF_PROMISC) {
res = dev_set_promiscuity(slave_dev, 1);
if (res)
goto err_close;
}
/* set allmulti level to new slave */
if (bond_dev->flags & IFF_ALLMULTI) {
res = dev_set_allmulti(slave_dev, 1);
if (res)
goto err_close;
}
netif_addr_lock_bh(bond_dev);
dev_mc_sync_multiple(slave_dev, bond_dev);
dev_uc_sync_multiple(slave_dev, bond_dev);
netif_addr_unlock_bh(bond_dev);
}
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
/* add lacpdu mc addr to mc list */
u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
dev_mc_add(slave_dev, lacpdu_multicast);
}
res = vlan_vids_add_by_dev(slave_dev, bond_dev);
if (res) {
netdev_err(bond_dev, "Couldn't add bond vlan ids to %s\n",
slave_dev->name);
goto err_close;
}
prev_slave = bond_last_slave(bond);
new_slave->delay = 0;
new_slave->link_failure_count = 0;
if (bond_update_speed_duplex(new_slave))
new_slave->link = BOND_LINK_DOWN;
new_slave->last_rx = jiffies -
(msecs_to_jiffies(bond->params.arp_interval) + 1);
for (i = 0; i < BOND_MAX_ARP_TARGETS; i++)
new_slave->target_last_arp_rx[i] = new_slave->last_rx;
if (bond->params.miimon && !bond->params.use_carrier) {
link_reporting = bond_check_dev_link(bond, slave_dev, 1);
if ((link_reporting == -1) && !bond->params.arp_interval) {
/* miimon is set but a bonded network driver
* does not support ETHTOOL/MII and
* arp_interval is not set. Note: if
* use_carrier is enabled, we will never go
* here (because netif_carrier is always
* supported); thus, we don't need to change
* the messages for netif_carrier.
*/
netdev_warn(bond_dev, "MII and ETHTOOL support not available for interface %s, and arp_interval/arp_ip_target module parameters not specified, thus bonding will not detect link failures! see bonding.txt for details\n",
slave_dev->name);
} else if (link_reporting == -1) {
/* unable get link status using mii/ethtool */
netdev_warn(bond_dev, "can't get link status from interface %s; the network driver associated with this interface does not support MII or ETHTOOL link status reporting, thus miimon has no effect on this interface\n",
slave_dev->name);
}
}
/* check for initial state */
new_slave->link = BOND_LINK_NOCHANGE;
if (bond->params.miimon) {
if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) {
if (bond->params.updelay) {
bond_set_slave_link_state(new_slave,
BOND_LINK_BACK,
BOND_SLAVE_NOTIFY_NOW);
new_slave->delay = bond->params.updelay;
} else {
bond_set_slave_link_state(new_slave,
BOND_LINK_UP,
BOND_SLAVE_NOTIFY_NOW);
}
} else {
bond_set_slave_link_state(new_slave, BOND_LINK_DOWN,
BOND_SLAVE_NOTIFY_NOW);
}
} else if (bond->params.arp_interval) {
bond_set_slave_link_state(new_slave,
(netif_carrier_ok(slave_dev) ?
BOND_LINK_UP : BOND_LINK_DOWN),
BOND_SLAVE_NOTIFY_NOW);
} else {
bond_set_slave_link_state(new_slave, BOND_LINK_UP,
BOND_SLAVE_NOTIFY_NOW);
}
if (new_slave->link != BOND_LINK_DOWN)
new_slave->last_link_up = jiffies;
netdev_dbg(bond_dev, "Initial state of slave_dev is BOND_LINK_%s\n",
new_slave->link == BOND_LINK_DOWN ? "DOWN" :
(new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));
if (bond_uses_primary(bond) && bond->params.primary[0]) {
/* if there is a primary slave, remember it */
if (strcmp(bond->params.primary, new_slave->dev->name) == 0) {
rcu_assign_pointer(bond->primary_slave, new_slave);
bond->force_primary = true;
}
}
switch (BOND_MODE(bond)) {
case BOND_MODE_ACTIVEBACKUP:
bond_set_slave_inactive_flags(new_slave,
BOND_SLAVE_NOTIFY_NOW);
break;
case BOND_MODE_8023AD:
/* in 802.3ad mode, the internal mechanism
* will activate the slaves in the selected
* aggregator
*/
bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
/* if this is the first slave */
if (!prev_slave) {
SLAVE_AD_INFO(new_slave)->id = 1;
/* Initialize AD with the number of times that the AD timer is called in 1 second
* can be called only after the mac address of the bond is set
*/
bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL);
} else {
SLAVE_AD_INFO(new_slave)->id =
SLAVE_AD_INFO(prev_slave)->id + 1;
}
bond_3ad_bind_slave(new_slave);
break;
case BOND_MODE_TLB:
case BOND_MODE_ALB:
bond_set_active_slave(new_slave);
bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
break;
default:
netdev_dbg(bond_dev, "This slave is always active in trunk mode\n");
/* always active in trunk mode */
bond_set_active_slave(new_slave);
/* In trunking mode there is little meaning to curr_active_slave
* anyway (it holds no special properties of the bond device),
* so we can change it without calling change_active_interface()
*/
if (!rcu_access_pointer(bond->curr_active_slave) &&
new_slave->link == BOND_LINK_UP)
rcu_assign_pointer(bond->curr_active_slave, new_slave);
break;
} /* switch(bond_mode) */
#ifdef CONFIG_NET_POLL_CONTROLLER
slave_dev->npinfo = bond->dev->npinfo;
if (slave_dev->npinfo) {
if (slave_enable_netpoll(new_slave)) {
netdev_info(bond_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n");
res = -EBUSY;
goto err_detach;
}
}
#endif
if (!(bond_dev->features & NETIF_F_LRO))
dev_disable_lro(slave_dev);
res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
new_slave);
if (res) {
netdev_dbg(bond_dev, "Error %d calling netdev_rx_handler_register\n", res);
goto err_detach;
}
res = bond_master_upper_dev_link(bond, new_slave);
if (res) {
netdev_dbg(bond_dev, "Error %d calling bond_master_upper_dev_link\n", res);
goto err_unregister;
}
res = bond_sysfs_slave_add(new_slave);
if (res) {
netdev_dbg(bond_dev, "Error %d calling bond_sysfs_slave_add\n", res);
goto err_upper_unlink;
}
bond->slave_cnt++;
bond_compute_features(bond);
bond_set_carrier(bond);
if (bond_uses_primary(bond)) {
block_netpoll_tx();
bond_select_active_slave(bond);
unblock_netpoll_tx();
}
if (bond_mode_uses_xmit_hash(bond))
bond_update_slave_arr(bond, NULL);
netdev_info(bond_dev, "Enslaving %s as %s interface with %s link\n",
slave_dev->name,
bond_is_active_slave(new_slave) ? "an active" : "a backup",
new_slave->link != BOND_LINK_DOWN ? "an up" : "a down");
/* enslave is successful */
bond_queue_slave_event(new_slave);
return 0;
/* Undo stages on error */
err_upper_unlink:
bond_upper_dev_unlink(bond, new_slave);
err_unregister:
netdev_rx_handler_unregister(slave_dev);
err_detach:
if (!bond_uses_primary(bond))
bond_hw_addr_flush(bond_dev, slave_dev);
vlan_vids_del_by_dev(slave_dev, bond_dev);
if (rcu_access_pointer(bond->primary_slave) == new_slave)
RCU_INIT_POINTER(bond->primary_slave, NULL);
if (rcu_access_pointer(bond->curr_active_slave) == new_slave) {
block_netpoll_tx();
bond_change_active_slave(bond, NULL);
bond_select_active_slave(bond);
unblock_netpoll_tx();
}
/* either primary_slave or curr_active_slave might've changed */
synchronize_rcu();
slave_disable_netpoll(new_slave);
err_close:
slave_dev->priv_flags &= ~IFF_BONDING;
dev_close(slave_dev);
err_restore_mac:
slave_dev->flags &= ~IFF_SLAVE;
if (!bond->params.fail_over_mac ||
BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
/* XXX TODO - fom follow mode needs to change master's
* MAC if this slave's MAC is in use by the bond, or at
* least print a warning.
*/
bond_hw_addr_copy(ss.__data, new_slave->perm_hwaddr,
new_slave->dev->addr_len);
ss.ss_family = slave_dev->type;
dev_set_mac_address(slave_dev, (struct sockaddr *)&ss);
}
err_restore_mtu:
dev_set_mtu(slave_dev, new_slave->original_mtu);
err_free:
bond_free_slave(new_slave);
err_undo_flags:
/* Enslave of first slave has failed and we need to fix master's mac */
if (!bond_has_slaves(bond)) {
if (ether_addr_equal_64bits(bond_dev->dev_addr,
slave_dev->dev_addr))
eth_hw_addr_random(bond_dev);
if (bond_dev->type != ARPHRD_ETHER) {
dev_close(bond_dev);
ether_setup(bond_dev);
bond_dev->flags |= IFF_MASTER;
bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
}
}
return res;
}
/* Try to release the slave device <slave> from the bond device <master>.
 * It is legal to access curr_active_slave without a lock because all the
 * function is RTNL-locked. If "all" is true it means that the function is
 * being called while destroying a bond interface and all slaves are being
 * released.
 *
 * The rules for slave state should be:
 *   for Active/Backup:
 *	Active stays on all backups go down
 *   for Bonded connections:
 *	The first up interface should be left on and all others downed.
 *
 * @unregister selects __dev_set_mtu() (no notifiers, used while the slave
 * is being unregistered) vs dev_set_mtu() when restoring the original MTU.
 *
 * Returns 0 on success, -EINVAL if @slave_dev is not a slave of @bond_dev.
 */
static int __bond_release_one(struct net_device *bond_dev,
			      struct net_device *slave_dev,
			      bool all, bool unregister)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct slave *slave, *oldcurrent;
	struct sockaddr_storage ss;
	/* Cache flags/features before teardown changes them; the cached
	 * values are what was in effect when the slave was attached.
	 */
	int old_flags = bond_dev->flags;
	netdev_features_t old_features = bond_dev->features;

	/* slave is not a slave or master is not master of this slave */
	if (!(slave_dev->flags & IFF_SLAVE) ||
	    !netdev_has_upper_dev(slave_dev, bond_dev)) {
		netdev_dbg(bond_dev, "cannot release %s\n",
			   slave_dev->name);
		return -EINVAL;
	}

	block_netpoll_tx();

	slave = bond_get_slave_by_dev(bond, slave_dev);
	if (!slave) {
		/* not a slave of this bond */
		netdev_info(bond_dev, "%s not enslaved\n",
			    slave_dev->name);
		unblock_netpoll_tx();
		return -EINVAL;
	}

	bond_set_slave_inactive_flags(slave, BOND_SLAVE_NOTIFY_NOW);

	bond_sysfs_slave_del(slave);

	/* recompute stats just before removing the slave */
	bond_get_stats(bond->dev, &bond->bond_stats);

	bond_upper_dev_unlink(bond, slave);
	/* unregister rx_handler early so bond_handle_frame wouldn't be called
	 * for this slave anymore.
	 */
	netdev_rx_handler_unregister(slave_dev);

	if (BOND_MODE(bond) == BOND_MODE_8023AD)
		bond_3ad_unbind_slave(slave);

	if (bond_mode_uses_xmit_hash(bond))
		bond_update_slave_arr(bond, slave);

	netdev_info(bond_dev, "Releasing %s interface %s\n",
		    bond_is_active_slave(slave) ? "active" : "backup",
		    slave_dev->name);

	oldcurrent = rcu_access_pointer(bond->curr_active_slave);

	RCU_INIT_POINTER(bond->current_arp_slave, NULL);

	if (!all && (!bond->params.fail_over_mac ||
		     BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)) {
		/* warn if the departing slave's permanent MAC is still the
		 * bond's MAC and other slaves remain - callers may see
		 * address conflicts on the wire
		 */
		if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) &&
		    bond_has_slaves(bond))
			netdev_warn(bond_dev, "the permanent HWaddr of %s - %pM - is still in use by %s - set the HWaddr of %s to a different address to avoid conflicts\n",
				    slave_dev->name, slave->perm_hwaddr,
				    bond_dev->name, slave_dev->name);
	}

	if (rtnl_dereference(bond->primary_slave) == slave)
		RCU_INIT_POINTER(bond->primary_slave, NULL);

	if (oldcurrent == slave)
		bond_change_active_slave(bond, NULL);

	if (bond_is_lb(bond)) {
		/* Must be called only after the slave has been
		 * detached from the list and the curr_active_slave
		 * has been cleared (if our_slave == old_current),
		 * but before a new active slave is selected.
		 */
		bond_alb_deinit_slave(bond, slave);
	}

	if (all) {
		RCU_INIT_POINTER(bond->curr_active_slave, NULL);
	} else if (oldcurrent == slave) {
		/* Note that we hold RTNL over this sequence, so there
		 * is no concern that another slave add/remove event
		 * will interfere.
		 */
		bond_select_active_slave(bond);
	}

	if (!bond_has_slaves(bond)) {
		bond_set_carrier(bond);
		eth_hw_addr_random(bond_dev);
	}

	unblock_netpoll_tx();
	/* wait for in-flight RCU readers before the slave is freed below */
	synchronize_rcu();
	bond->slave_cnt--;

	if (!bond_has_slaves(bond)) {
		call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev);
		call_netdevice_notifiers(NETDEV_RELEASE, bond->dev);
	}

	bond_compute_features(bond);
	if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
	    (old_features & NETIF_F_VLAN_CHALLENGED))
		netdev_info(bond_dev, "last VLAN challenged slave %s left bond %s - VLAN blocking is removed\n",
			    slave_dev->name, bond_dev->name);

	vlan_vids_del_by_dev(slave_dev, bond_dev);

	/* If the mode uses primary, then this case was handled above by
	 * bond_change_active_slave(..., NULL)
	 */
	if (!bond_uses_primary(bond)) {
		/* unset promiscuity level from slave
		 * NOTE: The NETDEV_CHANGEADDR call above may change the value
		 * of the IFF_PROMISC flag in the bond_dev, but we need the
		 * value of that flag before that change, as that was the value
		 * when this slave was attached, so we cache at the start of the
		 * function and use it here. Same goes for ALLMULTI below
		 */
		if (old_flags & IFF_PROMISC)
			dev_set_promiscuity(slave_dev, -1);

		/* unset allmulti level from slave */
		if (old_flags & IFF_ALLMULTI)
			dev_set_allmulti(slave_dev, -1);

		bond_hw_addr_flush(bond_dev, slave_dev);
	}

	slave_disable_netpoll(slave);

	/* close slave before restoring its mac address */
	dev_close(slave_dev);

	if (bond->params.fail_over_mac != BOND_FOM_ACTIVE ||
	    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
		/* restore original ("permanent") mac address */
		bond_hw_addr_copy(ss.__data, slave->perm_hwaddr,
				  slave->dev->addr_len);
		ss.ss_family = slave_dev->type;
		dev_set_mac_address(slave_dev, (struct sockaddr *)&ss);
	}

	if (unregister)
		__dev_set_mtu(slave_dev, slave->original_mtu);
	else
		dev_set_mtu(slave_dev, slave->original_mtu);

	slave_dev->priv_flags &= ~IFF_BONDING;

	bond_free_slave(slave);

	return 0;
}
/* A wrapper used because of ndo_del_link.
 * Releases @slave_dev from @bond_dev without destroying the bond and
 * without the unregister-time MTU path (all=false, unregister=false).
 */
int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
{
	return __bond_release_one(bond_dev, slave_dev, false, false);
}
/* First release a slave and then destroy the bond if no more slaves are left.
 * Must be under rtnl_lock when this function is called.
 *
 * Returns the result of __bond_release_one(); on success, if the released
 * slave was the last one, the bond net_device itself is unregistered.
 */
static int bond_release_and_destroy(struct net_device *bond_dev,
				    struct net_device *slave_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	int ret;

	ret = __bond_release_one(bond_dev, slave_dev, false, true);
	if (ret == 0 && !bond_has_slaves(bond)) {
		/* keep netpoll away from the device while it is torn down */
		bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
		netdev_info(bond_dev, "Destroying bond %s\n",
			    bond_dev->name);
		bond_remove_proc_entry(bond);
		unregister_netdevice(bond_dev);
	}
	return ret;
}
/* Fill @info with the current ifbond state of the bond @bond_dev. */
static void bond_info_query(struct net_device *bond_dev, struct ifbond *info)
{
	bond_fill_ifbond(netdev_priv(bond_dev), info);
}
/* Fill @info for the slave at position info->slave_id in the bond's
 * slave list.  Returns 0 on success, -ENODEV if no slave exists at
 * that index.
 */
static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *info)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct list_head *iter;
	struct slave *slave;
	int idx = 0;

	bond_for_each_slave(bond, slave, iter) {
		if (idx == (int)info->slave_id) {
			bond_fill_ifslave(slave, info);
			return 0;
		}
		idx++;
	}

	return -ENODEV;
}
/*-------------------------------- Monitoring -------------------------------*/
/* bond_miimon_inspect - lockless inspection phase of the MII monitor.
 *
 * Walks all slaves and proposes link-state transitions based on the
 * carrier state from bond_check_dev_link(), honouring the configured
 * updelay/downdelay (counted in miimon ticks via slave->delay).
 *
 * Returns the number of slaves whose state needs committing; a nonzero
 * return tells the caller to take RTNL and run bond_miimon_commit().
 *
 * Called with rcu_read_lock().
 */
static int bond_miimon_inspect(struct bonding *bond)
{
	int link_state, commit = 0;
	struct list_head *iter;
	struct slave *slave;
	bool ignore_updelay;

	/* with no active slave, bring the first working link up immediately */
	ignore_updelay = !rcu_dereference(bond->curr_active_slave);

	bond_for_each_slave_rcu(bond, slave, iter) {
		slave->new_link = BOND_LINK_NOCHANGE;

		link_state = bond_check_dev_link(bond, slave->dev, 0);

		switch (slave->link) {
		case BOND_LINK_UP:
			if (link_state)
				continue;

			/* carrier lost: enter FAIL and start downdelay */
			bond_propose_link_state(slave, BOND_LINK_FAIL);
			slave->delay = bond->params.downdelay;
			if (slave->delay) {
				netdev_info(bond->dev, "link status down for %sinterface %s, disabling it in %d ms\n",
					    (BOND_MODE(bond) ==
					     BOND_MODE_ACTIVEBACKUP) ?
					    (bond_is_active_slave(slave) ?
					     "active " : "backup ") : "",
					    slave->dev->name,
					    bond->params.downdelay * bond->params.miimon);
			}
			/*FALLTHRU*/
		case BOND_LINK_FAIL:
			if (link_state) {
				/* recovered before downdelay expired */
				bond_propose_link_state(slave, BOND_LINK_UP);
				slave->last_link_up = jiffies;
				netdev_info(bond->dev, "link status up again after %d ms for interface %s\n",
					    (bond->params.downdelay - slave->delay) *
					    bond->params.miimon,
					    slave->dev->name);
				commit++;
				continue;
			}

			if (slave->delay <= 0) {
				/* downdelay expired: commit the DOWN */
				slave->new_link = BOND_LINK_DOWN;
				commit++;
				continue;
			}

			slave->delay--;
			break;

		case BOND_LINK_DOWN:
			if (!link_state)
				continue;

			/* carrier returned: enter BACK and start updelay */
			bond_propose_link_state(slave, BOND_LINK_BACK);
			slave->delay = bond->params.updelay;

			if (slave->delay) {
				netdev_info(bond->dev, "link status up for interface %s, enabling it in %d ms\n",
					    slave->dev->name,
					    ignore_updelay ? 0 :
					    bond->params.updelay *
					    bond->params.miimon);
			}
			/*FALLTHRU*/
		case BOND_LINK_BACK:
			if (!link_state) {
				/* carrier dropped again before updelay expired */
				bond_propose_link_state(slave, BOND_LINK_DOWN);
				netdev_info(bond->dev, "link status down again after %d ms for interface %s\n",
					    (bond->params.updelay - slave->delay) *
					    bond->params.miimon,
					    slave->dev->name);
				commit++;
				continue;
			}

			if (ignore_updelay)
				slave->delay = 0;

			if (slave->delay <= 0) {
				/* updelay expired: commit the UP */
				slave->new_link = BOND_LINK_UP;
				commit++;
				/* only skip updelay for the first recovered link */
				ignore_updelay = false;
				continue;
			}

			slave->delay--;
			break;
		}
	}

	return commit;
}
/* bond_miimon_commit - commit phase of the MII monitor.
 *
 * Applies the per-slave new_link decisions recorded by
 * bond_miimon_inspect(): updates slave link state, active/backup flags
 * per bond mode, notifies 802.3ad/ALB machinery, refreshes the xmit
 * slave array for XOR mode, and triggers failover when the active slave
 * changed.  Called under RTNL.
 */
static void bond_miimon_commit(struct bonding *bond)
{
	struct list_head *iter;
	struct slave *slave, *primary;

	bond_for_each_slave(bond, slave, iter) {
		switch (slave->new_link) {
		case BOND_LINK_NOCHANGE:
			continue;

		case BOND_LINK_UP:
			/* treat a slave whose speed/duplex can't be read
			 * as still down
			 */
			if (bond_update_speed_duplex(slave)) {
				slave->link = BOND_LINK_DOWN;
				netdev_warn(bond->dev,
					    "failed to get link speed/duplex for %s\n",
					    slave->dev->name);
				continue;
			}
			bond_set_slave_link_state(slave, BOND_LINK_UP,
						  BOND_SLAVE_NOTIFY_NOW);
			slave->last_link_up = jiffies;

			primary = rtnl_dereference(bond->primary_slave);
			if (BOND_MODE(bond) == BOND_MODE_8023AD) {
				/* prevent it from being the active one */
				bond_set_backup_slave(slave);
			} else if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
				/* make it immediately active */
				bond_set_active_slave(slave);
			} else if (slave != primary) {
				/* prevent it from being the active one */
				bond_set_backup_slave(slave);
			}

			netdev_info(bond->dev, "link status definitely up for interface %s, %u Mbps %s duplex\n",
				    slave->dev->name,
				    slave->speed == SPEED_UNKNOWN ? 0 : slave->speed,
				    slave->duplex ? "full" : "half");

			/* notify ad that the link status has changed */
			if (BOND_MODE(bond) == BOND_MODE_8023AD)
				bond_3ad_handle_link_change(slave, BOND_LINK_UP);

			if (bond_is_lb(bond))
				bond_alb_handle_link_change(bond, slave,
							    BOND_LINK_UP);

			if (BOND_MODE(bond) == BOND_MODE_XOR)
				bond_update_slave_arr(bond, NULL);

			if (!bond->curr_active_slave || slave == primary)
				goto do_failover;

			continue;

		case BOND_LINK_DOWN:
			if (slave->link_failure_count < UINT_MAX)
				slave->link_failure_count++;

			bond_set_slave_link_state(slave, BOND_LINK_DOWN,
						  BOND_SLAVE_NOTIFY_NOW);

			if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP ||
			    BOND_MODE(bond) == BOND_MODE_8023AD)
				bond_set_slave_inactive_flags(slave,
							      BOND_SLAVE_NOTIFY_NOW);

			netdev_info(bond->dev, "link status definitely down for interface %s, disabling it\n",
				    slave->dev->name);

			if (BOND_MODE(bond) == BOND_MODE_8023AD)
				bond_3ad_handle_link_change(slave,
							    BOND_LINK_DOWN);

			if (bond_is_lb(bond))
				bond_alb_handle_link_change(bond, slave,
							    BOND_LINK_DOWN);

			if (BOND_MODE(bond) == BOND_MODE_XOR)
				bond_update_slave_arr(bond, NULL);

			if (slave == rcu_access_pointer(bond->curr_active_slave))
				goto do_failover;

			continue;

		default:
			netdev_err(bond->dev, "invalid new link %d on slave %s\n",
				   slave->new_link, slave->dev->name);
			slave->new_link = BOND_LINK_NOCHANGE;

			continue;

do_failover:
			/* active slave changed: re-select under netpoll
			 * tx blocking
			 */
			block_netpoll_tx();
			bond_select_active_slave(bond);
			unblock_netpoll_tx();
		}
	}

	bond_set_carrier(bond);
}
/* bond_mii_monitor
 *
 * Really a wrapper that splits the mii monitor into two phases: an
 * inspection, then (if inspection indicates something needs to be done)
 * an acquisition of appropriate locks followed by a commit phase to
 * implement whatever link state changes are indicated.
 *
 * Delayed-work handler; re-arms itself every miimon interval while
 * miimon is enabled.
 */
static void bond_mii_monitor(struct work_struct *work)
{
	struct bonding *bond = container_of(work, struct bonding,
					    mii_work.work);
	bool should_notify_peers = false;
	unsigned long delay;
	struct slave *slave;
	struct list_head *iter;

	delay = msecs_to_jiffies(bond->params.miimon);

	if (!bond_has_slaves(bond))
		goto re_arm;

	rcu_read_lock();

	should_notify_peers = bond_should_notify_peers(bond);

	if (bond_miimon_inspect(bond)) {
		rcu_read_unlock();

		/* Race avoidance with bond_close cancel of workqueue */
		if (!rtnl_trylock()) {
			/* retry almost immediately rather than blocking */
			delay = 1;
			should_notify_peers = false;
			goto re_arm;
		}

		/* publish the link states proposed during inspection;
		 * notifications are deferred until bond_miimon_commit()
		 */
		bond_for_each_slave(bond, slave, iter) {
			bond_commit_link_state(slave, BOND_SLAVE_NOTIFY_LATER);
		}
		bond_miimon_commit(bond);

		rtnl_unlock();	/* might sleep, hold no other locks */
	} else
		rcu_read_unlock();

re_arm:
	if (bond->params.miimon)
		queue_delayed_work(bond->wq, &bond->mii_work, delay);

	if (should_notify_peers) {
		if (!rtnl_trylock())
			return;
		call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev);
		rtnl_unlock();
	}
}
/* Callback for netdev_walk_all_upper_dev_rcu(): stop the walk (return
 * nonzero) when @upper owns the IPv4 address passed through @data.
 */
static int bond_upper_dev_walk(struct net_device *upper, void *data)
{
	__be32 target = *(__be32 *)data;

	return target == bond_confirm_addr(upper, 0, target);
}
/* Return true when @ip belongs to the bond device itself or to any
 * device stacked above it (e.g. a VLAN on top of the bond).
 */
static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
{
	bool found = false;

	if (ip == bond_confirm_addr(bond->dev, 0, ip))
		return true;

	rcu_read_lock();
	found = netdev_walk_all_upper_dev_rcu(bond->dev, bond_upper_dev_walk,
					      &ip) != 0;
	rcu_read_unlock();

	return found;
}
/* We go to the (large) trouble of VLAN tagging ARP frames because
 * switches in VLAN mode (especially if ports are configured as
 * "native" to a VLAN) might not pass non-tagged frames.
 *
 * @tags is an array terminated by an entry with vlan_proto == VLAN_N_VID
 * (see bond_verify_device_path()); entry 0 is the outermost tag.  Inner
 * tags are inserted into the payload, the outer tag goes into the skb's
 * hwaccel tag.  Entries with vlan_id == 0 are skipped.
 */
static void bond_arp_send(struct net_device *slave_dev, int arp_op,
			  __be32 dest_ip, __be32 src_ip,
			  struct bond_vlan_tag *tags)
{
	struct sk_buff *skb;
	struct bond_vlan_tag *outer_tag = tags;

	netdev_dbg(slave_dev, "arp %d on slave %s: dst %pI4 src %pI4\n",
		   arp_op, slave_dev->name, &dest_ip, &src_ip);

	skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip,
			 NULL, slave_dev->dev_addr, NULL);

	if (!skb) {
		net_err_ratelimited("ARP packet allocation failed\n");
		return;
	}

	/* no tag array, or only the terminator: send untagged */
	if (!tags || tags->vlan_proto == VLAN_N_VID)
		goto xmit;

	tags++;

	/* Go through all the tags backwards and add them to the packet */
	while (tags->vlan_proto != VLAN_N_VID) {
		if (!tags->vlan_id) {
			tags++;
			continue;
		}

		/* NOTE(review): this debug line prints the *outer* tag's
		 * proto with the inner tag's vid - looks inconsistent but
		 * matches long-standing behavior; confirm before changing.
		 */
		netdev_dbg(slave_dev, "inner tag: proto %X vid %X\n",
			   ntohs(outer_tag->vlan_proto), tags->vlan_id);
		skb = vlan_insert_tag_set_proto(skb, tags->vlan_proto,
						tags->vlan_id);
		if (!skb) {
			/* vlan_insert_tag_set_proto() freed the skb on error */
			net_err_ratelimited("failed to insert inner VLAN tag\n");
			return;
		}

		tags++;
	}
	/* Set the outer tag */
	if (outer_tag->vlan_id) {
		netdev_dbg(slave_dev, "outer tag: proto %X vid %X\n",
			   ntohs(outer_tag->vlan_proto), outer_tag->vlan_id);
		__vlan_hwaccel_put_tag(skb, outer_tag->vlan_proto,
				       outer_tag->vlan_id);
	}

xmit:
	arp_xmit(skb);
}
/* Validate the device path between the @start_dev and the @end_dev.
 * The path is valid if the @end_dev is reachable through device
 * stacking.
 * When the path is validated, collect any vlan information in the
 * path.
 *
 * Recursive: @level is the current stacking depth (callers pass 0).
 * On success returns a kzalloc'd array of @level + 1 bond_vlan_tag
 * entries terminated by vlan_proto == VLAN_N_VID; the caller must
 * kfree() it.  Returns ERR_PTR(-ENOMEM) on allocation failure and
 * NULL when @end_dev is not reachable from @start_dev.
 * Must be called under rcu_read_lock() (walks upper devs via RCU,
 * allocates with GFP_ATOMIC).
 */
struct bond_vlan_tag *bond_verify_device_path(struct net_device *start_dev,
					      struct net_device *end_dev,
					      int level)
{
	struct bond_vlan_tag *tags;
	struct net_device *upper;
	struct list_head *iter;

	if (start_dev == end_dev) {
		/* base case: allocate the (zeroed) tag array; slot @level
		 * becomes the terminator
		 */
		tags = kzalloc(sizeof(*tags) * (level + 1), GFP_ATOMIC);
		if (!tags)
			return ERR_PTR(-ENOMEM);
		tags[level].vlan_proto = VLAN_N_VID;
		return tags;
	}

	netdev_for_each_upper_dev_rcu(start_dev, upper, iter) {
		tags = bond_verify_device_path(upper, end_dev, level + 1);
		if (IS_ERR_OR_NULL(tags)) {
			if (IS_ERR(tags))
				return tags;
			continue;
		}
		/* unwinding: record this hop's VLAN tag, if any */
		if (is_vlan_dev(upper)) {
			tags[level].vlan_proto = vlan_dev_vlan_proto(upper);
			tags[level].vlan_id = vlan_dev_vlan_id(upper);
		}
		return tags;
	}

	return NULL;
}
/* Send an ARP request through @slave to every configured arp_ip_target,
 * resolving the route to each target so that any VLAN tags on the path
 * from the bond device to the egress device are applied to the probe.
 */
static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
{
	struct rtable *rt;
	struct bond_vlan_tag *tags;
	__be32 *targets = bond->params.arp_targets, addr;
	int i;

	/* targets[] is NUL-terminated: stop at the first zero entry */
	for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) {
		netdev_dbg(bond->dev, "basa: target %pI4\n", &targets[i]);
		tags = NULL;

		/* Find out through which dev should the packet go */
		rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
				     RTO_ONLINK, 0);
		if (IS_ERR(rt)) {
			/* there's no route to target - try to send arp
			 * probe to generate any traffic (arp_validate=0)
			 */
			if (bond->params.arp_validate)
				net_warn_ratelimited("%s: no route to arp_ip_target %pI4 and arp_validate is set\n",
						     bond->dev->name,
						     &targets[i]);
			bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
				      0, tags);
			continue;
		}

		/* bond device itself */
		if (rt->dst.dev == bond->dev)
			goto found;

		rcu_read_lock();
		tags = bond_verify_device_path(bond->dev, rt->dst.dev, 0);
		rcu_read_unlock();

		if (!IS_ERR_OR_NULL(tags))
			goto found;

		/* Not our device - skip */
		netdev_dbg(bond->dev, "no path to arp_ip_target %pI4 via rt.dev %s\n",
			   &targets[i], rt->dst.dev ? rt->dst.dev->name : "NULL");

		ip_rt_put(rt);
		continue;

found:
		/* use the egress device's own address as ARP source */
		addr = bond_confirm_addr(rt->dst.dev, targets[i], 0);
		ip_rt_put(rt);
		bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
			      addr, tags);
		kfree(tags);
	}
}
/* Record a validated ARP reception on @slave: if @sip is one of our
 * configured arp_ip_targets and @tip belongs to this bond (or a device
 * stacked on it), refresh the slave's last_rx and the per-target
 * last-ARP timestamp used by the active-backup monitor.
 */
static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32 sip, __be32 tip)
{
	int i;

	if (!sip || !bond_has_this_ip(bond, tip)) {
		netdev_dbg(bond->dev, "bva: sip %pI4 tip %pI4 not found\n",
			   &sip, &tip);
		return;
	}

	/* only ARPs from configured targets count as proof of life */
	i = bond_get_targets_ip(bond->params.arp_targets, sip);
	if (i == -1) {
		netdev_dbg(bond->dev, "bva: sip %pI4 not found in targets\n",
			   &sip);
		return;
	}
	slave->last_rx = jiffies;
	slave->target_last_arp_rx[i] = jiffies;
}
/* bond_arp_rcv - rx_handler hook for ARP validation.
 *
 * Parses an incoming ARP frame on @slave and, per the arp_validate
 * policy and the trust rules documented below, updates the slave's
 * receive timestamps via bond_validate_arp().  Non-ARP traffic and
 * slaves without validation update last_rx directly.  Always returns
 * RX_HANDLER_ANOTHER so normal rx processing continues.
 */
int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
		 struct slave *slave)
{
	struct arphdr *arp = (struct arphdr *)skb->data;
	struct slave *curr_active_slave, *curr_arp_slave;
	unsigned char *arp_ptr;
	__be32 sip, tip;
	int alen, is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);

	if (!slave_do_arp_validate(bond, slave)) {
		/* validation disabled: any frame (or, with
		 * arp_validate_only, any ARP) refreshes last_rx
		 */
		if ((slave_do_arp_validate_only(bond) && is_arp) ||
		    !slave_do_arp_validate_only(bond))
			slave->last_rx = jiffies;
		return RX_HANDLER_ANOTHER;
	} else if (!is_arp) {
		return RX_HANDLER_ANOTHER;
	}

	alen = arp_hdr_len(bond->dev);

	netdev_dbg(bond->dev, "bond_arp_rcv: skb->dev %s\n",
		   skb->dev->name);

	/* linearize the ARP header into a temporary buffer if the skb's
	 * linear area is too short
	 */
	if (alen > skb_headlen(skb)) {
		arp = kmalloc(alen, GFP_ATOMIC);
		if (!arp)
			goto out_unlock;
		if (skb_copy_bits(skb, 0, arp, alen) < 0)
			goto out_unlock;
	}

	/* only validate well-formed IPv4-over-Ethernet ARP addressed to us */
	if (arp->ar_hln != bond->dev->addr_len ||
	    skb->pkt_type == PACKET_OTHERHOST ||
	    skb->pkt_type == PACKET_LOOPBACK ||
	    arp->ar_hrd != htons(ARPHRD_ETHER) ||
	    arp->ar_pro != htons(ETH_P_IP) ||
	    arp->ar_pln != 4)
		goto out_unlock;

	/* extract sender/target IPs from the variable-length ARP body */
	arp_ptr = (unsigned char *)(arp + 1);
	arp_ptr += bond->dev->addr_len;
	memcpy(&sip, arp_ptr, 4);
	arp_ptr += 4 + bond->dev->addr_len;
	memcpy(&tip, arp_ptr, 4);

	netdev_dbg(bond->dev, "bond_arp_rcv: %s/%d av %d sv %d sip %pI4 tip %pI4\n",
		   slave->dev->name, bond_slave_state(slave),
		     bond->params.arp_validate, slave_do_arp_validate(bond, slave),
		   &sip, &tip);

	curr_active_slave = rcu_dereference(bond->curr_active_slave);
	curr_arp_slave = rcu_dereference(bond->current_arp_slave);

	/* We 'trust' the received ARP enough to validate it if:
	 *
	 * (a) the slave receiving the ARP is active (which includes the
	 * current ARP slave, if any), or
	 *
	 * (b) the receiving slave isn't active, but there is a currently
	 * active slave and it received valid arp reply(s) after it became
	 * the currently active slave, or
	 *
	 * (c) there is an ARP slave that sent an ARP during the prior ARP
	 * interval, and we receive an ARP reply on any slave.  We accept
	 * these because switch FDB update delays may deliver the ARP
	 * reply to a slave other than the sender of the ARP request.
	 *
	 * Note: for (b), backup slaves are receiving the broadcast ARP
	 * request, not a reply.  This request passes from the sending
	 * slave through the L2 switch(es) to the receiving slave.  Since
	 * this is checking the request, sip/tip are swapped for
	 * validation.
	 *
	 * This is done to avoid endless looping when we can't reach the
	 * arp_ip_target and fool ourselves with our own arp requests.
	 */
	if (bond_is_active_slave(slave))
		bond_validate_arp(bond, slave, sip, tip);
	else if (curr_active_slave &&
		 time_after(slave_last_rx(bond, curr_active_slave),
			    curr_active_slave->last_link_up))
		bond_validate_arp(bond, slave, tip, sip);
	else if (curr_arp_slave && (arp->ar_op == htons(ARPOP_REPLY)) &&
		 bond_time_in_interval(bond,
				       dev_trans_start(curr_arp_slave->dev), 1))
		bond_validate_arp(bond, slave, sip, tip);

out_unlock:
	/* free the temporary copy, if one was allocated above; the label
	 * name is historical - no lock is held here
	 */
	if (arp != (struct arphdr *)skb->data)
		kfree(arp);
	return RX_HANDLER_ANOTHER;
}
/* Verify whether we are inside the arp_interval timeslice: returns true
 * when (last_act - arp_interval) <= jiffies <= (last_act +
 * mod * arp_interval + arp_interval/2).  The extra arp_interval/2 of
 * slack is needed for really fast networks.
 */
static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
				  int mod)
{
	int delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
	unsigned long earliest = last_act - delta_in_ticks;
	unsigned long latest = last_act + mod * delta_in_ticks +
			       delta_in_ticks / 2;

	return time_in_range(jiffies, earliest, latest);
}
/* This function is called regularly to monitor each slave's link
 * ensuring that traffic is being sent and received when arp monitoring
 * is used in load-balancing mode. if the adapter has been dormant, then an
 * arp is transmitted to generate traffic. see activebackup_arp_monitor for
 * arp monitoring in active backup mode.
 *
 * Two-phase: an RCU-protected inspection pass records per-slave
 * new_link decisions and sends ARP probes; if anything changed, the
 * changes are committed under RTNL (taken with trylock - on contention
 * the whole pass is simply retried at the next interval).
 */
static void bond_loadbalance_arp_mon(struct bonding *bond)
{
	struct slave *slave, *oldcurrent;
	struct list_head *iter;
	int do_failover = 0, slave_state_changed = 0;

	if (!bond_has_slaves(bond))
		goto re_arm;

	rcu_read_lock();

	oldcurrent = rcu_dereference(bond->curr_active_slave);
	/* see if any of the previous devices are up now (i.e. they have
	 * xmt and rcv traffic). the curr_active_slave does not come into
	 * the picture unless it is null. also, slave->last_link_up is not
	 * needed here because we send an arp on each slave and give a slave
	 * as long as it needs to get the tx/rx within the delta.
	 * TODO: what about up/down delay in arp mode? it wasn't here before
	 *       so it can wait
	 */
	bond_for_each_slave_rcu(bond, slave, iter) {
		unsigned long trans_start = dev_trans_start(slave->dev);

		slave->new_link = BOND_LINK_NOCHANGE;

		if (slave->link != BOND_LINK_UP) {
			/* down slave comes up when both tx and rx were seen
			 * within the last interval
			 */
			if (bond_time_in_interval(bond, trans_start, 1) &&
			    bond_time_in_interval(bond, slave->last_rx, 1)) {

				slave->new_link = BOND_LINK_UP;
				slave_state_changed = 1;

				/* primary_slave has no meaning in round-robin
				 * mode. the window of a slave being up and
				 * curr_active_slave being null after enslaving
				 * is closed.
				 */
				if (!oldcurrent) {
					netdev_info(bond->dev, "link status definitely up for interface %s\n",
						    slave->dev->name);
					do_failover = 1;
				} else {
					netdev_info(bond->dev, "interface %s is now up\n",
						    slave->dev->name);
				}
			}
		} else {
			/* slave->link == BOND_LINK_UP */

			/* not all switches will respond to an arp request
			 * when the source ip is 0, so don't take the link down
			 * if we don't know our ip yet
			 */
			if (!bond_time_in_interval(bond, trans_start, 2) ||
			    !bond_time_in_interval(bond, slave->last_rx, 2)) {

				slave->new_link = BOND_LINK_DOWN;
				slave_state_changed = 1;

				if (slave->link_failure_count < UINT_MAX)
					slave->link_failure_count++;

				netdev_info(bond->dev, "interface %s is now down\n",
					    slave->dev->name);

				if (slave == oldcurrent)
					do_failover = 1;
			}
		}

		/* note: if switch is in round-robin mode, all links
		 * must tx arp to ensure all links rx an arp - otherwise
		 * links may oscillate or not come up at all; if switch is
		 * in something like xor mode, there is nothing we can
		 * do - all replies will be rx'ed on same link causing slaves
		 * to be unstable during low/no traffic periods
		 */
		if (bond_slave_is_up(slave))
			bond_arp_send_all(bond, slave);
	}

	rcu_read_unlock();

	if (do_failover || slave_state_changed) {
		if (!rtnl_trylock())
			goto re_arm;

		/* commit the link states recorded during inspection */
		bond_for_each_slave(bond, slave, iter) {
			if (slave->new_link != BOND_LINK_NOCHANGE)
				slave->link = slave->new_link;
		}

		if (slave_state_changed) {
			bond_slave_state_change(bond);
			if (BOND_MODE(bond) == BOND_MODE_XOR)
				bond_update_slave_arr(bond, NULL);
		}
		if (do_failover) {
			block_netpoll_tx();
			bond_select_active_slave(bond);
			unblock_netpoll_tx();
		}
		rtnl_unlock();
	}

re_arm:
	if (bond->params.arp_interval)
		queue_delayed_work(bond->wq, &bond->arp_work,
				   msecs_to_jiffies(bond->params.arp_interval));
}
/* Called to inspect slaves for active-backup mode ARP monitor link state
 * changes.  Sets new_link in slaves to specify what action should take
 * place for the slave.  Returns 0 if no changes are found, >0 if changes
 * to link states must be committed.
 *
 * Called with rcu_read_lock held.
 */
static int bond_ab_arp_inspect(struct bonding *bond)
{
	unsigned long trans_start, last_rx;
	struct list_head *iter;
	struct slave *slave;
	int commit = 0;

	bond_for_each_slave_rcu(bond, slave, iter) {
		slave->new_link = BOND_LINK_NOCHANGE;
		last_rx = slave_last_rx(bond, slave);

		if (slave->link != BOND_LINK_UP) {
			/* a down slave comes back up as soon as it received
			 * anything within the last interval
			 */
			if (bond_time_in_interval(bond, last_rx, 1)) {
				slave->new_link = BOND_LINK_UP;
				commit++;
			}
			continue;
		}

		/* Give slaves 2*delta after being enslaved or made
		 * active.  This avoids bouncing, as the last receive
		 * times need a full ARP monitor cycle to be updated.
		 */
		if (bond_time_in_interval(bond, slave->last_link_up, 2))
			continue;

		/* Backup slave is down if:
		 * - No current_arp_slave AND
		 * - more than 3*delta since last receive AND
		 * - the bond has an IP address
		 *
		 * Note: a non-null current_arp_slave indicates
		 * the curr_active_slave went down and we are
		 * searching for a new one; under this condition
		 * we only take the curr_active_slave down - this
		 * gives each slave a chance to tx/rx traffic
		 * before being taken out
		 */
		if (!bond_is_active_slave(slave) &&
		    !rcu_access_pointer(bond->current_arp_slave) &&
		    !bond_time_in_interval(bond, last_rx, 3)) {
			slave->new_link = BOND_LINK_DOWN;
			commit++;
		}

		/* Active slave is down if:
		 * - more than 2*delta since transmitting OR
		 * - (more than 2*delta since receive AND
		 *    the bond has an IP address)
		 */
		trans_start = dev_trans_start(slave->dev);
		if (bond_is_active_slave(slave) &&
		    (!bond_time_in_interval(bond, trans_start, 2) ||
		     !bond_time_in_interval(bond, last_rx, 2))) {
			slave->new_link = BOND_LINK_DOWN;
			commit++;
		}
	}

	return commit;
}
/* Called to commit link state changes noted by inspection step of
 * active-backup mode ARP monitor.
 *
 * Applies each slave's new_link decision: promotes recovered slaves,
 * demotes failed ones, clears current_arp_slave when appropriate and
 * re-selects the active slave on failover.
 *
 * Called with RTNL held.
 */
static void bond_ab_arp_commit(struct bonding *bond)
{
	unsigned long trans_start;
	struct list_head *iter;
	struct slave *slave;

	bond_for_each_slave(bond, slave, iter) {
		switch (slave->new_link) {
		case BOND_LINK_NOCHANGE:
			continue;

		case BOND_LINK_UP:
			trans_start = dev_trans_start(slave->dev);
			/* promote when this slave is not the active one, or
			 * when there is no active slave and this one
			 * transmitted recently
			 */
			if (rtnl_dereference(bond->curr_active_slave) != slave ||
			    (!rtnl_dereference(bond->curr_active_slave) &&
			     bond_time_in_interval(bond, trans_start, 1))) {
				struct slave *current_arp_slave;

				current_arp_slave = rtnl_dereference(bond->current_arp_slave);
				bond_set_slave_link_state(slave, BOND_LINK_UP,
							  BOND_SLAVE_NOTIFY_NOW);
				/* the probing slave loses its role now that a
				 * real link came back
				 */
				if (current_arp_slave) {
					bond_set_slave_inactive_flags(
						current_arp_slave,
						BOND_SLAVE_NOTIFY_NOW);
					RCU_INIT_POINTER(bond->current_arp_slave, NULL);
				}

				netdev_info(bond->dev, "link status definitely up for interface %s\n",
					    slave->dev->name);

				if (!rtnl_dereference(bond->curr_active_slave) ||
				    slave == rtnl_dereference(bond->primary_slave))
					goto do_failover;

			}

			continue;

		case BOND_LINK_DOWN:
			if (slave->link_failure_count < UINT_MAX)
				slave->link_failure_count++;

			bond_set_slave_link_state(slave, BOND_LINK_DOWN,
						  BOND_SLAVE_NOTIFY_NOW);
			bond_set_slave_inactive_flags(slave,
						      BOND_SLAVE_NOTIFY_NOW);

			netdev_info(bond->dev, "link status definitely down for interface %s, disabling it\n",
				    slave->dev->name);

			if (slave == rtnl_dereference(bond->curr_active_slave)) {
				RCU_INIT_POINTER(bond->current_arp_slave, NULL);
				goto do_failover;
			}

			continue;

		default:
			netdev_err(bond->dev, "impossible: new_link %d on slave %s\n",
				   slave->new_link, slave->dev->name);
			continue;
		}

do_failover:
		block_netpoll_tx();
		bond_select_active_slave(bond);
		unblock_netpoll_tx();
	}

	bond_set_carrier(bond);
}
/* Send ARP probes for active-backup mode ARP monitor.
 *
 * If there is an active slave, probe through it.  Otherwise rotate
 * current_arp_slave to the next up slave (after the current one in the
 * slave list, wrapping to the first up slave found before it) so each
 * backup gets a chance to generate traffic.
 *
 * Returns a BOND_SLAVE_NOTIFY_* value: NOW when some slave has a
 * pending state/link notification, so the caller must notify under RTNL.
 *
 * Called with rcu_read_lock held.
 */
static bool bond_ab_arp_probe(struct bonding *bond)
{
	struct slave *slave, *before = NULL, *new_slave = NULL,
		     *curr_arp_slave = rcu_dereference(bond->current_arp_slave),
		     *curr_active_slave = rcu_dereference(bond->curr_active_slave);
	struct list_head *iter;
	bool found = false;
	bool should_notify_rtnl = BOND_SLAVE_NOTIFY_LATER;

	/* both set at once should not happen - flag it */
	if (curr_arp_slave && curr_active_slave)
		netdev_info(bond->dev, "PROBE: c_arp %s && cas %s BAD\n",
			    curr_arp_slave->dev->name,
			    curr_active_slave->dev->name);

	if (curr_active_slave) {
		bond_arp_send_all(bond, curr_active_slave);
		return should_notify_rtnl;
	}

	/* if we don't have a curr_active_slave, search for the next available
	 * backup slave from the current_arp_slave and make it the candidate
	 * for becoming the curr_active_slave
	 */

	if (!curr_arp_slave) {
		curr_arp_slave = bond_first_slave_rcu(bond);
		if (!curr_arp_slave)
			return should_notify_rtnl;
	}

	bond_set_slave_inactive_flags(curr_arp_slave, BOND_SLAVE_NOTIFY_LATER);

	bond_for_each_slave_rcu(bond, slave, iter) {
		/* remember the first up slave seen before curr_arp_slave
		 * (wrap-around candidate) and the first one seen after it
		 */
		if (!found && !before && bond_slave_is_up(slave))
			before = slave;

		if (found && !new_slave && bond_slave_is_up(slave))
			new_slave = slave;
		/* if the link state is up at this point, we
		 * mark it down - this can happen if we have
		 * simultaneous link failures and
		 * reselect_active_interface doesn't make this
		 * one the current slave so it is still marked
		 * up when it is actually down
		 */
		if (!bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) {
			bond_set_slave_link_state(slave, BOND_LINK_DOWN,
						  BOND_SLAVE_NOTIFY_LATER);
			if (slave->link_failure_count < UINT_MAX)
				slave->link_failure_count++;

			bond_set_slave_inactive_flags(slave,
						      BOND_SLAVE_NOTIFY_LATER);

			netdev_info(bond->dev, "backup interface %s is now down\n",
				    slave->dev->name);
		}
		if (slave == curr_arp_slave)
			found = true;
	}

	if (!new_slave && before)
		new_slave = before;

	if (!new_slave)
		goto check_state;

	/* make the candidate the new probing slave and probe through it */
	bond_set_slave_link_state(new_slave, BOND_LINK_BACK,
				  BOND_SLAVE_NOTIFY_LATER);
	bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER);
	bond_arp_send_all(bond, new_slave);
	new_slave->last_link_up = jiffies;
	rcu_assign_pointer(bond->current_arp_slave, new_slave);

check_state:
	bond_for_each_slave_rcu(bond, slave, iter) {
		if (slave->should_notify || slave->should_notify_link) {
			should_notify_rtnl = BOND_SLAVE_NOTIFY_NOW;
			break;
		}
	}
	return should_notify_rtnl;
}
/* Active-backup mode ARP monitor work function.  Runs the
 * inspect -> commit -> probe cycle and re-arms itself as long as
 * arp_interval is configured.  The sleepable commit step needs RTNL,
 * taken with rtnl_trylock() to avoid deadlocking against bond_close()'s
 * flush of the workqueue; inspect and probe run under RCU only.
 */
static void bond_activebackup_arp_mon(struct bonding *bond)
{
	bool should_notify_peers = false;
	bool should_notify_rtnl = false;
	int delta_in_ticks;

	delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);

	if (!bond_has_slaves(bond))
		goto re_arm;

	rcu_read_lock();

	should_notify_peers = bond_should_notify_peers(bond);

	if (bond_ab_arp_inspect(bond)) {
		rcu_read_unlock();

		/* Race avoidance with bond_close flush of workqueue */
		if (!rtnl_trylock()) {
			/* RTNL busy: retry on the next tick and skip the
			 * peer notification for this round.
			 */
			delta_in_ticks = 1;
			should_notify_peers = false;
			goto re_arm;
		}

		bond_ab_arp_commit(bond);

		rtnl_unlock();
		rcu_read_lock();
	}

	should_notify_rtnl = bond_ab_arp_probe(bond);
	rcu_read_unlock();

re_arm:
	if (bond->params.arp_interval)
		queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);

	/* deliver any deferred notifications, again only if RTNL is free */
	if (should_notify_peers || should_notify_rtnl) {
		if (!rtnl_trylock())
			return;

		if (should_notify_peers)
			call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
						 bond->dev);
		if (should_notify_rtnl) {
			bond_slave_state_notify(bond);
			bond_slave_link_notify(bond);
		}

		rtnl_unlock();
	}
}
/* Delayed-work entry point for the ARP monitor: dispatch to the
 * implementation matching the current bonding mode.
 */
static void bond_arp_monitor(struct work_struct *work)
{
	struct bonding *bond = container_of(work, struct bonding,
					    arp_work.work);

	if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)
		bond_loadbalance_arp_mon(bond);
	else
		bond_activebackup_arp_mon(bond);
}
/*-------------------------- netdev event handling --------------------------*/
/* Change device name */
static int bond_event_changename(struct bonding *bond)
{
bond_remove_proc_entry(bond);
bond_create_proc_entry(bond);
bond_debug_reregister(bond);
return NOTIFY_DONE;
}
/* Handle netdev notifier chain events addressed to a bond master device. */
static int bond_master_netdev_event(unsigned long event,
				    struct net_device *bond_dev)
{
	struct bonding *event_bond = netdev_priv(bond_dev);

	switch (event) {
	case NETDEV_CHANGENAME:
		return bond_event_changename(event_bond);
	case NETDEV_UNREGISTER:
		bond_remove_proc_entry(event_bond);
		break;
	case NETDEV_REGISTER:
		bond_create_proc_entry(event_bond);
		break;
	case NETDEV_NOTIFY_PEERS:
		/* consume one pending peer-notification credit */
		if (event_bond->send_peer_notif)
			event_bond->send_peer_notif--;
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}
/* Handle netdev notifier chain events for a device enslaved to a bond:
 * release on unregister, refresh speed/duplex and the tx slave array on
 * link changes, and track primary-slave renames.
 */
static int bond_slave_netdev_event(unsigned long event,
				   struct net_device *slave_dev)
{
	struct slave *slave = bond_slave_get_rtnl(slave_dev), *primary;
	struct bonding *bond;
	struct net_device *bond_dev;

	/* A netdev event can be generated while enslaving a device
	 * before netdev_rx_handler_register is called in which case
	 * slave will be NULL
	 */
	if (!slave)
		return NOTIFY_DONE;

	bond_dev = slave->bond->dev;
	bond = slave->bond;
	primary = rtnl_dereference(bond->primary_slave);

	switch (event) {
	case NETDEV_UNREGISTER:
		if (bond_dev->type != ARPHRD_ETHER)
			bond_release_and_destroy(bond_dev, slave_dev);
		else
			__bond_release_one(bond_dev, slave_dev, false, true);
		break;
	case NETDEV_UP:
	case NETDEV_CHANGE:
		bond_update_speed_duplex(slave);
		if (BOND_MODE(bond) == BOND_MODE_8023AD)
			bond_3ad_adapter_speed_duplex_changed(slave);
		/* Fallthrough */
	case NETDEV_DOWN:
		/* Refresh slave-array if applicable!
		 * If the setup does not use miimon or arpmon (mode-specific!),
		 * then these events will not cause the slave-array to be
		 * refreshed. This will cause xmit to use a slave that is not
		 * usable. Avoid such situation by refreshing the array at these
		 * events. If these (miimon/arpmon) parameters are configured
		 * then array gets refreshed twice and that should be fine!
		 */
		if (bond_mode_uses_xmit_hash(bond))
			bond_update_slave_arr(bond, NULL);
		break;
	case NETDEV_CHANGEMTU:
		/* TODO: Should slaves be allowed to
		 * independently alter their MTU?  For
		 * an active-backup bond, slaves need
		 * not be the same type of device, so
		 * MTUs may vary.  For other modes,
		 * slaves arguably should have the
		 * same MTUs. To do this, we'd need to
		 * take over the slave's change_mtu
		 * function for the duration of their
		 * servitude.
		 */
		break;
	case NETDEV_CHANGENAME:
		/* we don't care if we don't have primary set */
		if (!bond_uses_primary(bond) ||
		    !bond->params.primary[0])
			break;

		if (slave == primary) {
			/* slave's name changed - he's no longer primary */
			RCU_INIT_POINTER(bond->primary_slave, NULL);
		} else if (!strcmp(slave_dev->name, bond->params.primary)) {
			/* we have a new primary slave */
			rcu_assign_pointer(bond->primary_slave, slave);
		} else { /* we didn't change primary - exit */
			break;
		}

		netdev_info(bond->dev, "Primary slave changed to %s, reselecting active slave\n",
			    primary ? slave_dev->name : "none");

		block_netpoll_tx();
		bond_select_active_slave(bond);
		unblock_netpoll_tx();
		break;
	case NETDEV_FEAT_CHANGE:
		bond_compute_features(bond);
		break;
	case NETDEV_RESEND_IGMP:
		/* Propagate to master device */
		call_netdevice_notifiers(event, slave->bond->dev);
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}
/* bond_netdev_event: handle netdev notifier chain events.
 *
 * This function receives events for the netdev chain. The caller (an
 * ioctl handler calling blocking_notifier_call_chain) holds the necessary
 * locks for us to safely manipulate the slave devices (RTNL lock,
 * dev_probe_lock).
 *
 * Only devices carrying IFF_BONDING are of interest; they are dispatched
 * to the master or slave handler based on IFF_MASTER/IFF_SLAVE.
 */
static int bond_netdev_event(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);

	netdev_dbg(event_dev, "event: %lx\n", event);

	if (!(event_dev->priv_flags & IFF_BONDING))
		return NOTIFY_DONE;

	if (event_dev->flags & IFF_MASTER) {
		netdev_dbg(event_dev, "IFF_MASTER\n");
		return bond_master_netdev_event(event, event_dev);
	}

	if (event_dev->flags & IFF_SLAVE) {
		netdev_dbg(event_dev, "IFF_SLAVE\n");
		return bond_slave_netdev_event(event, event_dev);
	}

	return NOTIFY_DONE;
}
/* Notifier block hooked into the netdev chain; dispatches through
 * bond_netdev_event() above.
 */
static struct notifier_block bond_netdev_notifier = {
	.notifier_call = bond_netdev_event,
};
/*---------------------------- Hashing Policies -----------------------------*/
/* L2 hash helper */
static inline u32 bond_eth_hash(struct sk_buff *skb)
{
struct ethhdr *ep, hdr_tmp;
ep = skb_header_pointer(skb, 0, sizeof(hdr_tmp), &hdr_tmp);
if (ep)
return ep->h_dest[5] ^ ep->h_source[5] ^ ep->h_proto;
return 0;
}
/* Extract the appropriate headers based on bond's xmit policy.
 *
 * Policies ordered above BOND_XMIT_POLICY_LAYER23 in the enum
 * (presumably the encap policies -- confirm against the enum definition)
 * use the full kernel flow dissector; the remaining policies copy the
 * IPv4/IPv6 addresses manually and, for BOND_XMIT_POLICY_LAYER34, also
 * the L4 ports of non-fragmented packets.
 *
 * Returns true if @fk was populated, false for unhandled protocols or
 * truncated headers.
 */
static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
			      struct flow_keys *fk)
{
	const struct ipv6hdr *iph6;
	const struct iphdr *iph;
	int noff, proto = -1;

	if (bond->params.xmit_policy > BOND_XMIT_POLICY_LAYER23)
		return skb_flow_dissect_flow_keys(skb, fk, 0);

	fk->ports.ports = 0;
	noff = skb_network_offset(skb);
	if (skb->protocol == htons(ETH_P_IP)) {
		if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph))))
			return false;
		iph = ip_hdr(skb);
		iph_to_flow_copy_v4addrs(fk, iph);
		noff += iph->ihl << 2;
		/* fragments carry no usable L4 header */
		if (!ip_is_fragment(iph))
			proto = iph->protocol;
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph6))))
			return false;
		iph6 = ipv6_hdr(skb);
		iph_to_flow_copy_v6addrs(fk, iph6);
		noff += sizeof(*iph6);
		proto = iph6->nexthdr;
	} else {
		return false;
	}
	if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34 && proto >= 0)
		fk->ports.ports = skb_flow_get_ports(skb, noff, proto);

	return true;
}
/**
 * bond_xmit_hash - generate a hash value based on the xmit policy
 * @bond: bonding device
 * @skb: buffer to use for headers
 *
 * This function will extract the necessary headers from the skb buffer and use
 * them to generate a hash based on the xmit_policy set in the bonding device.
 * Falls back to the L2 hash when flow dissection fails or for the plain
 * layer-2 policy.
 */
u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
{
	struct flow_keys flow;
	u32 hash;

	/* for encap 3+4, reuse the precomputed skb L4 hash when present */
	if (bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP34 &&
	    skb->l4_hash)
		return skb->hash;

	if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 ||
	    !bond_flow_dissect(bond, skb, &flow))
		return bond_eth_hash(skb);

	/* 2+3 policies seed with the L2 hash, 3+4 policies with the ports */
	if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23 ||
	    bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23)
		hash = bond_eth_hash(skb);
	else
		hash = (__force u32)flow.ports.ports;
	hash ^= (__force u32)flow_get_u32_dst(&flow) ^
		(__force u32)flow_get_u32_src(&flow);
	/* fold the upper bytes down to spread entropy into the low bits */
	hash ^= (hash >> 16);
	hash ^= (hash >> 8);

	return hash;
}
/*-------------------------- Device entry points ----------------------------*/
/* Initialize all of the bond's delayed-work items; called once at
 * device setup, before any of them can be queued.
 */
void bond_work_init_all(struct bonding *bond)
{
	INIT_DELAYED_WORK(&bond->mcast_work,
			  bond_resend_igmp_join_requests_delayed);
	INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor);
	INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor);
	INIT_DELAYED_WORK(&bond->arp_work, bond_arp_monitor);
	INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler);
	INIT_DELAYED_WORK(&bond->slave_arr_work, bond_slave_arr_handler);
}
/* Cancel all of the bond's delayed work, waiting for any in-flight
 * execution to finish (used on close/teardown).
 */
static void bond_work_cancel_all(struct bonding *bond)
{
	cancel_delayed_work_sync(&bond->mii_work);
	cancel_delayed_work_sync(&bond->arp_work);
	cancel_delayed_work_sync(&bond->alb_work);
	cancel_delayed_work_sync(&bond->ad_work);
	cancel_delayed_work_sync(&bond->mcast_work);
	cancel_delayed_work_sync(&bond->slave_arr_work);
}
/* ndo_open: reset slave active/backup flags, start the mode-appropriate
 * monitors and register the receive probe handler.
 */
static int bond_open(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct list_head *iter;
	struct slave *slave;

	/* reset slave->backup and slave->inactive */
	if (bond_has_slaves(bond)) {
		bond_for_each_slave(bond, slave, iter) {
			if (bond_uses_primary(bond) &&
			    slave != rcu_access_pointer(bond->curr_active_slave)) {
				bond_set_slave_inactive_flags(slave,
							      BOND_SLAVE_NOTIFY_NOW);
			} else if (BOND_MODE(bond) != BOND_MODE_8023AD) {
				bond_set_slave_active_flags(slave,
							    BOND_SLAVE_NOTIFY_NOW);
			}
		}
	}

	if (bond_is_lb(bond)) {
		/* bond_alb_initialize must be called before the timer
		 * is started.
		 */
		if (bond_alb_initialize(bond, (BOND_MODE(bond) == BOND_MODE_ALB)))
			return -ENOMEM;
		if (bond->params.tlb_dynamic_lb)
			queue_delayed_work(bond->wq, &bond->alb_work, 0);
	}

	if (bond->params.miimon)  /* link check interval, in milliseconds. */
		queue_delayed_work(bond->wq, &bond->mii_work, 0);

	if (bond->params.arp_interval) {  /* arp interval, in milliseconds. */
		queue_delayed_work(bond->wq, &bond->arp_work, 0);
		bond->recv_probe = bond_arp_rcv;
	}

	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
		queue_delayed_work(bond->wq, &bond->ad_work, 0);
		/* register to receive LACPDUs */
		bond->recv_probe = bond_3ad_lacpdu_recv;
		bond_3ad_initiate_agg_selection(bond, 1);
	}

	/* hash-based modes keep a precomputed tx slave array */
	if (bond_mode_uses_xmit_hash(bond))
		bond_update_slave_arr(bond, NULL);

	return 0;
}
/* ndo_stop: stop all monitors, tear down ALB state and drop the
 * receive probe handler.
 */
static int bond_close(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);

	bond_work_cancel_all(bond);
	bond->send_peer_notif = 0;
	if (bond_is_lb(bond))
		bond_alb_deinitialize(bond);
	bond->recv_probe = NULL;

	return 0;
}
/* fold stats, assuming all rtnl_link_stats64 fields are u64, but
* that some drivers can provide 32bit values only.
*/
static void bond_fold_stats(struct rtnl_link_stats64 *_res,
const struct rtnl_link_stats64 *_new,
const struct rtnl_link_stats64 *_old)
{
const u64 *new = (const u64 *)_new;
const u64 *old = (const u64 *)_old;
u64 *res = (u64 *)_res;
int i;
for (i = 0; i < sizeof(*_res) / sizeof(u64); i++) {
u64 nv = new[i];
u64 ov = old[i];
s64 delta = nv - ov;
/* detects if this particular field is 32bit only */
if (((nv | ov) >> 32) == 0)
delta = (s64)(s32)((u32)nv - (u32)ov);
/* filter anomalies, some drivers reset their stats
* at down/up events.
*/
if (delta > 0)
res[i] += delta;
}
}
/* ndo_get_stats64: fold every slave's counter deltas into the bond's
 * accumulated stats.  stats_lock serializes concurrent readers; the
 * slave list is walked under RCU.
 */
static void bond_get_stats(struct net_device *bond_dev,
			   struct rtnl_link_stats64 *stats)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct rtnl_link_stats64 temp;
	struct list_head *iter;
	struct slave *slave;

	spin_lock(&bond->stats_lock);
	memcpy(stats, &bond->bond_stats, sizeof(*stats));

	rcu_read_lock();
	bond_for_each_slave_rcu(bond, slave, iter) {
		const struct rtnl_link_stats64 *new =
			dev_get_stats(slave->dev, &temp);

		bond_fold_stats(stats, new, &slave->slave_stats);

		/* save off the slave stats for the next run */
		memcpy(&slave->slave_stats, new, sizeof(*new));
	}
	rcu_read_unlock();

	memcpy(&bond->bond_stats, stats, sizeof(*stats));
	spin_unlock(&bond->stats_lock);
}
/* ndo_do_ioctl: service the bonding-specific and MII ioctls.  The MII
 * and info-query commands are handled for any caller; the enslave/
 * release/sethwaddr/change-active commands require CAP_NET_ADMIN and a
 * resolvable slave device name.
 */
static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct net_device *slave_dev = NULL;
	struct ifbond k_binfo;
	struct ifbond __user *u_binfo = NULL;
	struct ifslave k_sinfo;
	struct ifslave __user *u_sinfo = NULL;
	struct mii_ioctl_data *mii = NULL;
	struct bond_opt_value newval;
	struct net *net;
	int res = 0;

	netdev_dbg(bond_dev, "bond_ioctl: cmd=%d\n", cmd);

	switch (cmd) {
	case SIOCGMIIPHY:
		mii = if_mii(ifr);
		if (!mii)
			return -EINVAL;

		mii->phy_id = 0;
		/* Fall Through */
	case SIOCGMIIREG:
		/* We do this again just in case we were called by SIOCGMIIREG
		 * instead of SIOCGMIIPHY.
		 */
		mii = if_mii(ifr);
		if (!mii)
			return -EINVAL;

		/* report link status in the BMSR register image */
		if (mii->reg_num == 1) {
			mii->val_out = 0;
			if (netif_carrier_ok(bond->dev))
				mii->val_out = BMSR_LSTATUS;
		}

		return 0;
	case BOND_INFO_QUERY_OLD:
	case SIOCBONDINFOQUERY:
		u_binfo = (struct ifbond __user *)ifr->ifr_data;

		if (copy_from_user(&k_binfo, u_binfo, sizeof(ifbond)))
			return -EFAULT;

		bond_info_query(bond_dev, &k_binfo);
		if (copy_to_user(u_binfo, &k_binfo, sizeof(ifbond)))
			return -EFAULT;

		return 0;
	case BOND_SLAVE_INFO_QUERY_OLD:
	case SIOCBONDSLAVEINFOQUERY:
		u_sinfo = (struct ifslave __user *)ifr->ifr_data;

		if (copy_from_user(&k_sinfo, u_sinfo, sizeof(ifslave)))
			return -EFAULT;

		res = bond_slave_info_query(bond_dev, &k_sinfo);
		if (res == 0 &&
		    copy_to_user(u_sinfo, &k_sinfo, sizeof(ifslave)))
			return -EFAULT;

		return res;
	default:
		break;
	}

	/* everything below mutates the bond: require admin capability */
	net = dev_net(bond_dev);

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	slave_dev = __dev_get_by_name(net, ifr->ifr_slave);

	netdev_dbg(bond_dev, "slave_dev=%p:\n", slave_dev);

	if (!slave_dev)
		return -ENODEV;

	netdev_dbg(bond_dev, "slave_dev->name=%s:\n", slave_dev->name);
	switch (cmd) {
	case BOND_ENSLAVE_OLD:
	case SIOCBONDENSLAVE:
		res = bond_enslave(bond_dev, slave_dev);
		break;
	case BOND_RELEASE_OLD:
	case SIOCBONDRELEASE:
		res = bond_release(bond_dev, slave_dev);
		break;
	case BOND_SETHWADDR_OLD:
	case SIOCBONDSETHWADDR:
		bond_set_dev_addr(bond_dev, slave_dev);
		res = 0;
		break;
	case BOND_CHANGE_ACTIVE_OLD:
	case SIOCBONDCHANGEACTIVE:
		bond_opt_initstr(&newval, slave_dev->name);
		res = __bond_opt_set_notify(bond, BOND_OPT_ACTIVE_SLAVE,
					    &newval);
		break;
	default:
		res = -EOPNOTSUPP;
	}

	return res;
}
/* ndo_change_rx_flags: propagate promiscuity/allmulti reference-count
 * changes on the bond to its slaves (+1 when the flag was set, -1 when
 * it was cleared).
 */
static void bond_change_rx_flags(struct net_device *bond_dev, int change)
{
	struct bonding *bond = netdev_priv(bond_dev);

	if (change & IFF_PROMISC) {
		int inc = (bond_dev->flags & IFF_PROMISC) ? 1 : -1;

		bond_set_promiscuity(bond, inc);
	}

	if (change & IFF_ALLMULTI) {
		int inc = (bond_dev->flags & IFF_ALLMULTI) ? 1 : -1;

		bond_set_allmulti(bond, inc);
	}
}
/* ndo_set_rx_mode: push the bond's unicast/multicast address lists to
 * its slaves -- only to the active slave in primary-using modes, to all
 * slaves otherwise.
 */
static void bond_set_rx_mode(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct list_head *iter;
	struct slave *slave;

	rcu_read_lock();
	if (bond_uses_primary(bond)) {
		slave = rcu_dereference(bond->curr_active_slave);
		if (slave) {
			dev_uc_sync(slave->dev, bond_dev);
			dev_mc_sync(slave->dev, bond_dev);
		}
	} else {
		bond_for_each_slave_rcu(bond, slave, iter) {
			dev_uc_sync_multiple(slave->dev, bond_dev);
			dev_mc_sync_multiple(slave->dev, bond_dev);
		}
	}
	rcu_read_unlock();
}
/* Per-neighbour setup for the bond: delegate to the first slave's
 * ndo_neigh_setup so slave-specific neigh parameters (e.g. ipoib's)
 * take effect on neighbours created over the bond.
 */
static int bond_neigh_init(struct neighbour *n)
{
	struct bonding *bond = netdev_priv(n->dev);
	const struct net_device_ops *slave_ops;
	struct neigh_parms parms;
	struct slave *slave;
	int ret;

	/* nothing to delegate without a slave or a slave hook */
	slave = bond_first_slave(bond);
	if (!slave)
		return 0;
	slave_ops = slave->dev->netdev_ops;
	if (!slave_ops->ndo_neigh_setup)
		return 0;

	/* TODO: find another way [1] to implement this.
	 * Passing a zeroed structure is fragile,
	 * but at least we do not pass garbage.
	 *
	 * [1] One way would be that ndo_neigh_setup() never touch
	 *     struct neigh_parms, but propagate the new neigh_setup()
	 *     back to ___neigh_create() / neigh_parms_alloc()
	 */
	parms.neigh_setup = NULL;
	parms.neigh_cleanup = NULL;
	ret = slave_ops->ndo_neigh_setup(slave->dev, &parms);
	if (ret)
		return ret;

	/* Assign slave's neigh_cleanup to neighbour in case cleanup is called
	 * after the last slave has been detached.  Assumes that all slaves
	 * utilize the same neigh_cleanup (true at this writing as only user
	 * is ipoib).
	 */
	n->parms->neigh_cleanup = parms.neigh_cleanup;

	if (!parms.neigh_setup)
		return 0;

	return parms.neigh_setup(n);
}
/* The bonding ndo_neigh_setup is called at init time before any
 * slave exists. So we must declare proxy setup function which will
 * be used at run time to resolve the actual slave neigh param setup.
 *
 * It's also called by master devices (such as vlans) to setup their
 * underlying devices. In that case - do nothing, we're already set up from
 * our init.
 */
static int bond_neigh_setup(struct net_device *dev,
			    struct neigh_parms *parms)
{
	/* only install the proxy on our own neigh_parms; leave parms
	 * belonging to stacked master devices untouched
	 */
	if (parms->dev != dev)
		return 0;

	parms->neigh_setup = bond_neigh_init;
	return 0;
}
/* Change the MTU of all of a master's slaves to match the master.
 * On any slave failure the already-changed slaves are rolled back to
 * the bond's previous MTU and the error is returned.
 */
static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct slave *slave, *rollback_slave;
	struct list_head *iter;
	int res = 0;

	netdev_dbg(bond_dev, "bond=%p, new_mtu=%d\n", bond, new_mtu);

	bond_for_each_slave(bond, slave, iter) {
		netdev_dbg(bond_dev, "s %p c_m %p\n",
			   slave, slave->dev->netdev_ops->ndo_change_mtu);

		res = dev_set_mtu(slave->dev, new_mtu);

		if (res) {
			/* If we failed to set the slave's mtu to the new value
			 * we must abort the operation even in ACTIVE_BACKUP
			 * mode, because if we allow the backup slaves to have
			 * different mtu values than the active slave we'll
			 * need to change their mtu when doing a failover. That
			 * means changing their mtu from timer context, which
			 * is probably not a good idea.
			 */
			netdev_dbg(bond_dev, "err %d %s\n", res,
				   slave->dev->name);
			goto unwind;
		}
	}

	bond_dev->mtu = new_mtu;

	return 0;

unwind:
	/* unwind from head to the slave that failed */
	bond_for_each_slave(bond, rollback_slave, iter) {
		int tmp_res;

		if (rollback_slave == slave)
			break;

		tmp_res = dev_set_mtu(rollback_slave->dev, bond_dev->mtu);
		if (tmp_res) {
			netdev_dbg(bond_dev, "unwind err %d dev %s\n",
				   tmp_res, rollback_slave->dev->name);
		}
	}

	return res;
}
/* Change HW address
 *
 * Note that many devices must be down to change the HW address, and
 * downing the master releases all slaves.  We can make bonds full of
 * bonding devices to test this, however.
 *
 * The new address is pushed to every slave; on failure the slaves
 * already updated are rolled back to the bond's previous address.
 */
static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct slave *slave, *rollback_slave;
	struct sockaddr_storage *ss = addr, tmp_ss;
	struct list_head *iter;
	int res = 0;

	/* ALB manages MAC addresses itself */
	if (BOND_MODE(bond) == BOND_MODE_ALB)
		return bond_alb_set_mac_address(bond_dev, addr);

	netdev_dbg(bond_dev, "bond=%p\n", bond);

	/* If fail_over_mac is enabled, do nothing and return success.
	 * Returning an error causes ifenslave to fail.
	 */
	if (bond->params.fail_over_mac &&
	    BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
		return 0;

	if (!is_valid_ether_addr(ss->__data))
		return -EADDRNOTAVAIL;

	bond_for_each_slave(bond, slave, iter) {
		netdev_dbg(bond_dev, "slave %p %s\n", slave, slave->dev->name);
		res = dev_set_mac_address(slave->dev, addr);
		if (res) {
			/* TODO: consider downing the slave
			 * and retry ?
			 * User should expect communications
			 * breakage anyway until ARP finish
			 * updating, so...
			 */
			netdev_dbg(bond_dev, "err %d %s\n", res, slave->dev->name);
			goto unwind;
		}
	}

	/* success */
	memcpy(bond_dev->dev_addr, ss->__data, bond_dev->addr_len);
	return 0;

unwind:
	memcpy(tmp_ss.__data, bond_dev->dev_addr, bond_dev->addr_len);
	tmp_ss.ss_family = bond_dev->type;

	/* unwind from head to the slave that failed */
	bond_for_each_slave(bond, rollback_slave, iter) {
		int tmp_res;

		if (rollback_slave == slave)
			break;

		tmp_res = dev_set_mac_address(rollback_slave->dev,
					      (struct sockaddr *)&tmp_ss);
		if (tmp_res) {
			netdev_dbg(bond_dev, "unwind err %d dev %s\n",
				   tmp_res, rollback_slave->dev->name);
		}
	}

	return res;
}
/**
 * bond_xmit_slave_id - transmit skb through slave with slave_id
 * @bond: bonding device that is transmitting
 * @skb: buffer to transmit
 * @slave_id: slave id up to slave_cnt-1 through which to transmit
 *
 * This function tries to transmit through slave with slave_id but in
 * case it fails, it tries to find the first available slave for
 * transmission (wrapping past @slave_id and back around).  The skb is
 * consumed in all cases, thus the function is void.
 */
static void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id)
{
	struct list_head *iter;
	struct slave *slave;
	int i = slave_id;

	/* Here we start from the slave with slave_id */
	bond_for_each_slave_rcu(bond, slave, iter) {
		if (--i < 0) {
			if (bond_slave_can_tx(slave)) {
				bond_dev_queue_xmit(bond, skb, slave->dev);
				return;
			}
		}
	}

	/* Here we start from the first slave up to slave_id */
	i = slave_id;
	bond_for_each_slave_rcu(bond, slave, iter) {
		if (--i < 0)
			break;
		if (bond_slave_can_tx(slave)) {
			bond_dev_queue_xmit(bond, skb, slave->dev);
			return;
		}
	}
	/* no slave that can tx has been found */
	bond_tx_drop(bond->dev, skb);
}
/**
 * bond_rr_gen_slave_id - generate slave id based on packets_per_slave
 * @bond: bonding device to use
 *
 * Based on the value of the bonding device's packets_per_slave parameter
 * this function generates a slave id, which is usually used as the next
 * slave to transmit through: 0 means pick randomly, 1 means plain
 * round-robin, and larger values switch slaves every packets_per_slave
 * packets (via a precomputed reciprocal divide).
 */
static u32 bond_rr_gen_slave_id(struct bonding *bond)
{
	int packets_per_slave = bond->params.packets_per_slave;
	u32 slave_id;

	if (packets_per_slave == 0) {
		/* random distribution */
		slave_id = prandom_u32();
	} else if (packets_per_slave == 1) {
		/* strict per-packet round-robin */
		slave_id = bond->rr_tx_counter;
	} else {
		/* advance the slave id once per packets_per_slave packets */
		slave_id = reciprocal_divide(bond->rr_tx_counter,
					     bond->params.reciprocal_packets_per_slave);
	}
	bond->rr_tx_counter++;

	return slave_id;
}
static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
struct iphdr *iph = ip_hdr(skb);
struct slave *slave;
u32 slave_id;
/* Start with the curr_active_slave that joined the bond as the
* default for sending IGMP traffic. For failover purposes one
* needs to maintain some consistency for the interface that will
* send the join/membership reports. The curr_active_slave found
* will send all of this type of traffic.
*/
if (iph->protocol == IPPROTO_IGMP && skb->protocol == htons(ETH_P_IP)) {
slave = rcu_dereference(bond->curr_active_slave);
if (slave)
bond_dev_queue_xmit(bond, skb, slave->dev);
else
bond_xmit_slave_id(bond, skb, 0);
} else {
int slave_cnt = ACCESS_ONCE(bond->slave_cnt);
if (likely(slave_cnt)) {
slave_id = bond_rr_gen_slave_id(bond);
bond_xmit_slave_id(bond, skb, slave_id % slave_cnt);
} else {
bond_tx_drop(bond_dev, skb);
}
}
return NETDEV_TX_OK;
}
/* In active-backup mode all traffic goes through curr_active_slave,
 * which is always valid when the bond has a usable interface; without
 * one the frame is dropped.
 */
static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct slave *active;

	active = rcu_dereference(bond->curr_active_slave);
	if (!active) {
		bond_tx_drop(bond_dev, skb);
		return NETDEV_TX_OK;
	}

	bond_dev_queue_xmit(bond, skb, active->dev);
	return NETDEV_TX_OK;
}
/* Use this to update slave_array when (a) it's not appropriate to update
 * slave_array right away (note that update_slave_array() may sleep)
 * and / or (b) RTNL is not held.
 */
void bond_slave_arr_work_rearm(struct bonding *bond, unsigned long delay)
{
	queue_delayed_work(bond->wq, &bond->slave_arr_work, delay);
}
/* Slave array work handler. Holds only RTNL.  If RTNL is busy or the
 * rebuild fails, the work re-arms itself to retry shortly.
 */
static void bond_slave_arr_handler(struct work_struct *work)
{
	struct bonding *bond = container_of(work, struct bonding,
					    slave_arr_work.work);
	int ret;

	if (!rtnl_trylock())
		goto err;

	ret = bond_update_slave_arr(bond, NULL);
	rtnl_unlock();
	if (ret) {
		pr_warn_ratelimited("Failed to update slave array from WT\n");
		goto err;
	}
	return;

err:
	bond_slave_arr_work_rearm(bond, 1);
}
/* Build the usable slaves array in control path for modes that use xmit-hash
 * to determine the slave interface -
 * (a) BOND_MODE_8023AD
 * (b) BOND_MODE_XOR
 * (c) BOND_MODE_TLB && tlb_dynamic_lb == 0
 *
 * The caller is expected to hold RTNL only and NO other lock!
 * @skipslave, when non-NULL, is excluded from the new array.
 */
int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave)
{
	struct slave *slave;
	struct list_head *iter;
	struct bond_up_slave *new_arr, *old_arr;
	int agg_id = 0;
	int ret = 0;

#ifdef CONFIG_LOCKDEP
	WARN_ON(lockdep_is_held(&bond->mode_lock));
#endif

	/* array sized for the current slave count, published via RCU */
	new_arr = kzalloc(offsetof(struct bond_up_slave, arr[bond->slave_cnt]),
			  GFP_KERNEL);
	if (!new_arr) {
		ret = -ENOMEM;
		pr_err("Failed to build slave-array.\n");
		goto out;
	}
	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
		struct ad_info ad_info;

		if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
			pr_debug("bond_3ad_get_active_agg_info failed\n");
			kfree_rcu(new_arr, rcu);
			/* No active aggregator means it's not safe to use
			 * the previous array.
			 */
			old_arr = rtnl_dereference(bond->slave_arr);
			if (old_arr) {
				RCU_INIT_POINTER(bond->slave_arr, NULL);
				kfree_rcu(old_arr, rcu);
			}
			goto out;
		}
		agg_id = ad_info.aggregator_id;
	}
	bond_for_each_slave(bond, slave, iter) {
		if (BOND_MODE(bond) == BOND_MODE_8023AD) {
			struct aggregator *agg;

			/* only ports of the active aggregator may tx */
			agg = SLAVE_AD_INFO(slave)->port.aggregator;
			if (!agg || agg->aggregator_identifier != agg_id)
				continue;
		}
		if (!bond_slave_can_tx(slave))
			continue;
		if (skipslave == slave)
			continue;

		new_arr->arr[new_arr->count++] = slave;
	}

	old_arr = rtnl_dereference(bond->slave_arr);
	rcu_assign_pointer(bond->slave_arr, new_arr);
	if (old_arr)
		kfree_rcu(old_arr, rcu);
out:
	if (ret != 0 && skipslave) {
		int idx;

		/* Rare situation where caller has asked to skip a specific
		 * slave but allocation failed (most likely!). BTW this is
		 * only possible when the call is initiated from
		 * __bond_release_one(). In this situation; overwrite the
		 * skipslave entry in the array with the last entry from the
		 * array to avoid a situation where the xmit path may choose
		 * this to-be-skipped slave to send a packet out.
		 */
		old_arr = rtnl_dereference(bond->slave_arr);
		for (idx = 0; idx < old_arr->count; idx++) {
			if (skipslave == old_arr->arr[idx]) {
				old_arr->arr[idx] =
				    old_arr->arr[old_arr->count-1];
				old_arr->count--;
				break;
			}
		}
	}
	return ret;
}
/* Use this Xmit function for 3AD as well as XOR modes. The current
 * usable slave array is formed in the control path. The xmit function
 * just calculates hash and sends the packet out.  The array and its
 * count are read locklessly (RCU + ACCESS_ONCE).
 */
static int bond_3ad_xor_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bonding *bond = netdev_priv(dev);
	struct slave *slave;
	struct bond_up_slave *slaves;
	unsigned int count;

	slaves = rcu_dereference(bond->slave_arr);
	count = slaves ? ACCESS_ONCE(slaves->count) : 0;
	if (likely(count)) {
		slave = slaves->arr[bond_xmit_hash(bond, skb) % count];
		bond_dev_queue_xmit(bond, skb, slave->dev);
	} else {
		bond_tx_drop(dev, skb);
	}

	return NETDEV_TX_OK;
}
/* in broadcast mode, we send everything to all usable interfaces.
 * Clones are transmitted on every up slave except the last, which
 * gets the original skb (avoiding one clone).
 */
static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct slave *slave = NULL;
	struct list_head *iter;

	bond_for_each_slave_rcu(bond, slave, iter) {
		if (bond_is_last_slave(bond, slave))
			break;
		if (bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

			if (!skb2) {
				net_err_ratelimited("%s: Error: %s: skb_clone() failed\n",
						    bond_dev->name, __func__);
				continue;
			}
			bond_dev_queue_xmit(bond, skb2, slave->dev);
		}
	}
	/* the last slave gets the original skb; drop it if unusable */
	if (slave && bond_slave_is_up(slave) && slave->link == BOND_LINK_UP)
		bond_dev_queue_xmit(bond, skb, slave->dev);
	else
		bond_tx_drop(bond_dev, skb);

	return NETDEV_TX_OK;
}
/*------------------------- Device initialization ---------------------------*/

/* Lookup the slave that corresponds to a qid.  Returns 0 when the skb
 * was transmitted on a matching up slave, 1 when the caller should fall
 * back to the mode's default transmit policy.
 */
static inline int bond_slave_override(struct bonding *bond,
				      struct sk_buff *skb)
{
	struct slave *slave = NULL;
	struct list_head *iter;

	/* queue 0 means "no override requested" */
	if (!skb->queue_mapping)
		return 1;

	/* Find out if any slaves have the same mapping as this skb. */
	bond_for_each_slave_rcu(bond, slave, iter) {
		if (slave->queue_id == skb->queue_mapping) {
			if (bond_slave_is_up(slave) &&
			    slave->link == BOND_LINK_UP) {
				bond_dev_queue_xmit(bond, skb, slave->dev);
				return 0;
			}
			/* If the slave isn't UP, use default transmit policy. */
			break;
		}
	}

	return 1;
}
static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
			     void *accel_priv, select_queue_fallback_t fallback)
{
	/* This helper function exists to help dev_pick_tx get the correct
	 * destination queue.  Using a helper function skips a call to
	 * skb_tx_hash and will put the skbs in the queue we expect on their
	 * way down to the bonding driver.
	 */
	u16 txq = 0;

	if (skb_rx_queue_recorded(skb))
		txq = skb_get_rx_queue(skb);

	/* Save the original txq to restore before passing to the driver */
	qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;

	/* fold an out-of-range queue id back into the valid range */
	while (unlikely(txq >= dev->real_num_tx_queues))
		txq -= dev->real_num_tx_queues;

	return txq;
}
/* Core transmit dispatch: apply the per-queue slave override (if any),
 * then hand the skb to the mode-specific xmit function.
 */
static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bonding *bond = netdev_priv(dev);

	if (bond_should_override_tx_queue(bond) &&
	    !bond_slave_override(bond, skb))
		return NETDEV_TX_OK;

	switch (BOND_MODE(bond)) {
	case BOND_MODE_ROUNDROBIN:
		return bond_xmit_roundrobin(skb, dev);
	case BOND_MODE_ACTIVEBACKUP:
		return bond_xmit_activebackup(skb, dev);
	case BOND_MODE_8023AD:
	case BOND_MODE_XOR:
		return bond_3ad_xor_xmit(skb, dev);
	case BOND_MODE_BROADCAST:
		return bond_xmit_broadcast(skb, dev);
	case BOND_MODE_ALB:
		return bond_alb_xmit(skb, dev);
	case BOND_MODE_TLB:
		return bond_tlb_xmit(skb, dev);
	default:
		/* Should never happen, mode already checked */
		netdev_err(dev, "Unknown bonding mode %d\n", BOND_MODE(bond));
		WARN_ON_ONCE(1);
		bond_tx_drop(dev, skb);
		return NETDEV_TX_OK;
	}
}
/* ndo_start_xmit: wrap __bond_start_xmit with the netpoll-deadlock
 * check and an RCU read-side section; drops the frame when the bond has
 * no slaves.
 */
static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bonding *bond = netdev_priv(dev);
	netdev_tx_t ret = NETDEV_TX_OK;

	/* If we risk deadlock from transmitting this in the
	 * netpoll path, tell netpoll to queue the frame for later tx
	 */
	if (unlikely(is_netpoll_tx_blocked(dev)))
		return NETDEV_TX_BUSY;

	rcu_read_lock();
	if (bond_has_slaves(bond))
		ret = __bond_start_xmit(skb, dev);
	else
		bond_tx_drop(dev, skb);
	rcu_read_unlock();

	return ret;
}
/* ethtool get_link_ksettings: report the sum of the tx-capable slaves'
 * speeds and the duplex of the first slave that reports one.
 */
static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev,
					   struct ethtool_link_ksettings *cmd)
{
	struct bonding *bond = netdev_priv(bond_dev);
	unsigned long speed = 0;
	struct list_head *iter;
	struct slave *slave;

	cmd->base.duplex = DUPLEX_UNKNOWN;
	cmd->base.port = PORT_OTHER;

	/* Since bond_slave_can_tx returns false for all inactive or down slaves, we
	 * do not need to check mode.  Though link speed might not represent
	 * the true receive or transmit bandwidth (not all modes are symmetric)
	 * this is an accurate maximum.
	 */
	bond_for_each_slave(bond, slave, iter) {
		if (bond_slave_can_tx(slave)) {
			if (slave->speed != SPEED_UNKNOWN)
				speed += slave->speed;
			if (cmd->base.duplex == DUPLEX_UNKNOWN &&
			    slave->duplex != DUPLEX_UNKNOWN)
				cmd->base.duplex = slave->duplex;
		}
	}
	cmd->base.speed = speed ? : SPEED_UNKNOWN;

	return 0;
}
/* ethtool get_drvinfo: report driver name/version and the bonding ABI
 * version in the firmware-version field.
 */
static void bond_ethtool_get_drvinfo(struct net_device *bond_dev,
				     struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d",
		 BOND_ABI_VERSION);
}
/* ethtool operations exposed by the bond master */
static const struct ethtool_ops bond_ethtool_ops = {
	.get_drvinfo		= bond_ethtool_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_link_ksettings	= bond_ethtool_get_link_ksettings,
};
/* net_device operations for the bond master device */
static const struct net_device_ops bond_netdev_ops = {
	.ndo_init		= bond_init,
	.ndo_uninit		= bond_uninit,
	.ndo_open		= bond_open,
	.ndo_stop		= bond_close,
	.ndo_start_xmit		= bond_start_xmit,
	.ndo_select_queue	= bond_select_queue,
	.ndo_get_stats64	= bond_get_stats,
	.ndo_do_ioctl		= bond_do_ioctl,
	.ndo_change_rx_flags	= bond_change_rx_flags,
	.ndo_set_rx_mode	= bond_set_rx_mode,
	.ndo_change_mtu		= bond_change_mtu,
	.ndo_set_mac_address	= bond_set_mac_address,
	.ndo_neigh_setup	= bond_neigh_setup,
	.ndo_vlan_rx_add_vid	= bond_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= bond_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_netpoll_setup	= bond_netpoll_setup,
	.ndo_netpoll_cleanup	= bond_netpoll_cleanup,
	.ndo_poll_controller	= bond_poll_controller,
#endif
	.ndo_add_slave		= bond_enslave,
	.ndo_del_slave		= bond_release,
	.ndo_fix_features	= bond_fix_features,
	.ndo_features_check	= passthru_features_check,
};
/* sysfs device type for bond masters */
static const struct device_type bond_type = {
	.name = "bond",
};
/* priv_destructor: release the bond's private workqueue, if one was
 * ever created.
 */
static void bond_destructor(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);

	if (!bond->wq)
		return;

	destroy_workqueue(bond->wq);
}
/* Initialize a freshly allocated bond net_device: locks, default
 * parameters, device ops, flags and feature bits.
 */
void bond_setup(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);

	spin_lock_init(&bond->mode_lock);
	spin_lock_init(&bond->stats_lock);
	bond->params = bonding_defaults;

	/* Initialize pointers */
	bond->dev = bond_dev;

	/* Initialize the device entry points */
	ether_setup(bond_dev);
	bond_dev->max_mtu = ETH_MAX_MTU;
	bond_dev->netdev_ops = &bond_netdev_ops;
	bond_dev->ethtool_ops = &bond_ethtool_ops;

	bond_dev->needs_free_netdev = true;
	bond_dev->priv_destructor = bond_destructor;

	SET_NETDEV_DEVTYPE(bond_dev, &bond_type);

	/* Initialize the device options */
	bond_dev->flags |= IFF_MASTER;
	bond_dev->priv_flags |= IFF_BONDING | IFF_UNICAST_FLT | IFF_NO_QUEUE;
	bond_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);

	/* don't acquire bond device's netif_tx_lock when transmitting */
	bond_dev->features |= NETIF_F_LLTX;

	/* By default, we declare the bond to be fully
	 * VLAN hardware accelerated capable. Special
	 * care is taken in the various xmit functions
	 * when there are slaves that are not hw accel
	 * capable
	 */

	/* Don't allow bond devices to change network namespaces. */
	bond_dev->features |= NETIF_F_NETNS_LOCAL;

	bond_dev->hw_features = BOND_VLAN_FEATURES |
				NETIF_F_HW_VLAN_CTAG_TX |
				NETIF_F_HW_VLAN_CTAG_RX |
				NETIF_F_HW_VLAN_CTAG_FILTER;

	bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
	bond_dev->features |= bond_dev->hw_features;
}
/* Destroy a bonding device.
 * Must be under rtnl_lock when this function is called.
 *
 * Releases every slave, then frees the RCU-protected slave array and
 * unlinks the bond from its per-netns list.  The netdev itself is freed
 * later by the core (needs_free_netdev), which runs bond_destructor().
 */
static void bond_uninit(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct list_head *iter;
	struct slave *slave;
	struct bond_up_slave *arr;

	bond_netpoll_cleanup(bond_dev);

	/* Release the bonded slaves */
	bond_for_each_slave(bond, slave, iter)
		__bond_release_one(bond_dev, slave->dev, true, true);
	netdev_info(bond_dev, "Released all slaves\n");

	/* Detach the TX slave array under RCU; readers may still hold it. */
	arr = rtnl_dereference(bond->slave_arr);
	if (arr) {
		RCU_INIT_POINTER(bond->slave_arr, NULL);
		kfree_rcu(arr, rcu);
	}

	list_del(&bond->bond_list);

	bond_debug_unregister(bond);
}
/*------------------------- Module initialization ---------------------------*/
/* Validate the module parameters and fill @params with sanitized values.
 *
 * Called once at module load, before any bond device exists.  String
 * parameters that cannot be parsed are rejected with -EINVAL; numeric
 * parameters that are merely out of range are clamped/reset with a
 * warning, matching the behaviour documented in bonding.txt.
 *
 * Returns 0 on success or -EINVAL on an unparseable string parameter.
 *
 * Fix: the lp_interval warning message previously said "ip_interval";
 * the parameter being checked (and reset) is lp_interval.
 */
static int bond_check_params(struct bond_params *params)
{
	int arp_validate_value, fail_over_mac_value, primary_reselect_value, i;
	struct bond_opt_value newval;
	const struct bond_opt_value *valptr;
	int arp_all_targets_value = 0;
	u16 ad_actor_sys_prio = 0;
	u16 ad_user_port_key = 0;
	__be32 arp_target[BOND_MAX_ARP_TARGETS] = { 0 };
	int arp_ip_count;
	int bond_mode = BOND_MODE_ROUNDROBIN;
	int xmit_hashtype = BOND_XMIT_POLICY_LAYER2;
	int lacp_fast = 0;
	int tlb_dynamic_lb = 0;

	/* Convert string parameters. */
	if (mode) {
		bond_opt_initstr(&newval, mode);
		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_MODE), &newval);
		if (!valptr) {
			pr_err("Error: Invalid bonding mode \"%s\"\n", mode);
			return -EINVAL;
		}
		bond_mode = valptr->value;
	}

	if (xmit_hash_policy) {
		/* Hash policy only matters for modes that balance per-packet. */
		if ((bond_mode != BOND_MODE_XOR) &&
		    (bond_mode != BOND_MODE_8023AD) &&
		    (bond_mode != BOND_MODE_TLB)) {
			pr_info("xmit_hash_policy param is irrelevant in mode %s\n",
				bond_mode_name(bond_mode));
		} else {
			bond_opt_initstr(&newval, xmit_hash_policy);
			valptr = bond_opt_parse(bond_opt_get(BOND_OPT_XMIT_HASH),
						&newval);
			if (!valptr) {
				pr_err("Error: Invalid xmit_hash_policy \"%s\"\n",
				       xmit_hash_policy);
				return -EINVAL;
			}
			xmit_hashtype = valptr->value;
		}
	}

	if (lacp_rate) {
		if (bond_mode != BOND_MODE_8023AD) {
			pr_info("lacp_rate param is irrelevant in mode %s\n",
				bond_mode_name(bond_mode));
		} else {
			bond_opt_initstr(&newval, lacp_rate);
			valptr = bond_opt_parse(bond_opt_get(BOND_OPT_LACP_RATE),
						&newval);
			if (!valptr) {
				pr_err("Error: Invalid lacp rate \"%s\"\n",
				       lacp_rate);
				return -EINVAL;
			}
			lacp_fast = valptr->value;
		}
	}

	if (ad_select) {
		bond_opt_initstr(&newval, ad_select);
		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_SELECT),
					&newval);
		if (!valptr) {
			pr_err("Error: Invalid ad_select \"%s\"\n", ad_select);
			return -EINVAL;
		}
		params->ad_select = valptr->value;
		if (bond_mode != BOND_MODE_8023AD)
			pr_warn("ad_select param only affects 802.3ad mode\n");
	} else {
		params->ad_select = BOND_AD_STABLE;
	}

	/* Range-check the plain integer parameters; reset on bad values. */
	if (max_bonds < 0) {
		pr_warn("Warning: max_bonds (%d) not in range %d-%d, so it was reset to BOND_DEFAULT_MAX_BONDS (%d)\n",
			max_bonds, 0, INT_MAX, BOND_DEFAULT_MAX_BONDS);
		max_bonds = BOND_DEFAULT_MAX_BONDS;
	}

	if (miimon < 0) {
		pr_warn("Warning: miimon module parameter (%d), not in range 0-%d, so it was reset to 0\n",
			miimon, INT_MAX);
		miimon = 0;
	}

	if (updelay < 0) {
		pr_warn("Warning: updelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
			updelay, INT_MAX);
		updelay = 0;
	}

	if (downdelay < 0) {
		pr_warn("Warning: downdelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
			downdelay, INT_MAX);
		downdelay = 0;
	}

	if ((use_carrier != 0) && (use_carrier != 1)) {
		pr_warn("Warning: use_carrier module parameter (%d), not of valid value (0/1), so it was set to 1\n",
			use_carrier);
		use_carrier = 1;
	}

	if (num_peer_notif < 0 || num_peer_notif > 255) {
		pr_warn("Warning: num_grat_arp/num_unsol_na (%d) not in range 0-255 so it was reset to 1\n",
			num_peer_notif);
		num_peer_notif = 1;
	}

	/* reset values for 802.3ad/TLB/ALB */
	if (!bond_mode_uses_arp(bond_mode)) {
		if (!miimon) {
			pr_warn("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n");
			pr_warn("Forcing miimon to 100msec\n");
			miimon = BOND_DEFAULT_MIIMON;
		}
	}

	if (tx_queues < 1 || tx_queues > 255) {
		pr_warn("Warning: tx_queues (%d) should be between 1 and 255, resetting to %d\n",
			tx_queues, BOND_DEFAULT_TX_QUEUES);
		tx_queues = BOND_DEFAULT_TX_QUEUES;
	}

	if ((all_slaves_active != 0) && (all_slaves_active != 1)) {
		pr_warn("Warning: all_slaves_active module parameter (%d), not of valid value (0/1), so it was set to 0\n",
			all_slaves_active);
		all_slaves_active = 0;
	}

	if (resend_igmp < 0 || resend_igmp > 255) {
		pr_warn("Warning: resend_igmp (%d) should be between 0 and 255, resetting to %d\n",
			resend_igmp, BOND_DEFAULT_RESEND_IGMP);
		resend_igmp = BOND_DEFAULT_RESEND_IGMP;
	}

	bond_opt_initval(&newval, packets_per_slave);
	if (!bond_opt_parse(bond_opt_get(BOND_OPT_PACKETS_PER_SLAVE), &newval)) {
		pr_warn("Warning: packets_per_slave (%d) should be between 0 and %u resetting to 1\n",
			packets_per_slave, USHRT_MAX);
		packets_per_slave = 1;
	}

	if (bond_mode == BOND_MODE_ALB) {
		pr_notice("In ALB mode you might experience client disconnections upon reconnection of a link if the bonding module updelay parameter (%d msec) is incompatible with the forwarding delay time of the switch\n",
			  updelay);
	}

	if (!miimon) {
		if (updelay || downdelay) {
			/* just warn the user the up/down delay will have
			 * no effect since miimon is zero...
			 */
			pr_warn("Warning: miimon module parameter not set and updelay (%d) or downdelay (%d) module parameter is set; updelay and downdelay have no effect unless miimon is set\n",
				updelay, downdelay);
		}
	} else {
		/* don't allow arp monitoring */
		if (arp_interval) {
			pr_warn("Warning: miimon (%d) and arp_interval (%d) can't be used simultaneously, disabling ARP monitoring\n",
				miimon, arp_interval);
			arp_interval = 0;
		}

		/* up/down delays are stored internally in miimon ticks. */
		if ((updelay % miimon) != 0) {
			pr_warn("Warning: updelay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n",
				updelay, miimon, (updelay / miimon) * miimon);
		}

		updelay /= miimon;

		if ((downdelay % miimon) != 0) {
			pr_warn("Warning: downdelay (%d) is not a multiple of miimon (%d), downdelay rounded to %d ms\n",
				downdelay, miimon,
				(downdelay / miimon) * miimon);
		}

		downdelay /= miimon;
	}

	if (arp_interval < 0) {
		pr_warn("Warning: arp_interval module parameter (%d), not in range 0-%d, so it was reset to 0\n",
			arp_interval, INT_MAX);
		arp_interval = 0;
	}

	/* Parse up to BOND_MAX_ARP_TARGETS dotted-quad ARP targets,
	 * skipping duplicates.  A bad address disables ARP monitoring.
	 */
	for (arp_ip_count = 0, i = 0;
	     (arp_ip_count < BOND_MAX_ARP_TARGETS) && arp_ip_target[i]; i++) {
		__be32 ip;

		/* not a complete check, but good enough to catch mistakes */
		if (!in4_pton(arp_ip_target[i], -1, (u8 *)&ip, -1, NULL) ||
		    !bond_is_ip_target_ok(ip)) {
			pr_warn("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
				arp_ip_target[i]);
			arp_interval = 0;
		} else {
			if (bond_get_targets_ip(arp_target, ip) == -1)
				arp_target[arp_ip_count++] = ip;
			else
				pr_warn("Warning: duplicate address %pI4 in arp_ip_target, skipping\n",
					&ip);
		}
	}

	if (arp_interval && !arp_ip_count) {
		/* don't allow arping if no arp_ip_target given... */
		pr_warn("Warning: arp_interval module parameter (%d) specified without providing an arp_ip_target parameter, arp_interval was reset to 0\n",
			arp_interval);
		arp_interval = 0;
	}

	if (arp_validate) {
		if (!arp_interval) {
			pr_err("arp_validate requires arp_interval\n");
			return -EINVAL;
		}

		bond_opt_initstr(&newval, arp_validate);
		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_VALIDATE),
					&newval);
		if (!valptr) {
			pr_err("Error: invalid arp_validate \"%s\"\n",
			       arp_validate);
			return -EINVAL;
		}
		arp_validate_value = valptr->value;
	} else {
		arp_validate_value = 0;
	}

	if (arp_all_targets) {
		bond_opt_initstr(&newval, arp_all_targets);
		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_ALL_TARGETS),
					&newval);
		if (!valptr) {
			pr_err("Error: invalid arp_all_targets_value \"%s\"\n",
			       arp_all_targets);
			arp_all_targets_value = 0;
		} else {
			arp_all_targets_value = valptr->value;
		}
	}

	if (miimon) {
		pr_info("MII link monitoring set to %d ms\n", miimon);
	} else if (arp_interval) {
		valptr = bond_opt_get_val(BOND_OPT_ARP_VALIDATE,
					  arp_validate_value);
		pr_info("ARP monitoring set to %d ms, validate %s, with %d target(s):",
			arp_interval, valptr->string, arp_ip_count);

		for (i = 0; i < arp_ip_count; i++)
			pr_cont(" %s", arp_ip_target[i]);

		pr_cont("\n");

	} else if (max_bonds) {
		/* miimon and arp_interval not set, we need one so things
		 * work as expected, see bonding.txt for details
		 */
		pr_debug("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! see bonding.txt for details\n");
	}

	if (primary && !bond_mode_uses_primary(bond_mode)) {
		/* currently, using a primary only makes sense
		 * in active backup, TLB or ALB modes
		 */
		pr_warn("Warning: %s primary device specified but has no effect in %s mode\n",
			primary, bond_mode_name(bond_mode));
		primary = NULL;
	}

	if (primary && primary_reselect) {
		bond_opt_initstr(&newval, primary_reselect);
		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_PRIMARY_RESELECT),
					&newval);
		if (!valptr) {
			pr_err("Error: Invalid primary_reselect \"%s\"\n",
			       primary_reselect);
			return -EINVAL;
		}
		primary_reselect_value = valptr->value;
	} else {
		primary_reselect_value = BOND_PRI_RESELECT_ALWAYS;
	}

	if (fail_over_mac) {
		bond_opt_initstr(&newval, fail_over_mac);
		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_FAIL_OVER_MAC),
					&newval);
		if (!valptr) {
			pr_err("Error: invalid fail_over_mac \"%s\"\n",
			       fail_over_mac);
			return -EINVAL;
		}
		fail_over_mac_value = valptr->value;
		if (bond_mode != BOND_MODE_ACTIVEBACKUP)
			pr_warn("Warning: fail_over_mac only affects active-backup mode\n");
	} else {
		fail_over_mac_value = BOND_FOM_NONE;
	}

	/* 802.3ad actor parameters have no module params; take the option
	 * framework's "default" values.
	 */
	bond_opt_initstr(&newval, "default");
	valptr = bond_opt_parse(
			bond_opt_get(BOND_OPT_AD_ACTOR_SYS_PRIO),
				     &newval);
	if (!valptr) {
		pr_err("Error: No ad_actor_sys_prio default value");
		return -EINVAL;
	}
	ad_actor_sys_prio = valptr->value;

	valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_USER_PORT_KEY),
				&newval);
	if (!valptr) {
		pr_err("Error: No ad_user_port_key default value");
		return -EINVAL;
	}
	ad_user_port_key = valptr->value;

	if (bond_mode == BOND_MODE_TLB) {
		bond_opt_initstr(&newval, "default");
		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_TLB_DYNAMIC_LB),
					&newval);
		if (!valptr) {
			pr_err("Error: No tlb_dynamic_lb default value");
			return -EINVAL;
		}
		tlb_dynamic_lb = valptr->value;
	}

	if (lp_interval == 0) {
		/* message fixed: this checks lp_interval, not "ip_interval" */
		pr_warn("Warning: lp_interval must be between 1 and %d, so it was reset to %d\n",
			INT_MAX, BOND_ALB_DEFAULT_LP_INTERVAL);
		lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
	}

	/* fill params struct with the proper values */
	params->mode = bond_mode;
	params->xmit_policy = xmit_hashtype;
	params->miimon = miimon;
	params->num_peer_notif = num_peer_notif;
	params->arp_interval = arp_interval;
	params->arp_validate = arp_validate_value;
	params->arp_all_targets = arp_all_targets_value;
	params->updelay = updelay;
	params->downdelay = downdelay;
	params->use_carrier = use_carrier;
	params->lacp_fast = lacp_fast;
	params->primary[0] = 0;
	params->primary_reselect = primary_reselect_value;
	params->fail_over_mac = fail_over_mac_value;
	params->tx_queues = tx_queues;
	params->all_slaves_active = all_slaves_active;
	params->resend_igmp = resend_igmp;
	params->min_links = min_links;
	params->lp_interval = lp_interval;
	params->packets_per_slave = packets_per_slave;
	params->tlb_dynamic_lb = tlb_dynamic_lb;
	params->ad_actor_sys_prio = ad_actor_sys_prio;
	eth_zero_addr(params->ad_actor_system);
	params->ad_user_port_key = ad_user_port_key;
	if (packets_per_slave > 0) {
		params->reciprocal_packets_per_slave =
			reciprocal_value(packets_per_slave);
	} else {
		/* reciprocal_packets_per_slave is unused if
		 * packets_per_slave is 0 or 1, just initialize it
		 */
		params->reciprocal_packets_per_slave =
			(struct reciprocal_value) { 0 };
	}

	if (primary) {
		strncpy(params->primary, primary, IFNAMSIZ);
		params->primary[IFNAMSIZ - 1] = 0;
	}

	memcpy(params->arp_targets, arp_target, sizeof(arp_target));

	return 0;
}
/* Called from registration process */
/* ndo_init: per-device setup run by register_netdevice().
 * Allocates the bond's ordered workqueue, links the device into the
 * per-netns bond list, and registers sysfs/debugfs entries.
 * Returns 0 or -ENOMEM if the workqueue cannot be allocated.
 */
static int bond_init(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);

	netdev_dbg(bond_dev, "Begin bond_init\n");

	bond->wq = alloc_ordered_workqueue(bond_dev->name, WQ_MEM_RECLAIM);
	if (!bond->wq)
		return -ENOMEM;

	netdev_lockdep_set_classes(bond_dev);

	list_add_tail(&bond->bond_list, &bn->dev_list);

	bond_prepare_sysfs_group(bond);

	bond_debug_register(bond);

	/* Ensure valid dev_addr */
	if (is_zero_ether_addr(bond_dev->dev_addr) &&
	    bond_dev->addr_assign_type == NET_ADDR_PERM)
		eth_hw_addr_random(bond_dev);

	return 0;
}
/* Number of TX queues each new bond device is created with, as set by
 * the tx_queues module parameter (validated in bond_check_params()).
 */
unsigned int bond_get_num_tx_queues(void)
{
	return tx_queues;
}
/* Create a new bond based on the specified name and bonding parameters.
 * If name is NULL, obtain a suitable "bond%d" name for us.
 * Caller must NOT hold rtnl_lock; we need to release it here before we
 * set up our sysfs entries.
 *
 * Returns 0 on success or a negative errno (allocation or
 * register_netdevice() failure).
 */
int bond_create(struct net *net, const char *name)
{
	struct net_device *bond_dev;
	struct bonding *bond;
	struct alb_bond_info *bond_info;
	int res;

	rtnl_lock();

	bond_dev = alloc_netdev_mq(sizeof(struct bonding),
				   name ? name : "bond%d", NET_NAME_UNKNOWN,
				   bond_setup, tx_queues);
	if (!bond_dev) {
		pr_err("%s: eek! can't alloc netdev!\n", name);
		rtnl_unlock();
		return -ENOMEM;
	}

	/*
	 * Initialize rx_hashtbl_used_head to RLB_NULL_INDEX.
	 * It is set to 0 by default which is wrong.
	 */
	bond = netdev_priv(bond_dev);
	bond_info = &(BOND_ALB_INFO(bond));
	bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX;

	dev_net_set(bond_dev, net);
	bond_dev->rtnl_link_ops = &bond_link_ops;

	res = register_netdevice(bond_dev);

	netif_carrier_off(bond_dev);

	bond_work_init_all(bond);

	rtnl_unlock();
	/* register_netdevice() failure: free the device ourselves, since
	 * ndo_uninit never ran for it.
	 */
	if (res < 0)
		free_netdev(bond_dev);
	return res;
}
/* Per-network-namespace init: set up the bond device list and the
 * namespace's procfs directory and sysfs entries.
 */
static int __net_init bond_net_init(struct net *net)
{
	struct bond_net *bn = net_generic(net, bond_net_id);

	bn->net = net;
	INIT_LIST_HEAD(&bn->dev_list);

	bond_create_proc_dir(bn);
	bond_create_sysfs(bn);

	return 0;
}
/* Per-network-namespace teardown: remove sysfs/procfs entries and batch-
 * unregister every bond still alive in this namespace.
 */
static void __net_exit bond_net_exit(struct net *net)
{
	struct bond_net *bn = net_generic(net, bond_net_id);
	struct bonding *bond, *tmp_bond;
	LIST_HEAD(list);

	bond_destroy_sysfs(bn);

	/* Kill off any bonds created after unregistering bond rtnl ops */
	rtnl_lock();
	/* Queue all devices, then unregister them in one batch under rtnl. */
	list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
		unregister_netdevice_queue(bond->dev, &list);
	unregister_netdevice_many(&list);
	rtnl_unlock();

	bond_destroy_proc_dir(bn);
}
/* Per-netns lifecycle hooks; .id/.size make the core allocate a
 * struct bond_net for each namespace, retrievable via net_generic().
 */
static struct pernet_operations bond_net_ops = {
	.init = bond_net_init,
	.exit = bond_net_exit,
	.id   = &bond_net_id,
	.size = sizeof(struct bond_net),
};
/* Module entry point.  Registers the pernet subsystem, rtnl link ops and
 * debugfs, then pre-creates max_bonds devices in init_net.  Errors unwind
 * in reverse order via the goto labels below.
 */
static int __init bonding_init(void)
{
	int i;
	int res;

	pr_info("%s", bond_version);

	res = bond_check_params(&bonding_defaults);
	if (res)
		goto out;

	res = register_pernet_subsys(&bond_net_ops);
	if (res)
		goto out;

	res = bond_netlink_init();
	if (res)
		goto err_link;

	bond_create_debugfs();

	for (i = 0; i < max_bonds; i++) {
		res = bond_create(&init_net, NULL);
		if (res)
			goto err;
	}

	register_netdevice_notifier(&bond_netdev_notifier);
out:
	return res;
err:
	bond_destroy_debugfs();
	bond_netlink_fini();
err_link:
	unregister_pernet_subsys(&bond_net_ops);
	goto out;
}
/* Module exit: tear down in the reverse order of bonding_init().
 * Unregistering the pernet subsystem runs bond_net_exit() for every
 * namespace, which destroys any remaining bond devices.
 */
static void __exit bonding_exit(void)
{
	unregister_netdevice_notifier(&bond_netdev_notifier);

	bond_destroy_debugfs();

	bond_netlink_fini();
	unregister_pernet_subsys(&bond_net_ops);

#ifdef CONFIG_NET_POLL_CONTROLLER
	/* Make sure we don't have an imbalance on our netpoll blocking */
	WARN_ON(atomic_read(&netpoll_block_tx));
#endif
}
module_init(bonding_init);
module_exit(bonding_exit);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION(DRV_DESCRIPTION ", v" DRV_VERSION);
MODULE_AUTHOR("Thomas Davis, [email protected] and many others");
| gpl-2.0 |
Galene/svitanok.ck.ua | templates/jsn_yoyo_pro/error.php | 4569 | <?php
/**
 * @author JoomlaShine.com http://www.joomlashine.com
 * @copyright Copyright (C) 2008 - 2011 JoomlaShine.com. All rights reserved.
 * @license GNU/GPL v2 http://www.gnu.org/licenses/gpl-2.0.html
 *
 * Joomla error page for the JSN Yoyo Pro template: renders a styled
 * 404/permission page with a search box and a "go home" link.
 */
// No direct access
defined('_JEXEC') or die('Restricted index access');
// Load template framework
// Pull in the JSN template framework once (guarded by its define).
if (!defined('JSN_PATH_TPLFRAMEWORK')) {
	require_once JPATH_ROOT . '/plugins/system/jsntplframework/jsntplframework.defines.php';
	require_once JPATH_ROOT . '/plugins/system/jsntplframework/libraries/joomlashine/loader.php';
}
define('YOURBASEPATH', dirname(__FILE__));
// Joomla may call this template without an error object (e.g. direct hit);
// synthesize a 404 so the page still renders.
if (!isset($this->error))
{
	$this->error = JError::raiseWarning(404, JText::_('JERROR_ALERTNOAUTHOR'));
	$this->debug = false;
}
// Preparing template parameters
JSNTplTemplateHelper::prepare(false, false);
// Retrieve document object
$document = JFactory::getDocument();
/* URL where logo image should link to (! without preceding slash !)
Leave this box empty if you want your logo to be clickable. */
$logoLink = $document->logoLink;
// Relative links get the site base URL prepended; absolute http(s) links pass through.
if (strpos($logoLink, "http")=== false && $logoLink != '')
{
	$utils = JSNTplUtils::getInstance();
	$logoLink = $utils->trimPreceddingSlash($logoLink);
	$logoLink = $this->baseurl . '/' . $logoLink;
}
?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<!-- <?php echo $document->template; ?> <?php echo $document->version ?> -->
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="<?php echo $this->language; ?>" lang="<?php echo $this->language; ?>" dir="<?php echo $this->direction; ?>">
<head>
<title><?php echo $this->error->getCode(); ?>-<?php echo $this->title; ?></title>
<link rel="stylesheet" href="<?php echo $this->baseurl . '/templates/' . $this->template ?>/css/error.css" type="text/css" />
</head>
<body id="jsn-master" class="jsn-color-<?php echo $document->templateColor ?>">
<div id="jsn-page">
<div id="jsn-page_inner">
<div id="jsn-header">
<div id="jsn-logo">
<a href="<?php echo $logoLink ?>" title="<?php echo $document->logoSlogan; ?>">
<?php
if ($document->logoFile != "")
$logo_path = $document->logoFile;
else
$logo_path = $this->baseurl . '/templates/' . $this->template . "/images/logo.png";
?>
<img src="<?php echo $logo_path; ?>" alt="<?php echo $document->logoSlogan; ?>" />
</a>
</div>
</div>
<div id="jsn-body" class="clearafter">
<div id="jsn-error-heading">
<h1><?php echo $this->error->getCode(); ?> <span class="heading-medium"><?php echo JText::_('JERROR_ERROR'); ?></span></h1>
</div>
<div id="jsn-error-content" class="jsn-error-page">
<div id="jsn-error-content_inner">
<h1><span class="heading-small"><?php echo $this->error->getMessage(); ?></span></h1>
<hr />
<h3><?php echo JText::_('JERROR_LAYOUT_NOT_ABLE_TO_VISIT'); ?></h3>
<ul>
<li><?php echo JText::_('JERROR_LAYOUT_AN_OUT_OF_DATE_BOOKMARK_FAVOURITE'); ?></li>
<li><?php echo JText::_('JERROR_LAYOUT_SEARCH_ENGINE_OUT_OF_DATE_LISTING'); ?></li>
<li><?php echo JText::_('JERROR_LAYOUT_MIS_TYPED_ADDRESS'); ?></li>
<li><?php echo JText::_('JERROR_LAYOUT_YOU_HAVE_NO_ACCESS_TO_THIS_PAGE'); ?></li>
<li><?php echo JText::_('JERROR_LAYOUT_REQUESTED_RESOURCE_WAS_NOT_FOUND'); ?></li>
<li><?php echo JText::_('JERROR_LAYOUT_ERROR_HAS_OCCURRED_WHILE_PROCESSING_YOUR_REQUEST'); ?></li>
</ul>
<hr />
<h3><?php echo JText::_('JSN_TPLFW_ERROR_LAYOUT_SEARCH_ON_THE_WEBSITE'); ?></h3>
<form id="search-form" method="post" action="index.php">
<div class="search">
<input type="text" onfocus="if(this.value=='search...') this.value='';" onblur="if(this.value=='') this.value='search...';" value="" size="20" class="inputbox" alt="Search" maxlength="20" id="mod-search-searchword" name="searchword">
<input type="submit" onclick="this.form.searchword.focus();" class="button link-button" value="Search">
</div>
<input type="hidden" value="search" name="task">
<input type="hidden" value="com_search" name="option">
<input type="hidden" value="435" name="Itemid">
</form>
<p id="link-goback">or <a href="<?php echo $this->baseurl; ?>/index.php" class="link-action" title="<?php echo JText::_('JERROR_LAYOUT_GO_TO_THE_HOME_PAGE'); ?>"><?php echo JText::_('JERROR_LAYOUT_GO_TO_THE_HOME_PAGE'); ?></a></p>
</div>
</div>
</div>
</div>
</div>
</body>
</html> | gpl-2.0 |
pete318/TrinityCore | sql/old/3.3.5a/world/20082_2020_09_15/2020_09_04_02_world_335.sql | 24722 | -- 7907 La baraja de Bestias de la Luna Negra
-- https://es.classic.wowhead.com/quest=7907
SET @ID := 7907;
DELETE FROM `quest_request_items_locale` WHERE `ID` = @ID AND `locale` IN ('esES', 'esMX');
INSERT INTO `quest_request_items_locale` (`ID`, `locale`, `CompletionText`, `VerifiedBuild`) VALUES
(@ID, 'esES', 'Oh querido.', 0),
(@ID, 'esMX', 'Oh querido.', 0);
DELETE FROM `quest_offer_reward_locale` WHERE `ID` = @ID AND `locale` IN ('esES', 'esMX');
INSERT INTO `quest_offer_reward_locale` (`ID`, `locale`, `RewardText`, `VerifiedBuild`) VALUES
(@ID, 'esES', 'Bueno, esto explica mucho. Si alguna vez encuentra una forma de entrar en Ahn\'Qiraj, esté atento al viejo Brann. Me temo que será inútil sin su mono', 0),
(@ID, 'esMX', 'Bueno, esto explica mucho. Si alguna vez encuentra una forma de entrar en Ahn\'Qiraj, esté atento al viejo Brann. Me temo que será inútil sin su mono', 0);
-- 12133 Aplasta la calabaza
-- https://es.wowhead.com/quest=12133
SET @ID := 12133;
DELETE FROM `quest_request_items_locale` WHERE `ID` = @ID AND `locale` IN ('esES', 'esMX');
INSERT INTO `quest_request_items_locale` (`ID`, `locale`, `CompletionText`, `VerifiedBuild`) VALUES
(@ID, 'esES', '¿Sí, $n?', 0),
(@ID, 'esMX', '¿Sí, $n?', 0);
DELETE FROM `quest_offer_reward_locale` WHERE `ID` = @ID AND `locale` IN ('esES', 'esMX');
INSERT INTO `quest_offer_reward_locale` (`ID`, `locale`, `RewardText`, `VerifiedBuild`) VALUES
(@ID, 'esES', '¿Encontraste esto en el interior de la calabaza iluminada del Jinete decapitado? Es un antiguo símbolo de la Luz. Debía de pertenecer al Jinete antes de que fuera maldecido y se convirtiese en el monstruo que es hoy. Gracias, $n. Este símbolo merece estar entre paladines. Me encargaré de que lo reciban.$B$B¡Ah, casi se me olvida! Tienes una admiradora secreta...$B$BUna de las niñas quería regalarte esto, pero le daba mucha vergüenza dártelo personalmente.', 0),
(@ID, 'esMX', '¿Encontraste esto en el interior de la calabaza iluminada del Jinete decapitado? Es un antiguo símbolo de la Luz. Debía de pertenecer al Jinete antes de que fuera maldecido y se convirtiese en el monstruo que es hoy. Gracias, $n. Este símbolo merece estar entre paladines. Me encargaré de que lo reciban.$B$B¡Ah, casi se me olvida! Tienes una admiradora secreta...$B$BUna de las niñas quería regalarte esto, pero le daba mucha vergüenza dártelo personalmente.', 0);
-- 1598 El libro robado
-- https://es.classic.wowhead.com/quest=1598
SET @ID := 1598;
DELETE FROM `quest_request_items_locale` WHERE `ID` = @ID AND `locale` IN ('esES', 'esMX');
INSERT INTO `quest_request_items_locale` (`ID`, `locale`, `CompletionText`, `VerifiedBuild`) VALUES
(@ID, 'esES', 'Era demasiado llamativo para mí robar el libro yo mismo. Qué suerte que uno de esos tontos hizo el trabajo sucio por mí.', 0),
(@ID, 'esMX', 'Era demasiado llamativo para mí robar el libro yo mismo. Qué suerte que uno de esos tontos hizo el trabajo sucio por mí.', 0);
DELETE FROM `quest_offer_reward_locale` WHERE `ID` = @ID AND `locale` IN ('esES', 'esMX');
INSERT INTO `quest_offer_reward_locale` (`ID`, `locale`, `RewardText`, `VerifiedBuild`) VALUES
(@ID, 'esES', 'Excelente ... Nunca pude acceder a este texto, estaba guardado en un ala protegida de la biblioteca y habría parecido sospechoso si me ven merodeando.$B$BBueno, hiciste un buen trabajo al conseguir esto, y estoy convencido de que probablemente puedas manejar a tu propio diablillo, así que no hay razón para que no te muestre cómo convocar a uno.', 0),
(@ID, 'esMX', 'Excelente ... Nunca pude acceder a este texto, estaba guardado en un ala protegida de la biblioteca y habría parecido sospechoso si me ven merodeando.$B$BBueno, hiciste un buen trabajo al conseguir esto, y estoy convencido de que probablemente puedas manejar a tu propio diablillo, así que no hay razón para que no te muestre cómo convocar a uno.', 0);
-- 7929 La baraja de Elementales de la Luna Negra
-- https://es.wowhead.com/quest=7929
-- https://wow-es.gamepedia.com/Misión:La baraja de Elementales de la Luna Negra
SET @ID := 7929;
DELETE FROM `quest_request_items_locale` WHERE `ID` = @ID AND `locale` IN ('esES', 'esMX');
INSERT INTO `quest_request_items_locale` (`ID`, `locale`, `CompletionText`, `VerifiedBuild`) VALUES
(@ID, 'esES', '¡Veo que has logrado completar un mazo de elementales! ¡Felicidades!', 0),
(@ID, 'esMX', '¡Veo que has logrado completar un mazo de elementales! ¡Felicidades!', 0);
DELETE FROM `quest_offer_reward_locale` WHERE `ID` = @ID AND `locale` IN ('esES', 'esMX');
INSERT INTO `quest_offer_reward_locale` (`ID`, `locale`, `RewardText`, `VerifiedBuild`) VALUES
(@ID, 'esES', 'Así que has armado un mazo de Elementales. Su gesto de devolvérnoslo creará un vínculo entre usted y la Luna Negra que no se olvidará pronto. Permíteme presentarte una de las mejores cartas de la Luna Negra como una pequeña muestra de nuestro agradecimiento.', 0),
(@ID, 'esMX', 'Así que has armado un mazo de Elementales. Su gesto de devolvérnoslo creará un vínculo entre usted y la Luna Negra que no se olvidará pronto. Permíteme presentarte una de las mejores cartas de la Luna Negra como una pequeña muestra de nuestro agradecimiento.', 0);
-- 13484 Colectores primaverales
-- https://es.wowhead.com/quest=13484
-- Spanish (esES/esMX) offer-reward text for quest 13484.
-- Delete-then-insert keeps this locale update idempotent on re-run.
SET @ID := 13484;
DELETE FROM `quest_offer_reward_locale` WHERE `ID` = @ID AND `locale` IN ('esES', 'esMX');
INSERT INTO `quest_offer_reward_locale` (`ID`, `locale`, `RewardText`, `VerifiedBuild`) VALUES
(@ID, 'esES', '¡Sí! ¡Sí, creo que tengo la tarea perfecta para ti!', 0),
(@ID, 'esMX', '¡Sí! ¡Sí, creo que tengo la tarea perfecta para ti!', 0);
-- 13480 La gran búsqueda de huevos
-- https://es.wowhead.com/quest=13480
SET @ID := 13480;
DELETE FROM `quest_request_items_locale` WHERE `ID` = @ID AND `locale` IN ('esES', 'esMX');
INSERT INTO `quest_request_items_locale` (`ID`, `locale`, `CompletionText`, `VerifiedBuild`) VALUES
(@ID, 'esES', '¿Tienes los trozos?', 0),
(@ID, 'esMX', '¿Tienes los trozos?', 0);
DELETE FROM `quest_offer_reward_locale` WHERE `ID` = @ID AND `locale` IN ('esES', 'esMX');
INSERT INTO `quest_offer_reward_locale` (`ID`, `locale`, `RewardText`, `VerifiedBuild`) VALUES
(@ID, 'esES', '¡Fantástico, fantástico, gracias! Puede que me haga falta más de una muestra para conseguir resultados verdaderamente concluyentes, pero lo has hecho muy bien por hoy.', 0),
(@ID, 'esMX', '¡Fantástico, fantástico, gracias! Puede que me haga falta más de una muestra para conseguir resultados verdaderamente concluyentes, pero lo has hecho muy bien por hoy.', 0);
-- 1685 La invocación de Gakin
-- https://es.wowhead.com/quest=1685
SET @ID := 1685;
DELETE FROM `quest_offer_reward_locale` WHERE `ID` = @ID AND `locale` IN ('esES', 'esMX');
INSERT INTO `quest_offer_reward_locale` (`ID`, `locale`, `RewardText`, `VerifiedBuild`) VALUES
(@ID, 'esES', 'Ya era hora de que aparecieras. Aunque, quizás debería haber enviado a alguien más capaz que Remen para encontrarte.$B$BNo importa.$B$BHas pasado demasiado tiempo sin entrenar, y un brujo sin entrenamiento no mantiene la cabeza sobre los hombros por mucho tiempo.', 0),
(@ID, 'esMX', 'Ya era hora de que aparecieras. Aunque, quizás debería haber enviado a alguien más capaz que Remen para encontrarte.$B$BNo importa.$B$BHas pasado demasiado tiempo sin entrenar, y un brujo sin entrenamiento no mantiene la cabeza sobre los hombros por mucho tiempo.', 0);
-- 1688 Surena Caledon
-- https://es.classic.wowhead.com/quest=1688
SET @ID := 1688;
DELETE FROM `quest_request_items_locale` WHERE `ID` = @ID AND `locale` IN ('esES', 'esMX');
INSERT INTO `quest_request_items_locale` (`ID`, `locale`, `CompletionText`, `VerifiedBuild`) VALUES
(@ID, 'esES', 'Incluso los más viejos y sabios pueden caer rendidos ante la gloria de la belleza y la juventud, $n. Un consejo que te vendrá bien recordar toda tu vida.', 0),
(@ID, 'esMX', 'Incluso los más viejos y sabios pueden caer rendidos ante la gloria de la belleza y la juventud, $n. Un consejo que te vendrá bien recordar toda tu vida.', 0);
DELETE FROM `quest_offer_reward_locale` WHERE `ID` = @ID AND `locale` IN ('esES', 'esMX');
INSERT INTO `quest_offer_reward_locale` (`ID`, `locale`, `RewardText`, `VerifiedBuild`) VALUES
(@ID, 'esES', 'Surena tenía mucho talento, pero no tanto como para conseguir dominar la magia de los brujos sin ayuda. Es una pena ver el desperdicio de talento pero, algunas veces, es necesario.$B$BEspero tener resultados diferentes contigo.', 0),
(@ID, 'esMX', 'Surena tenía mucho talento, pero no tanto como para conseguir dominar la magia de los brujos sin ayuda. Es una pena ver el desperdicio de talento pero, algunas veces, es necesario.$B$BEspero tener resultados diferentes contigo.', 0);
-- 1689 El vínculo
-- https://es.classic.wowhead.com/quest=1689
SET @ID := 1689;
DELETE FROM `quest_request_items_locale` WHERE `ID` = @ID AND `locale` IN ('esES', 'esMX');
INSERT INTO `quest_request_items_locale` (`ID`, `locale`, `CompletionText`, `VerifiedBuild`) VALUES
(@ID, 'esES', 'No puedo enseñarte a controlar a un abisario hasta que no hayas vencido a uno.', 0),
(@ID, 'esMX', 'No puedo enseñarte a controlar a un abisario hasta que no hayas vencido a uno.', 0);
DELETE FROM `quest_offer_reward_locale` WHERE `ID` = @ID AND `locale` IN ('esES', 'esMX');
INSERT INTO `quest_offer_reward_locale` (`ID`, `locale`, `RewardText`, `VerifiedBuild`) VALUES
(@ID, 'esES', 'Lo has hecho mejor de lo que esperaba, $n, pero has estado a la altura de mis esperanzas. Ahora puedes aprender a dominar a tu abisario sometido, un aliado que será muy valioso en los desafíos que, sin lugar a duda, se te presentarán.$B$BNo tengo nada más que enseñarte en este momento, pero todavía no he acabado contigo, $n.', 0),
(@ID, 'esMX', 'Lo has hecho mejor de lo que esperaba, $n, pero has estado a la altura de mis esperanzas. Ahora puedes aprender a dominar a tu abisario sometido, un aliado que será muy valioso en los desafíos que, sin lugar a duda, se te presentarán.$B$BNo tengo nada más que enseñarte en este momento, pero todavía no he acabado contigo, $n.', 0);
-- 1638 Instrucción de guerrero
-- https://es.classic.wowhead.com/quest=1638
SET @ID := 1638;
DELETE FROM `quest_offer_reward_locale` WHERE `ID` = @ID AND `locale` IN ('esES', 'esMX');
INSERT INTO `quest_offer_reward_locale` (`ID`, `locale`, `RewardText`, `VerifiedBuild`) VALUES
(@ID, 'esES', '¡Oh, jo! Tómate un trago conmigo, $gmuchacho:muchacha;. A primera vista pareces muy $gduro:dura;, pero veamos si puedes arreglártelas con algunas jarras de las especialidades más potentes de El Cerdo Borracho$B$BO bien... ¿has venido para recibir instrucción?', 0),
(@ID, 'esMX', '¡Oh, jo! Tómate un trago conmigo, $gmuchacho:muchacha;. A primera vista pareces muy $gduro:dura;, pero veamos si puedes arreglártelas con algunas jarras de las especialidades más potentes de El Cerdo Borracho$B$BO bien... ¿has venido para recibir instrucción?', 0);
-- 1639 Bartleby, el borracho
-- https://es.classic.wowhead.com/quest=1639
SET @ID := 1639;
DELETE FROM `quest_offer_reward_locale` WHERE `ID` = @ID AND `locale` IN ('esES', 'esMX');
INSERT INTO `quest_offer_reward_locale` (`ID`, `locale`, `RewardText`, `VerifiedBuild`) VALUES
(@ID, 'esES', '¡Hola $gmuchachito:muchachita;! ¿Qué hace $gun:una; repipi como tú en El Cerdo Borracho? Ten cuidado, se te podría romper una uña...', 0),
(@ID, 'esMX', '¡Hola $gmuchachito:muchachita;! ¿Qué hace $gun:una; repipi como tú en El Cerdo Borracho? Ten cuidado, se te podría romper una uña...', 0);
-- 1640 Vence a Bartleby
-- https://es.classic.wowhead.com/quest=1640
SET @ID := 1640;
DELETE FROM `quest_request_items_locale` WHERE `ID` = @ID AND `locale` IN ('esES', 'esMX');
INSERT INTO `quest_request_items_locale` (`ID`, `locale`, `CompletionText`, `VerifiedBuild`) VALUES
(@ID, 'esES', 'Sólo me quitarás la jarra de mis manos frías y muertas...', 0),
(@ID, 'esMX', 'Sólo me quitarás la jarra de mis manos frías y muertas...', 0);
DELETE FROM `quest_offer_reward_locale` WHERE `ID` = @ID AND `locale` IN ('esES', 'esMX');
INSERT INTO `quest_offer_reward_locale` (`ID`, `locale`, `RewardText`, `VerifiedBuild`) VALUES
(@ID, 'esES', '¡Eres mucho más duro de lo que pareces!', 0),
(@ID, 'esMX', '¡Eres mucho más duro de lo que pareces!', 0);
-- 1665 La jarra de Bartleby
-- https://es.classic.wowhead.com/quest=1665
SET @ID := 1665;
DELETE FROM `quest_request_items_locale` WHERE `ID` = @ID AND `locale` IN ('esES', 'esMX');
INSERT INTO `quest_request_items_locale` (`ID`, `locale`, `CompletionText`, `VerifiedBuild`) VALUES
(@ID, 'esES', 'Vi tu combate con Bartleby. ¡Bien hecho! ¿Tienes su jarra?', 0),
(@ID, 'esMX', 'Vi tu combate con Bartleby. ¡Bien hecho! ¿Tienes su jarra?', 0);
DELETE FROM `quest_offer_reward_locale` WHERE `ID` = @ID AND `locale` IN ('esES', 'esMX');
INSERT INTO `quest_offer_reward_locale` (`ID`, `locale`, `RewardText`, `VerifiedBuild`) VALUES
(@ID, 'esES', '¡La tienes! Seguro que no quería separarse de ella pero has sido muy $gpersuasivo:persuasiva;. Bien hecho.$B$BPasemos ahora a tu lección...', 0),
(@ID, 'esMX', '¡La tienes! Seguro que no quería separarse de ella pero has sido muy $gpersuasivo:persuasiva;. Bien hecho.$B$BPasemos ahora a tu lección...', 0);
-- 3100 Una carta simple
-- https://es.classic.wowhead.com/quest=3100
SET @ID := 3100;
DELETE FROM `quest_request_items_locale` WHERE `ID` = @ID AND `locale` IN ('esES', 'esMX');
INSERT INTO `quest_request_items_locale` (`ID`, `locale`, `CompletionText`, `VerifiedBuild`) VALUES
(@ID, 'esES', 'Ah, recibiste mi carta, $n ... bien.$B$BRecientemente ha habido una afluencia de guerreros en Elwynn, lo que es bueno para Ventormenta, pero malo para los kobolds y Defias de la zona.', 0),
(@ID, 'esMX', 'Ah, recibiste mi carta, $n ... bien.$B$BRecientemente ha habido una afluencia de guerreros en Elwynn, lo que es bueno para Ventormenta, pero malo para los kobolds y Defias de la zona.', 0);
DELETE FROM `quest_offer_reward_locale` WHERE `ID` = @ID AND `locale` IN ('esES', 'esMX');
INSERT INTO `quest_offer_reward_locale` (`ID`, `locale`, `RewardText`, `VerifiedBuild`) VALUES
(@ID, 'esES', 'Ponte en pie, estudia el terreno y vuelve conmigo cuando necesites formación. Estaré aquí de día o de noche.$B$BLos Caballeros de la Mano de Plata han hecho bien en hacer que este lugar sea bastante seguro, pero cuando conozcas a los otros ciudadanos, creo que encontrarás que todos tienen problemas con los que les vendría bien un poco de ayuda: ayuda que un $c puede brindarles. Buena suerte.', 0),
(@ID, 'esMX', 'Ponte en pie, estudia el terreno y vuelve conmigo cuando necesites formación. Estaré aquí de día o de noche.$B$BLos Caballeros de la Mano de Plata han hecho bien en hacer que este lugar sea bastante seguro, pero cuando conozcas a los otros ciudadanos, creo que encontrarás que todos tienen problemas con los que les vendría bien un poco de ayuda: ayuda que un $c puede brindarles. Buena suerte.', 0);
-- 3102 Una carta cifrada
-- https://es.classic.wowhead.com/quest=3102
SET @ID := 3102;
DELETE FROM `quest_request_items_locale` WHERE `ID` = @ID AND `locale` IN ('esES', 'esMX');
INSERT INTO `quest_request_items_locale` (`ID`, `locale`, `CompletionText`, `VerifiedBuild`) VALUES
(@ID, 'esES', 'Lo hiciste a la primera, y no parece que mucha gente te haya visto. Suficientemente bueno para mi. No me gusta que llamen mucho la atención aquí atrás ... agradable y tranquilo. Estoy seguro de que lo entenderás.$B$B¿Tienes algún problema todavía? Me alegra saber que no. Eso cambiará muy pronto.', 0),
(@ID, 'esMX', 'Lo hiciste a la primera, y no parece que mucha gente te haya visto. Suficientemente bueno para mi. No me gusta que llamen mucho la atención aquí atrás ... agradable y tranquilo. Estoy seguro de que lo entenderás.$B$B¿Tienes algún problema todavía? Me alegra saber que no. Eso cambiará muy pronto.', 0);
DELETE FROM `quest_offer_reward_locale` WHERE `ID` = @ID AND `locale` IN ('esES', 'esMX');
INSERT INTO `quest_offer_reward_locale` (`ID`, `locale`, `RewardText`, `VerifiedBuild`) VALUES
(@ID, 'esES', 'Descubrirás que hay una serie de personajes que codiciarán nuestras habilidades, $n. Aventureros, IV:7... diablos, incluso a la Hermandad Defias no le importaría tener un espía o dos dentro de Ventormenta. Pero recuerda esto, eres tu propio jefe. ¡No dejes que nadie te intimide para que hagas algo que no quieres hacer! Además, tenemos todas las cartas ... al menos, antes de que termine el juego. ¡Tu ya sabes!$B$BDe todos modos, solo quería presentarme y hacerlte saber que estoy aquí si necesitas capacitación. Ven en cualquier momento.', 0),
(@ID, 'esMX', 'Descubrirás que hay una serie de personajes que codiciarán nuestras habilidades, $n. Aventureros, IV:7... diablos, incluso a la Hermandad Defias no le importaría tener un espía o dos dentro de Ventormenta. Pero recuerda esto, eres tu propio jefe. ¡No dejes que nadie te intimide para que hagas algo que no quieres hacer! Además, tenemos todas las cartas ... al menos, antes de que termine el juego. ¡Tu ya sabes!$B$BDe todos modos, solo quería presentarme y hacerlte saber que estoy aquí si necesitas capacitación. Ven en cualquier momento.', 0);
-- 3103 Una carta sacralizada
-- https://es.classic.wowhead.com/quest=3103
SET @ID := 3103;
DELETE FROM `quest_request_items_locale` WHERE `ID` = @ID AND `locale` IN ('esES', 'esMX');
INSERT INTO `quest_request_items_locale` (`ID`, `locale`, `CompletionText`, `VerifiedBuild`) VALUES
(@ID, 'esES', 'Al fin llegas; sabía que vendrías. Que la Luz Sagrada te ilumine. Estos son tiempos difíciles, la Legión Ardiente sigue presente en Azeroth. Kalimdor intenta defenderse. Ayuda en todo lo que puedas.', 0),
(@ID, 'esMX', 'Al fin llegas; sabía que vendrías. Que la Luz Sagrada te ilumine. Estos son tiempos difíciles, la Legión Ardiente sigue presente en Azeroth. Kalimdor intenta defenderse. Ayuda en todo lo que puedas.', 0);
DELETE FROM `quest_offer_reward_locale` WHERE `ID` = @ID AND `locale` IN ('esES', 'esMX');
INSERT INTO `quest_offer_reward_locale` (`ID`, `locale`, `RewardText`, `VerifiedBuild`) VALUES
(@ID, 'esES', 'A medida que ganas experiencia, ven a verme y te enseñaré lo posible. Hasta entonces, que la sabiduría guíe tus pasos. Recuerda que de ti depende mejorar el mundo.', 0),
(@ID, 'esMX', 'A medida que ganas experiencia, ven a verme y te enseñaré lo posible. Hasta entonces, que la sabiduría guíe tus pasos. Recuerda que de ti depende mejorar el mundo.', 0);
-- 3104 Una carta glífica
-- https://es.classic.wowhead.com/quest=3104
SET @ID := 3104;
DELETE FROM `quest_request_items_locale` WHERE `ID` = @ID AND `locale` IN ('esES', 'esMX');
INSERT INTO `quest_request_items_locale` (`ID`, `locale`, `CompletionText`, `VerifiedBuild`) VALUES
(@ID, 'esES', 'Hola, $n, me llamo Khelden. ¿Querías algo?', 0),
(@ID, 'esMX', 'Hola, $n, me llamo Khelden. ¿Querías algo?', 0);
DELETE FROM `quest_offer_reward_locale` WHERE `ID` = @ID AND `locale` IN ('esES', 'esMX');
INSERT INTO `quest_offer_reward_locale` (`ID`, `locale`, `RewardText`, `VerifiedBuild`) VALUES
(@ID, 'esES', 'Sabía que mi carta no te apartaría de tu camino. Bien, ¿vienes a aceptar tu destino y enfrentarte a quienes se oponen a tu búsqueda de sabiduría y poder?$B$BTe temerán tanto como te respetarán, $n. Y yo siempre estaré aquí para instruirte; solo tienes que venir a buscarme.', 0),
(@ID, 'esMX', 'Sabía que mi carta no te apartaría de tu camino. Bien, ¿vienes a aceptar tu destino y enfrentarte a quienes se oponen a tu búsqueda de sabiduría y poder?$B$BTe temerán tanto como te respetarán, $n. Y yo siempre estaré aquí para instruirte; solo tienes que venir a buscarme.', 0);
-- 3105 Una carta manchada
-- https://es.classic.wowhead.com/quest=3105
SET @ID := 3105;
DELETE FROM `quest_request_items_locale` WHERE `ID` = @ID AND `locale` IN ('esES', 'esMX');
INSERT INTO `quest_request_items_locale` (`ID`, `locale`, `CompletionText`, `VerifiedBuild`) VALUES
(@ID, 'esES', 'Ah, entonces has llegado, y no demasiado pronto, $n. Algunos de los guardias estaban aquí hace un momento y me miraban con curiosidad... mundanos patéticos.', 0),
(@ID, 'esMX', 'Ah, entonces has llegado, y no demasiado pronto, $n. Algunos de los guardias estaban aquí hace un momento y me miraban con curiosidad... mundanos patéticos.', 0);
DELETE FROM `quest_offer_reward_locale` WHERE `ID` = @ID AND `locale` IN ('esES', 'esMX');
INSERT INTO `quest_offer_reward_locale` (`ID`, `locale`, `RewardText`, `VerifiedBuild`) VALUES
(@ID, 'esES', 'A medida que crezcas en poder, se sentirás $gtentado:tentada;; siempre debes recordar controlarte. No mentiré: la corrupción puede llegar a cualquier practicante de lo arcano; especialmente uno que trata con criaturas de la oscuridad. Ten paciencia y prudencia ... pero no dejes que eso reprima tu ambición.$B$BA medida que te vuelvas más $gpoderoso:poderosa;, vuelve a mí y te enseñaré más sobre nuestros caminos.', 0),
(@ID, 'esMX', 'A medida que crezcas en poder, se sentirás $gtentado:tentada;; siempre debes recordar controlarte. No mentiré: la corrupción puede llegar a cualquier practicante de lo arcano; especialmente uno que trata con criaturas de la oscuridad. Ten paciencia y prudencia ... pero no dejes que eso reprima tu ambición.$B$BA medida que te vuelvas más $gpoderoso:poderosa;, vuelve a mí y te enseñaré más sobre nuestros caminos.', 0);
-- 3861 ¡CLOQUEA!
-- https://es.classic.wowhead.com/quest=3861
SET @ID := 3861;
DELETE FROM `quest_request_items_locale` WHERE `ID` = @ID AND `locale` IN ('esES', 'esMX');
INSERT INTO `quest_request_items_locale` (`ID`, `locale`, `CompletionText`, `VerifiedBuild`) VALUES
(@ID, 'esES', 'El pollo te mira con ojos fríos y cara de hambre.$B$B"¡CO_COOOC!$B$BCoo-co-cooo."', 0),
(@ID, 'esMX', 'El pollo te mira con ojos fríos y cara de hambre.$B$B"¡CO_COOOC!$B$BCoo-co-cooo."', 0);
DELETE FROM `quest_offer_reward_locale` WHERE `ID` = @ID AND `locale` IN ('esES', 'esMX');
INSERT INTO `quest_offer_reward_locale` (`ID`, `locale`, `RewardText`, `VerifiedBuild`) VALUES
(@ID, 'esES', '"¡COO-COOC!"$B$BEl pollo se pone a engullir la comida.$B$BDe pronto se para, como sorprendido, y se agita incómodo. Vaya, este pollo no el un pollo, es una gallina... Será mejor que mires debajo, a ver.', 0),
(@ID, 'esMX', '"¡COO-COOC!"$B$BEl pollo se pone a engullir la comida.$B$BDe pronto se para, como sorprendido, y se agita incómodo. Vaya, este pollo no el un pollo, es una gallina... Será mejor que mires debajo, a ver.', 0);
-- 5805 ¡Te damos la bienvenida!
-- https://es.classic.wowhead.com/quest=5805
SET @ID := 5805;
UPDATE `quest_template_locale` SET `Details` = '¡Bienvenido a World of Warcraft!$B$BComo agradecimiento especial por haber comprado la Edición de coleccionista de World of Warcraft, entrega este vale a Merissa Fontana en Villadorada. Conseguirás un regalo: Un pequeño compañero que te acompañará en tu búsqueda de aventuras y gloria.$B$B¡Gracias de nuevo, y que disfrutes con World of Warcraft!', `VerifiedBuild` = 0 WHERE `ID` = @ID AND locale IN ('esES', 'esMX');
DELETE FROM `quest_request_items_locale` WHERE `ID` = @ID AND `locale` IN ('esES', 'esMX');
INSERT INTO `quest_request_items_locale` (`ID`, `locale`, `CompletionText`, `VerifiedBuild`) VALUES
(@ID, 'esES', '¡Saludos! ¡Es un placer conocerte!$B$BVeo que tienes un vale especial. Dámelo y te ofreceré algo a cambio.', 0),
(@ID, 'esMX', '¡Saludos! ¡Es un placer conocerte!$B$BVeo que tienes un vale especial. Dámelo y te ofreceré algo a cambio.', 0);
DELETE FROM `quest_offer_reward_locale` WHERE `ID` = @ID AND `locale` IN ('esES', 'esMX');
INSERT INTO `quest_offer_reward_locale` (`ID`, `locale`, `RewardText`, `VerifiedBuild`) VALUES
(@ID, 'esES', 'De hecho, eres $gun:una; $ghéroe:heroina; especial, $n. ¡Te damos la bienvenida al mundo de Azeroth y te ofrecemos uno de estos regalos únicos!', 0),
(@ID, 'esMX', 'De hecho, eres $gun:una; $ghéroe:heroina; especial, $n. ¡Te damos la bienvenida al mundo de Azeroth y te ofrecemos uno de estos regalos únicos!', 0);
-- 5623 Al servicio de la Luz
-- https://es.classic.wowhead.com/quest=5623
SET @ID := 5623;
DELETE FROM `quest_offer_reward_locale` WHERE `ID` = @ID AND `locale` IN ('esES', 'esMX');
INSERT INTO `quest_offer_reward_locale` (`ID`, `locale`, `RewardText`, `VerifiedBuild`) VALUES
(@ID, 'esES', 'Me alegra que hayas llegado, $n. Tenemos mucho que hablar. Debemos hablar sobre tu futuro y tu camino en la Luz.', 0),
(@ID, 'esMX', 'Me alegra que hayas llegado, $n. Tenemos mucho que hablar. Debemos hablar sobre tu futuro y tu camino en la Luz.', 0);
| gpl-2.0 |
InternetBowser/plutos | u-boot-s805/board/amlogic/configs/m6tv_skt_v1.h | 8361 | #ifndef __CONFIG_M6TV_SKT_V1_H__
#define __CONFIG_M6TV_SKT_V1_H__
#define CONFIG_MACH_MESON6TV_SKT // generate M6TV SKT machid number
//ddrtest and d2pll command support
#define CONFIG_CMD_DDR_TEST 1 //ddrtest & d2pll
//#define TEST_UBOOT_BOOT_SPEND_TIME
//UART Section
#define CONFIG_CONS_INDEX 2
//support "boot,bootd"
//#define CONFIG_CMD_BOOTD 1
//#define CONFIG_AML_I2C 1
//Enable storage devices
//#define CONFIG_CMD_NAND 1 //waiting for nand support
#define CONFIG_CMD_SF 1
#if defined(CONFIG_CMD_SF)
#define CONFIG_AML_MESON_6 1
#define SPI_WRITE_PROTECT 1
#define CONFIG_CMD_MEMORY 1
#endif /*CONFIG_CMD_SF*/
//Amlogic SARADC support
#define CONFIG_SARADC 1
#define CONFIG_EFUSE 1
//#define CONFIG_MACHID_CHECK 1
#define CONFIG_CMD_SUSPEND 1
#define CONFIG_IR_REMOTE 1
#define CONFIG_L2_OFF 1
#define CONFIG_CMD_NET 1
#if defined(CONFIG_CMD_NET)
#define CONFIG_M6 1
#define CONFIG_AML_ETHERNET 1
#define CONFIG_NET_MULTI 1
#define CONFIG_CMD_PING 1
#define CONFIG_CMD_DHCP 1
#define CONFIG_CMD_RARP 1
//#define CONFIG_NET_RGMII
// #define CONFIG_NET_RMII_CLK_EXTERNAL //use external 50MHz clock source
#define CONFIG_AML_ETHERNET 1 /*to link /driver/net/aml_ethernet.c*/
#define CONFIG_HOSTNAME arm_m6tv
#define CONFIG_ETHADDR 00:15:18:01:81:31 /* Ethernet address */
#define CONFIG_IPADDR 10.18.9.97 /* Our ip address */
#define CONFIG_GATEWAYIP 10.18.9.1 /* Our getway ip address */
#define CONFIG_SERVERIP 10.18.9.113 /* Tftp server ip address */
#define CONFIG_NETMASK 255.255.255.0
#endif /* (CONFIG_CMD_NET) */
#define CONFIG_SDIO_B1 1
#define CONFIG_SDIO_A 1
#define CONFIG_SDIO_B 1
#define CONFIG_SDIO_C 1
#define CONFIG_ENABLE_EXT_DEVICE_RETRY 1
#define CONFIG_MMU 1
#define CONFIG_PAGE_OFFSET 0xc0000000
#define CONFIG_SYS_LONGHELP 1
/* USB
* Enable CONFIG_MUSB_HCD for Host functionalities MSC, keyboard
* Enable CONFIG_MUSB_UDD for Device functionalities.
*/
/* #define CONFIG_MUSB_UDC 1 */
#define CONFIG_M6_USBPORT_BASE_A 0xC9040000
#define CONFIG_M6_USBPORT_BASE_B 0xC90C0000
#define CONFIG_M6_USBPORT_BASE_C 0xC9100000
#define CONFIG_M6_USBPORT_BASE_D 0xC9140000
#define CONFIG_USB_STORAGE 1
#define CONFIG_USB_DWC_OTG_HCD 1
#define CONFIG_USB_DWC_OTG_294 1
#define CONFIG_CMD_USB 1
#define CONFIG_MEMSIZE 512 /*unit is MB*/
#if(CONFIG_MEMSIZE == 512)
#define BOARD_INFO_ENV " mem=512M"
#define UBOOTPATH "u-boot-512M-UartB.bin"
#else
#define BOARD_INFO_ENV ""
#define UBOOTPATH "u-boot.bin"
#endif
#define CONFIG_UCL 1
#define CONFIG_SELF_COMPRESS
#define CONFIG_PREBOOT "mw da004004 80000510;mw c81000014 4000;mw c1109900 0"
//#define CONFIG_UBI_SUPPORT
#ifdef CONFIG_UBI_SUPPORT
#define CONFIG_CMD_UBI
#define CONFIG_CMD_UBIFS
#define CONFIG_RBTREE
#define MTDIDS_DEFAULT "nand1=nandflash1\0"
#define MTDPARTS_DEFAULT "mtdparts=nandflash1:256m@168m(system)\0"
#endif
/* Environment information */
#define CONFIG_BOOTDELAY 1
#define CONFIG_BOOTFILE uImage
#define CONFIG_EXTRA_ENV_SETTINGS \
"loadaddr=0x82000000\0" \
"testaddr=0x82400000\0" \
"console=ttyS0,115200n8\0" \
"mmcargs=setenv bootargs console=${console} " \
"boardname=m1_mbox\0" \
"chipname=8726m\0" \
"machid=1124\0" \
"bootargs=init=/init console=ttyS0,115200n8 mem=1024m\0" \
"partnum=2\0" \
"p0start=1000000\0" \
"p0size=400000\0" \
"p0path=uImage\0" \
"p1start=1400000\0" \
"p1size=8000000\0" \
"p1path=android.rootfs\0" \
"bootstart=0\0" \
"bootsize=60000\0" \
"bootpath=u-boot-512M-UartB.bin\0" \
"normalstart=1000000\0" \
"normalsize=400000\0" \
#define CONFIG_BOOTCOMMAND "mmcinfo;fatload mmc 0:1 82000000 uimage;bootm"
#define CONFIG_AUTO_COMPLETE 1
#define CONFIG_SPI_BOOT 1
//#define CONFIG_MMC_BOOT
#ifndef CONFIG_JERRY_NAND_TEST
#define CONFIG_NAND_BOOT 1
#endif
//#ifdef CONFIG_NAND_BOOT
//#define CONFIG_AMLROM_NANDBOOT 1
//#endif
#define CONFIG_ENV_SIZE (64*1024)
#ifdef CONFIG_SPI_BOOT
#define CONFIG_ENV_OVERWRITE
#define CONFIG_ENV_IS_IN_SPI_FLASH
#define CONFIG_CMD_SAVEENV
//for CONFIG_SPI_FLASH_SPANSION 64KB sector size
//#ifdef CONFIG_SPI_FLASH_SPANSION
#define CONFIG_ENV_SECT_SIZE 0x10000
//#else
// #define CONFIG_ENV_SECT_SIZE 0x1000
//#endif
#define CONFIG_ENV_OFFSET 0x1f0000
#elif defined CONFIG_NAND_BOOT
#define CONFIG_ENV_IS_IN_AML_NAND
#define CONFIG_CMD_SAVEENV
#define CONFIG_ENV_OVERWRITE
#define CONFIG_ENV_OFFSET 0x400000
#define CONFIG_ENV_BLOCK_NUM 2
#elif defined CONFIG_MMC_BOOT
#define CONFIG_ENV_IS_IN_MMC
#define CONFIG_CMD_SAVEENV
#define CONFIG_SYS_MMC_ENV_DEV 0
#define CONFIG_ENV_OFFSET 0x1000000
#else
#define CONFIG_ENV_IS_NOWHERE 1
#endif
/*POST support*/
/*
#define CONFIG_POST (CONFIG_SYS_POST_CACHE | CONFIG_SYS_POST_BSPEC1 | \
CONFIG_SYS_POST_RTC | CONFIG_SYS_POST_ADC | \
CONFIG_SYS_POST_PLL)
*/
//CONFIG_SYS_POST_MEMORY
#undef CONFIG_POST
#ifdef CONFIG_POST
#define CONFIG_POST_AML
#define CONFIG_POST_ALT_LIST
#define CONFIG_SYS_CONSOLE_IS_IN_ENV /* Otherwise it catches logbuffer as output */
#define CONFIG_LOGBUFFER
#define CONFIG_CMD_DIAG
#define SYSTEST_INFO_L1 1
#define SYSTEST_INFO_L2 2
#define SYSTEST_INFO_L3 3
#define CONFIG_POST_BSPEC1 { \
"L2CACHE test", \
"l2cache", \
"This test verifies the L2 cache operation.", \
POST_RAM | POST_MANUAL, \
&l2cache_post_test, \
NULL, \
NULL, \
CONFIG_SYS_POST_BSPEC1 \
}
#define CONFIG_POST_BSPEC2 { \
"BIST test", \
"bist", \
"This test checks bist test", \
POST_RAM | POST_MANUAL, \
&bist_post_test, \
NULL, \
NULL, \
CONFIG_SYS_POST_BSPEC1 \
}
#endif /*end ifdef CONFIG_POST*/
//----------------------------------------------------------------------
//Please set the M6TV CPU clock (unit: MHz)
//legal value: 700, 800,900,1000,1200,1296
#define M6TV_CPU_CLK (800)
#define CONFIG_SYS_CPU_CLK (M6TV_CPU_CLK)
//----------------------------------------------------------------------
/*-----------------------------------------------------------------------
* Physical Memory Map
*/
//Please just define m6tv DDR clock here only
//current DDR clock range (408~804)MHz with fixed step 12MHz
#define CFG_M6TV_DDR_CLK (648)
//#define CONFIG_DDR_LOW_POWER 1
//#define M6TV_DDR3_512M
#define M6TV_DDR3_1GB
//above setting will affect following:
//board/amlogic/m6tv_skt_v1/firmware/timming.c
//arch/arm/cpu/aml_meson/m6tv/mmutable.s
//note: please DO NOT remove following check code
#if !defined(M6TV_DDR3_1GB) && !defined(M6TV_DDR3_512M)
#error "Please set DDR3 capacity first in file m6tv_skt_v1.h\n"
#endif
#define CONFIG_M6TV_DUMP_DDR_INFO 1
/***Other MARCO about DDR***/
#define ENABLE_WRITE_LEVELING 1
/***************************/
#define CONFIG_NR_DRAM_BANKS 1 /* CS1 may or may not be populated */
#define PHYS_MEMORY_START 0x80000000 // from 500000
#if defined(M6TV_DDR3_1GB)
#define PHYS_MEMORY_SIZE 0x40000000 // 1GB
#elif defined(M6TV_DDR3_512M)
#define PHYS_MEMORY_SIZE 0x20000000 // 512M
#else
#error "Please define DDR3 memory capacity in file m6tv_skt_v1.h\n"
#endif
#define CONFIG_SYS_MEMTEST_START 0x80000000 /* memtest works on */
#define CONFIG_SYS_MEMTEST_END 0x07000000 /* 0 ... 120 MB in DRAM */
#define CONFIG_ENABLE_MEM_DEVICE_TEST 1
#define CONFIG_NR_DRAM_BANKS 1 /* CS1 may or may not be populated */
//m6 security boot
//#define CONFIG_M6_SECU_BOOT 1
//To build the encrypted uboot with key: aml-rsa-key.rsa
//#define CONFIG_AML_CRYPTO_UBOOT 1
//M6TV L1 cache enable for uboot decompress speed up
#define CONFIG_AML_SPL_L1_CACHE_ON 1
//DDR pre-init setting
//#define CONFIG_AML_DDR_PRESET 1
/*-----------------------------------------------------------------------
* power down
*/
//#define CONFIG_CMD_RUNARC 1 /* runarc */
#define CONFIG_AML_SUSPEND 1
/*
* CPU switch test for uboot
*/
//#define CONFIG_M6_TEST_CPU_SWITCH 1
#endif //__CONFIG_M6_SKT_V1_H__
| gpl-2.0 |
holyangel/LGE_G3 | arch/m68k/platform/68328/ints.c | 4291 | /*
* linux/arch/m68knommu/platform/68328/ints.c
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*
* Copyright 1996 Roman Zippel
* Copyright 1999 D. Jeff Dionne <[email protected]>
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <asm/traps.h>
#include <asm/io.h>
#include <asm/machdep.h>
#if defined(CONFIG_M68328)
#include <asm/MC68328.h>
#elif defined(CONFIG_M68EZ328)
#include <asm/MC68EZ328.h>
#elif defined(CONFIG_M68VZ328)
#include <asm/MC68VZ328.h>
#endif
/* */
asmlinkage void system_call(void);
asmlinkage void buserr(void);
asmlinkage void trap(void);
asmlinkage void trap3(void);
asmlinkage void trap4(void);
asmlinkage void trap5(void);
asmlinkage void trap6(void);
asmlinkage void trap7(void);
asmlinkage void trap8(void);
asmlinkage void trap9(void);
asmlinkage void trap10(void);
asmlinkage void trap11(void);
asmlinkage void trap12(void);
asmlinkage void trap13(void);
asmlinkage void trap14(void);
asmlinkage void trap15(void);
asmlinkage void trap33(void);
asmlinkage void trap34(void);
asmlinkage void trap35(void);
asmlinkage void trap36(void);
asmlinkage void trap37(void);
asmlinkage void trap38(void);
asmlinkage void trap39(void);
asmlinkage void trap40(void);
asmlinkage void trap41(void);
asmlinkage void trap42(void);
asmlinkage void trap43(void);
asmlinkage void trap44(void);
asmlinkage void trap45(void);
asmlinkage void trap46(void);
asmlinkage void trap47(void);
asmlinkage irqreturn_t bad_interrupt(int, void *);
asmlinkage irqreturn_t inthandler(void);
asmlinkage irqreturn_t inthandler1(void);
asmlinkage irqreturn_t inthandler2(void);
asmlinkage irqreturn_t inthandler3(void);
asmlinkage irqreturn_t inthandler4(void);
asmlinkage irqreturn_t inthandler5(void);
asmlinkage irqreturn_t inthandler6(void);
asmlinkage irqreturn_t inthandler7(void);
/*
*/
/*
 * process_int - decode and dispatch pending 68328 interrupts.
 * @vec: exception vector number (unused; dispatch is driven by ISR bits).
 * @fp:  saved register frame, forwarded to do_IRQ().
 *
 * Takes a snapshot of the interrupt status register (ISR) and services
 * every bit that is set.  The nested if-chain is a branch-based priority
 * decoder: it first narrows the pending word down to a 4-bit nibble
 * (lower-numbered bits are checked first), then the small scan loop
 * walks upward within that nibble to locate the exact bit and its IRQ
 * number.  Each serviced IRQ is cleared from the local copy, so the
 * outer loop terminates once all snapshot bits have been handled.
 */
void process_int(int vec, struct pt_regs *fp)
{
	int irq;
	int mask;
	unsigned long pend = ISR;	/* snapshot of pending interrupt bits */

	while (pend) {
		/* Select the lowest pending nibble: bits 0-15 first ... */
		if (pend & 0x0000ffff) {
			if (pend & 0x000000ff) {
				if (pend & 0x0000000f) {
					mask = 0x00000001;
					irq = 0;
				} else {
					mask = 0x00000010;
					irq = 4;
				}
			} else {
				if (pend & 0x00000f00) {
					mask = 0x00000100;
					irq = 8;
				} else {
					mask = 0x00001000;
					irq = 12;
				}
			}
		} else {
			/* ... then bits 16-31. */
			if (pend & 0x00ff0000) {
				if (pend & 0x000f0000) {
					mask = 0x00010000;
					irq = 16;
				} else {
					mask = 0x00100000;
					irq = 20;
				}
			} else {
				if (pend & 0x0f000000) {
					mask = 0x01000000;
					irq = 24;
				} else {
					mask = 0x10000000;
					irq = 28;
				}
			}
		}

		/* Scan within the chosen nibble for the exact pending bit. */
		while (! (mask & pend)) {
			mask <<=1;
			irq++;
		}

		/* Hand the IRQ to the generic layer, then clear it locally. */
		do_IRQ(irq, fp);
		pend &= ~mask;
	}
}
/* Enable interrupt line d->irq by clearing its bit in the mask register. */
static void intc_irq_unmask(struct irq_data *d)
{
	IMR &= ~(1 << d->irq);
}
/* Disable interrupt line d->irq by setting its bit in the mask register. */
static void intc_irq_mask(struct irq_data *d)
{
	IMR |= (1 << d->irq);
}
/*
 * irq_chip for the on-chip 68328 interrupt controller.  Only mask and
 * unmask operations are needed; both are implemented via the IMR register.
 */
static struct irq_chip intc_irq_chip = {
	.name		= "M68K-INTC",
	.irq_mask	= intc_irq_mask,
	.irq_unmask	= intc_irq_unmask,
};
/*
*/
/*
 * trap_init - populate the RAM exception vector table.
 *
 * Vectors 72-255 get a catch-all bad_interrupt stub, vector 32 is the
 * system call entry, and vectors 65-71 are wired to the per-level
 * interrupt entry stubs (inthandler1..inthandler7).
 */
void __init trap_init(void)
{
	int i;

	/* Point every unused vector at the catch-all handler. */
	for (i = 72; i < 256; ++i)
		_ramvec[i] = (e_vector) bad_interrupt;

	_ramvec[32] = system_call;

	_ramvec[65] = (e_vector) inthandler1;
	_ramvec[66] = (e_vector) inthandler2;
	_ramvec[67] = (e_vector) inthandler3;
	_ramvec[68] = (e_vector) inthandler4;
	_ramvec[69] = (e_vector) inthandler5;
	_ramvec[70] = (e_vector) inthandler6;
	_ramvec[71] = (e_vector) inthandler7;
}
/*
 * init_IRQ - initialise the interrupt controller and generic IRQ layer.
 *
 * Programs the interrupt vector register, masks every source, and
 * registers the intc irq_chip with level-type flow handling for each
 * IRQ number.
 */
void __init init_IRQ(void)
{
	int i;

	IVR = 0x40;  /* vector base 0x40: matches the 65-71 stubs in trap_init() */

	/* Mask all interrupt sources until drivers request them. */
	IMR = ~0;

	for (i = 0; (i < NR_IRQS); i++) {
		irq_set_chip(i, &intc_irq_chip);
		irq_set_handler(i, handle_level_irq);
	}
}
| gpl-2.0 |
ironman771/xbmc | xbmc/platform/android/activity/EventLoop.cpp | 4826 | /*
* Copyright (C) 2012-2013 Team XBMC
* http://kodi.tv
*
* This Program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This Program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with XBMC; see the file COPYING. If not, see
* <http://www.gnu.org/licenses/>.
*
*/
#include "EventLoop.h"
#include "XBMCApp.h"
#include "AndroidExtra.h"
#include <dlfcn.h>
#define IS_FROM_SOURCE(v, s) ((v & s) == s)
/*
 * Wire this event loop into the native android_app glue: store this
 * instance as userData and register the static trampolines that forward
 * activity commands and input events back to it.  A NULL application
 * leaves the loop unconnected (run() would then never be usable).
 */
CEventLoop::CEventLoop(android_app* application)
  : m_enabled(false),
    m_application(application),
    m_activityHandler(NULL), m_inputHandler(NULL)
{
  if (m_application == NULL)
    return;

  m_application->userData = this;
  m_application->onAppCmd = activityCallback;
  m_application->onInputEvent = inputCallback;
}
/*
 * Pump the Android looper until the application is destroyed.
 *
 * The supplied handlers are remembered so the static glue callbacks can
 * forward activity commands and input events to them.  Polling blocks
 * indefinitely (-1 timeout); after each dispatched event we check the
 * glue's destroyRequested flag and leave the loop when it is set.
 */
void CEventLoop::run(IActivityHandler &activityHandler, IInputHandler &inputHandler)
{
  m_activityHandler = &activityHandler;
  m_inputHandler = &inputHandler;

  CXBMCApp::android_printf("CEventLoop: starting event loop");

  for (;;)
  {
    int pendingEvents = 0;
    struct android_poll_source* pollSource = NULL;

    // Block forever waiting for the next batch of events.
    while (ALooper_pollAll(-1, NULL, &pendingEvents, (void**)&pollSource) >= 0)
    {
      // Dispatch the event through its registered processor, if any.
      if (pollSource != NULL)
        pollSource->process(m_application, pollSource);

      // Bail out once the glue layer asks us to shut down.
      if (m_application->destroyRequested)
      {
        CXBMCApp::android_printf("CEventLoop: we are being destroyed");
        return;
      }
    }
  }
}
/*
 * Dispatch one APP_CMD_* lifecycle/window command from the android_app
 * glue to the registered activity handler.  Unknown commands are ignored.
 */
void CEventLoop::processActivity(int32_t command)
{
  switch (command)
  {
    case APP_CMD_CONFIG_CHANGED:
      m_activityHandler->onConfigurationChanged();
      break;

    case APP_CMD_INIT_WINDOW:
      // The window is being shown, get it ready.
      m_activityHandler->onCreateWindow(m_application->window);

      // set the proper DPI value
      m_inputHandler->setDPI(CXBMCApp::GetDPI());
      break;

    case APP_CMD_WINDOW_RESIZED:
      // The window has been resized
      m_activityHandler->onResizeWindow();
      break;

    case APP_CMD_TERM_WINDOW:
      // The window is being hidden or closed, clean it up.
      m_activityHandler->onDestroyWindow();
      break;

    case APP_CMD_GAINED_FOCUS:
      m_activityHandler->onGainFocus();
      break;

    case APP_CMD_LOST_FOCUS:
      m_activityHandler->onLostFocus();
      break;

    case APP_CMD_LOW_MEMORY:
      m_activityHandler->onLowMemory();
      break;

    case APP_CMD_START:
      m_activityHandler->onStart();
      break;

    case APP_CMD_RESUME:
      m_activityHandler->onResume();
      break;

    case APP_CMD_SAVE_STATE:
      // The system has asked us to save our current state. Do so.
      m_activityHandler->onSaveState(&m_application->savedState, &m_application->savedStateSize);
      break;

    case APP_CMD_PAUSE:
      m_activityHandler->onPause();
      break;

    case APP_CMD_STOP:
      m_activityHandler->onStop();
      break;

    case APP_CMD_DESTROY:
      m_activityHandler->onDestroy();
      break;

    default:
      // Unhandled command; nothing to do.
      break;
  }
}
/*
 * Route one input event to the registered input handler.
 * Returns nonzero when the event was consumed, 0 otherwise.
 *
 * Gamepad/joystick sources are offered to the joystick handler first,
 * regardless of event type; remaining events are dispatched by type
 * (key vs. motion) and, for motion events, by source (touchscreen vs.
 * mouse).
 */
int32_t CEventLoop::processInput(AInputEvent* event)
{
  int32_t rtn = 0;
  int32_t type = AInputEvent_getType(event);
  int32_t source = AInputEvent_getSource(event);

  // handle joystick input
  if (IS_FROM_SOURCE(source, AINPUT_SOURCE_GAMEPAD) || IS_FROM_SOURCE(source, AINPUT_SOURCE_JOYSTICK))
  {
    if (m_inputHandler->onJoyStickEvent(event))
      return true;
  }

  switch(type)
  {
    case AINPUT_EVENT_TYPE_KEY:
      rtn = m_inputHandler->onKeyboardEvent(event);
      break;
    case AINPUT_EVENT_TYPE_MOTION:
      if (IS_FROM_SOURCE(source, AINPUT_SOURCE_TOUCHSCREEN))
        rtn = m_inputHandler->onTouchEvent(event);
      else if (IS_FROM_SOURCE(source, AINPUT_SOURCE_MOUSE))
        rtn = m_inputHandler->onMouseEvent(event);
      break;
  }

  return rtn;
}
// Static trampoline registered with the android_app glue: recovers the
// CEventLoop instance stashed in userData and forwards the command.
void CEventLoop::activityCallback(android_app* application, int32_t command)
{
  if (application == NULL || application->userData == NULL)
    return;

  CEventLoop* eventLoop = static_cast<CEventLoop*>(application->userData);
  eventLoop->processActivity(command);
}
// Static trampoline for input events; returns 0 (unhandled) when any of
// the required pointers is missing, otherwise the handler's result.
int32_t CEventLoop::inputCallback(android_app* application, AInputEvent* event)
{
  if (application == NULL || application->userData == NULL || event == NULL)
    return 0;

  CEventLoop* eventLoop = static_cast<CEventLoop*>(application->userData);
  return eventLoop->processInput(event);
}
| gpl-2.0 |
di0fref/wordpress_fahlslstad | wp-content/themes/twentyfourteen/content-audio.php | 2316 | <?php
/**
 * The template for displaying posts in the Audio post format.
 *
 * Renders the post thumbnail, category links, title, post-format badge,
 * date/comments meta, the (possibly paginated) content and the tag list.
 *
 * @package WordPress
 * @subpackage Twenty_Fourteen
 * @since Twenty Fourteen 1.0
 */
?>

<article id="post-<?php the_ID(); ?>" <?php post_class(); ?>>
	<?php twentyfourteen_post_thumbnail(); ?>

	<header class="entry-header">
		<?php if ( in_array( 'category', get_object_taxonomies( get_post_type() ) ) && twentyfourteen_categorized_blog() ) : ?>
		<div class="entry-meta">
			<span class="cat-links"><?php echo get_the_category_list( _x( ', ', 'Used between list items, there is a space after the comma.', 'twentyfourteen' ) ); ?></span>
		</div><!-- .entry-meta -->
		<?php
			endif;

			// Single views get a plain title; list views link it to the post.
			if ( is_single() ) :
				the_title( '<h1 class="entry-title">', '</h1>' );
			else :
				the_title( '<h1 class="entry-title"><a href="' . esc_url( get_permalink() ) . '" rel="bookmark">', '</a></h1>' );
			endif;
		?>

		<div class="entry-meta">
			<span class="post-format">
				<a class="entry-format" href="<?php echo esc_url( get_post_format_link( 'audio' ) ); ?>"><?php echo get_post_format_string( 'audio' ); ?></a>
			</span>
			<?php twentyfourteen_posted_on(); ?>

			<?php if ( ! post_password_required() && ( comments_open() || get_comments_number() ) ) : ?>
			<span class="comments-link"><?php comments_popup_link( __( 'Leave a comment', 'twentyfourteen' ), __( '1 Comment', 'twentyfourteen' ), __( '% Comments', 'twentyfourteen' ) ); ?></span>
			<?php endif; ?>

			<?php edit_post_link( __( 'Edit', 'twentyfourteen' ), '<span class="edit-link">', '</span>' ); ?>
		</div><!-- .entry-meta -->
	</header><!-- .entry-header -->

	<div class="entry-content">
		<?php
			/* translators: %s: Name of current post */
			the_content( sprintf(
				esc_html__( 'Continue reading %s', 'twentyfourteen' ),
				the_title( '<span class="screen-reader-text">', '</span> <span class="meta-nav">&rarr;</span>', false )
			) );

			// Page links for posts split with <!--nextpage-->.
			wp_link_pages( array(
				'before'      => '<div class="page-links"><span class="page-links-title">' . __( 'Pages:', 'twentyfourteen' ) . '</span>',
				'after'       => '</div>',
				'link_before' => '<span>',
				'link_after'  => '</span>',
			) );
		?>
	</div><!-- .entry-content -->

	<?php the_tags( '<footer class="entry-meta"><span class="tag-links">', '', '</span></footer>' ); ?>
</article><!-- #post-## -->
| gpl-2.0 |
artynet/openwrt-packages | utils/lxc/Makefile | 6730 | #
# Copyright (C) 2013-2015 OpenWrt.org
# Copyright (C) 2020 Sartura
#
# This is free software, licensed under the GNU General Public License v2.
# See /LICENSE for more information.
#
include $(TOPDIR)/rules.mk
PKG_NAME:=lxc
PKG_VERSION:=4.0.12
PKG_RELEASE:=$(AUTORELEASE)
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://linuxcontainers.org/downloads/lxc/
PKG_HASH:=db242f8366fc63e8c7588bb2017b354173cf3c4b20abc18780debdc48b14d3ef
PKG_MAINTAINER:=Marko Ratkaj <[email protected]>
PKG_LICENSE:=LGPL-2.1-or-later BSD-2-Clause GPL-2.0
PKG_FIXUP:=autoreconf
PKG_INSTALL:=1
PKG_BUILD_PARALLEL:=1
PKG_USE_MIPS16:=0
include $(INCLUDE_DIR)/package.mk
LXC_APPLETS_BIN += \
attach autostart cgroup copy config console create destroy device \
execute freeze info monitor snapshot start stop unfreeze unshare \
usernsexec wait top ls
LXC_APPLETS_LIB += \
monitord user-nic
LXC_SCRIPTS += \
checkconfig
DEPENDS_APPLETS = +libpthread +libcap +liblxc
DEPENDS_create = +lxc-configs +lxc-hooks +lxc-templates +flock +getopt
define Package/lxc/Default
SECTION:=utils
CATEGORY:=Utilities
TITLE:=LXC userspace tools
URL:=https://linuxcontainers.org/
DEPENDS:=lxc
endef
define Package/lxc
$(call Package/lxc/Default)
DEPENDS:=@!arc
MENU:=1
endef
define Package/lxc-auto
$(call Package/lxc/Default)
TITLE:= (initscript)
DEPENDS+=+lxc-start +lxc-stop
endef
define Package/lxc-auto/description
LXC is the userspace control package for Linux Containers, a lightweight
virtual system mechanism sometimes described as "chroot on steroids".
This package adds an initscript for starting and stopping the containers
on boot and shutdown.
endef
define Package/lxc-auto/conffiles
/etc/config/lxc-auto
endef
define Package/lxc-unprivileged
$(call Package/lxc/Default)
TITLE:=Helper script for unprivileged containers support
DEPENDS+=+shadow-utils +shadow-newuidmap +shadow-newgidmap
endef
define Package/lxc-unprivileged/description
Support for unprivileged containers requires newuidmap and newguidmap.
This package makes sure they are available & have correct permissions.
endef
define Package/lxc-unprivileged/install
$(INSTALL_DIR) $(1)/etc/uci-defaults
$(INSTALL_DATA) ./files/lxc-unprivileged.defaults $(1)/etc/uci-defaults/lxc-unprivileged
endef
define Package/lxc/config
source "$(SOURCE)/Config.in"
endef
define Package/lxc/description
LXC is the userspace control package for Linux Containers, a lightweight
virtual system mechanism sometimes described as "chroot on steroids".
endef
define Package/lxc-common
$(call Package/lxc/Default)
TITLE:=LXC common files
endef
define Package/lxc-hooks
$(call Package/lxc/Default)
TITLE:=LXC virtual machine hooks
endef
define Package/lxc-templates
$(call Package/lxc/Default)
TITLE:=LXC virtual machine templates
endef
define Package/lxc-configs
$(call Package/lxc/Default)
TITLE:=LXC virtual machine common config files
endef
define Package/liblxc
$(call Package/lxc/Default)
SECTION:=libs
CATEGORY:=Libraries
TITLE:=LXC userspace library
DEPENDS+= +libcap +libpthread +LXC_SECCOMP:libseccomp +libopenssl
endef
define Package/lxc-init
$(call Package/lxc/Default)
TITLE:=LXC Lua bindings
DEPENDS+= +liblxc
endef
CONFIGURE_ARGS += \
--disable-werror \
--disable-rpath \
--disable-doc \
--disable-api-docs \
--disable-apparmor \
--disable-selinux \
--$(if $(CONFIG_LXC_SECCOMP),en,dis)able-seccomp \
--enable-capabilities \
--disable-examples
ifdef CONFIG_USE_MIPS16
TARGET_CFLAGS += -minterlink-mips16
endif
TARGET_LDFLAGS += -lgcc_eh
define Build/InstallDev
$(INSTALL_DIR) $(1)/usr/include/lxc/
$(CP) \
$(PKG_INSTALL_DIR)/usr/include/lxc/* \
$(1)/usr/include/lxc/
$(INSTALL_DIR) $(1)/usr/lib
$(CP) \
$(PKG_INSTALL_DIR)/usr/lib/liblxc.so* \
$(1)/usr/lib/
$(INSTALL_DIR) $(1)/usr/lib/pkgconfig
$(CP) \
$(PKG_INSTALL_DIR)/usr/lib/pkgconfig/lxc.pc \
$(1)/usr/lib/pkgconfig/
$(SED) 's,/usr/include,$$$${prefix}/include,g' $(1)/usr/lib/pkgconfig/lxc.pc
$(SED) 's,/usr/lib,$$$${prefix}/lib,g' $(1)/usr/lib/pkgconfig/lxc.pc
endef
define Package/lxc/install
true
endef
define Package/lxc-auto/install
$(INSTALL_DIR) $(1)/etc/config $(1)/etc/init.d
$(INSTALL_CONF) ./files/lxc-auto.config $(1)/etc/config/lxc-auto
$(INSTALL_BIN) ./files/lxc-auto.init $(1)/etc/init.d/lxc-auto
endef
define Package/lxc-common/conffiles
/etc/lxc/default.conf
/etc/lxc/lxc.conf
endef
define Package/lxc-common/install
$(INSTALL_DIR) $(1)/usr/lib/lxc/rootfs
$(CP) \
$(PKG_INSTALL_DIR)/usr/lib/lxc/rootfs/README \
$(1)/usr/lib/lxc/rootfs/
$(INSTALL_DIR) $(1)/usr/share/lxc
$(CP) \
$(PKG_INSTALL_DIR)/usr/share/lxc/lxc.functions \
$(1)/usr/share/lxc/
$(INSTALL_DIR) $(1)/etc/lxc/
$(CP) \
$(PKG_INSTALL_DIR)/etc/lxc/default.conf \
$(1)/etc/lxc/default.conf
$(INSTALL_DIR) $(1)/etc/lxc/
$(CP) \
./files/lxc.conf \
$(1)/etc/lxc/lxc.conf
$(INSTALL_DIR) $(1)/srv/lxc/
endef
define Package/lxc-hooks/install
$(INSTALL_DIR) $(1)/usr/share/lxc/hooks
$(CP) \
$(PKG_INSTALL_DIR)/usr/share/lxc/hooks/* \
$(1)/usr/share/lxc/hooks/
endef
define Package/lxc-templates/install
$(INSTALL_DIR) $(1)/usr/share/lxc/templates/
$(CP) \
$(PKG_INSTALL_DIR)/usr/share/lxc/templates/lxc-* \
$(1)/usr/share/lxc/templates/
endef
define Package/lxc-configs/install
$(INSTALL_DIR) $(1)/usr/share/lxc/config/
$(CP) \
$(PKG_INSTALL_DIR)/usr/share/lxc/config/* \
$(1)/usr/share/lxc/config/
endef
define Package/liblxc/install
$(INSTALL_DIR) $(1)/usr/lib/
$(CP) \
$(PKG_INSTALL_DIR)/usr/lib/liblxc.so* \
$(1)/usr/lib/
endef
define Package/lxc-init/install
$(INSTALL_DIR) $(1)/sbin
$(CP) \
$(PKG_INSTALL_DIR)/usr/sbin/init.lxc \
$(1)/sbin/
endef
define GenPlugin
define Package/lxc-$(1)
$(call Package/lxc/Default)
TITLE:=Utility lxc-$(1) from the LXC userspace tools
DEPENDS+= +lxc-common $(2) $(DEPENDS_$(1))
endef
define Package/lxc-$(1)/install
$(INSTALL_DIR) $$(1)$(3)
$(INSTALL_BIN) \
$(PKG_INSTALL_DIR)$(3)/lxc-$(1) \
$$(1)$(3)/
endef
$$(eval $$(call BuildPackage,lxc-$(1)))
endef
$(eval $(call BuildPackage,lxc))
$(eval $(call BuildPackage,lxc-common))
$(eval $(call BuildPackage,lxc-hooks))
$(eval $(call BuildPackage,lxc-configs))
$(eval $(call BuildPackage,lxc-templates))
$(eval $(call BuildPackage,liblxc))
$(eval $(call BuildPackage,lxc-init))
$(eval $(call BuildPackage,lxc-auto))
$(eval $(call BuildPackage,lxc-unprivileged))
$(foreach u,$(LXC_APPLETS_BIN),$(eval $(call GenPlugin,$(u),$(DEPENDS_APPLETS),"/usr/bin")))
$(foreach u,$(LXC_APPLETS_LIB),$(eval $(call GenPlugin,$(u),$(DEPENDS_APPLETS),"/usr/lib/lxc")))
$(foreach u,$(LXC_SCRIPTS),$(eval $(call GenPlugin,$(u),,"/usr/bin")))
| gpl-2.0 |
tobiasbuhrer/tobiasb | web/core/modules/node/tests/src/Kernel/Migrate/d6/MigrateNodeDeriverTest.php | 1430 | <?php
namespace Drupal\Tests\node\Kernel\Migrate\d6;
use Drupal\Tests\migrate_drupal\Kernel\d6\MigrateDrupal6TestBase;
/**
 * Test D6NodeDeriver.
 *
 * Verifies that node translation migrations are derived per content type
 * only when the content_translation module is installed.
 *
 * @group migrate_drupal_6
 */
class MigrateNodeDeriverTest extends MigrateDrupal6TestBase {

  /**
   * The migration plugin manager.
   *
   * @var \Drupal\migrate\Plugin\MigrationPluginManagerInterface
   */
  protected $pluginManager;

  /**
   * {@inheritdoc}
   */
  public function setUp(): void {
    parent::setUp();
    // Grab the migration plugin manager once the kernel container is built.
    $this->pluginManager = $this->container->get('plugin.manager.migration');
  }

  /**
   * Tests node translation migrations with translation disabled.
   */
  public function testNoTranslations() {
    // Without content_translation, there should be no translation migrations.
    $migrations = $this->pluginManager->createInstances('d6_node_translation');
    $this->assertSame([], $migrations,
      "No node translation migrations without content_translation");
  }

  /**
   * Tests node translation migrations with translation enabled.
   */
  public function testTranslations() {
    // With content_translation, there should be translation migrations for
    // each content type.
    $this->enableModules(['language', 'content_translation']);
    $this->assertTrue($this->container->get('plugin.manager.migration')->hasDefinition('d6_node_translation:story'), "Node translation migrations exist after content_translation installed");
  }

}
| gpl-2.0 |
ruriwo/ErgoThumb072_firmware | tmk_core/protocol/lufa/LUFA-git/Demos/Host/LowLevel/AudioInputHost/AudioInputHost.h | 2746 | /*
LUFA Library
Copyright (C) Dean Camera, 2014.
dean [at] fourwalledcubicle [dot] com
www.lufa-lib.org
*/
/*
Copyright 2014 Dean Camera (dean [at] fourwalledcubicle [dot] com)
Permission to use, copy, modify, distribute, and sell this
software and its documentation for any purpose is hereby granted
without fee, provided that the above copyright notice appear in
all copies and that both that the copyright notice and this
permission notice and warranty disclaimer appear in supporting
documentation, and that the name of the author not be used in
advertising or publicity pertaining to distribution of the
software without specific, written prior permission.
The author disclaims all warranties with regard to this
software, including all implied warranties of merchantability
and fitness. In no event shall the author be liable for any
special, indirect or consequential damages or any damages
whatsoever resulting from loss of use, data or profits, whether
in an action of contract, negligence or other tortious action,
arising out of or in connection with the use or performance of
this software.
*/
/** \file
*
* Header file for AudioInputHost.c.
*/
#ifndef _AUDIO_INPUT_HOST_H_
#define _AUDIO_INPUT_HOST_H_

	/* Includes: */
		#include <avr/io.h>
		#include <avr/wdt.h>
		#include <avr/pgmspace.h>
		#include <avr/power.h>
		#include <avr/interrupt.h>
		#include <stdio.h>

		#include <LUFA/Drivers/Misc/TerminalCodes.h>
		#include <LUFA/Drivers/USB/USB.h>
		#include <LUFA/Drivers/Peripheral/Serial.h>
		#include <LUFA/Drivers/Board/LEDs.h>
		#include <LUFA/Platform/Platform.h>

		#include "ConfigDescriptor.h"

	/* Macros: */
		/** LED mask for the library LED driver, to indicate that the USB interface is not ready. */
		#define LEDMASK_USB_NOTREADY      LEDS_LED1

		/** LED mask for the library LED driver, to indicate that the USB interface is enumerating. */
		#define LEDMASK_USB_ENUMERATING  (LEDS_LED2 | LEDS_LED3)

		/** LED mask for the library LED driver, to indicate that the USB interface is ready. */
		#define LEDMASK_USB_READY        (LEDS_LED2 | LEDS_LED4)

		/** LED mask for the library LED driver, to indicate that an error has occurred in the USB interface. */
		#define LEDMASK_USB_ERROR        (LEDS_LED1 | LEDS_LED3)

	/* Function Prototypes: */
		/* Implemented in AudioInputHost.c; the EVENT_* functions are LUFA
		 * USB host event handlers invoked by the library's event system. */
		void SetupHardware(void);

		void EVENT_USB_Host_HostError(const uint8_t ErrorCode);
		void EVENT_USB_Host_DeviceAttached(void);
		void EVENT_USB_Host_DeviceUnattached(void);
		void EVENT_USB_Host_DeviceEnumerationFailed(const uint8_t ErrorCode,
		                                            const uint8_t SubErrorCode);
		void EVENT_USB_Host_DeviceEnumerationComplete(void);

#endif
| gpl-2.0 |
MegaPirateNG/ardupilot-mpng | libraries/GCS_MAVLink/GCS_MAVLink.h | 4158 | // -*- tab-width: 4; Mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*-
/// @file GCS_MAVLink.h
/// @brief One size fits all header for MAVLink integration.
#ifndef GCS_MAVLink_h
#define GCS_MAVLink_h
#include <AP_HAL.h>
#include <AP_Param.h>
// we have separate helpers disabled to make it possible
// to select MAVLink 1.0 in the arduino GUI build
#define MAVLINK_SEPARATE_HELPERS
#define MAVLINK_SEND_UART_BYTES(chan, buf, len) comm_send_buffer(chan, buf, len)
// define our own MAVLINK_MESSAGE_CRC() macro to allow it to be put
// into progmem
#define MAVLINK_MESSAGE_CRC(msgid) mavlink_get_message_crc(msgid)
#if CONFIG_HAL_BOARD == HAL_BOARD_APM1 || CONFIG_HAL_BOARD == HAL_BOARD_APM2
#include <util/crc16.h>
#define HAVE_CRC_ACCUMULATE
#endif
#include "include/mavlink/v1.0/ardupilotmega/version.h"
// this allows us to make mavlink_message_t much smaller. It means we
// can't support the largest messages in common.xml, but we don't need
// those for APM
#define MAVLINK_MAX_PAYLOAD_LEN 96
#define MAVLINK_COMM_NUM_BUFFERS 2
#include "include/mavlink/v1.0/mavlink_types.h"
/// MAVLink stream used for HIL interaction
extern AP_HAL::BetterStream *mavlink_comm_0_port;
/// MAVLink stream used for ground control communication
extern AP_HAL::BetterStream *mavlink_comm_1_port;
/// MAVLink system definition
extern mavlink_system_t mavlink_system;
/// Send a byte to the nominated MAVLink channel
///
/// @param chan Channel to send to
/// @param ch Byte to send
///
static inline void comm_send_ch(mavlink_channel_t chan, uint8_t ch)
{
    // Write one byte to the stream backing the given channel; bytes sent
    // to any other channel are silently dropped.
    if (chan == MAVLINK_COMM_0) {
        mavlink_comm_0_port->write(ch);
    } else if (chan == MAVLINK_COMM_1) {
        mavlink_comm_1_port->write(ch);
    }
}
void comm_send_buffer(mavlink_channel_t chan, const uint8_t *buf, uint8_t len);
/// Read a byte from the nominated MAVLink channel
///
/// @param chan Channel to receive on
/// @returns Byte read
///
static inline uint8_t comm_receive_ch(mavlink_channel_t chan)
{
    // Pull one byte from the stream backing the given channel; unknown
    // channels yield 0.
    if (chan == MAVLINK_COMM_0) {
        return (uint8_t)mavlink_comm_0_port->read();
    }
    if (chan == MAVLINK_COMM_1) {
        return (uint8_t)mavlink_comm_1_port->read();
    }
    return 0;
}
/// Check for available data on the nominated MAVLink channel
///
/// @param chan Channel to check
/// @returns Number of bytes available
static inline uint16_t comm_get_available(mavlink_channel_t chan)
{
    int16_t avail = 0;

    if (chan == MAVLINK_COMM_0) {
        avail = mavlink_comm_0_port->available();
    } else if (chan == MAVLINK_COMM_1) {
        avail = mavlink_comm_1_port->available();
    }

    // available() signals "unknown" with -1; report that as no data.
    return (avail == -1) ? 0 : (uint16_t)avail;
}
/// Check for available transmit space on the nominated MAVLink channel
///
/// @param chan Channel to check
/// @returns Number of bytes available
static inline uint16_t comm_get_txspace(mavlink_channel_t chan)
{
    int16_t space = 0;

    if (chan == MAVLINK_COMM_0) {
        space = mavlink_comm_0_port->txspace();
    } else if (chan == MAVLINK_COMM_1) {
        space = mavlink_comm_1_port->txspace();
    }

    // Negative txspace() results mean "unknown"; clamp to zero.
    return (space < 0) ? 0 : (uint16_t)space;
}
#ifdef HAVE_CRC_ACCUMULATE
// use the AVR C library implementation. This is a bit over twice as
// fast as the C version
static inline void crc_accumulate(uint8_t data, uint16_t *crcAccum)
{
    /* Fold one byte into the running CRC using avr-libc's optimised
     * CRC-16-CCITT update routine (see <util/crc16.h>). */
    *crcAccum = _crc_ccitt_update(*crcAccum, data);
}
#endif
/*
return true if the MAVLink parser is idle, so there is no partly parsed
MAVLink message being processed
*/
bool comm_is_idle(mavlink_channel_t chan);
#define MAVLINK_USE_CONVENIENCE_FUNCTIONS
#include "include/mavlink/v1.0/ardupilotmega/mavlink.h"
uint8_t mavlink_check_target(uint8_t sysid, uint8_t compid);
// return a MAVLink variable type given a AP_Param type
uint8_t mav_var_type(enum ap_var_type t);
// return CRC byte for a mavlink message ID
uint8_t mavlink_get_message_crc(uint8_t msgid);
// severity levels used in STATUSTEXT messages
enum gcs_severity {
SEVERITY_LOW=1,
SEVERITY_MEDIUM,
SEVERITY_HIGH,
SEVERITY_CRITICAL,
SEVERITY_USER_RESPONSE
};
#endif // GCS_MAVLink_h
| gpl-3.0 |
uwehermann/libsigrok | src/hardware/manson-hcs-3xxx/api.c | 11116 | /*
* This file is part of the libsigrok project.
*
* Copyright (C) 2014 Uwe Hermann <[email protected]>
* Copyright (C) 2014 Matthias Heidbrink <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <config.h>
#include "protocol.h"
static const uint32_t scanopts[] = {
SR_CONF_CONN,
SR_CONF_SERIALCOMM,
};
static const uint32_t drvopts[] = {
SR_CONF_POWER_SUPPLY,
};
static const uint32_t devopts[] = {
SR_CONF_CONTINUOUS,
SR_CONF_LIMIT_SAMPLES | SR_CONF_GET | SR_CONF_SET,
SR_CONF_LIMIT_MSEC | SR_CONF_GET | SR_CONF_SET,
SR_CONF_VOLTAGE | SR_CONF_GET,
SR_CONF_VOLTAGE_TARGET | SR_CONF_GET | SR_CONF_SET | SR_CONF_LIST,
SR_CONF_CURRENT | SR_CONF_GET,
SR_CONF_CURRENT_LIMIT | SR_CONF_GET | SR_CONF_SET | SR_CONF_LIST,
SR_CONF_ENABLED | SR_CONF_GET | SR_CONF_SET,
};
/* Note: All models have one power supply output only. */
static const struct hcs_model models[] = {
{ MANSON_HCS_3100, "HCS-3100", "3100", { 1, 18, 0.1 }, { 0, 10, 0.10 } },
{ MANSON_HCS_3100, "HCS-3100", "HCS-3100", { 1, 18, 0.1 }, { 0, 10, 0.10 } },
{ MANSON_HCS_3102, "HCS-3102", "3102", { 1, 36, 0.1 }, { 0, 5, 0.01 } },
{ MANSON_HCS_3102, "HCS-3102", "HCS-3102", { 1, 36, 0.1 }, { 0, 5, 0.01 } },
{ MANSON_HCS_3104, "HCS-3104", "3104", { 1, 60, 0.1 }, { 0, 2.5, 0.01 } },
{ MANSON_HCS_3104, "HCS-3104", "HCS-3104", { 1, 60, 0.1 }, { 0, 2.5, 0.01 } },
{ MANSON_HCS_3150, "HCS-3150", "3150", { 1, 18, 0.1 }, { 0, 15, 0.10 } },
{ MANSON_HCS_3150, "HCS-3150", "HCS-3150", { 1, 18, 0.1 }, { 0, 15, 0.10 } },
{ MANSON_HCS_3200, "HCS-3200", "3200", { 1, 18, 0.1 }, { 0, 20, 0.10 } },
{ MANSON_HCS_3200, "HCS-3200", "HCS-3200", { 1, 18, 0.1 }, { 0, 20, 0.10 } },
{ MANSON_HCS_3202, "HCS-3202", "3202", { 1, 36, 0.1 }, { 0, 10, 0.10 } },
{ MANSON_HCS_3202, "HCS-3202", "HCS-3202", { 1, 36, 0.1 }, { 0, 10, 0.10 } },
{ MANSON_HCS_3204, "HCS-3204", "3204", { 1, 60, 0.1 }, { 0, 5, 0.01 } },
{ MANSON_HCS_3204, "HCS-3204", "HCS-3204", { 1, 60, 0.1 }, { 0, 5, 0.01 } },
{ MANSON_HCS_3300, "HCS-3300-USB", "3300", { 1, 16, 0.1 }, { 0, 30, 0.10 } },
{ MANSON_HCS_3300, "HCS-3300-USB", "HCS-3300", { 1, 16, 0.1 }, { 0, 30, 0.10 } },
{ MANSON_HCS_3302, "HCS-3302-USB", "3302", { 1, 32, 0.1 }, { 0, 15, 0.10 } },
{ MANSON_HCS_3302, "HCS-3302-USB", "HCS-3302", { 1, 32, 0.1 }, { 0, 15, 0.10 } },
{ MANSON_HCS_3304, "HCS-3304-USB", "3304", { 1, 60, 0.1 }, { 0, 8, 0.10 } },
{ MANSON_HCS_3304, "HCS-3304-USB", "HCS-3304", { 1, 60, 0.1 }, { 0, 8, 0.10 } },
{ MANSON_HCS_3400, "HCS-3400-USB", "3400", { 1, 16, 0.1 }, { 0, 40, 0.10 } },
{ MANSON_HCS_3400, "HCS-3400-USB", "HCS-3400", { 1, 16, 0.1 }, { 0, 40, 0.10 } },
{ MANSON_HCS_3402, "HCS-3402-USB", "3402", { 1, 32, 0.1 }, { 0, 20, 0.10 } },
{ MANSON_HCS_3402, "HCS-3402-USB", "HCS-3402", { 1, 32, 0.1 }, { 0, 20, 0.10 } },
{ MANSON_HCS_3404, "HCS-3404-USB", "3404", { 1, 60, 0.1 }, { 0, 10, 0.10 } },
{ MANSON_HCS_3404, "HCS-3404-USB", "HCS-3404", { 1, 60, 0.1 }, { 0, 10, 0.10 } },
{ MANSON_HCS_3600, "HCS-3600-USB", "3600", { 1, 16, 0.1 }, { 0, 60, 0.10 } },
{ MANSON_HCS_3600, "HCS-3600-USB", "HCS-3600", { 1, 16, 0.1 }, { 0, 60, 0.10 } },
{ MANSON_HCS_3602, "HCS-3602-USB", "3602", { 1, 32, 0.1 }, { 0, 30, 0.10 } },
{ MANSON_HCS_3602, "HCS-3602-USB", "HCS-3602", { 1, 32, 0.1 }, { 0, 30, 0.10 } },
{ MANSON_HCS_3604, "HCS-3604-USB", "3604", { 1, 60, 0.1 }, { 0, 15, 0.10 } },
{ MANSON_HCS_3604, "HCS-3604-USB", "HCS-3604", { 1, 60, 0.1 }, { 0, 15, 0.10 } },
ALL_ZERO
};
/*
 * Probe a serial port for a Manson HCS-3xxx power supply.
 *
 * Reads the scan options for the port name (required) and serial
 * parameters (defaults to 9600/8n1), queries the model ID ("GMOD"),
 * current readings ("GETD") and device maximums ("GMAX"), and returns
 * a one-element device list on success, NULL otherwise.
 *
 * Fix vs. the previous version: the opened serial port and its
 * sr_serial_dev_inst were leaked (and left open) on every probe-failure
 * path; all failure paths now close and free the port.
 */
static GSList *scan(struct sr_dev_driver *di, GSList *options)
{
	int i, model_id;
	struct dev_context *devc;
	struct sr_dev_inst *sdi;
	struct sr_config *src;
	GSList *l;
	const char *conn, *serialcomm;
	struct sr_serial_dev_inst *serial;
	char reply[50], **tokens, *dummy;

	conn = NULL;
	serialcomm = NULL;
	devc = NULL;

	for (l = options; l; l = l->next) {
		src = l->data;
		switch (src->key) {
		case SR_CONF_CONN:
			conn = g_variant_get_string(src->data, NULL);
			break;
		case SR_CONF_SERIALCOMM:
			serialcomm = g_variant_get_string(src->data, NULL);
			break;
		default:
			sr_err("Unknown option %d, skipping.", src->key);
			break;
		}
	}

	if (!conn)
		return NULL;
	if (!serialcomm)
		serialcomm = "9600/8n1";

	serial = sr_serial_dev_inst_new(conn, serialcomm);
	if (serial_open(serial, SERIAL_RDWR) != SR_OK) {
		sr_serial_dev_inst_free(serial);
		return NULL;
	}

	sr_info("Probing serial port %s.", conn);

	/* Get the device model. */
	memset(&reply, 0, sizeof(reply));
	if ((hcs_send_cmd(serial, "GMOD\r") < 0) ||
	    (hcs_read_reply(serial, 2, reply, sizeof(reply)) < 0))
		goto exit_err_serial;
	tokens = g_strsplit((const gchar *)&reply, "\r", 2);

	model_id = -1;
	for (i = 0; models[i].id != NULL; i++) {
		if (!strcmp(models[i].id, tokens[0]))
			model_id = i;
	}
	if (model_id < 0) {
		sr_err("Unknown model ID '%s' detected, aborting.", tokens[0]);
		g_strfreev(tokens);
		goto exit_err_serial;
	}
	g_strfreev(tokens);

	sdi = g_malloc0(sizeof(struct sr_dev_inst));
	sdi->status = SR_ST_INACTIVE;
	sdi->vendor = g_strdup("Manson");
	sdi->model = g_strdup(models[model_id].name);
	sdi->inst_type = SR_INST_SERIAL;
	sdi->conn = serial;

	sr_channel_new(sdi, 0, SR_CHANNEL_ANALOG, TRUE, "CH1");

	devc = g_malloc0(sizeof(struct dev_context));
	sr_sw_limits_init(&devc->limits);
	devc->model = &models[model_id];
	sdi->priv = devc;

	/* Get current voltage, current, status. */
	if ((hcs_send_cmd(serial, "GETD\r") < 0) ||
	    (hcs_read_reply(serial, 2, reply, sizeof(reply)) < 0))
		goto exit_err;
	tokens = g_strsplit((const gchar *)&reply, "\r", 2);
	if (hcs_parse_volt_curr_mode(sdi, tokens) < 0) {
		g_strfreev(tokens);
		goto exit_err;
	}
	g_strfreev(tokens);

	/* Get max. voltage and current. */
	if ((hcs_send_cmd(serial, "GMAX\r") < 0) ||
	    (hcs_read_reply(serial, 2, reply, sizeof(reply)) < 0))
		goto exit_err;
	tokens = g_strsplit((const gchar *)&reply, "\r", 2);
	devc->current_max_device = g_strtod(&tokens[0][3], &dummy) * devc->model->current[2];
	tokens[0][3] = '\0';
	devc->voltage_max_device = g_strtod(tokens[0], &dummy) * devc->model->voltage[2];
	g_strfreev(tokens);

	serial_close(serial);

	return std_scan_complete(di, g_slist_append(NULL, sdi));

exit_err:
	/* sr_dev_inst_free() releases sdi's own fields but not priv/conn. */
	sr_dev_inst_free(sdi);
	g_free(devc);
exit_err_serial:
	serial_close(serial);
	sr_serial_dev_inst_free(serial);

	return NULL;
}
/*
 * Read a device configuration/measurement value.
 *
 * Values come from the cached dev_context state; no serial traffic is
 * generated here. Returns SR_OK on success, SR_ERR_ARG when no device
 * instance is given, SR_ERR_NA for unsupported keys.
 */
static int config_get(uint32_t key, GVariant **data,
	const struct sr_dev_inst *sdi, const struct sr_channel_group *cg)
{
	struct dev_context *devc;

	(void)cg;

	if (!sdi)
		return SR_ERR_ARG;

	devc = sdi->priv;

	switch (key) {
	case SR_CONF_LIMIT_SAMPLES:
	case SR_CONF_LIMIT_MSEC:
		/* Acquisition limits are delegated to the common sw-limits helper. */
		return sr_sw_limits_config_get(&devc->limits, key, data);
	case SR_CONF_VOLTAGE:
		*data = g_variant_new_double(devc->voltage);
		break;
	case SR_CONF_VOLTAGE_TARGET:
		*data = g_variant_new_double(devc->voltage_max);
		break;
	case SR_CONF_CURRENT:
		*data = g_variant_new_double(devc->current);
		break;
	case SR_CONF_CURRENT_LIMIT:
		*data = g_variant_new_double(devc->current_max);
		break;
	case SR_CONF_ENABLED:
		*data = g_variant_new_boolean(devc->output_enabled);
		break;
	default:
		return SR_ERR_NA;
	}

	return SR_OK;
}
/*
 * Change a device setting.
 *
 * Voltage/current setpoints are range-checked against the model minimum
 * and the per-device maximum probed at scan time, then written over the
 * serial link ("VOLT"/"CURR"); the output is switched with "SOUT".
 * Each command's single-line reply is consumed before returning.
 *
 * Returns SR_OK, SR_ERR_ARG for out-of-range values, SR_ERR on
 * communication failure, SR_ERR_NA for unsupported keys.
 */
static int config_set(uint32_t key, GVariant *data,
	const struct sr_dev_inst *sdi, const struct sr_channel_group *cg)
{
	struct dev_context *devc;
	gboolean bval;
	gdouble dval;

	(void)cg;

	devc = sdi->priv;

	switch (key) {
	case SR_CONF_LIMIT_MSEC:
	case SR_CONF_LIMIT_SAMPLES:
		return sr_sw_limits_config_set(&devc->limits, key, data);
	case SR_CONF_VOLTAGE_TARGET:
		dval = g_variant_get_double(data);
		if (dval < devc->model->voltage[0] || dval > devc->voltage_max_device)
			return SR_ERR_ARG;
		/* The device expects the setpoint in step units (value / step). */
		if ((hcs_send_cmd(sdi->conn, "VOLT%03.0f\r",
			(dval / devc->model->voltage[2])) < 0) ||
		    (hcs_read_reply(sdi->conn, 1, devc->buf, sizeof(devc->buf)) < 0))
			return SR_ERR;
		devc->voltage_max = dval;
		break;
	case SR_CONF_CURRENT_LIMIT:
		dval = g_variant_get_double(data);
		if (dval < devc->model->current[0] || dval > devc->current_max_device)
			return SR_ERR_ARG;
		if ((hcs_send_cmd(sdi->conn, "CURR%03.0f\r",
			(dval / devc->model->current[2])) < 0) ||
		    (hcs_read_reply(sdi->conn, 1, devc->buf, sizeof(devc->buf)) < 0))
			return SR_ERR;
		devc->current_max = dval;
		break;
	case SR_CONF_ENABLED:
		/* Note: SOUT takes the inverted flag (0 = output on). */
		bval = g_variant_get_boolean(data);
		if (hcs_send_cmd(sdi->conn, "SOUT%1d\r", !bval) < 0) {
			sr_err("Could not send SR_CONF_ENABLED command.");
			return SR_ERR;
		}
		if (hcs_read_reply(sdi->conn, 1, devc->buf, sizeof(devc->buf)) < 0) {
			sr_err("Could not read SR_CONF_ENABLED reply.");
			return SR_ERR;
		}
		devc->output_enabled = bval;
		break;
	default:
		return SR_ERR_NA;
	}

	return SR_OK;
}
/*
 * List valid values/ranges for a configuration key.
 *
 * Scan/device option lists use the standard helper; the voltage and
 * current ranges span from the model minimum to the per-device maximum
 * probed at scan time, in model step increments.
 */
static int config_list(uint32_t key, GVariant **data,
	const struct sr_dev_inst *sdi, const struct sr_channel_group *cg)
{
	const double *a;
	struct dev_context *devc;

	devc = (sdi) ? sdi->priv : NULL;

	switch (key) {
	case SR_CONF_SCAN_OPTIONS:
	case SR_CONF_DEVICE_OPTIONS:
		return STD_CONFIG_LIST(key, data, sdi, cg, scanopts, drvopts, devopts);
	case SR_CONF_VOLTAGE_TARGET:
		if (!devc || !devc->model)
			return SR_ERR_ARG;
		/* a = { min, max, step }; the real max comes from the device. */
		a = devc->model->voltage;
		*data = std_gvar_min_max_step(a[0], devc->voltage_max_device, a[2]);
		break;
	case SR_CONF_CURRENT_LIMIT:
		if (!devc || !devc->model)
			return SR_ERR_ARG;
		a = devc->model->current;
		*data = std_gvar_min_max_step(a[0], devc->current_max_device, a[2]);
		break;
	default:
		return SR_ERR_NA;
	}

	return SR_OK;
}
/* Begin an acquisition: arm the software limits, emit the session
 * header, and poll the serial port via hcs_receive_data(). */
static int dev_acquisition_start(const struct sr_dev_inst *sdi)
{
	struct dev_context *devc = sdi->priv;
	struct sr_serial_dev_inst *port = sdi->conn;

	sr_sw_limits_acquisition_start(&devc->limits);
	std_session_send_df_header(sdi);

	/* No request is in flight yet. */
	devc->reply_pending = FALSE;
	devc->req_sent_at = 0;

	serial_source_add(sdi->session, port, G_IO_IN, 10,
			hcs_receive_data, (void *)sdi);

	return SR_OK;
}
/* Driver operations table, registered with libsigrok below; standard
 * std_* helpers are used wherever no driver-specific behavior is needed. */
static struct sr_dev_driver manson_hcs_3xxx_driver_info = {
	.name = "manson-hcs-3xxx",
	.longname = "Manson HCS-3xxx",
	.api_version = 1,
	.init = std_init,
	.cleanup = std_cleanup,
	.scan = scan,
	.dev_list = std_dev_list,
	.dev_clear = std_dev_clear,
	.config_get = config_get,
	.config_set = config_set,
	.config_list = config_list,
	.dev_open = std_serial_dev_open,
	.dev_close = std_serial_dev_close,
	.dev_acquisition_start = dev_acquisition_start,
	.dev_acquisition_stop = std_serial_dev_acquisition_stop,
	.context = NULL,
};
SR_REGISTER_DEV_DRIVER(manson_hcs_3xxx_driver_info);
| gpl-3.0 |
Rana-TheCodeWizard/Koha | help.pl | 2292 | #!/usr/bin/perl
# Copyright 2010 Koha Development team
#
# This file is part of Koha.
#
# Koha is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Koha is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Koha; if not, see <http://www.gnu.org/licenses>.
use strict;
use warnings;
use C4::Templates;
use C4::Output;
# use C4::Auth;
use C4::Context;
use CGI qw ( -utf8 );
sub _help_template_file_of_url {
    my ($url) = @_;

    # Map ".../koha/<script>.pl" onto its help template; anything else
    # (including an empty or foreign referer) falls back to the main page.
    my $file = ( $url =~ /koha\/(.*)\.pl/ ) ? $1 : 'mainpage';

    # Strip characters that could escape the help template directory.
    $file =~ s/[^a-zA-Z0-9_\-\/]*//g;

    return "help/$file.tt";
}
# Work out which intranet script invoked the online help: an explicit
# "url" parameter wins, otherwise fall back to the HTTP referer.
my $query = CGI->new;    # CGI->new, not indirect-object "new CGI"
our $refer = $query->param('url');
$refer = $query->referer() if !$refer || $refer eq 'undefined';

my $from = _help_template_file_of_url($refer);
my $htdocs = C4::Context->config('intrahtdocs');

#
# checking that the help file exists, otherwise display the nohelp.tt page
#
my ( $theme, $lang ) = C4::Templates::themelanguage( $htdocs, $from, "intranet", $query );
unless ( -e "$htdocs/$theme/$lang/modules/$from" ) {
    $from = "help/nohelp.tt";
    ( $theme, $lang ) = C4::Templates::themelanguage( $htdocs, $from, "intranet", $query );
}

my $template = C4::Templates::gettemplate( $from, 'intranet', $query );
$template->param(
    referer                 => $refer,
    intranetstylesheet      => C4::Context->preference("intranetstylesheet"),
    intranetcolorstylesheet => C4::Context->preference("intranetcolorstylesheet"),
);

# Derive "<version>.<major>" from the full Koha version, rounding an odd
# (development) major down to the previous stable release.
# NOTE(review): string interpolation drops a leading zero ("3.08" becomes
# "3.8") — confirm against the manual's URL scheme before changing.
my $help_version = C4::Context->preference("Version");
if ( $help_version =~ m|^(\d+)\.(\d{2}).*$| ) {
    my $version = $1;
    my $major   = $2;
    if ( $major % 2 ) { $major--; }
    $help_version = "$version.$major";
}
$template->param( helpVersion => $help_version );

output_html_with_http_headers $query, "", $template->output;
ubc/learninglocker | app/views/system/password/reset.blade.php | 953 | @extends('layouts.loggedout')
@section('content')
@if(Session::has('error'))
<div class="clearfix">
<div class="alert alert-danger">
{{ Session::get('error') }}
</div>
</div>
@endif
<h1 class="col-sm-12">{{ trans('reminders.password_reset') }}</h1>
{{ Form::open(array('route' => array('password.update'))) }}
<p>{{ Form::label('email', 'Email') }}
{{ Form::text('email','',array('class' => 'form-control', 'required' => true)) }}</p>
<p>{{ Form::label('password', 'Password') }}
{{ Form::password('password',array('class' => 'form-control', 'required' => true)) }}</p>
<p>{{ Form::label('password_confirmation', 'Password confirm') }}
{{ Form::password('password_confirmation',array('class' => 'form-control', 'required' => true)) }}</p>
{{ Form::hidden('token', $token) }}
<p>{{ Form::submit('Submit',array('class' => 'btn btn-primary')) }}</p>
{{ Form::close() }}
@stop | gpl-3.0 |
anryko/ansible | lib/ansible/modules/cloud/google/gcp_redis_instance_info.py | 9179 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_redis_instance_info
description:
- Gather info for GCP Instance
short_description: Gather info for GCP Instance
version_added: '2.8'
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
region:
description:
- The name of the Redis region of the instance.
required: true
type: str
project:
description:
- The Google Cloud Platform project to use.
type: str
auth_kind:
description:
- The type of credential used.
type: str
required: true
choices:
- application
- machineaccount
- serviceaccount
service_account_contents:
description:
- The contents of a Service Account JSON file, either in a dictionary or as a
JSON string that represents it.
type: jsonarg
service_account_file:
description:
- The path of a Service Account JSON file if serviceaccount is selected as type.
type: path
service_account_email:
description:
- An optional service account email address if machineaccount is selected and
the user does not wish to use the default email.
type: str
scopes:
description:
- Array of scopes to be used
type: list
env_type:
description:
- Specifies which Ansible environment you're running this module within.
- This should not be set unless you know what you're doing.
- This only alters the User Agent string for any API requests.
type: str
notes:
- for authentication, you can set service_account_file using the C(gcp_service_account_file)
env variable.
- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
env variable.
- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
env variable.
- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
- Environment variables values will only be used if the playbook values are not set.
- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
'''
EXAMPLES = '''
- name: get info on an instance
gcp_redis_instance_info:
region: us-central1
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
'''
RETURN = '''
resources:
description: List of resources
returned: always
type: complex
contains:
alternativeLocationId:
description:
- Only applicable to STANDARD_HA tier which protects the instance against zonal
failures by provisioning it across two zones.
- If provided, it must be a different zone from the one provided in [locationId].
returned: success
type: str
authorizedNetwork:
description:
- The full name of the Google Compute Engine network to which the instance is
connected. If left unspecified, the default network will be used.
returned: success
type: str
createTime:
description:
- The time the instance was created in RFC3339 UTC "Zulu" format, accurate to
nanoseconds.
returned: success
type: str
currentLocationId:
description:
- The current zone where the Redis endpoint is placed.
- For Basic Tier instances, this will always be the same as the [locationId]
provided by the user at creation time. For Standard Tier instances, this can
be either [locationId] or [alternativeLocationId] and can change after a failover
event.
returned: success
type: str
displayName:
description:
- An arbitrary and optional user-provided name for the instance.
returned: success
type: str
host:
description:
- Hostname or IP address of the exposed Redis endpoint used by clients to connect
to the service.
returned: success
type: str
labels:
description:
- Resource labels to represent user provided metadata.
returned: success
type: dict
redisConfigs:
description:
- Redis configuration parameters, according to U(http://redis.io/topics/config).
- 'Please check Memorystore documentation for the list of supported parameters:
U(https://cloud.google.com/memorystore/docs/redis/reference/rest/v1/projects.locations.instances#Instance.FIELDS.redis_configs)
.'
returned: success
type: dict
locationId:
description:
- The zone where the instance will be provisioned. If not provided, the service
will choose a zone for the instance. For STANDARD_HA tier, instances will
be created across two zones for protection against zonal failures. If [alternativeLocationId]
is also provided, it must be different from [locationId].
returned: success
type: str
name:
description:
- The ID of the instance or a fully qualified identifier for the instance.
returned: success
type: str
memorySizeGb:
description:
- Redis memory size in GiB.
returned: success
type: int
port:
description:
- The port number of the exposed Redis endpoint.
returned: success
type: int
redisVersion:
description:
- 'The version of Redis software. If not provided, latest supported version
will be used. Currently, the supported values are: - REDIS_4_0 for Redis 4.0
compatibility - REDIS_3_2 for Redis 3.2 compatibility .'
returned: success
type: str
reservedIpRange:
description:
- The CIDR range of internal addresses that are reserved for this instance.
If not provided, the service will choose an unused /29 block, for example,
10.0.0.0/29 or 192.168.0.0/29. Ranges must be unique and non-overlapping with
existing subnets in an authorized network.
returned: success
type: str
tier:
description:
- 'The service tier of the instance. Must be one of these values: - BASIC: standalone
instance - STANDARD_HA: highly available primary/replica instances .'
returned: success
type: str
region:
description:
- The name of the Redis region of the instance.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
import json
################################################################################
# Main
################################################################################
def main():
    """Module entry point: list Redis instances and report them to Ansible."""
    module = GcpModule(argument_spec=dict(region=dict(required=True, type='str')))

    # Default to the broadest Cloud Platform scope when none was supplied.
    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform']

    module.exit_json(resources=fetch_list(module, collection(module)))
def collection(module):
    """Build the list URL for Redis instances in the module's project/region."""
    url = "https://redis.googleapis.com/v1/projects/{project}/locations/{region}/instances"
    return url.format(project=module.params['project'], region=module.params['region'])
def fetch_list(module, link):
    """Return every Redis instance at `link` via an authenticated GCP session."""
    session = GcpSession(module, 'redis')
    return session.list(link, return_if_object, array_name='instances')
def return_if_object(module, response):
    """Decode a GCP HTTP response into a dict, or None for empty/missing.

    Fails the module on non-2xx status, invalid JSON, or an embedded
    GCP error payload.
    """
    # Treat "not found" (404) and "no content" (204) as an empty result.
    if response.status_code in (404, 204):
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
        module.fail_json(msg="Invalid JSON response with error: %s" % inst)

    if navigate_hash(result, ['error', 'errors']):
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))

    return result
if __name__ == "__main__":
main()
| gpl-3.0 |
xavilal/moodle | mod/facetoface/lang/en_utf8/help/facetoface/allowoverbook.html | 365 | <h1>Allow overbooking</h1>
<p>When "Allow overbooking" is checked, learners will be able to sign up for a face-to-face session even if it is already full.</p>
<p>When a learner signs up for a session that is already full, they will receive an email advising that they have been waitlisted for the session and will be notified when a booking becomes available.</p> | gpl-3.0 |
TheCrowsJoker/mahara | htdocs/lib/htmlpurifier/HTMLPurifier/URIFilter/SafeIframe.php | 1996 | <?php
/**
* Implements safety checks for safe iframes.
*
* @warning This filter is *critical* for ensuring that %HTML.SafeIframe
* works safely.
*/
class HTMLPurifier_URIFilter_SafeIframe extends HTMLPurifier_URIFilter
{
    /**
     * Name under which this filter is registered with the URI filter chain.
     * @type string
     */
    public $name = 'SafeIframe';

    /**
     * Always load this filter, even when %HTML.SafeIframe is disabled;
     * the enabled check is deferred to filter() itself (see XXX below).
     * @type bool
     */
    public $always_load = true;

    /**
     * Whitelist pattern from %URI.SafeIframeRegexp; null until prepare() runs.
     * @type string
     */
    protected $regexp = null;

    // XXX: The not so good bit about how this is all set up now is we
    // can't check HTML.SafeIframe in the 'prepare' step: we have to
    // defer till the actual filtering.

    /**
     * Caches the configured iframe whitelist regexp for use in filter().
     * @param HTMLPurifier_Config $config
     * @return bool
     */
    public function prepare($config)
    {
        $this->regexp = $config->get('URI.SafeIframeRegexp');
        return true;
    }

    /**
     * Accepts or rejects a URI appearing as an iframe src. URIs outside
     * iframes pass through untouched; iframe URIs must match the configured
    * whitelist regexp or the iframe is rejected.
     * @param HTMLPurifier_URI $uri
     * @param HTMLPurifier_Config $config
     * @param HTMLPurifier_Context $context
     * @return bool
     */
    public function filter(&$uri, $config, $context)
    {
        // check if filter not applicable
        if (!$config->get('HTML.SafeIframe')) {
            return true;
        }
        // check if the filter should actually trigger
        if (!$context->get('EmbeddedURI', true)) {
            return true;
        }
        $token = $context->get('CurrentToken', true);
        if (!($token && $token->name == 'iframe')) {
            return true;
        }
        // check if we actually have some whitelists enabled
        if ($this->regexp === null) {
            return false;
        }
        // actually check the whitelists
        if (!preg_match($this->regexp, $uri->toString())) {
            return false;
        }
        // Make sure that if we're an HTTPS site, the iframe is also HTTPS
        // (local customisation: downgrade whitelisted http:// iframes to
        // protocol-relative URLs instead of rejecting them)
        if (is_https() && $uri->scheme == 'http') {
            // Convert it to a protocol-relative URL
            $uri->scheme = null;
        }
        return $uri;
    }
}
// vim: et sw=4 sts=4
| gpl-3.0 |
wrgeorge1983/librenms | sql-schema/151.sql | 135 | INSERT INTO config VALUES ('','alert.transports.msteams.url','','','Microsoft Teams Webhook URL','alerting',0, 'transports', 0, 0, 0);
| gpl-3.0 |
zerodowned/JustUO | Scripts/Spells/Base/Spell.cs | 28192 | #region References
using System;
using System.Collections.Generic;
using Server.Engines.ConPVP;
using Server.Items;
using Server.Misc;
using Server.Mobiles;
using Server.Network;
using Server.Spells.Bushido;
using Server.Spells.Necromancy;
using Server.Spells.Ninjitsu;
using Server.Spells.Second;
using Server.Spells.Spellweaving;
using Server.Targeting;
#endregion
namespace Server.Spells
{
public abstract class Spell : ISpell
{
private readonly Mobile m_Caster;
private readonly Item m_Scroll;
private readonly SpellInfo m_Info;
private SpellState m_State;
private long m_StartCastTime;
public SpellState State { get { return m_State; } set { m_State = value; } }
public Mobile Caster { get { return m_Caster; } }
public SpellInfo Info { get { return m_Info; } }
public string Name { get { return m_Info.Name; } }
public string Mantra { get { return m_Info.Mantra; } }
public Type[] Reagents { get { return m_Info.Reagents; } }
public Item Scroll { get { return m_Scroll; } }
public long StartCastTime { get { return m_StartCastTime; } }
private static readonly TimeSpan NextSpellDelay = TimeSpan.FromSeconds(0.75);
private static TimeSpan AnimateDelay = TimeSpan.FromSeconds(1.5);
public virtual SkillName CastSkill { get { return SkillName.Magery; } }
public virtual SkillName DamageSkill { get { return SkillName.EvalInt; } }
public virtual bool RevealOnCast { get { return true; } }
public virtual bool ClearHandsOnCast { get { return true; } }
public virtual bool ShowHandMovement { get { return true; } }
public virtual bool DelayedDamage { get { return false; } }
public virtual bool DelayedDamageStacking { get { return true; } }
//In reality, it's ANY delayed Damage spell Post-AoS that can't stack, but, only
//Expo & Magic Arrow have enough delay and a short enough cast time to bring up
//the possibility of stacking 'em. Note that a MA & an Explosion will stack, but
//of course, two MA's won't.
private static readonly Dictionary<Type, DelayedDamageContextWrapper> m_ContextTable =
new Dictionary<Type, DelayedDamageContextWrapper>();
private class DelayedDamageContextWrapper
{
    // Pending delayed-damage timer per victim, for one spell type.
    private readonly Dictionary<Mobile, Timer> m_Contexts = new Dictionary<Mobile, Timer>();

    // Replaces any timer already queued for `m`, stopping the old one
    // first so the earlier delayed hit never lands.
    public void Add(Mobile m, Timer t)
    {
        Timer existing;

        if (m_Contexts.TryGetValue(m, out existing))
        {
            existing.Stop();
            m_Contexts.Remove(m);
        }

        m_Contexts.Add(m, t);
    }

    public void Remove(Mobile m)
    {
        m_Contexts.Remove(m);
    }
}
// Registers the delayed-damage timer for `m`, replacing (and stopping)
// any earlier timer of this spell type so non-stacking spells cannot
// queue two hits on the same target.
public void StartDelayedDamageContext(Mobile m, Timer t)
{
    if (DelayedDamageStacking)
    {
        return; // this spell may stack freely; no bookkeeping required
    }

    DelayedDamageContextWrapper contexts;

    if (!m_ContextTable.TryGetValue(GetType(), out contexts))
    {
        m_ContextTable[GetType()] = contexts = new DelayedDamageContextWrapper();
    }

    contexts.Add(m, t);
}
// Clears any pending delayed-damage entry for `m` under this spell type.
public void RemoveDelayedDamageContext(Mobile m)
{
    DelayedDamageContextWrapper contexts;

    if (m_ContextTable.TryGetValue(GetType(), out contexts))
    {
        contexts.Remove(m);
    }
}
// Notifies creature AI that a hostile spell was cast on it.
public void HarmfulSpell(Mobile m)
{
    var creature = m as BaseCreature;

    if (creature != null)
    {
        creature.OnHarmfulSpell(m_Caster);
    }
}
// Creates a spell instance. `scroll` may be null (spellbook cast), a
// SpellScroll/SpellStone, or a BaseWand; several later checks branch on
// its runtime type.
public Spell(Mobile caster, Item scroll, SpellInfo info)
{
    m_Caster = caster;
    m_Scroll = scroll;
    m_Info = info;
}
public virtual int GetNewAosDamage(int bonus, int dice, int sides, Mobile singleTarget)
{
if (singleTarget != null)
{
return GetNewAosDamage(bonus, dice, sides, (Caster.Player && singleTarget.Player), GetDamageScalar(singleTarget));
}
else
{
return GetNewAosDamage(bonus, dice, sides, false);
}
}
public virtual int GetNewAosDamage(int bonus, int dice, int sides, bool playerVsPlayer)
{
return GetNewAosDamage(bonus, dice, sides, playerVsPlayer, 1.0);
}
public virtual int GetNewAosDamage(int bonus, int dice, int sides, bool playerVsPlayer, double scalar)
{
int damage = Utility.Dice(dice, sides, bonus) * 100;
int damageBonus = 0;
int inscribeSkill = GetInscribeFixed(m_Caster);
int inscribeBonus = (inscribeSkill + (1000 * (inscribeSkill / 1000))) / 200;
damageBonus += inscribeBonus;
int intBonus = Caster.Int / 10;
damageBonus += intBonus;
int sdiBonus = AosAttributes.GetValue(m_Caster, AosAttribute.SpellDamage);
#region Mondain's Legacy
sdiBonus += ArcaneEmpowermentSpell.GetSpellBonus(m_Caster, playerVsPlayer);
#endregion
// PvP spell damage increase cap of 15% from an items magic property, 30% if spell school focused.
if (playerVsPlayer)
{
if (SpellHelper.HasSpellMastery(m_Caster) && sdiBonus > 30)
{
sdiBonus = 30;
}
if (!SpellHelper.HasSpellMastery(m_Caster) && sdiBonus > 15)
{
sdiBonus = 15;
}
}
damageBonus += sdiBonus;
TransformContext context = TransformationSpellHelper.GetContext(Caster);
if (context != null && context.Spell is ReaperFormSpell)
{
damageBonus += ((ReaperFormSpell)context.Spell).SpellDamageBonus;
}
damage = AOS.Scale(damage, 100 + damageBonus);
int evalSkill = GetDamageFixed(m_Caster);
int evalScale = 30 + ((9 * evalSkill) / 100);
damage = AOS.Scale(damage, evalScale);
damage = AOS.Scale(damage, (int)(scalar * 100));
return damage / 100;
}
public virtual bool IsCasting { get { return m_State == SpellState.Casting; } }
public virtual void OnCasterHurt()
{
//Confirm: Monsters and pets cannot be disturbed.
if (!Caster.Player)
{
return;
}
if (IsCasting)
{
object o = ProtectionSpell.Registry[m_Caster];
bool disturb = true;
if (o != null && o is double)
{
if (((double)o) > Utility.RandomDouble() * 100.0)
{
disturb = false;
}
}
#region Stygian Abyss
int focus = SAAbsorptionAttributes.GetValue(Caster, SAAbsorptionAttribute.CastingFocus);
if (focus > 0)
{
if (focus > 30)
{
focus = 30;
}
if (focus > Utility.Random(100))
{
disturb = false;
Caster.SendLocalizedMessage(1113690); // You regain your focus and continue casting the spell.
}
}
#endregion
if (disturb)
{
Disturb(DisturbType.Hurt, false, true);
}
}
}
public virtual void OnCasterKilled()
{
Disturb(DisturbType.Kill);
}
public virtual void OnConnectionChanged()
{
FinishSequence();
}
// Movement is denied while actively casting, unless this spell permits it.
public virtual bool OnCasterMoving(Direction d)
{
    if (!IsCasting || !BlocksMovement)
    {
        return true;
    }

    m_Caster.SendLocalizedMessage(500111); // You are frozen and can not move.
    return false;
}
public virtual bool OnCasterEquiping(Item item)
{
if (IsCasting)
{
Disturb(DisturbType.EquipRequest);
}
return true;
}
public virtual bool OnCasterUsingObject(object o)
{
if (m_State == SpellState.Sequencing)
{
Disturb(DisturbType.UseRequest);
}
return true;
}
public virtual bool OnCastInTown(Region r)
{
return m_Info.AllowTown;
}
public virtual bool ConsumeReagents()
{
if (m_Caster.AccessLevel >= AccessLevel.Counselor)
return true;
if (m_Scroll != null || !m_Caster.Player)
{
return true;
}
if (AosAttributes.GetValue(m_Caster, AosAttribute.LowerRegCost) > Utility.Random(100))
{
return true;
}
if (DuelContext.IsFreeConsume(m_Caster))
{
return true;
}
Container pack = m_Caster.Backpack;
if (pack == null)
{
return false;
}
if (pack.ConsumeTotal(m_Info.Reagents, m_Info.Amounts) == -1)
{
return true;
}
return false;
}
public virtual double GetInscribeSkill(Mobile m)
{
// There is no chance to gain
// m.CheckSkill( SkillName.Inscribe, 0.0, 120.0 );
return m.Skills[SkillName.Inscribe].Value;
}
public virtual int GetInscribeFixed(Mobile m)
{
// There is no chance to gain
// m.CheckSkill( SkillName.Inscribe, 0.0, 120.0 );
return m.Skills[SkillName.Inscribe].Fixed;
}
public virtual int GetDamageFixed(Mobile m)
{
//m.CheckSkill( DamageSkill, 0.0, m.Skills[DamageSkill].Cap );
return m.Skills[DamageSkill].Fixed;
}
public virtual double GetDamageSkill(Mobile m)
{
//m.CheckSkill( DamageSkill, 0.0, m.Skills[DamageSkill].Cap );
return m.Skills[DamageSkill].Value;
}
public virtual double GetResistSkill(Mobile m)
{
return m.Skills[SkillName.MagicResist].Value;
}
public virtual double GetDamageScalar(Mobile target)
{
double scalar = 1.0;
if (!Core.AOS) //EvalInt stuff for AoS is handled elsewhere
{
double casterEI = m_Caster.Skills[DamageSkill].Value;
double targetRS = target.Skills[SkillName.MagicResist].Value;
/*
if( Core.AOS )
targetRS = 0;
*/
//m_Caster.CheckSkill( DamageSkill, 0.0, 120.0 );
if (casterEI > targetRS)
{
scalar = (1.0 + ((casterEI - targetRS) / 500.0));
}
else
{
scalar = (1.0 + ((casterEI - targetRS) / 200.0));
}
// magery damage bonus, -25% at 0 skill, +0% at 100 skill, +5% at 120 skill
scalar += (m_Caster.Skills[CastSkill].Value - 100.0) / 400.0;
if (!target.Player && !target.Body.IsHuman /*&& !Core.AOS*/)
{
scalar *= 2.0; // Double magery damage to monsters/animals if not AOS
}
}
if (target is BaseCreature)
{
((BaseCreature)target).AlterDamageScalarFrom(m_Caster, ref scalar);
}
if (m_Caster is BaseCreature)
{
((BaseCreature)m_Caster).AlterDamageScalarTo(target, ref scalar);
}
if (Core.SE)
{
scalar *= GetSlayerDamageScalar(target);
}
target.Region.SpellDamageScalar(m_Caster, target, ref scalar);
if (Evasion.CheckSpellEvasion(target)) //Only single target spells an be evaded
{
scalar = 0;
}
return scalar;
}
public virtual double GetSlayerDamageScalar(Mobile defender)
{
Spellbook atkBook = Spellbook.FindEquippedSpellbook(m_Caster);
double scalar = 1.0;
if (atkBook != null)
{
SlayerEntry atkSlayer = SlayerGroup.GetEntryByName(atkBook.Slayer);
SlayerEntry atkSlayer2 = SlayerGroup.GetEntryByName(atkBook.Slayer2);
if (atkSlayer != null && atkSlayer.Slays(defender) || atkSlayer2 != null && atkSlayer2.Slays(defender))
{
defender.FixedEffect(0x37B9, 10, 5); //TODO: Confirm this displays on OSIs
scalar = 2.0;
}
TransformContext context = TransformationSpellHelper.GetContext(defender);
if ((atkBook.Slayer == SlayerName.Silver || atkBook.Slayer2 == SlayerName.Silver) && context != null &&
context.Type != typeof(HorrificBeastSpell))
{
scalar += .25; // Every necromancer transformation other than horrific beast take an additional 25% damage
}
if (scalar != 1.0)
{
return scalar;
}
}
ISlayer defISlayer = Spellbook.FindEquippedSpellbook(defender);
if (defISlayer == null)
{
defISlayer = defender.Weapon as ISlayer;
}
if (defISlayer != null)
{
SlayerEntry defSlayer = SlayerGroup.GetEntryByName(defISlayer.Slayer);
SlayerEntry defSlayer2 = SlayerGroup.GetEntryByName(defISlayer.Slayer2);
if (defSlayer != null && defSlayer.Group.OppositionSuperSlays(m_Caster) ||
defSlayer2 != null && defSlayer2.Group.OppositionSuperSlays(m_Caster))
{
scalar = 2.0;
}
}
return scalar;
}
// Plays the standard fizzle message; player casters also get the
// particle/sound feedback (AOS uses the richer particle effect).
public virtual void DoFizzle()
{
    m_Caster.LocalOverheadMessage(MessageType.Regular, 0x3B2, 502632); // The spell fizzles.

    if (!m_Caster.Player)
    {
        return;
    }

    if (Core.AOS)
    {
        m_Caster.FixedParticles(0x3735, 1, 30, 9503, EffectLayer.Waist);
    }
    else
    {
        m_Caster.FixedEffect(0x3735, 6, 30);
    }

    m_Caster.PlaySound(0x5C);
}
private CastTimer m_CastTimer;
private AnimTimer m_AnimTimer;
public void Disturb(DisturbType type)
{
Disturb(type, true, false);
}
public virtual bool CheckDisturb(DisturbType type, bool firstCircle, bool resistable)
{
if (resistable && m_Scroll is BaseWand)
{
return false;
}
return true;
}
// Interrupts the spell. `firstCircle` permits disturbing even pre-AOS
// first-circle magery (normally immune); `resistable` marks disturb
// sources that wands ignore (see CheckDisturb).
public void Disturb(DisturbType type, bool firstCircle, bool resistable)
{
    if (!CheckDisturb(type, firstCircle, resistable))
    {
        return;
    }
    if (m_State == SpellState.Casting)
    {
        // Pre-AOS, first-circle magery spells cannot be disturbed unless
        // the caller explicitly allows it via firstCircle.
        if (!firstCircle && !Core.AOS && this is MagerySpell && ((MagerySpell)this).Circle == SpellCircle.First)
        {
            return;
        }
        m_State = SpellState.None;
        m_Caster.Spell = null;
        OnDisturb(type, true);
        // Stop the pending cast and casting-animation timers.
        if (m_CastTimer != null)
        {
            m_CastTimer.Stop();
        }
        if (m_AnimTimer != null)
        {
            m_AnimTimer.Stop();
        }
        // AOS players show a fizzle effect when disturbed by damage.
        if (Core.AOS && m_Caster.Player && type == DisturbType.Hurt)
        {
            DoHurtFizzle();
        }
        // Impose the disturb-recovery delay before the next cast attempt.
        m_Caster.NextSpellTime = Core.TickCount + (int)GetDisturbRecovery().TotalMilliseconds;
    }
    else if (m_State == SpellState.Sequencing)
    {
        // Same first-circle immunity applies during the targeting phase.
        if (!firstCircle && !Core.AOS && this is MagerySpell && ((MagerySpell)this).Circle == SpellCircle.First)
        {
            return;
        }
        m_State = SpellState.None;
        m_Caster.Spell = null;
        OnDisturb(type, false);
        // Drop the pending target cursor.
        Target.Cancel(m_Caster);
        if (Core.AOS && m_Caster.Player && type == DisturbType.Hurt)
        {
            DoHurtFizzle();
        }
    }
}
public virtual void DoHurtFizzle()
{
m_Caster.FixedEffect(0x3735, 6, 30);
m_Caster.PlaySound(0x5C);
}
public virtual void OnDisturb(DisturbType type, bool message)
{
if (message)
{
m_Caster.SendLocalizedMessage(500641); // Your concentration is disturbed, thus ruining thy spell.
}
}
public virtual bool CheckCast()
{
return true;
}
// Speaks the spell's power words overhead. Wand casts are silent, and
// only player casters vocalize mantras.
public virtual void SayMantra()
{
    if (m_Scroll is BaseWand)
    {
        return;
    }

    if (!m_Caster.Player || string.IsNullOrEmpty(m_Info.Mantra))
    {
        return;
    }

    m_Caster.PublicOverheadMessage(MessageType.Spell, m_Caster.SpeechHue, true, m_Info.Mantra, false);
}
public virtual bool BlockedByHorrificBeast { get { return true; } }
public virtual bool BlockedByAnimalForm { get { return true; } }
public virtual bool BlocksMovement { get { return true; } }
public virtual bool CheckNextSpellTime { get { return !(m_Scroll is BaseWand); } }
public bool Cast()
{
m_StartCastTime = Core.TickCount;
if (Core.AOS && m_Caster.Spell is Spell && ((Spell)m_Caster.Spell).State == SpellState.Sequencing)
{
((Spell)m_Caster.Spell).Disturb(DisturbType.NewCast);
}
if (!m_Caster.CheckAlive())
{
return false;
}
else if (m_Caster is PlayerMobile && ((PlayerMobile)m_Caster).Peaced)
{
m_Caster.SendLocalizedMessage(1072060); // You cannot cast a spell while calmed.
}
else if (m_Scroll is BaseWand && m_Caster.Spell != null && m_Caster.Spell.IsCasting)
{
m_Caster.SendLocalizedMessage(502643); // You can not cast a spell while frozen.
}
else if (m_Caster.Spell != null && m_Caster.Spell.IsCasting)
{
m_Caster.SendLocalizedMessage(502642); // You are already casting a spell.
}
else if (BlockedByHorrificBeast && TransformationSpellHelper.UnderTransformation(m_Caster, typeof(HorrificBeastSpell)) ||
(BlockedByAnimalForm && AnimalForm.UnderTransformation(m_Caster)))
{
m_Caster.SendLocalizedMessage(1061091); // You cannot cast that spell in this form.
}
else if (!(m_Scroll is BaseWand) && (m_Caster.Paralyzed || m_Caster.Frozen))
{
m_Caster.SendLocalizedMessage(502643); // You can not cast a spell while frozen.
}
else if (CheckNextSpellTime && Core.TickCount - m_Caster.NextSpellTime < 0)
{
m_Caster.SendLocalizedMessage(502644); // You have not yet recovered from casting a spell.
}
else if (m_Caster is PlayerMobile && ((PlayerMobile)m_Caster).PeacedUntil > DateTime.UtcNow)
{
m_Caster.SendLocalizedMessage(1072060); // You cannot cast a spell while calmed.
}
#region Dueling
else if (m_Caster is PlayerMobile && ((PlayerMobile)m_Caster).DuelContext != null &&
!((PlayerMobile)m_Caster).DuelContext.AllowSpellCast(m_Caster, this))
{ }
#endregion
else if (m_Caster.Mana >= ScaleMana(GetMana()))
{
#region Stygian Abyss
if (m_Caster.Race == Race.Gargoyle && m_Caster.Flying)
{
var tiles = Caster.Map.Tiles.GetStaticTiles(Caster.X, Caster.Y, true);
ItemData itemData;
bool cancast = true;
for (int i = 0; i < tiles.Length && cancast; ++i)
{
itemData = TileData.ItemTable[tiles[i].ID & TileData.MaxItemValue];
cancast = !(itemData.Name == "hover over");
}
if (!cancast)
{
if (m_Caster.IsPlayer())
{
m_Caster.SendLocalizedMessage(1113750); // You may not cast spells while flying over such precarious terrain.
return false;
}
else
{
m_Caster.SendMessage("Your staff level allows you to cast while flying over precarious terrain.");
}
}
}
#endregion
if (m_Caster.Spell == null && m_Caster.CheckSpellCast(this) && CheckCast() &&
m_Caster.Region.OnBeginSpellCast(m_Caster, this))
{
m_State = SpellState.Casting;
m_Caster.Spell = this;
if (!(m_Scroll is BaseWand) && RevealOnCast)
{
m_Caster.RevealingAction();
}
SayMantra();
TimeSpan castDelay = GetCastDelay();
if (ShowHandMovement && (m_Caster.Body.IsHuman || (m_Caster.Player && m_Caster.Body.IsMonster)))
{
int count = (int)Math.Ceiling(castDelay.TotalSeconds / AnimateDelay.TotalSeconds);
if (count != 0)
{
m_AnimTimer = new AnimTimer(this, count);
m_AnimTimer.Start();
}
if (m_Info.LeftHandEffect > 0)
{
Caster.FixedParticles(0, 10, 5, m_Info.LeftHandEffect, EffectLayer.LeftHand);
}
if (m_Info.RightHandEffect > 0)
{
Caster.FixedParticles(0, 10, 5, m_Info.RightHandEffect, EffectLayer.RightHand);
}
}
if (ClearHandsOnCast)
{
m_Caster.ClearHands();
}
if (Core.ML)
{
WeaponAbility.ClearCurrentAbility(m_Caster);
}
m_CastTimer = new CastTimer(this, castDelay);
//m_CastTimer.Start();
OnBeginCast();
if (castDelay > TimeSpan.Zero)
{
m_CastTimer.Start();
}
else
{
m_CastTimer.Tick();
}
return true;
}
else
{
return false;
}
}
else
{
m_Caster.LocalOverheadMessage(MessageType.Regular, 0x22, 502625); // Insufficient mana
}
return false;
}
public abstract void OnCast();
public virtual void OnBeginCast()
{ }
public virtual void GetCastSkills(out double min, out double max)
{
min = max = 0; //Intended but not required for overriding.
}
// Rolls the skill check deciding whether the cast succeeds. Wands never
// fizzle; otherwise the cast skill is checked against the spell's
// min/max, and the damage skill gets a gain check when it differs.
public virtual bool CheckFizzle()
{
    if (m_Scroll is BaseWand)
    {
        return true;
    }

    double minRequired, maxRequired;
    GetCastSkills(out minRequired, out maxRequired);

    if (CastSkill != DamageSkill)
    {
        Caster.CheckSkill(DamageSkill, 0.0, Caster.Skills[DamageSkill].Cap);
    }

    return Caster.CheckSkill(CastSkill, minRequired, maxRequired);
}
public abstract int GetMana();
// Applies Mind Rot (raises cost via its scalar) and Lower Mana Cost
// (reduces cost, capped at 40%) to the spell's base mana requirement.
public virtual int ScaleMana(int mana)
{
    double scalar = 1.0;

    if (!MindRotSpell.GetMindRotScalar(Caster, ref scalar))
    {
        scalar = 1.0;
    }

    int lmc = Math.Min(AosAttributes.GetValue(m_Caster, AosAttribute.LowerManaCost), 40);

    scalar -= lmc / 100.0;

    return (int)(mana * scalar);
}
// Post-disturb recovery delay. AOS has none; pre-AOS the penalty shrinks
// the further into the cast the caster was (square-root falloff), with a
// floor of 0.2 seconds.
public virtual TimeSpan GetDisturbRecovery()
{
    if (Core.AOS)
    {
        return TimeSpan.Zero;
    }
    double delay = 1.0 - Math.Sqrt((Core.TickCount - m_StartCastTime) / 1000.0 / GetCastDelay().TotalSeconds);
    if (delay < 0.2)
    {
        delay = 0.2;
    }
    return TimeSpan.FromSeconds(delay);
}
public virtual int CastRecoveryBase { get { return 6; } }
public virtual int CastRecoveryFastScalar { get { return 1; } }
public virtual int CastRecoveryPerSecond { get { return 4; } }
public virtual int CastRecoveryMinimum { get { return 0; } }
// Delay before the next spell may be started. Pre-AOS this is a flat
// 0.75s; post-AOS it scales down with Faster Cast Recovery (less any
// Thunderstorm malus), floored at CastRecoveryMinimum ticks.
public virtual TimeSpan GetCastRecovery()
{
    if (!Core.AOS)
    {
        return NextSpellDelay;
    }

    int fcr = AosAttributes.GetValue(m_Caster, AosAttribute.CastRecovery) - ThunderstormSpell.GetCastRecoveryMalus(m_Caster);

    int delay = Math.Max(CastRecoveryBase - (CastRecoveryFastScalar * fcr), CastRecoveryMinimum);

    return TimeSpan.FromSeconds((double)delay / CastRecoveryPerSecond);
}
public abstract TimeSpan CastDelayBase { get; }
public virtual double CastDelayFastScalar { get { return 1; } }
public virtual double CastDelaySecondsPerTick { get { return 0.25; } }
public virtual TimeSpan CastDelayMinimum { get { return TimeSpan.FromSeconds(0.25); } }
//public virtual int CastDelayBase{ get{ return 3; } }
//public virtual int CastDelayFastScalar{ get{ return 1; } }
//public virtual int CastDelayPerSecond{ get{ return 4; } }
//public virtual int CastDelayMinimum{ get{ return 1; } }
// Computes the casting time for this spell, applying Faster Casting (FC)
// bonuses, the Protection-spell malus, Essence of Wind debuffs, and the
// Dread Horn slow effect. Wands cast instantly pre-ML.
public virtual TimeSpan GetCastDelay()
{
    if (m_Scroll is BaseWand)
    {
        return Core.ML ? CastDelayBase : TimeSpan.Zero; // TODO: Should FC apply to wands?
    }
    // Faster casting cap of 2 (if not using the protection spell)
    // Faster casting cap of 0 (if using the protection spell)
    // Paladin spells are subject to a faster casting cap of 4
    // Paladins with magery of 70.0 or above are subject to a faster casting cap of 2
    int fcMax = 4;
    if (CastSkill == SkillName.Magery || CastSkill == SkillName.Necromancy ||
        (CastSkill == SkillName.Chivalry && m_Caster.Skills[SkillName.Magery].Value >= 70.0))
    {
        fcMax = 2;
    }
    int fc = AosAttributes.GetValue(m_Caster, AosAttribute.CastSpeed);
    if (fc > fcMax)
    {
        fc = fcMax;
    }
    if (ProtectionSpell.Registry.Contains(m_Caster))
    {
        fc -= 2;
    }
    if (EssenceOfWindSpell.IsDebuffed(m_Caster))
    {
        fc -= EssenceOfWindSpell.GetFCMalus(m_Caster);
    }
    TimeSpan baseDelay = CastDelayBase;
    TimeSpan fcDelay = TimeSpan.FromSeconds(-(CastDelayFastScalar * fc * CastDelaySecondsPerTick));
    TimeSpan delay = baseDelay + fcDelay;
    if (delay < CastDelayMinimum)
    {
        delay = CastDelayMinimum;
    }
    #region Mondain's Legacy
    if (DreadHorn.IsUnderInfluence(m_Caster))
    {
        // BUGFIX: TimeSpan.Add is pure — it returns a new value and does not
        // mutate its receiver. The original `delay.Add(delay);` discarded the
        // result, so the Dread Horn cast-time doubling never took effect.
        delay += delay;
    }
    #endregion
    return delay;
}
// Ends the targeting/sequencing phase and releases the caster's spell
// slot — but only if this spell still owns it (a newer cast may have
// already replaced it).
public virtual void FinishSequence()
{
    m_State = SpellState.None;
    if (m_Caster.Spell == this)
    {
        m_Caster.Spell = null;
    }
}
public virtual int ComputeKarmaAward()
{
return 0;
}
        // Validates that a cast which finished its casting delay may actually
        // take effect, consuming mana, reagents and scroll/wand charges on
        // success. Returns true when all costs were paid; any failure path
        // fizzles the spell (or just reports the missing resource) and
        // returns false. The order of the checks matters: nothing is consumed
        // until every precondition has passed.
        public virtual bool CheckSequence()
        {
            int mana = ScaleMana(GetMana());

            if (m_Caster.Deleted || !m_Caster.Alive || m_Caster.Spell != this || m_State != SpellState.Sequencing)
            {
                // Caster vanished, died, or started another spell since casting began.
                DoFizzle();
            }
            else if (m_Scroll != null && !(m_Scroll is Runebook) &&
                     (m_Scroll.Amount <= 0 || m_Scroll.Deleted || m_Scroll.RootParent != m_Caster ||
                      (m_Scroll is BaseWand && (((BaseWand)m_Scroll).Charges <= 0 || m_Scroll.Parent != m_Caster))))
            {
                // The scroll/wand backing this cast is gone, empty, or no longer held.
                DoFizzle();
            }
            else if (!ConsumeReagents())
            {
                m_Caster.LocalOverheadMessage(MessageType.Regular, 0x22, 502630); // More reagents are needed for this spell.
            }
            else if (m_Caster.Mana < mana)
            {
                m_Caster.LocalOverheadMessage(MessageType.Regular, 0x22, 502625); // Insufficient mana for this spell.
            }
            else if (Core.AOS && (m_Caster.Frozen || m_Caster.Paralyzed))
            {
                m_Caster.SendLocalizedMessage(502646); // You cannot cast a spell while frozen.
                DoFizzle();
            }
            else if (m_Caster is PlayerMobile && ((PlayerMobile)m_Caster).PeacedUntil > DateTime.UtcNow)
            {
                m_Caster.SendLocalizedMessage(1072060); // You cannot cast a spell while calmed.
                DoFizzle();
            }
            else if (CheckFizzle())
            {
                // All checks passed: pay the costs, starting with mana.
                m_Caster.Mana -= mana;

                if (m_Scroll is SpellScroll)
                {
                    m_Scroll.Consume();
                }
                #region SA
                else if (m_Scroll is SpellStone)
                {
                    // The SpellScroll check above isn't removing the SpellStones for some reason.
                    m_Scroll.Delete();
                }
                #endregion
                else if (m_Scroll is BaseWand)
                {
                    ((BaseWand)m_Scroll).ConsumeCharge(m_Caster);
                    m_Caster.RevealingAction();
                }

                if (m_Scroll is BaseWand)
                {
                    // Temporarily pin the wand in place so ClearHands cannot drop it.
                    bool m = m_Scroll.Movable;

                    m_Scroll.Movable = false;

                    if (ClearHandsOnCast)
                    {
                        m_Caster.ClearHands();
                    }

                    m_Scroll.Movable = m;
                }
                else
                {
                    if (ClearHandsOnCast)
                    {
                        m_Caster.ClearHands();
                    }
                }

                int karma = ComputeKarmaAward();

                if (karma != 0)
                {
                    Titles.AwardKarma(Caster, karma, true);
                }

                // Casters under Vampiric Embrace are burned by garlic-based spells.
                if (TransformationSpellHelper.UnderTransformation(m_Caster, typeof(VampiricEmbraceSpell)))
                {
                    bool garlic = false;

                    for (int i = 0; !garlic && i < m_Info.Reagents.Length; ++i)
                    {
                        garlic = (m_Info.Reagents[i] == Reagent.Garlic);
                    }

                    if (garlic)
                    {
                        m_Caster.SendLocalizedMessage(1061651); // The garlic burns you!
                        AOS.Damage(m_Caster, Utility.RandomMinMax(17, 23), 100, 0, 0, 0, 0);
                    }
                }

                return true;
            }
            else
            {
                DoFizzle();
            }

            return false;
        }
        // Convenience overload: beneficial-target sequence check that does not
        // allow dead targets.
        public bool CheckBSequence(Mobile target)
        {
            return CheckBSequence(target, false);
        }
public bool CheckBSequence(Mobile target, bool allowDead)
{
if (!target.Alive && !allowDead)
{
m_Caster.SendLocalizedMessage(501857); // This spell won't work on that!
return false;
}
else if (Caster.CanBeBeneficial(target, true, allowDead) && CheckSequence())
{
Caster.DoBeneficial(target);
return true;
}
else
{
return false;
}
}
public bool CheckHSequence(Mobile target)
{
if (!target.Alive)
{
m_Caster.SendLocalizedMessage(501857); // This spell won't work on that!
return false;
}
else if (Caster.CanBeHarmful(target) && CheckSequence())
{
Caster.DoHarmful(target);
return true;
}
else
{
return false;
}
}
        // Drives the periodic casting animation while the spell remains in the
        // Casting state; stops itself as soon as the cast ends or the caster
        // switches to another spell.
        private class AnimTimer : Timer
        {
            private readonly Spell m_Spell;

            public AnimTimer(Spell spell, int count)
                : base(TimeSpan.Zero, AnimateDelay, count)
            {
                m_Spell = spell;

                Priority = TimerPriority.FiftyMS;
            }

            protected override void OnTick()
            {
                // Bail out once the cast ended or the caster started a different spell.
                if (m_Spell.State != SpellState.Casting || m_Spell.m_Caster.Spell != m_Spell)
                {
                    Stop();
                    return;
                }

                // Mounted casters do not animate; Action < 0 means "no animation".
                if (!m_Spell.Caster.Mounted && m_Spell.m_Info.Action >= 0)
                {
                    if (m_Spell.Caster.Body.IsHuman)
                    {
                        m_Spell.Caster.Animate(m_Spell.m_Info.Action, 7, 1, true, false, 0);
                    }
                    else if (m_Spell.Caster.Player && m_Spell.Caster.Body.IsMonster)
                    {
                        // NOTE(review): presumably a polymorphed player; animation 12
                        // looks like a generic monster cast — confirm against body tables.
                        m_Spell.Caster.Animate(12, 7, 1, true, false, 0);
                    }
                }

                if (!Running)
                {
                    m_Spell.m_AnimTimer = null;
                }
            }
        }
        // Fires once when the casting delay elapses: transitions the spell from
        // Casting to Sequencing, notifies caster/region hooks, applies the cast
        // recovery delay and finally invokes the spell's OnCast effect.
        private class CastTimer : Timer
        {
            private readonly Spell m_Spell;

            public CastTimer(Spell spell, TimeSpan castDelay)
                : base(castDelay)
            {
                m_Spell = spell;

                Priority = TimerPriority.TwentyFiveMS;
            }

            protected override void OnTick()
            {
                if (m_Spell == null || m_Spell.m_Caster == null)
                {
                    return;
                }
                else if (m_Spell.m_State == SpellState.Casting && m_Spell.m_Caster.Spell == m_Spell)
                {
                    m_Spell.m_State = SpellState.Sequencing;
                    m_Spell.m_CastTimer = null;
                    m_Spell.m_Caster.OnSpellCast(m_Spell);

                    if (m_Spell.m_Caster.Region != null)
                    {
                        m_Spell.m_Caster.Region.OnSpellCast(m_Spell.m_Caster, m_Spell);
                    }

                    // Earliest tick at which the caster may begin the next spell.
                    m_Spell.m_Caster.NextSpellTime = Core.TickCount + (int)m_Spell.GetCastRecovery().TotalMilliseconds;
                    // Spell.NextSpellDelay;

                    Target originalTarget = m_Spell.m_Caster.Target;

                    m_Spell.OnCast();

                    // If OnCast raised a new targeting cursor for a player, give it a
                    // 30 second timeout.
                    if (m_Spell.m_Caster.Player && m_Spell.m_Caster.Target != originalTarget && m_Spell.Caster.Target != null)
                    {
                        m_Spell.m_Caster.Target.BeginTimeout(m_Spell.m_Caster, TimeSpan.FromSeconds(30.0));
                    }

                    m_Spell.m_CastTimer = null;
                }
            }

            // NOTE(review): public wrapper that forces the tick immediately —
            // presumably used for zero-delay casts; confirm at call sites.
            public void Tick()
            {
                OnTick();
            }
        }
}
} | gpl-3.0 |
AIFDR/inasafe | safe/utilities/test/test_gis.py | 4837 | # coding=utf-8
"""Test for GIS utilities functions."""
import unittest
# noinspection PyUnresolvedReferences
import qgis # pylint: disable=unused-import
from qgis.core import QgsRectangle
from safe.definitions.constants import INASAFE_TEST
from safe.utilities.gis import (
is_polygon_layer,
is_raster_y_inverted,
wkt_to_rectangle,
validate_geo_array)
from safe.test.utilities import (
clone_raster_layer,
load_test_vector_layer,
load_test_raster_layer,
standard_data_path,
get_qgis_app)
# Spin up one shared QGIS application/canvas for the whole test module.
QGIS_APP, CANVAS, IFACE, PARENT = get_qgis_app(qsetting=INASAFE_TEST)
class TestQGIS(unittest.TestCase):
    """Tests for the GIS helper functions in safe.utilities.gis."""

    def test_is_polygonal_layer(self):
        """is_polygon_layer accepts polygon layers and rejects the rest."""
        # A polygon vector layer must be detected as polygonal.
        layer = load_test_vector_layer(
            'aggregation',
            'district_osm_jakarta.geojson',
            clone=True
        )
        self.assertTrue(
            is_polygon_layer(layer),
            'isPolygonLayer, %s layer should be polygonal' % layer)

        # A point vector layer is not polygonal.
        layer = load_test_vector_layer('hazard', 'volcano_point.geojson')
        self.assertFalse(
            is_polygon_layer(layer),
            '%s layer should be polygonal' % layer)

        # Neither is a raster layer.
        layer = clone_raster_layer(
            name='earthquake',
            extension='.tif',
            include_keywords=True,
            source_directory=standard_data_path('hazard')
        )
        self.assertFalse(
            is_polygon_layer(layer),
            '%s raster layer should not be polygonal' % layer)

    def test_raster_y_inverted(self):
        """A regular north-up raster is not reported as upside down."""
        # We should have one test with an inverted raster but as it's not
        # usual, I'm not going to spend time.
        layer = load_test_raster_layer('gisv4', 'hazard', 'earthquake.asc')
        self.assertFalse(is_raster_y_inverted(layer))

    def test_rectangle_from_wkt(self):
        """A closed polygon WKT yields a QgsRectangle; a broken one None."""
        rectangle = wkt_to_rectangle('POLYGON ((0 0, 0 1, 1 1, 1 0, 0 0))')
        self.assertIsInstance(rectangle, QgsRectangle)

        # An unclosed ring must be rejected.
        self.assertIsNone(
            wkt_to_rectangle('POLYGON ((0 1, 1 1, 1 0, 0 0))'))

    def test_validate_geo_array(self):
        """Test validate geographic extent method.

        .. versionadded:: 3.2
        """
        # Extents are [min_longitude, min_latitude,
        #              max_longitude, max_latitude].
        good_extent = [
            20.389938354492188, -34.10782492987083,
            20.712661743164062, -34.008273470938335]
        self.assertTrue(validate_geo_array(good_extent))

        bad_extents = [
            # min_latitude >= max_latitude
            [20.389938354492188, 34.10782492987083,
             20.712661743164062, -34.008273470938335],
            # min_longitude >= max_longitude
            [34.10782492987083, -34.10782492987083,
             -34.008273470938335, -34.008273470938335],
            # min_latitude outside [-90, 90]
            [20.389938354492188, -134.10782492987083,
             20.712661743164062, -34.008273470938335],
            # max_latitude outside [-90, 90]
            [20.389938354492188, -9.10782492987083,
             20.712661743164062, 91.10782492987083],
            # min_longitude outside [-180, 180]
            [-184.10782492987083, -34.10782492987083,
             20.712661743164062, -34.008273470938335],
            # max_longitude outside [-180, 180]
            [20.389938354492188, -34.10782492987083,
             180.712661743164062, -34.008273470938335],
        ]
        for extent in bad_extents:
            self.assertFalse(validate_geo_array(extent))
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    unittest.main()
| gpl-3.0 |
arrmo/librenms | app/Console/Commands/DevicePing.php | 1546 | <?php
namespace App\Console\Commands;
use App\Console\LnmsCommand;
use App\Models\Device;
use Illuminate\Database\Eloquent\Builder;
use LibreNMS\Config;
use LibreNMS\Polling\ConnectivityHelper;
use Symfony\Component\Console\Input\InputArgument;
class DevicePing extends LnmsCommand
{
    protected $name = 'device:ping';

    /**
     * Create a new command instance.
     *
     * @return void
     */
    public function __construct()
    {
        parent::__construct();

        $this->addArgument('device spec', InputArgument::REQUIRED);
    }

    /**
     * Execute the console command: ping every matched device and print
     * one result line per device.
     *
     * @return int
     */
    public function handle(): int
    {
        $spec = $this->argument('device spec');

        // 'all' pings every known device; anything else matches a single
        // device by id or hostname.
        if ($spec === 'all') {
            $targets = Device::query()->get();
        } else {
            $targets = Device::query()
                ->where('device_id', $spec)
                ->orWhere('hostname', $spec)
                ->limit(1)
                ->get();
        }

        // Fall back to an unsaved Device so arbitrary hosts can be pinged too.
        if ($targets->isEmpty()) {
            $targets = [new Device(['hostname' => $spec])];
        }

        Config::set('icmp_check', true); // ignore icmp disabled, this is an explicit user action

        /** @var Device $device */
        foreach ($targets as $device) {
            $connectivity = new ConnectivityHelper($device);
            $response = $connectivity->isPingable();
            $this->line($device->displayName() . ' : ' . ($response->wasSkipped() ? 'skipped' : $response));
        }

        return 0;
    }
}
| gpl-3.0 |
starlis/EMC-CraftBukkit | src/main/java/net/minecraft/server/EntityPig.java | 4987 | package net.minecraft.server;
import org.bukkit.craftbukkit.event.CraftEventFactory; // CraftBukkit
// Vanilla pig entity (obfuscated MCP-era names), with CraftBukkit hooks for
// the lightning-strike conversion event.
public class EntityPig extends EntityAnimal {

    // Goal that lets a carrot-on-a-stick rider influence movement (see ca()).
    private final PathfinderGoalPassengerCarrotStick bp;

    public EntityPig(World world) {
        super(world);
        this.a(0.9F, 0.9F); // bounding box size
        this.getNavigation().a(true);
        // AI goal list; lower priority number runs first.
        this.goalSelector.a(0, new PathfinderGoalFloat(this));
        this.goalSelector.a(1, new PathfinderGoalPanic(this, 1.25D));
        this.goalSelector.a(2, this.bp = new PathfinderGoalPassengerCarrotStick(this, 0.3F));
        this.goalSelector.a(3, new PathfinderGoalBreed(this, 1.0D));
        this.goalSelector.a(4, new PathfinderGoalTempt(this, 1.2D, Items.CARROT_STICK, false));
        this.goalSelector.a(4, new PathfinderGoalTempt(this, 1.2D, Items.CARROT, false));
        this.goalSelector.a(5, new PathfinderGoalFollowParent(this, 1.1D));
        this.goalSelector.a(6, new PathfinderGoalRandomStroll(this, 1.0D));
        this.goalSelector.a(7, new PathfinderGoalLookAtPlayer(this, EntityHuman.class, 6.0F));
        this.goalSelector.a(8, new PathfinderGoalRandomLookaround(this));
    }

    // Obfuscated boolean flag; always true for pigs.
    // NOTE(review): meaning not derivable from this file — confirm via MCP mappings.
    public boolean bk() {
        return true;
    }

    // Attribute initialisation: 10 max health, GenericAttributes.d = 0.25
    // (presumably movement speed — TODO confirm mapping).
    protected void aD() {
        super.aD();
        this.getAttributeInstance(GenericAttributes.maxHealth).setValue(10.0D);
        this.getAttributeInstance(GenericAttributes.d).setValue(0.25D);
    }

    // Obfuscated per-tick/update hook; only delegates to the superclass here.
    protected void bn() {
        super.bn();
    }

    // True when the rider currently holds a carrot-on-a-stick.
    // Assumes the passenger is an EntityHuman (cast is unchecked).
    public boolean bE() {
        ItemStack itemstack = ((EntityHuman) this.passenger).be();

        return itemstack != null && itemstack.getItem() == Items.CARROT_STICK;
    }

    // Data watcher byte at index 16 holds the saddle flag
    // (see hasSaddle/setSaddle below).
    protected void c() {
        super.c();
        this.datawatcher.a(16, Byte.valueOf((byte) 0));
    }

    // Save the saddle state to NBT.
    public void b(NBTTagCompound nbttagcompound) {
        super.b(nbttagcompound);
        nbttagcompound.setBoolean("Saddle", this.hasSaddle());
    }

    // Load the saddle state from NBT.
    public void a(NBTTagCompound nbttagcompound) {
        super.a(nbttagcompound);
        this.setSaddle(nbttagcompound.getBoolean("Saddle"));
    }

    // Ambient sound.
    protected String t() {
        return "mob.pig.say";
    }

    // Hurt sound (same asset as the ambient sound for pigs).
    protected String aT() {
        return "mob.pig.say";
    }

    // Death sound.
    protected String aU() {
        return "mob.pig.death";
    }

    // Footstep sound.
    protected void a(int i, int j, int k, Block block) {
        this.makeSound("mob.pig.step", 0.15F, 1.0F);
    }

    // Player interaction: a saddled pig can be mounted (server side only,
    // and only when unridden or already ridden by this player).
    public boolean a(EntityHuman entityhuman) {
        if (super.a(entityhuman)) {
            return true;
        } else if (this.hasSaddle() && !this.world.isStatic && (this.passenger == null || this.passenger == entityhuman)) {
            entityhuman.mount(this);
            return true;
        } else {
            return false;
        }
    }

    // Death drop item: cooked porkchop when the pig dies burning.
    protected Item getLoot() {
        return this.isBurning() ? Items.GRILLED_PORK : Items.PORK;
    }

    // Drops 1-3 porkchops (plus looting bonus), and the saddle if equipped.
    protected void dropDeathLoot(boolean flag, int i) {
        int j = this.random.nextInt(3) + 1 + this.random.nextInt(1 + i);

        for (int k = 0; k < j; ++k) {
            if (this.isBurning()) {
                this.a(Items.GRILLED_PORK, 1);
            } else {
                this.a(Items.PORK, 1);
            }
        }

        if (this.hasSaddle()) {
            this.a(Items.SADDLE, 1);
        }
    }

    // Saddle flag is bit 0 of data watcher byte 16.
    public boolean hasSaddle() {
        return (this.datawatcher.getByte(16) & 1) != 0;
    }

    public void setSaddle(boolean flag) {
        if (flag) {
            this.datawatcher.watch(16, Byte.valueOf((byte) 1));
        } else {
            this.datawatcher.watch(16, Byte.valueOf((byte) 0));
        }
    }

    // Lightning strike: convert the pig into a zombie pigman (cancellable
    // via the CraftBukkit PigZap event).
    public void a(EntityLightning entitylightning) {
        if (!this.world.isStatic) {
            EntityPigZombie entitypigzombie = new EntityPigZombie(this.world);

            // CraftBukkit start
            if (CraftEventFactory.callPigZapEvent(this, entitylightning, entitypigzombie).isCancelled()) {
                return;
            }
            // CraftBukkit end

            entitypigzombie.setEquipment(0, new ItemStack(Items.GOLD_SWORD));
            entitypigzombie.setPositionRotation(this.locX, this.locY, this.locZ, this.yaw, this.pitch);
            // CraftBukkit - added a reason for spawning this creature
            this.world.addEntity(entitypigzombie, org.bukkit.event.entity.CreatureSpawnEvent.SpawnReason.LIGHTNING);
            this.die();
        }
    }

    // Fall handler: a player riding a pig that falls more than 5 blocks earns
    // achievement AchievementList.u (presumably "When Pigs Fly" — confirm).
    protected void b(float f) {
        super.b(f);
        if (f > 5.0F && this.passenger instanceof EntityHuman) {
            ((EntityHuman) this.passenger).a((Statistic) AchievementList.u);
        }
    }

    // Breeding: baby pigs are plain new pigs.
    public EntityPig b(EntityAgeable entityageable) {
        return new EntityPig(this.world);
    }

    // Breeding item check: pigs breed with carrots.
    public boolean c(ItemStack itemstack) {
        return itemstack != null && itemstack.getItem() == Items.CARROT;
    }

    // Accessor for the carrot-on-a-stick steering goal.
    public PathfinderGoalPassengerCarrotStick ca() {
        return this.bp;
    }

    public EntityAgeable createChild(EntityAgeable entityageable) {
        return this.b(entityageable);
    }
}
| gpl-3.0 |
pberndro/smartpi_exporter | vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go | 15836 | // Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package protoreflect provides interfaces to dynamically manipulate messages.
//
// This package includes type descriptors which describe the structure of types
// defined in proto source files and value interfaces which provide the
// ability to examine and manipulate the contents of messages.
//
//
// Protocol Buffer Descriptors
//
// Protobuf descriptors (e.g., EnumDescriptor or MessageDescriptor)
// are immutable objects that represent protobuf type information.
// They are wrappers around the messages declared in descriptor.proto.
// Protobuf descriptors alone lack any information regarding Go types.
//
// Enums and messages generated by this module implement Enum and ProtoMessage,
// where the Descriptor and ProtoReflect.Descriptor accessors respectively
// return the protobuf descriptor for the values.
//
// The protobuf descriptor interfaces are not meant to be implemented by
// user code since they might need to be extended in the future to support
// additions to the protobuf language. Protobuf descriptors can be constructed
// using the "google.golang.org/protobuf/reflect/protodesc" package.
//
//
// Go Type Descriptors
//
// A type descriptor (e.g., EnumType or MessageType) is a constructor for
// a concrete Go type that represents the associated protobuf descriptor.
// There is commonly a one-to-one relationship between protobuf descriptors and
// Go type descriptors, but it can potentially be a one-to-many relationship.
//
// Enums and messages generated by this module implement Enum and ProtoMessage,
// where the Type and ProtoReflect.Type accessors respectively
// return the protobuf descriptor for the values.
//
// The "google.golang.org/protobuf/types/dynamicpb" package can be used to
// create Go type descriptors from protobuf descriptors.
//
//
// Value Interfaces
//
// The Enum and Message interfaces provide a reflective view over an
// enum or message instance. For enums, it provides the ability to retrieve
// the enum value number for any concrete enum type. For messages, it provides
// the ability to access or manipulate fields of the message.
//
// To convert a proto.Message to a protoreflect.Message, use the
// former's ProtoReflect method. Since the ProtoReflect method is new to the
// v2 message interface, it may not be present on older message implementations.
// The "github.com/golang/protobuf/proto".MessageReflect function can be used
// to obtain a reflective view on older messages.
//
//
// Relationships
//
// The following diagrams demonstrate the relationships between
// various types declared in this package.
//
//
// ┌───────────────────────────────────┐
// V │
// ┌────────────── New(n) ─────────────┐ │
// │ │ │
// │ ┌──── Descriptor() ──┐ │ ┌── Number() ──┐ │
// │ │ V V │ V │
// ╔════════════╗ ╔════════════════╗ ╔════════╗ ╔════════════╗
// ║ EnumType ║ ║ EnumDescriptor ║ ║ Enum ║ ║ EnumNumber ║
// ╚════════════╝ ╚════════════════╝ ╚════════╝ ╚════════════╝
// Λ Λ │ │
// │ └─── Descriptor() ──┘ │
// │ │
// └────────────────── Type() ───────┘
//
// • An EnumType describes a concrete Go enum type.
// It has an EnumDescriptor and can construct an Enum instance.
//
// • An EnumDescriptor describes an abstract protobuf enum type.
//
// • An Enum is a concrete enum instance. Generated enums implement Enum.
//
//
// ┌──────────────── New() ─────────────────┐
// │ │
// │ ┌─── Descriptor() ─────┐ │ ┌── Interface() ───┐
// │ │ V V │ V
// ╔═════════════╗ ╔═══════════════════╗ ╔═════════╗ ╔══════════════╗
// ║ MessageType ║ ║ MessageDescriptor ║ ║ Message ║ ║ ProtoMessage ║
// ╚═════════════╝ ╚═══════════════════╝ ╚═════════╝ ╚══════════════╝
// Λ Λ │ │ Λ │
// │ └──── Descriptor() ────┘ │ └─ ProtoReflect() ─┘
// │ │
// └─────────────────── Type() ─────────┘
//
// • A MessageType describes a concrete Go message type.
// It has a MessageDescriptor and can construct a Message instance.
//
// • A MessageDescriptor describes an abstract protobuf message type.
//
// • A Message is a concrete message instance. Generated messages implement
// ProtoMessage, which can convert to/from a Message.
//
//
// ┌── TypeDescriptor() ──┐ ┌───── Descriptor() ─────┐
// │ V │ V
// ╔═══════════════╗ ╔═════════════════════════╗ ╔═════════════════════╗
// ║ ExtensionType ║ ║ ExtensionTypeDescriptor ║ ║ ExtensionDescriptor ║
// ╚═══════════════╝ ╚═════════════════════════╝ ╚═════════════════════╝
// Λ │ │ Λ │ Λ
// └─────── Type() ───────┘ │ └─── may implement ────┘ │
// │ │
// └────── implements ────────┘
//
// • An ExtensionType describes a concrete Go implementation of an extension.
// It has an ExtensionTypeDescriptor and can convert to/from
// abstract Values and Go values.
//
// • An ExtensionTypeDescriptor is an ExtensionDescriptor
// which also has an ExtensionType.
//
// • An ExtensionDescriptor describes an abstract protobuf extension field and
// may not always be an ExtensionTypeDescriptor.
package protoreflect
import (
"fmt"
"regexp"
"strings"
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/internal/pragma"
)
// doNotImplement is embedded in the interfaces below to keep them
// unimplementable outside this module, so they can grow new methods
// without breaking callers (see the package documentation).
type doNotImplement pragma.DoNotImplement

// ProtoMessage is the top-level interface that all proto messages implement.
// This is declared in the protoreflect package to avoid a cyclic dependency;
// use the proto.Message type instead, which aliases this type.
type ProtoMessage interface{ ProtoReflect() Message }
// Syntax is the language version of the proto file.
type Syntax syntax

type syntax int8 // keep exact type opaque as the int type may change

const (
	Proto2 Syntax = 2
	Proto3 Syntax = 3
)

// IsValid reports whether the syntax is valid.
func (s Syntax) IsValid() bool {
	return s == Proto2 || s == Proto3
}

// String returns s as a proto source identifier (e.g., "proto2").
func (s Syntax) String() string {
	switch s {
	case Proto2:
		return "proto2"
	case Proto3:
		return "proto3"
	}
	return fmt.Sprintf("<unknown:%d>", s)
}

// GoString returns s as a Go source identifier (e.g., "Proto2").
func (s Syntax) GoString() string {
	switch s {
	case Proto2:
		return "Proto2"
	case Proto3:
		return "Proto3"
	}
	return fmt.Sprintf("Syntax(%d)", s)
}
// Cardinality determines whether a field is optional, required, or repeated.
type Cardinality cardinality

type cardinality int8 // keep exact type opaque as the int type may change

// Constants as defined by the google.protobuf.Cardinality enumeration.
const (
	Optional Cardinality = 1 // appears zero or one times
	Required Cardinality = 2 // appears exactly one time; invalid with Proto3
	Repeated Cardinality = 3 // appears zero or more times
)

// IsValid reports whether the cardinality is valid.
func (c Cardinality) IsValid() bool {
	return c == Optional || c == Required || c == Repeated
}

// String returns c as a proto source identifier (e.g., "optional").
func (c Cardinality) String() string {
	switch c {
	case Optional:
		return "optional"
	case Required:
		return "required"
	case Repeated:
		return "repeated"
	}
	return fmt.Sprintf("<unknown:%d>", c)
}

// GoString returns c as a Go source identifier (e.g., "Optional").
func (c Cardinality) GoString() string {
	switch c {
	case Optional:
		return "Optional"
	case Required:
		return "Required"
	case Repeated:
		return "Repeated"
	}
	return fmt.Sprintf("Cardinality(%d)", c)
}
// Kind indicates the basic proto kind of a field.
type Kind kind

type kind int8 // keep exact type opaque as the int type may change

// Constants as defined by the google.protobuf.Field.Kind enumeration.
const (
	BoolKind     Kind = 8
	EnumKind     Kind = 14
	Int32Kind    Kind = 5
	Sint32Kind   Kind = 17
	Uint32Kind   Kind = 13
	Int64Kind    Kind = 3
	Sint64Kind   Kind = 18
	Uint64Kind   Kind = 4
	Sfixed32Kind Kind = 15
	Fixed32Kind  Kind = 7
	FloatKind    Kind = 2
	Sfixed64Kind Kind = 16
	Fixed64Kind  Kind = 6
	DoubleKind   Kind = 1
	StringKind   Kind = 9
	BytesKind    Kind = 12
	MessageKind  Kind = 11
	GroupKind    Kind = 10
)

// kindStrings maps every valid kind to its proto source identifier;
// its key set also defines the set of valid kinds.
var kindStrings = map[Kind]string{
	BoolKind:     "bool",
	EnumKind:     "enum",
	Int32Kind:    "int32",
	Sint32Kind:   "sint32",
	Uint32Kind:   "uint32",
	Int64Kind:    "int64",
	Sint64Kind:   "sint64",
	Uint64Kind:   "uint64",
	Sfixed32Kind: "sfixed32",
	Fixed32Kind:  "fixed32",
	FloatKind:    "float",
	Sfixed64Kind: "sfixed64",
	Fixed64Kind:  "fixed64",
	DoubleKind:   "double",
	StringKind:   "string",
	BytesKind:    "bytes",
	MessageKind:  "message",
	GroupKind:    "group",
}

// kindGoStrings maps every valid kind to its Go source identifier.
var kindGoStrings = map[Kind]string{
	BoolKind:     "BoolKind",
	EnumKind:     "EnumKind",
	Int32Kind:    "Int32Kind",
	Sint32Kind:   "Sint32Kind",
	Uint32Kind:   "Uint32Kind",
	Int64Kind:    "Int64Kind",
	Sint64Kind:   "Sint64Kind",
	Uint64Kind:   "Uint64Kind",
	Sfixed32Kind: "Sfixed32Kind",
	Fixed32Kind:  "Fixed32Kind",
	FloatKind:    "FloatKind",
	Sfixed64Kind: "Sfixed64Kind",
	Fixed64Kind:  "Fixed64Kind",
	DoubleKind:   "DoubleKind",
	StringKind:   "StringKind",
	BytesKind:    "BytesKind",
	MessageKind:  "MessageKind",
	GroupKind:    "GroupKind",
}

// IsValid reports whether the kind is valid.
func (k Kind) IsValid() bool {
	_, ok := kindStrings[k]
	return ok
}

// String returns k as a proto source identifier (e.g., "bool").
func (k Kind) String() string {
	if s, ok := kindStrings[k]; ok {
		return s
	}
	return fmt.Sprintf("<unknown:%d>", k)
}

// GoString returns k as a Go source identifier (e.g., "BoolKind").
func (k Kind) GoString() string {
	if s, ok := kindGoStrings[k]; ok {
		return s
	}
	return fmt.Sprintf("Kind(%d)", k)
}
// FieldNumber is the field number in a message.
type FieldNumber = protowire.Number

// FieldNumbers represent a list of field numbers.
type FieldNumbers interface {
	// Len reports the number of fields in the list.
	Len() int
	// Get returns the ith field number. It panics if out of bounds.
	Get(i int) FieldNumber
	// Has reports whether n is within the list of fields.
	Has(n FieldNumber) bool

	// Embedding doNotImplement keeps this interface unimplementable
	// outside this module (see the package documentation).
	doNotImplement
}

// FieldRanges represent a list of field number ranges.
type FieldRanges interface {
	// Len reports the number of ranges in the list.
	Len() int
	// Get returns the ith range. It panics if out of bounds.
	Get(i int) [2]FieldNumber // start inclusive; end exclusive
	// Has reports whether n is within any of the ranges.
	Has(n FieldNumber) bool

	doNotImplement
}

// EnumNumber is the numeric value for an enum.
type EnumNumber int32

// EnumRanges represent a list of enum number ranges.
// Note: unlike FieldRanges, both range endpoints are inclusive here.
type EnumRanges interface {
	// Len reports the number of ranges in the list.
	Len() int
	// Get returns the ith range. It panics if out of bounds.
	Get(i int) [2]EnumNumber // start inclusive; end inclusive
	// Has reports whether n is within any of the ranges.
	Has(n EnumNumber) bool

	doNotImplement
}
var (
	// regexName matches one proto identifier: a letter or underscore
	// followed by letters, digits, or underscores.
	regexName = regexp.MustCompile(`^[_a-zA-Z][_a-zA-Z0-9]*$`)
	// regexFullName matches one or more such identifiers joined by '.'.
	regexFullName = regexp.MustCompile(`^[_a-zA-Z][_a-zA-Z0-9]*(\.[_a-zA-Z][_a-zA-Z0-9]*)*$`)
)

// Name is the short name for a proto declaration. This is not the name
// as used in Go source code, which might not be identical to the proto name.
type Name string // e.g., "Kind"

// IsValid reports whether n is a syntactically valid name.
// An empty name is invalid.
func (n Name) IsValid() bool {
	return regexName.MatchString(string(n))
}

// Names represent a list of names.
type Names interface {
	// Len reports the number of names in the list.
	Len() int
	// Get returns the ith name. It panics if out of bounds.
	Get(i int) Name
	// Has reports whether s matches any names in the list.
	Has(s Name) bool

	doNotImplement
}
// FullName is a qualified name that uniquely identifies a proto declaration.
// A qualified name is the concatenation of the proto package along with the
// fully-declared name (i.e., name of parent preceding the name of the child),
// with a '.' delimiter placed between each Name.
//
// This should not have any leading or trailing dots.
type FullName string // e.g., "google.protobuf.Field.Kind"

// IsValid reports whether n is a syntactically valid full name.
// An empty full name is invalid.
func (n FullName) IsValid() bool {
	return regexFullName.MatchString(string(n))
}

// Name returns the short name, which is the last identifier segment.
// A single segment FullName is the Name itself.
func (n FullName) Name() Name {
	// When there is no dot, LastIndexByte returns -1 and n[0:] is n itself.
	i := strings.LastIndexByte(string(n), '.')
	return Name(n[i+1:])
}

// Parent returns the full name with the trailing identifier removed.
// A single segment FullName has no parent.
func (n FullName) Parent() FullName {
	i := strings.LastIndexByte(string(n), '.')
	if i < 0 {
		return ""
	}
	return n[:i]
}

// Append returns the qualified name appended with the provided short name.
//
// Invariant: n == n.Parent().Append(n.Name()) // assuming n is valid
func (n FullName) Append(s Name) FullName {
	if len(n) == 0 {
		return FullName(s)
	}
	return n + "." + FullName(s)
}
| gpl-3.0 |
petercpg/MozStumbler | libraries/stumbler/src/main/java/org/mozilla/mozstumbler/service/stumblerthread/datahandling/ReportBatchBuilder.java | 1691 | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
package org.mozilla.mozstumbler.service.stumblerthread.datahandling;
import org.json.JSONObject;
import org.mozilla.mozstumbler.service.stumblerthread.datahandling.base.SerializedJSONRows;
import org.mozilla.mozstumbler.service.stumblerthread.datahandling.base.JSONRowsObjectBuilder;
import org.mozilla.mozstumbler.service.utils.Zipper;
/*
ReportBatchBuilder accepts MLS GeoSubmit JSON blobs and serializes them to
string form.
*/
public class ReportBatchBuilder extends JSONRowsObjectBuilder {

    /**
     * Sum of the cell-record counts across every queued MLS report entry.
     */
    public int getCellCount() {
        int total = 0;
        for (JSONObject entry : mJSONEntries) {
            assert (entry instanceof MLSJSONObject);
            total += ((MLSJSONObject) entry).getCellCount();
        }
        return total;
    }

    /**
     * Sum of the wifi-record counts across every queued MLS report entry.
     */
    public int getWifiCount() {
        int total = 0;
        for (JSONObject entry : mJSONEntries) {
            assert (entry instanceof MLSJSONObject);
            total += ((MLSJSONObject) entry).getWifiCount();
        }
        return total;
    }

    /**
     * Serializes all queued reports to JSON, gzips the bytes and wraps them
     * in an in-memory ReportBatch carrying the observation/wifi/cell counts.
     */
    @Override
    public SerializedJSONRows finalizeToJSONRowsObject() {
        final int observations = entriesCount();
        final int wifiCount = getWifiCount();
        final int cellCount = getCellCount();

        // false: the underlying entries need not be preserved after serialization.
        final String json = generateJSON(false);
        final byte[] compressed = Zipper.zipData(json.getBytes());

        return new ReportBatch(compressed,
                SerializedJSONRows.StorageState.IN_MEMORY,
                observations, wifiCount, cellCount);
    }
}
| mpl-2.0 |
moliva/proactive | src/Core/org/objectweb/proactive/core/util/ProActiveCounter.java | 2303 | /*
* ################################################################
*
* ProActive Parallel Suite(TM): The Java(TM) library for
* Parallel, Distributed, Multi-Core Computing for
* Enterprise Grids & Clouds
*
* Copyright (C) 1997-2012 INRIA/University of
* Nice-Sophia Antipolis/ActiveEon
* Contact: [email protected] or [email protected]
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Affero General Public License
* as published by the Free Software Foundation; version 3 of
* the License.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA
*
* If needed, contact us to obtain a release under GPL Version 2 or 3
* or a different license than the AGPL.
*
* Initial developer(s): The ProActive Team
* http://proactive.inria.fr/team_members.htm
* Contributor(s):
*
* ################################################################
* $$PROACTIVE_INITIAL_DEV$$
*/
package org.objectweb.proactive.core.util;
import org.apache.log4j.Logger;
import org.objectweb.proactive.core.util.log.Loggers;
import org.objectweb.proactive.core.util.log.ProActiveLogger;
/**
* Provide an incremental per VM unique ID
*
* unique id starts from zero and is incremented each time getUniqID is called.
* If Long.MAX_VALUE is reached then then IllegalStateArgument exception is thrown
*
* @See {@link ProActiveRandom}
*
*/
public class ProActiveCounter {
    static Logger logger = ProActiveLogger.getLogger(Loggers.CORE);
    static long counter = 0;

    /**
     * Returns the next per-VM unique id, starting from zero.
     * Throws IllegalStateException once Long.MAX_VALUE is reached.
     */
    synchronized static public long getUniqID() {
        if (counter == Long.MAX_VALUE) {
            throw new IllegalStateException(ProActiveCounter.class.getSimpleName() +
                " counter reached max value");
        }
        return counter++;
    }
}
| agpl-3.0 |
rdkgit/opennms | features/vaadin-node-maps/src/main/java/org/opennms/features/vaadin/nodemaps/internal/gwt/client/SearchResult.java | 2174 | /*******************************************************************************
* This file is part of OpenNMS(R).
*
* Copyright (C) 2013-2014 The OpenNMS Group, Inc.
* OpenNMS(R) is Copyright (C) 1999-2014 The OpenNMS Group, Inc.
*
* OpenNMS(R) is a registered trademark of The OpenNMS Group, Inc.
*
* OpenNMS(R) is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published
* by the Free Software Foundation, either version 3 of the License,
* or (at your option) any later version.
*
* OpenNMS(R) is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with OpenNMS(R). If not, see:
* http://www.gnu.org/licenses/
*
* For more information contact:
* OpenNMS(R) Licensing <[email protected]>
* http://www.opennms.org/
* http://www.opennms.com/
*******************************************************************************/
package org.opennms.features.vaadin.nodemaps.internal.gwt.client;
import org.discotools.gwt.leaflet.client.jsobject.JSObject;
import org.discotools.gwt.leaflet.client.types.LatLng;
public class SearchResult extends JSObject {
    // GWT JavaScript overlay types must not be instantiated directly.
    protected SearchResult() {}

    /**
     * Builds a new search result carrying a display title and a location.
     */
    public static final SearchResult create(final String title, final LatLng latLng) {
        final SearchResult searchResult = JSObject.createJSObject().cast();
        // Both setters return this, so the calls can be chained.
        return searchResult.setTitle(title).setLatLng(latLng);
    }

    public final String getTitle() {
        return getPropertyAsString("title");
    }

    public final SearchResult setTitle(final String title) {
        setProperty("title", title);
        return this;
    }

    public final LatLng getLatLng() {
        final JSObject raw = getProperty("latLng");
        return new LatLng(raw);
    }

    public final SearchResult setLatLng(final LatLng latLng) {
        setProperty("latLng", latLng.getJSObject());
        return this;
    }
}
| agpl-3.0 |
moliva/proactive | src/Extensions/org/objectweb/proactive/extensions/annotation/common/BogusAnnotationProcessor.java | 2065 | /*
* ################################################################
*
* ProActive Parallel Suite(TM): The Java(TM) library for
* Parallel, Distributed, Multi-Core Computing for
* Enterprise Grids & Clouds
*
* Copyright (C) 1997-2012 INRIA/University of
* Nice-Sophia Antipolis/ActiveEon
* Contact: [email protected] or [email protected]
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Affero General Public License
* as published by the Free Software Foundation; version 3 of
* the License.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA
*
* If needed, contact us to obtain a release under GPL Version 2 or 3
* or a different license than the AGPL.
*
* Initial developer(s): The ProActive Team
* http://proactive.inria.fr/team_members.htm
* Contributor(s):
*
* ################################################################
* $$PROACTIVE_INITIAL_DEV$$
*/
package org.objectweb.proactive.extensions.annotation.common;
import com.sun.mirror.apt.AnnotationProcessor;
/** This annotation processor processes the annotations provided by default
* whith JDK 1.5. This is needed in order to suppress the unnecessary warnings that
* apt generates for these default annotations.
* See also http://forums.sun.com/thread.jspa?threadID=5345947
* @author fabratu
* @version %G%, %I%
* @since ProActive 4.10
*/
public class BogusAnnotationProcessor implements AnnotationProcessor {
public BogusAnnotationProcessor() {
}
public void process() {
// nothing!
}
}
| agpl-3.0 |
ratliff/server | api_v3/lib/types/fileAsset/filters/orderEnums/KalturaFileAssetOrderBy.php | 272 | <?php
/**
* @package api
* @subpackage filters.enum
*/
class KalturaFileAssetOrderBy extends KalturaStringEnum
{
const CREATED_AT_ASC = "+createdAt";
const CREATED_AT_DESC = "-createdAt";
const UPDATED_AT_ASC = "+updatedAt";
const UPDATED_AT_DESC = "-updatedAt";
}
| agpl-3.0 |
tdefilip/opennms | opennms-services/src/main/java/org/opennms/netmgt/vacuumd/AutomationException.java | 2125 | /*******************************************************************************
* This file is part of OpenNMS(R).
*
* Copyright (C) 2007-2014 The OpenNMS Group, Inc.
* OpenNMS(R) is Copyright (C) 1999-2014 The OpenNMS Group, Inc.
*
* OpenNMS(R) is a registered trademark of The OpenNMS Group, Inc.
*
* OpenNMS(R) is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published
* by the Free Software Foundation, either version 3 of the License,
* or (at your option) any later version.
*
* OpenNMS(R) is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with OpenNMS(R). If not, see:
* http://www.gnu.org/licenses/
*
* For more information contact:
* OpenNMS(R) Licensing <[email protected]>
* http://www.opennms.org/
* http://www.opennms.com/
*******************************************************************************/
package org.opennms.netmgt.vacuumd;
/**
* <p>AutomationException class.</p>
*
* @author <a href="mailto:[email protected]">Mathew Brozowski</a>
* @version $Id: $
*/
public class AutomationException extends RuntimeException {
private static final long serialVersionUID = -8873671974245928627L;
/**
* <p>Constructor for AutomationException.</p>
*
* @param arg0 a {@link java.lang.String} object.
*/
public AutomationException(String arg0) {
super(arg0);
}
/**
* <p>Constructor for AutomationException.</p>
*
* @param arg0 a {@link java.lang.Throwable} object.
*/
public AutomationException(Throwable arg0) {
super(arg0);
}
/**
* <p>Constructor for AutomationException.</p>
*
* @param arg0 a {@link java.lang.String} object.
* @param arg1 a {@link java.lang.Throwable} object.
*/
public AutomationException(String arg0, Throwable arg1) {
super(arg0, arg1);
}
}
| agpl-3.0 |
SaranNV/canvas-lms | spec/views/quizzes/submission_versions.html.erb_spec.rb | 1143 | #
# Copyright (C) 2011 Instructure, Inc.
#
# This file is part of Canvas.
#
# Canvas is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation, version 3 of the License.
#
# Canvas is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
#
require File.expand_path(File.dirname(__FILE__) + '/../../spec_helper')
require File.expand_path(File.dirname(__FILE__) + '/../views_helper')
describe "/quizzes/submission_versions" do
it "should render" do
course_with_teacher(:active_all => true)
course_quiz
view_context
ActiveRecord::Base.clear_cached_contexts
assigns[:quiz] = @quiz
assigns[:versions] = []
render "quizzes/submission_versions"
response.should_not be_nil
end
end | agpl-3.0 |
illerax/jcommune | jcommune-view/jcommune-web-view/src/main/webapp/resources/javascript/app/utils.js | 4376 | /*
* Copyright (C) 2011 JTalks.org Team
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
var Utils = {};
function quote(postId, postNumber) {
var callback = function (text) {
$('#post').focus();
console.log(text);
var answer = $('#postBody');
answer.focus();
if (answer) {
answer.val(answer.val() + text);
}
}
$.ajax({
url: baseUrl + '/posts/' + postId + '/quote',
type: 'POST',
data: {
selection: getSelectedPostText(postNumber)
},
success: function (data) {
callback(data.result);
},
error: function () {
callback('');
}
});
}
function getSelectedPostText(postNumber) {
var txt = '';
if (window.getSelection) {
if (window.getSelection().toString().length > 0 && isRangeInPost(window.getSelection().getRangeAt(0))
&& isSelectedPostQuoted(postNumber)) {
txt = window.getSelection().toString();
}
}
else if (document.selection) {
if (isRangeInPost(document.selection.createRange()) && isSelectedPostQuoted(postNumber)) {
txt = document.selection.createRange().text;
}
}
return txt;
}
/**
* Checks if selected document fragment is a part of the post content.
* @param {Range} range Range object which represent current selection.
* @return {boolean} <b>true</b> if if selected document fragment is a part of the post content
* <b>false</b> otherwise.
*/
function isRangeInPost(range) {
return $(range.startContainer).closest(".post-content-body").length > 0;
}
/**
* Checks if "quote" button pressed on the post which was selected.
* @param {Number} postNumber number of the post on the page which "quote" button was pressed.
* @return {boolean} <b>true</> if selected text is a part of the post which will be quoted
* <b>false</b> otherwise.
*/
function isSelectedPostQuoted(postNumber) {
return $(window.getSelection().getRangeAt(0).startContainer).closest('.post').prevAll().length == postNumber;
}
/**
* Encodes given string by escaping special HTML characters
*
* @param s string to be encoded
*/
Utils.htmlEncode = function (s) {
return $('<div/>').text(s).html();
};
/**
* Do focus to element
*
* @param target selector of element to focus
*/
Utils.focusFirstEl = function (target) {
$(target).focus();
}
/**
* Replaces all \n characters by <br> tags. Used for review comments.
*
* @param s string where perform replacing
*/
Utils.lf2br = function (s) {
return s.replace(/\n/g, "<br>");
}
/**
* Replaces all \<br> tags by \n characters. Used for review comments.
*
* @param s string where perform replacing
*/
Utils.br2lf = function (s) {
return s.replace(/<br>/gi, "\n");
}
/**
* Create form field with given label(placeholder), id, type, class and style.
*/
Utils.createFormElement = function (label, id, type, cls, style) {
var elementHtml = ' \
<div class="control-group"> \
<div class="controls"> \
<input type="' + type + '" id="' + id + '" name="' + id + '" placeholder="' + label + '" class="input-xlarge ' + cls + '" style="'+ style +'" /> \
</div> \
</div> \
';
return elementHtml;
}
/**
* Handling "onError" event for images if it's can't loaded. Invoke in config kefirbb.xml for [img] bbtag.
* */
function imgError(image) {
var imageDefault = baseUrl + "/resources/images/noimage.jpg";
image.src = imageDefault;
image.className = "thumbnail-default";
image.parentNode.href = imageDefault;
image.onerror = "";
} | lgpl-2.1 |
wood-galaxy/FreeCAD | src/Mod/Web/Gui/Resources/translations/Web_hr.ts | 6073 | <?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE TS>
<TS version="2.0" language="hr" sourcelanguage="en">
<context>
<name>CmdWebBrowserBack</name>
<message>
<location filename="../../Command.cpp" line="75"/>
<source>Web</source>
<translation type="unfinished">Web</translation>
</message>
<message>
<location filename="../../Command.cpp" line="76"/>
<source>Previous page</source>
<translation type="unfinished">Previous page</translation>
</message>
<message>
<location filename="../../Command.cpp" line="77"/>
<source>Go back to the previous page</source>
<translation type="unfinished">Go back to the previous page</translation>
</message>
</context>
<context>
<name>CmdWebBrowserNext</name>
<message>
<location filename="../../Command.cpp" line="103"/>
<source>Web</source>
<translation type="unfinished">Web</translation>
</message>
<message>
<location filename="../../Command.cpp" line="104"/>
<source>Next page</source>
<translation type="unfinished">Next page</translation>
</message>
<message>
<location filename="../../Command.cpp" line="105"/>
<source>Go to the next page</source>
<translation type="unfinished">Go to the next page</translation>
</message>
</context>
<context>
<name>CmdWebBrowserRefresh</name>
<message>
<location filename="../../Command.cpp" line="131"/>
<source>Web</source>
<translation type="unfinished">Web</translation>
</message>
<message>
<location filename="../../Command.cpp" line="132"/>
<location filename="../../Command.cpp" line="133"/>
<source>Refresh web page</source>
<translation type="unfinished">Refresh web page</translation>
</message>
</context>
<context>
<name>CmdWebBrowserStop</name>
<message>
<location filename="../../Command.cpp" line="158"/>
<source>Web</source>
<translation type="unfinished">Web</translation>
</message>
<message>
<location filename="../../Command.cpp" line="159"/>
<source>Stop loading</source>
<translation type="unfinished">Stop loading</translation>
</message>
<message>
<location filename="../../Command.cpp" line="160"/>
<source>Stop the current loading</source>
<translation type="unfinished">Stop the current loading</translation>
</message>
</context>
<context>
<name>CmdWebBrowserZoomIn</name>
<message>
<location filename="../../Command.cpp" line="187"/>
<source>Web</source>
<translation type="unfinished">Web</translation>
</message>
<message>
<location filename="../../Command.cpp" line="188"/>
<source>Zoom in</source>
<translation type="unfinished">Zoom in</translation>
</message>
<message>
<location filename="../../Command.cpp" line="189"/>
<source>Zoom into the page</source>
<translation type="unfinished">Zoom into the page</translation>
</message>
</context>
<context>
<name>CmdWebBrowserZoomOut</name>
<message>
<location filename="../../Command.cpp" line="215"/>
<source>Web</source>
<translation type="unfinished">Web</translation>
</message>
<message>
<location filename="../../Command.cpp" line="216"/>
<source>Zoom out</source>
<translation type="unfinished">Zoom out</translation>
</message>
<message>
<location filename="../../Command.cpp" line="217"/>
<source>Zoom out of the page</source>
<translation type="unfinished">Zoom out of the page</translation>
</message>
</context>
<context>
<name>CmdWebOpenWebsite</name>
<message>
<location filename="../../Command.cpp" line="50"/>
<source>Web</source>
<translation type="unfinished">Web</translation>
</message>
<message>
<location filename="../../Command.cpp" line="51"/>
<source>Open website...</source>
<translation type="unfinished">Open website...</translation>
</message>
<message>
<location filename="../../Command.cpp" line="52"/>
<source>Opens a website in FreeCAD</source>
<translation type="unfinished">Opens a website in FreeCAD</translation>
</message>
</context>
<context>
<name>QObject</name>
<message>
<location filename="../../AppWebGui.cpp" line="78"/>
<location filename="../../BrowserView.cpp" line="348"/>
<source>Browser</source>
<translation type="unfinished">Browser</translation>
</message>
<message>
<location filename="../../BrowserView.cpp" line="244"/>
<source>File does not exist!</source>
<translation type="unfinished">File does not exist!</translation>
</message>
</context>
<context>
<name>WebGui::BrowserView</name>
<message>
<location filename="../../BrowserView.cpp" line="239"/>
<source>Error</source>
<translation>Pogreška</translation>
</message>
<message>
<location filename="../../BrowserView.cpp" line="319"/>
<source>Loading %1...</source>
<translation type="unfinished">Loading %1...</translation>
</message>
</context>
<context>
<name>WebGui::WebView</name>
<message>
<location filename="../../BrowserView.cpp" line="121"/>
<source>Open in External Browser</source>
<translation type="unfinished">Open in External Browser</translation>
</message>
<message>
<location filename="../../BrowserView.cpp" line="125"/>
<source>Open in new window</source>
<translation type="unfinished">Open in new window</translation>
</message>
</context>
<context>
<name>Workbench</name>
<message>
<location filename="../../Workbench.cpp" line="46"/>
<source>Navigation</source>
<translation type="unfinished">Navigation</translation>
</message>
</context>
</TS>
| lgpl-2.1 |
lukasz-skalski/libwebsockets | lib/extension.c | 4677 | #include "private-libwebsockets.h"
#include "extension-deflate-frame.h"
#include "extension-deflate-stream.h"
struct libwebsocket_extension libwebsocket_internal_extensions[] = {
#ifdef LWS_EXT_DEFLATE_STREAM
{
"deflate-stream",
lws_extension_callback_deflate_stream,
sizeof(struct lws_ext_deflate_stream_conn)
},
#else
{
"x-webkit-deflate-frame",
lws_extension_callback_deflate_frame,
sizeof(struct lws_ext_deflate_frame_conn)
},
{
"deflate-frame",
lws_extension_callback_deflate_frame,
sizeof(struct lws_ext_deflate_frame_conn)
},
#endif
{ /* terminator */
NULL, NULL, 0
}
};
LWS_VISIBLE void
lws_context_init_extensions(struct lws_context_creation_info *info,
struct libwebsocket_context *context)
{
context->extensions = info->extensions;
lwsl_info(" LWS_MAX_EXTENSIONS_ACTIVE: %u\n", LWS_MAX_EXTENSIONS_ACTIVE);
}
LWS_VISIBLE struct libwebsocket_extension *libwebsocket_get_internal_extensions()
{
return libwebsocket_internal_extensions;
}
/* 0 = nobody had nonzero return, 1 = somebody had positive return, -1 = fail */
int lws_ext_callback_for_each_active(struct libwebsocket *wsi, int reason,
void *arg, int len)
{
int n, m, handled = 0;
for (n = 0; n < wsi->count_active_extensions; n++) {
m = wsi->active_extensions[n]->callback(
wsi->protocol->owning_server,
wsi->active_extensions[n], wsi,
reason,
wsi->active_extensions_user[n],
arg, len);
if (m < 0) {
lwsl_ext(
"Extension '%s' failed to handle callback %d!\n",
wsi->active_extensions[n]->name, reason);
return -1;
}
if (m > handled)
handled = m;
}
return handled;
}
int lws_ext_callback_for_each_extension_type(
struct libwebsocket_context *context, struct libwebsocket *wsi,
int reason, void *arg, int len)
{
int n = 0, m, handled = 0;
struct libwebsocket_extension *ext = context->extensions;
while (ext && ext->callback && !handled) {
m = ext->callback(context, ext, wsi, reason,
(void *)(long)n, arg, len);
if (m < 0) {
lwsl_ext(
"Extension '%s' failed to handle callback %d!\n",
wsi->active_extensions[n]->name, reason);
return -1;
}
if (m)
handled = 1;
ext++;
n++;
}
return 0;
}
int
lws_issue_raw_ext_access(struct libwebsocket *wsi,
unsigned char *buf, size_t len)
{
int ret;
struct lws_tokens eff_buf;
int m;
int n = 0;
eff_buf.token = (char *)buf;
eff_buf.token_len = len;
/*
* while we have original buf to spill ourselves, or extensions report
* more in their pipeline
*/
ret = 1;
while (ret == 1) {
/* default to nobody has more to spill */
ret = 0;
/* show every extension the new incoming data */
m = lws_ext_callback_for_each_active(wsi,
LWS_EXT_CALLBACK_PACKET_TX_PRESEND, &eff_buf, 0);
if (m < 0)
return -1;
if (m) /* handled */
ret = 1;
if ((char *)buf != eff_buf.token)
/*
* extension recreated it:
* need to buffer this if not all sent
*/
wsi->u.ws.clean_buffer = 0;
/* assuming they left us something to send, send it */
if (eff_buf.token_len) {
n = lws_issue_raw(wsi, (unsigned char *)eff_buf.token,
eff_buf.token_len);
if (n < 0) {
lwsl_info("closing from ext access\n");
return -1;
}
/* always either sent it all or privately buffered */
if (wsi->u.ws.clean_buffer) {
eff_buf.token_len = n;
len = n;
}
}
lwsl_parser("written %d bytes to client\n", n);
/* no extension has more to spill? Then we can go */
if (!ret)
break;
/* we used up what we had */
eff_buf.token = NULL;
eff_buf.token_len = 0;
/*
* Did that leave the pipe choked?
* Or we had to hold on to some of it?
*/
if (!lws_send_pipe_choked(wsi) && !wsi->truncated_send_len)
/* no we could add more, lets's do that */
continue;
lwsl_debug("choked\n");
/*
* Yes, he's choked. Don't spill the rest now get a callback
* when he is ready to send and take care of it there
*/
libwebsocket_callback_on_writable(
wsi->protocol->owning_server, wsi);
wsi->extension_data_pending = 1;
ret = 0;
}
return len;
}
int
lws_any_extension_handled(struct libwebsocket_context *context,
struct libwebsocket *wsi,
enum libwebsocket_extension_callback_reasons r,
void *v, size_t len)
{
int n;
int handled = 0;
/* maybe an extension will take care of it for us */
for (n = 0; n < wsi->count_active_extensions && !handled; n++) {
if (!wsi->active_extensions[n]->callback)
continue;
handled |= wsi->active_extensions[n]->callback(context,
wsi->active_extensions[n], wsi,
r, wsi->active_extensions_user[n], v, len);
}
return handled;
}
| lgpl-2.1 |
Pistachioman/pcsx2 | 3rdparty/w32pthreads/pthread_delay_np.c | 4827 | /*
* pthreads_delay_np.c
*
* Description:
* This translation unit implements non-portable thread functions.
*
* --------------------------------------------------------------------------
*
* Pthreads-win32 - POSIX Threads Library for Win32
* Copyright(C) 1998 John E. Bossom
* Copyright(C) 1999,2005 Pthreads-win32 contributors
*
* Contact Email: [email protected]
*
* The current list of contributors is contained
* in the file CONTRIBUTORS included with the source
* code distribution. The list can also be seen at the
* following World Wide Web location:
* http://sources.redhat.com/pthreads-win32/contributors.html
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library in the file COPYING.LIB;
* if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
#include "ptw32pch.h"
/*
* pthread_delay_np
*
* DESCRIPTION
*
* This routine causes a thread to delay execution for a specific period of time.
* This period ends at the current time plus the specified interval. The routine
* will not return before the end of the period is reached, but may return an
* arbitrary amount of time after the period has gone by. This can be due to
* system load, thread priorities, and system timer granularity.
*
* Specifying an interval of zero (0) seconds and zero (0) nanoseconds is
* allowed and can be used to force the thread to give up the processor or to
* deliver a pending cancelation request.
*
* The timespec structure contains the following two fields:
*
* tv_sec is an integer number of seconds.
* tv_nsec is an integer number of nanoseconds.
*
* Return Values
*
* If an error condition occurs, this routine returns an integer value indicating
* the type of error. Possible return values are as follows:
*
* 0
* Successful completion.
* [EINVAL]
* The value specified by interval is invalid.
*
* Example
*
* The following code segment would wait for 5 and 1/2 seconds
*
* struct timespec tsWait;
* int intRC;
*
* tsWait.tv_sec = 5;
* tsWait.tv_nsec = 500000000L;
* intRC = pthread_delay_np(&tsWait);
*/
int
pthread_delay_np (struct timespec *interval)
{
DWORD wait_time;
DWORD secs_in_millisecs;
DWORD millisecs;
DWORD status;
pthread_t self;
ptw32_thread_t * sp;
if (interval == NULL)
{
return EINVAL;
}
if (interval->tv_sec == 0L && interval->tv_nsec == 0L)
{
pthread_testcancel ();
Sleep (0);
pthread_testcancel ();
return (0);
}
/* convert secs to millisecs */
secs_in_millisecs = interval->tv_sec * 1000L;
/* convert nanosecs to millisecs (rounding up) */
millisecs = (interval->tv_nsec + 999999L) / 1000000L;
#if defined(__WATCOMC__)
#pragma disable_message (124)
#endif
/*
* Most compilers will issue a warning 'comparison always 0'
* because the variable type is unsigned, but we need to keep this
* for some reason I can't recall now.
*/
if (0 > (wait_time = secs_in_millisecs + millisecs))
{
return EINVAL;
}
#if defined(__WATCOMC__)
#pragma enable_message (124)
#endif
if (NULL == (self = pthread_self ()).p)
{
return ENOMEM;
}
sp = (ptw32_thread_t *) self.p;
if (sp->cancelState == PTHREAD_CANCEL_ENABLE)
{
/*
* Async cancelation won't catch us until wait_time is up.
* Deferred cancelation will cancel us immediately.
*/
if (WAIT_OBJECT_0 ==
(status = WaitForSingleObject (sp->cancelEvent, wait_time)))
{
/*
* Canceling!
*/
(void) pthread_mutex_lock (&sp->cancelLock);
if (sp->state < PThreadStateCanceling)
{
sp->state = PThreadStateCanceling;
sp->cancelState = PTHREAD_CANCEL_DISABLE;
(void) pthread_mutex_unlock (&sp->cancelLock);
ptw32_throw (PTW32_EPS_CANCEL);
}
(void) pthread_mutex_unlock (&sp->cancelLock);
return ESRCH;
}
else if (status != WAIT_TIMEOUT)
{
return EINVAL;
}
}
else
{
Sleep (wait_time);
}
return (0);
}
| lgpl-3.0 |
andreoid/testing | brjs-sdk/workspace/sdk/libs/javascript/br-util/src/br/util/Number.js | 4499 | 'use strict';
/**
* @module br/util/Number
*/
var StringUtility = require('br/util/StringUtility');
/**
* @class
* @alias module:br/util/Number
*
* @classdesc
* Utility methods for numbers
*/
function NumberUtil() {
}
/**
* Returns a numeric representation of the sign on the number.
*
* @param {Number} n The number (or a number as a string)
* @return {int} 1 for positive values, -1 for negative values, or the original value for zero and non-numeric values.
*/
NumberUtil.sgn = function(n) {
return n > 0 ? 1 : n < 0 ? -1 : n;
};
/**
* @param {Object} n
* @return {boolean} true for numbers and their string representations and false for other values including non-numeric
* strings, null, Infinity, NaN.
*/
NumberUtil.isNumber = function(n) {
if (typeof n === 'string') {
n = n.trim();
}
return n != null && n !== '' && n - n === 0;
};
/**
* Formats the number to the specified number of decimal places.
*
* @param {Number} n The number (or a number as a string).
* @param {Number} dp The number of decimal places.
* @return {String} The formatted number.
*/
NumberUtil.toFixed = function(n, dp) {
//return this.isNumber(n) && dp != null ? Number(n).toFixed(dp) : n;
//Workaround for IE8/7/6 where toFixed returns 0 for (0.5).toFixed(0) and 0.0 for (0.05).toFixed(1)
if (this.isNumber(n) && dp != null) {
var sgn = NumberUtil.sgn(n);
n = sgn * n;
var nFixed = (Math.round(Math.pow(10, dp)*n)/Math.pow(10, dp)).toFixed(dp);
return (sgn * nFixed).toFixed(dp);
}
return n;
};
/**
* Formats the number to the specified number of significant figures. This fixes the bugs in the native Number function
* of the same name that are prevalent in various browsers. If the number of significant figures is less than one,
* then the function has no effect.
*
* @param {Number} n The number (or a number as a string).
* @param {Number} sf The number of significant figures.
* @return {String} The formatted number.
*/
NumberUtil.toPrecision = function(n, sf) {
return this.isNumber(n) && sf > 0 ? Number(n).toPrecision(sf) : n;
};
/**
* Formats the number to the specified number of decimal places, omitting any trailing zeros.
*
* @param {Number} n The number (or a number as a string).
* @param {Number} rounding The number of decimal places to round.
* @return {String} The rounded number.
*/
NumberUtil.toRounded = function(n, rounding) {
//return this.isNumber(n) && rounding != null ? String(Number(Number(n).toFixed(rounding))) : n;
//Workaround for IE8/7/6 where toFixed returns 0 for (0.5).toFixed(0) and 0.0 for (0.05).toFixed(1)
if (this.isNumber(n) && rounding != null) {
var sgn = NumberUtil.sgn(n);
n = sgn * n;
var nRounded = (Math.round(Math.pow(10, rounding)*n)/Math.pow(10, rounding)).toFixed(rounding);
return sgn * nRounded;
}
return n;
};
/**
* Logarithm to base 10.
*
* @param {Number} n The number (or a number as a string).
* @return {Number} The logarithm to base 10.
*/
NumberUtil.log10 = function(n) {
return Math.log(n) / Math.LN10;
};
/**
* Rounds a floating point number
*
* @param {Number} n The number (or a number as a string).
* @return {Number} The formatted number.
*/
NumberUtil.round = function(n) {
var dp = 13 - (n ? Math.ceil(this.log10(Math.abs(n))) : 0);
return this.isNumber(n) ? Number(Number(n).toFixed(dp)) : n;
};
/**
* Pads the integer part of a number with zeros to reach the specified length.
*
* @param {Number} value The number (or a number as a string).
* @param {Number} numLength The required length of the number.
* @return {String} The formatted number.
*/
NumberUtil.pad = function(value, numLength) {
if (this.isNumber(value)) {
var nAbsolute = Math.abs(value);
var sInteger = new String(parseInt(nAbsolute));
var nSize = numLength || 0;
var sSgn = value < 0 ? "-" : "";
value = sSgn + StringUtility.repeat("0", nSize - sInteger.length) + nAbsolute;
}
return value;
};
/**
* Counts the amount of decimal places within a number.
* Also supports scientific notations
*
* @param {Number} n The number (or a number as a string).
* @return {Number} The number of decimal places
*/
NumberUtil.decimalPlaces = function(n) {
var match = (''+n).match(/(?:\.(\d+))?(?:[eE]([+-]?\d+))?$/);
if (!match) {
return 0;
}
return Math.max(
0,
// Number of digits right of decimal point.
(match[1] ? match[1].length : 0)
// Adjust for scientific notation.
- (match[2] ? +match[2] : 0));
}
module.exports = NumberUtil;
| lgpl-3.0 |
ogre0403/hraven | hraven-etl/src/main/java/com/twitter/hraven/etl/JobRunner.java | 2525 | /*
Copyright 2012 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.hraven.etl;
import java.util.concurrent.Callable;
import org.apache.hadoop.mapreduce.Job;
/**
* Can be used to run a single Hadoop job. The {@link #call()} method will block
* until the job is complete and will return a non-null return value indicating
* the success of the Hadoop job.
*/
public class JobRunner implements Callable<Boolean> {
private volatile boolean isCalled = false;
private final Job job;
/**
* Post processing step that gets called upon successful completion of the
* Hadoop job.
*/
private final Callable<Boolean> postProcessor;
/**
* Constructor
*
* @param job
* to job to run in the call method.
* @param postProcessor
* Post processing step that gets called upon successful completion
* of the Hadoop job. Can be null, in which case it will be skipped.
* Final results will be the return value of this final processing
* step.
*/
public JobRunner(Job job, Callable<Boolean> postProcessor) {
this.job = job;
this.postProcessor = postProcessor;
}
/*
* (non-Javadoc)
*
* @see java.util.concurrent.Callable#call()
*/
@Override
public Boolean call() throws Exception {
// Guard to make sure we get called only once.
if (isCalled) {
return false;
} else {
isCalled = true;
}
if (job == null) {
return false;
}
boolean success = false;
// Schedule the job on the JobTracker and wait for it to complete.
try {
success = job.waitForCompletion(true);
} catch (InterruptedException interuptus) {
// We're told to stop, so honor that.
// And restore interupt status.
Thread.currentThread().interrupt();
// Indicate that we should NOT run the postProcessor.
success = false;
}
if (success && (postProcessor != null)) {
success = postProcessor.call();
}
return success;
}
}
| apache-2.0 |
darroyocazorla/crossdata | testsIT/src/test/scala/com/stratio/crossdata/driver/querybuilder/QueryBuilderSpec.scala | 13639 | /*
* Copyright (C) 2015 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.crossdata.driver.querybuilder
import java.sql.{Date, Timestamp}
import java.util.GregorianCalendar
import com.stratio.crossdata.driver.querybuilder
import com.stratio.crossdata.test.BaseXDTest
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class QueryBuilderSpec extends BaseXDTest {
"The Query Builder" should "be able to build a completed query using strings" in {
val query = select("col, '1', max(col)") from "table inner join table2 on a = b" where "a = b" groupBy "col" having "a = b" orderBy "col ASC" limit 5
val expected = """
| SELECT col, '1', max(col)
| FROM table inner join table2 on a = b
| WHERE a = b
| GROUP BY col
| HAVING a = b
| ORDER BY col ASC
| LIMIT 5
"""
compareAfterFormatting(query, expected)
}
it should "be able to add a where clause on a limited query" in {
val query = selectAll from 'table limit 1 where 'a < 5
val expected = """
| SELECT * FROM table
| WHERE a < 5
| LIMIT 1
"""
compareAfterFormatting(query, expected)
}
it should "be able to add a where clause on a limited query which contains filters" in {
val query = selectAll from 'table where 'a > 10 limit 1 where 'a < 5
val expected = """
| SELECT * FROM table
| WHERE (a > 10) AND (a < 5)
| LIMIT 1
"""
compareAfterFormatting(query, expected)
}
it should "be able to join several queries" in {
val query = (selectAll from 'table) unionAll (selectAll from 'table2) unionAll (selectAll from 'table3)
val expected = """
| SELECT * FROM table
| UNION ALL
| SELECT * FROM table2
| UNION ALL
| SELECT * FROM table3
"""
compareAfterFormatting(query, expected)
}
it should "support union distinct to join runnable queries" in {
val query = (selectAll from 'table) unionDistinct (selectAll from 'table2)
val expected = """
| SELECT * FROM table
| UNION DISTINCT
| SELECT * FROM table2
"""
compareAfterFormatting(query, expected)
}
it should "support intersect to join runnable queries" in {
val query = (selectAll from 'table) intersect (selectAll from 'table2)
val expected = """
| SELECT * FROM table
| INTERSECT
| SELECT * FROM table2
"""
compareAfterFormatting(query, expected)
}
it should "support except to join runnable queries" in {
val query = (selectAll from 'table) except (selectAll from 'table2)
val expected = """
| SELECT * FROM table
| EXCEPT
| SELECT * FROM table2
"""
compareAfterFormatting(query, expected)
}
it should "not allow to add a filter on a combined query" in {
the[Error] thrownBy {
(selectAll from 'table) unionAll (selectAll from 'table2) where "a = b"
} should have message "Predicates cannot by applied to combined queries"
}
it should "be able to build a query containing predicates with objects" in {
val query = selectAll from 'table where ('a < new Date(0) or 'a > 5)
val expected = """
| SELECT * FROM table
| WHERE
| (a < '1970-01-01')
| OR
| (a > 5)
"""
compareAfterFormatting(query, expected)
}
it should "be able to build a query containing a subquery as a relation" in {
val query_1 = select('c).from('table)
val query = selectAll from query_1
val expected = """
| SELECT * FROM
| ( SELECT c FROM table )
"""
compareAfterFormatting(query, expected)
}
it should "be able to build a query and add filters on the fly" in {
val query = select('amount).from('table)
val expected = """
| SELECT amount FROM table WHERE amount > 5
"""
compareAfterFormatting(query.where('amount > 5), expected)
}
it should "be able to build a query containing a subquery as a predicate" in {
val query = select('c + 4).from('table).where('col === (select('c) from 't))
val expected = """
| SELECT c + 4 FROM table
| WHERE col = ( SELECT c FROM t )
"""
compareAfterFormatting(query, expected)
}
it should "be able to build a completed query without distinct" in {
val query = select(distinct('col)) from 'test where ('quantity > 10) groupBy 'age having ('age > 25) orderBy 'age limit 10
val expected = """
| SELECT DISTINCT col
| FROM test
| WHERE quantity > 10
| GROUP BY age
| HAVING age > 25
| ORDER BY age
| LIMIT 10
"""
compareAfterFormatting(query, expected)
}
it should "be able to build a query with a subquery" in {
val query = select("alias.name") from ((selectAll from 'table) as 'alias)
val expected = """
| SELECT alias.name
| FROM (
| SELECT * FROM table
| ) AS alias
"""
compareAfterFormatting(query, expected)
}
it should "be able to build a query with an inner join clause" in {
val query = select('name, 'age, 'quantity) from ('test innerJoin 'animals on "test.id = animals.id")
val expected = """
| SELECT name, age, quantity
| FROM test
| JOIN animals
| ON test.id = animals.id
"""
compareAfterFormatting(query, expected)
}
it should "be able to build a query with a left semi join clause" in {
val query = selectAll from ('t1 leftSemiJoin 't2)
val expected = """
| SELECT * FROM t1
| LEFT SEMI JOIN
| t2
"""
compareAfterFormatting(query, expected)
}
it should "be able to build a query with a left outer join clause" in {
val query = selectAll from ('t1 leftOuterJoin 't2)
val expected = """
| SELECT * FROM t1
| LEFT OUTER JOIN
| t2
"""
compareAfterFormatting(query, expected)
}
it should "be able to build a query with a right outer join clause" in {
val query = selectAll from ('t1 rightOuterJoin 't2)
val expected = """
| SELECT * FROM t1
| RIGHT OUTER JOIN
| t2
"""
compareAfterFormatting(query, expected)
}
it should "be able to build a query with a full outer join clause" in {
val query = selectAll from ('t1 fullOuterJoin 't2)
val expected = """
| SELECT * FROM t1
| FULL OUTER JOIN
| t2
"""
compareAfterFormatting(query, expected)
}
it should "be able to maintain user associations" in {
val query = select (('a + 13) * ('hola + 2) + 5) from 'test
val expected = """
| SELECT ((a + 13) * (hola + 2)) + 5
| FROM test
"""
compareAfterFormatting(query, expected)
}
it should "be able to support aliases" in {
val query = select ('a as 'alias) from ('test as 'talias, (selectAll from 'table) as 'qalias)
val expected = """
| SELECT a AS alias
| FROM test AS talias JOIN ( SELECT * FROM table ) AS qalias
"""
compareAfterFormatting(query, expected)
}
/*
This test is here as documentation. Actually, its testing Scala since
a mathematical precedence order is guaranteed by Scala's method names precedence table.
Check "Programming in Scala: A comprehensive step-by-step guide", M.Ordersky,
Section "5.8 - Operator precedence and associativity".
*/
it should "make use of Scala's method names precedence rules" in {
val query = select ('a, 'c - 'd * 'a) from 'test
val expected = "SELECT a, c - (d * a) FROM test"
compareAfterFormatting(query, expected)
}
it should "keep operator precedence provided by the user through the use of parenthesis" in {
val query = select ('a, 'b * ( 'c - 'd )) from 'test
val expected = "SELECT a, b * (c - d) FROM test"
compareAfterFormatting(query, expected)
}
it should "generate correct queries using arithmetic operators" in {
val arithmeticExpressions = ('a + 'b)::('c - 'd)::('e * 'f)::('g / 'h)::('i % 'j)::Nil
val baseQuery = select (arithmeticExpressions:_*) from 'test
val query = (baseQuery /: arithmeticExpressions) {
(q, op) => q.where(op === 'ref)
}
val expectedExpressions = "a + b"::"c - d"::"e * f"::"g / h"::"i % j"::Nil
val expected = s"""
|SELECT ${expectedExpressions mkString ", "}
|FROM test
|WHERE ${expectedExpressions.map(exp => s"($exp = ref)") mkString " AND "}
|""".stripMargin
compareAfterFormatting(query, expected)
}
it should "generate insert-select queries" in {
val selQueryStr = "SELECT a FROM sourceTable"
Seq(
(insert into 'test select 'a from 'sourceTable, s"INSERT INTO test $selQueryStr"),
(insert overwrite 'test select 'a from 'sourceTable, s"INSERT OVERWRITE test $selQueryStr")
) foreach { case (query, expected) =>
compareAfterFormatting(query, expected)
}
}
it should "be able to support common functions in the select expression" in {
val query = select(
distinct('col), countDistinct('col), sumDistinct('col),
count(querybuilder.all), approxCountDistinct('col, 0.95),
avg('col), min('col), max('col), sum('col), abs('col)
) from 'table
val expected = """
| SELECT DISTINCT col, count( DISTINCT col), sum( DISTINCT col),
| count(*), APPROXIMATE (0.95) count ( DISTINCT col),
| avg(col), min(col), max(col), sum(col), abs(col)
| FROM table
"""
compareAfterFormatting(query, expected)
}
it should "be able to allow different order selections" in {
val queryAsc = selectAll from 'table orderBy('col asc)
val queryDesc = selectAll from 'table sortBy('col desc)
val expectedAsc =
"""
| SELECT *
| FROM table
| ORDER BY col ASC
"""
val expectedDesc =
"""
| SELECT *
| FROM table
| SORT BY col DESC
"""
compareAfterFormatting(queryAsc, expectedAsc)
compareAfterFormatting(queryDesc, expectedDesc)
}
it should "be able to support comparison predicates" in {
val query = selectAll from 'table where( !('a < 5 && 'a <= 5 && 'a > 5 && 'a >=5 && 'a === 5 && 'a <> 5 || false))
val expected =
"""
| SELECT *
| FROM table
| WHERE !(((a < 5) AND (a <= 5) AND (a > 5) AND (a >= 5) AND (a = 5) AND (a <> 5)) OR false)
"""
compareAfterFormatting(query, expected)
}
it should "be able to support common predicates" in {
val query = selectAll from 'table where ( ('a in (2,3,4)) && ('b like "%R") && ('b isNull) && ('b isNotNull))
val expected =
"""
| SELECT *
| FROM table
| WHERE ( a IN (2,3,4)) AND (b LIKE '%R') AND ( b IS NULL) AND ( b IS NOT NULL)
"""
compareAfterFormatting(query, expected)
}
it should "be able to support SparkSQL types" in {
val timestampVal = new Timestamp(new GregorianCalendar(1970,0,1,0,0,0).getTimeInMillis)
val query = selectAll from 'table where ( ('a <> "string") && ('a <> 5f) && ('a <> true) && ('a <> timestampVal) && ('a <> new java.math.BigDecimal(1)))
val expected =
"""
| SELECT * FROM table
| WHERE (a <> 'string') AND (a <> 5.0) AND (a <> true) AND (a <> '1970-01-01 00:00:00.0') AND (a <> 1)
|
"""
compareAfterFormatting(query, expected)
}
/** Asserts that the built query equals the expected one once both are
  * normalised by [[formatOutput]] (margin stripped, whitespace collapsed). */
def compareAfterFormatting(query: RunnableQuery, expected: String) = {
formatOutput(query.build) should be(formatOutput(expected))
}
/** Normalises a query string for comparison: strips the margin, collapses
  * every run of whitespace (spaces, tabs and any kind of line break) into a
  * single space and trims the result.
  *
  * The previous implementation replaced only `System.lineSeparator()`, which
  * on Windows is "\r\n" and therefore never matches the literal "\n"
  * characters embedded in triple-quoted string literals; collapsing `\s+`
  * handles every platform uniformly.
  */
def formatOutput(query: String): String =
  query.stripMargin.replaceAll("\\s+", " ").trim
}
| apache-2.0 |
weswigham/TypeScript | tests/baselines/reference/moduleAugmentationsImports4.js | 1844 | //// [tests/cases/compiler/moduleAugmentationsImports4.ts] ////
//// [a.ts]
export class A {}
//// [b.ts]
export class B {x: number;}
//// [c.d.ts]
declare module "C" {
class Cls {y: string; }
}
//// [d.d.ts]
declare module "D" {
import {A} from "a";
import {B} from "b";
module "a" {
interface A {
getB(): B;
}
}
}
//// [e.d.ts]
/// <reference path="c.d.ts"/>
declare module "E" {
import {A} from "a";
import {Cls} from "C";
module "a" {
interface A {
getCls(): Cls;
}
}
}
//// [main.ts]
/// <reference path="d.d.ts"/>
/// <reference path="e.d.ts"/>
import {A} from "./a";
import "D";
import "E";
let a: A;
let b = a.getB().x.toFixed();
let c = a.getCls().y.toLowerCase();
//// [f.js]
define("a", ["require", "exports"], function (require, exports) {
"use strict";
exports.__esModule = true;
var A = /** @class */ (function () {
function A() {
}
return A;
}());
exports.A = A;
});
define("b", ["require", "exports"], function (require, exports) {
"use strict";
exports.__esModule = true;
var B = /** @class */ (function () {
function B() {
}
return B;
}());
exports.B = B;
});
define("main", ["require", "exports", "D", "E"], function (require, exports) {
"use strict";
exports.__esModule = true;
var a;
var b = a.getB().x.toFixed();
var c = a.getCls().y.toLowerCase();
});
//// [f.d.ts]
/// <reference path="tests/cases/compiler/d.d.ts" />
/// <reference path="tests/cases/compiler/e.d.ts" />
declare module "a" {
export class A {
}
}
declare module "b" {
export class B {
x: number;
}
}
declare module "main" {
import "D";
import "E";
}
| apache-2.0 |
graydon/rust | src/test/ui/macros/bang-after-name.rs | 137 | // run-rustfix
#[allow(unused_macros)]
macro_rules! foo! { //~ ERROR macro names aren't followed by a `!`
() => {};
}
fn main() {}
| apache-2.0 |
hodgesds/streamparse | streamparse/dsl/component.py | 976 | """
Component-level Specification
This module is called component to mirror organization of storm package.
"""
from ..storm.component import Component
class Specification(object):
    """Topology-level description of a single component.

    Ties a :class:`Component` subclass to its topology settings (name and
    parallelism hint).
    """

    def __init__(self, component_cls, name=None, parallelism=1):
        """Validate and store the component class and its settings.

        :param component_cls: the ``Component`` subclass this spec describes.
        :param name: optional component name within the topology.
        :param parallelism: number of parallel executors; must be a positive
            ``int``.
        :raises TypeError: if ``component_cls`` is not a ``Component``
            subclass.
        :raises ValueError: if ``parallelism`` is not a positive integer.
        """
        if not issubclass(component_cls, Component):
            raise TypeError("Invalid component: {}".format(component_cls))
        # bool subclasses int, so isinstance(True, int) is True; reject it
        # explicitly so parallelism=True does not silently mean 1.
        if (isinstance(parallelism, bool) or
                not isinstance(parallelism, int) or parallelism < 1):
            raise ValueError("Parallelism must be a integer greater than 0")
        self.component_cls = component_cls
        self.name = name
        self.parallelism = parallelism

    def resolve_dependencies(self, specifications):
        """Allows specification subclasses to resolve any dependencies
        that they may have on other specifications.

        :param specifications: all of the specification objects for this
                               topology.
        :type specifications: dict
        """
        pass
adrapereira/jena | jena-fuseki2/jena-fuseki-core/src/main/java/org/apache/jena/fuseki/servlets/SPARQL_QueryDataset.java | 2072 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.jena.fuseki.servlets;
import org.apache.jena.query.Dataset ;
import org.apache.jena.query.DatasetFactory ;
import org.apache.jena.query.Query ;
import org.apache.jena.sparql.core.DatasetDescription ;
import org.apache.jena.sparql.core.DatasetGraph ;
import org.apache.jena.sparql.core.DynamicDatasets ;
/**
 * SPARQL Query operation over the service's dataset. Any dataset description
 * supplied in the protocol request (default-graph-uri / named-graph-uri) is
 * honoured by building a dynamic dataset view over the active dataset.
 */
public class SPARQL_QueryDataset extends SPARQL_Query
{
    // NOTE(review): the 'verbose' flag is accepted but currently ignored.
    public SPARQL_QueryDataset(boolean verbose) { super() ; }

    public SPARQL_QueryDataset()
    { this(false) ; }

    // No request-level validation beyond what SPARQL_Query already performs.
    @Override
    protected void validateRequest(HttpAction action)
    { }

    // No query-level validation; any parsed query is accepted.
    @Override
    protected void validateQuery(HttpAction action, Query query)
    { }

    /**
     * Chooses the dataset for this query: the action's active dataset graph,
     * narrowed to a dynamic view when the protocol request carries a dataset
     * description.
     */
    @Override
    protected Dataset decideDataset(HttpAction action, Query query, String queryStringLog)
    {
        DatasetGraph dsg = action.getActiveDSG() ;
        // query.getDatasetDescription() ;
        // Protocol.
        DatasetDescription dsDesc = getDatasetDescription(action) ;
        if (dsDesc != null )
        {
            //errorBadRequest("SPARQL Query: Dataset description in the protocol request") ;
            dsg = DynamicDatasets.dynamicDataset(dsDesc, dsg, false) ;
        }
        return DatasetFactory.create(dsg) ;
    }
}
| apache-2.0 |
laosiaudi/tensorflow | tensorflow/g3doc/api_docs/python/functions_and_classes/shard4/tf.contrib.layers.batch_norm.md | 3965 | ### `tf.contrib.layers.batch_norm(*args, **kwargs)` {#batch_norm}
Adds a Batch Normalization layer from http://arxiv.org/abs/1502.03167.
"Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift"
Sergey Ioffe, Christian Szegedy
Can be used as a normalizer function for conv2d and fully_connected.
Note: When is_training is True the moving_mean and moving_variance need to be
updated, by default the update_ops are placed in `tf.GraphKeys.UPDATE_OPS` so
they need to be added as a dependency to the `train_op`, example:
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
if update_ops:
updates = tf.group(*update_ops)
total_loss = control_flow_ops.with_dependencies([updates], total_loss)
One can set updates_collections=None to force the updates in place, but that
can have speed penalty, specially in distributed settings.
##### Args:
* <b>`inputs`</b>: a tensor with 2 or more dimensions, where the first dimension has
`batch_size`. The normalization is over all but the last dimension if
`data_format` is `NHWC` and the second dimension if `data_format` is
`NCHW`.
* <b>`decay`</b>: decay for the moving average. Reasonable values for `decay` are close
to 1.0, typically in the multiple-nines range: 0.999, 0.99, 0.9, etc. Lower
`decay` value (recommend trying `decay`=0.9) if model experiences reasonably
good training performance but poor validation and/or test performance.
* <b>`center`</b>: If True, subtract `beta`. If False, `beta` is ignored.
* <b>`scale`</b>: If True, multiply by `gamma`. If False, `gamma` is
not used. When the next layer is linear (also e.g. `nn.relu`), this can be
disabled since the scaling can be done by the next layer.
* <b>`epsilon`</b>: small float added to variance to avoid dividing by zero.
* <b>`activation_fn`</b>: activation function, default set to None to skip it and
maintain a linear activation.
* <b>`param_initializers`</b>: optional initializers for beta, gamma, moving mean and
moving variance.
* <b>`updates_collections`</b>: collections to collect the update ops for computation.
The updates_ops need to be executed with the train_op.
If None, a control dependency would be added to make sure the updates are
computed in place.
* <b>`is_training`</b>: whether or not the layer is in training mode. In training mode
it would accumulate the statistics of the moments into `moving_mean` and
`moving_variance` using an exponential moving average with the given
`decay`. When it is not in training mode then it would use the values of
the `moving_mean` and the `moving_variance`.
* <b>`reuse`</b>: whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
* <b>`variables_collections`</b>: optional collections for the variables.
* <b>`outputs_collections`</b>: collections to add the outputs.
* <b>`trainable`</b>: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
* <b>`batch_weights`</b>: An optional tensor of shape `[batch_size]`,
containing a frequency weight for each batch item. If present,
then the batch normalization uses weighted mean and
variance. (This can be used to correct for bias in training
example selection.)
* <b>`fused`</b>: Use nn.fused_batch_norm if True, nn.batch_normalization otherwise.
* <b>`data_format`</b>: A string. `NHWC` (default) and `NCHW` are supported.
* <b>`scope`</b>: Optional scope for `variable_scope`.
##### Returns:
A `Tensor` representing the output of the operation.
##### Raises:
* <b>`ValueError`</b>: if `batch_weights` is not None and `fused` is True.
* <b>`ValueError`</b>: if `data_format` is neither `NHWC` nor `NCHW`.
* <b>`ValueError`</b>: if the rank of `inputs` is undefined.
* <b>`ValueError`</b>: if rank or channels dimension of `inputs` is undefined.
| apache-2.0 |
HonzaKral/elasticsearch | server/src/test/java/org/elasticsearch/search/basic/TransportTwoNodesSearchIT.java | 19747 | /*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.basic;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.search.MultiSearchResponse;
import org.elasticsearch.action.search.SearchPhaseExecutionException;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Requests;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.query.MatchQueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.query.functionscore.ScriptScoreFunctionBuilder;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.bucket.filter.Filter;
import org.elasticsearch.search.aggregations.bucket.global.Global;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.sort.SortOrder;
import org.elasticsearch.test.ESIntegTestCase;
import java.io.IOException;
import java.util.Collections;
import java.util.Set;
import java.util.TreeSet;
import static org.elasticsearch.action.search.SearchType.DFS_QUERY_THEN_FETCH;
import static org.elasticsearch.action.search.SearchType.QUERY_THEN_FETCH;
import static org.elasticsearch.client.Requests.createIndexRequest;
import static org.elasticsearch.client.Requests.searchRequest;
import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.index.query.QueryBuilders.termQuery;
import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;
import static org.hamcrest.Matchers.startsWith;
public class TransportTwoNodesSearchIT extends ESIntegTestCase {
@Override
protected int numberOfReplicas() {
return 0;
}
/** Prepares the "test" index with the default shard count. */
private Set<String> prepareData() throws Exception {
    return prepareData(-1);
}
/**
 * Creates the "test" index, indexes 100 documents with ids "0".."99" and
 * refreshes so they are searchable.
 *
 * @param numShards number of primary shards, or a non-positive value to use
 *                  the randomized test defaults
 * @return the set of all indexed document ids
 */
private Set<String> prepareData(int numShards) throws Exception {
    Set<String> fullExpectedIds = new TreeSet<>();
    Settings.Builder settingsBuilder = Settings.builder()
            .put(indexSettings());
    if (numShards > 0) {
        settingsBuilder.put(SETTING_NUMBER_OF_SHARDS, numShards);
    }
    client().admin().indices().create(createIndexRequest("test")
            .settings(settingsBuilder)
            .simpleMapping("foo", "type=geo_point"))
            .actionGet();
    ensureGreen();
    for (int i = 0; i < 100; i++) {
        index(Integer.toString(i), "test", i);
        fullExpectedIds.add(Integer.toString(i));
    }
    refresh();
    return fullExpectedIds;
}
/** Indexes one test document with the given id, name value and age. */
private void index(String id, String nameValue, int age) throws IOException {
    client().index(Requests.indexRequest("test").id(id).source(source(id, nameValue, age))).actionGet();
}
/**
 * Builds the JSON source for one test document. The "multi" field repeats
 * {@code nameValue} {@code age + 1} times so that term frequency grows with
 * age (used by the scoring assertions in this class).
 */
private XContentBuilder source(String id, String nameValue, int age) throws IOException {
    StringBuilder multi = new StringBuilder().append(nameValue);
    for (int i = 0; i < age; i++) {
        multi.append(" ").append(nameValue);
    }
    return jsonBuilder().startObject()
            .field("id", id)
            .field("nid", Integer.parseInt(id))
            .field("name", nameValue + id)
            .field("age", age)
            .field("multi", multi.toString())
            .endObject();
}
public void testDfsQueryThenFetch() throws Exception {
Settings.Builder settingsBuilder = Settings.builder()
.put(indexSettings());
client().admin().indices().create(createIndexRequest("test")
.settings(settingsBuilder))
.actionGet();
ensureGreen();
// we need to have age (ie number of repeats of "test" term) high enough
// to produce the same 8-bit norm for all docs here, so that
// the tf is basically the entire score (assuming idf is fixed, which
// it should be if dfs is working correctly)
// With the current way of encoding norms, every length between 1048 and 1176
// are encoded into the same byte
for (int i = 1048; i < 1148; i++) {
index(Integer.toString(i - 1048), "test", i);
}
refresh();
int total = 0;
SearchResponse searchResponse = client().prepareSearch("test").setSearchType(DFS_QUERY_THEN_FETCH)
.setQuery(termQuery("multi", "test")).setSize(60).setExplain(true).setScroll(TimeValue.timeValueSeconds(30)).get();
while (true) {
assertNoFailures(searchResponse);
assertThat(searchResponse.getHits().getTotalHits().value, equalTo(100L));
SearchHit[] hits = searchResponse.getHits().getHits();
if (hits.length == 0) {
break; // finished
}
for (int i = 0; i < hits.length; ++i) {
SearchHit hit = hits[i];
assertThat(hit.getExplanation(), notNullValue());
assertThat(hit.getExplanation().getDetails().length, equalTo(1));
assertThat(hit.getExplanation().getDetails()[0].getDetails().length, equalTo(3));
assertThat(hit.getExplanation().getDetails()[0].getDetails()[1].getDetails().length, equalTo(2));
assertThat(hit.getExplanation().getDetails()[0].getDetails()[1].getDetails()[0].getDescription(),
startsWith("n,"));
assertThat(hit.getExplanation().getDetails()[0].getDetails()[1].getDetails()[0].getValue(),
equalTo(100L));
assertThat(hit.getExplanation().getDetails()[0].getDetails()[1].getDetails()[1].getDescription(),
startsWith("N,"));
assertThat(hit.getExplanation().getDetails()[0].getDetails()[1].getDetails()[1].getValue(),
equalTo(100L));
assertThat("id[" + hit.getId() + "] -> " + hit.getExplanation().toString(), hit.getId(),
equalTo(Integer.toString(100 - total - i - 1)));
}
total += hits.length;
searchResponse = client().prepareSearchScroll(searchResponse.getScrollId()).setScroll(TimeValue.timeValueSeconds(30))
.get();
}
clearScroll(searchResponse.getScrollId());
assertEquals(100, total);
}
public void testDfsQueryThenFetchWithSort() throws Exception {
prepareData();
int total = 0;
SearchResponse searchResponse = client().prepareSearch("test").setSearchType(DFS_QUERY_THEN_FETCH)
.setQuery(termQuery("multi", "test")).setSize(60).setExplain(true).addSort("age", SortOrder.ASC)
.setScroll(TimeValue.timeValueSeconds(30)).get();
while (true) {
assertNoFailures(searchResponse);
assertThat(searchResponse.getHits().getTotalHits().value, equalTo(100L));
SearchHit[] hits = searchResponse.getHits().getHits();
if (hits.length == 0) {
break; // finished
}
for (int i = 0; i < hits.length; ++i) {
SearchHit hit = hits[i];
assertThat(hit.getExplanation(), notNullValue());
assertThat(hit.getExplanation().getDetails().length, equalTo(1));
assertThat(hit.getExplanation().getDetails()[0].getDetails().length, equalTo(3));
assertThat(hit.getExplanation().getDetails()[0].getDetails()[1].getDetails().length, equalTo(2));
assertThat(hit.getExplanation().getDetails()[0].getDetails()[1].getDetails()[0].getDescription(),
startsWith("n,"));
assertThat(hit.getExplanation().getDetails()[0].getDetails()[1].getDetails()[0].getValue(),
equalTo(100L));
assertThat(hit.getExplanation().getDetails()[0].getDetails()[1].getDetails()[1].getDescription(),
startsWith("N,"));
assertThat(hit.getExplanation().getDetails()[0].getDetails()[1].getDetails()[1].getValue(),
equalTo(100L));
assertThat("id[" + hit.getId() + "]", hit.getId(), equalTo(Integer.toString(total + i)));
}
total += hits.length;
searchResponse = client().prepareSearchScroll(searchResponse.getScrollId()).setScroll(TimeValue.timeValueSeconds(30)).get();
}
clearScroll(searchResponse.getScrollId());
assertEquals(100, total);
}
public void testQueryThenFetch() throws Exception {
prepareData();
int total = 0;
SearchResponse searchResponse = client().prepareSearch("test").setSearchType(QUERY_THEN_FETCH).setQuery(termQuery("multi", "test"))
.setSize(60).setExplain(true).addSort("nid", SortOrder.DESC).setScroll(TimeValue.timeValueSeconds(30)).get();
while (true) {
assertNoFailures(searchResponse);
assertThat(searchResponse.getHits().getTotalHits().value, equalTo(100L));
SearchHit[] hits = searchResponse.getHits().getHits();
if (hits.length == 0) {
break; // finished
}
for (int i = 0; i < hits.length; ++i) {
SearchHit hit = hits[i];
assertThat(hit.getExplanation(), notNullValue());
assertThat("id[" + hit.getId() + "]", hit.getId(), equalTo(Integer.toString(100 - total - i - 1)));
}
total += hits.length;
searchResponse = client().prepareSearchScroll(searchResponse.getScrollId()).setScroll(TimeValue.timeValueSeconds(30)).get();
}
clearScroll(searchResponse.getScrollId());
assertEquals(100, total);
}
public void testQueryThenFetchWithFrom() throws Exception {
Set<String> fullExpectedIds = prepareData();
SearchSourceBuilder source = searchSource()
.query(matchAllQuery())
.explain(true);
Set<String> collectedIds = new TreeSet<>();
SearchResponse searchResponse = client().search(searchRequest("test").source(source.from(0).size(60)).searchType(QUERY_THEN_FETCH))
.actionGet();
assertNoFailures(searchResponse);
assertThat(searchResponse.getHits().getTotalHits().value, equalTo(100L));
assertThat(searchResponse.getHits().getHits().length, equalTo(60));
for (int i = 0; i < 60; i++) {
SearchHit hit = searchResponse.getHits().getHits()[i];
collectedIds.add(hit.getId());
}
searchResponse = client().search(searchRequest("test").source(source.from(60).size(60)).searchType(QUERY_THEN_FETCH)).actionGet();
assertNoFailures(searchResponse);
assertThat(searchResponse.getHits().getTotalHits().value, equalTo(100L));
assertThat(searchResponse.getHits().getHits().length, equalTo(40));
for (int i = 0; i < 40; i++) {
SearchHit hit = searchResponse.getHits().getHits()[i];
collectedIds.add(hit.getId());
}
assertThat(collectedIds, equalTo(fullExpectedIds));
}
public void testQueryThenFetchWithSort() throws Exception {
prepareData();
int total = 0;
SearchResponse searchResponse = client().prepareSearch("test").setQuery(termQuery("multi", "test")).setSize(60).setExplain(true)
.addSort("age", SortOrder.ASC).setScroll(TimeValue.timeValueSeconds(30)).get();
while (true) {
assertNoFailures(searchResponse);
assertThat(searchResponse.getHits().getTotalHits().value, equalTo(100L));
SearchHit[] hits = searchResponse.getHits().getHits();
if (hits.length == 0) {
break; // finished
}
for (int i = 0; i < hits.length; ++i) {
SearchHit hit = hits[i];
assertThat(hit.getExplanation(), notNullValue());
assertThat("id[" + hit.getId() + "]", hit.getId(), equalTo(Integer.toString(total + i)));
}
total += hits.length;
searchResponse = client().prepareSearchScroll(searchResponse.getScrollId()).setScroll(TimeValue.timeValueSeconds(30)).get();
}
clearScroll(searchResponse.getScrollId());
assertEquals(100, total);
}
public void testSimpleFacets() throws Exception {
prepareData();
SearchSourceBuilder sourceBuilder = searchSource()
.query(termQuery("multi", "test"))
.from(0).size(20).explain(true)
.aggregation(AggregationBuilders.global("global").subAggregation(
AggregationBuilders.filter("all", termQuery("multi", "test"))))
.aggregation(AggregationBuilders.filter("test1", termQuery("name", "test1")));
SearchResponse searchResponse = client().search(searchRequest("test").source(sourceBuilder)).actionGet();
assertNoFailures(searchResponse);
assertThat(searchResponse.getHits().getTotalHits().value, equalTo(100L));
Global global = searchResponse.getAggregations().get("global");
Filter all = global.getAggregations().get("all");
Filter test1 = searchResponse.getAggregations().get("test1");
assertThat(test1.getDocCount(), equalTo(1L));
assertThat(all.getDocCount(), equalTo(100L));
}
public void testFailedSearchWithWrongQuery() throws Exception {
prepareData();
NumShards test = getNumShards("test");
logger.info("Start Testing failed search with wrong query");
try {
SearchResponse searchResponse = client().search(
searchRequest("test").source(new SearchSourceBuilder().query(new MatchQueryBuilder("foo", "biz")))).actionGet();
assertThat(searchResponse.getTotalShards(), equalTo(test.numPrimaries));
assertThat(searchResponse.getSuccessfulShards(), equalTo(0));
assertThat(searchResponse.getFailedShards(), equalTo(test.numPrimaries));
fail("search should fail");
} catch (ElasticsearchException e) {
assertThat(e.unwrapCause(), instanceOf(SearchPhaseExecutionException.class));
// all is well
}
logger.info("Done Testing failed search");
}
/**
 * A {@code from} offset beyond the number of matching documents is not an
 * error: all shards succeed and the response simply carries an empty hit page.
 * Exercised under both DFS_QUERY_THEN_FETCH and QUERY_THEN_FETCH.
 */
public void testFailedSearchWithWrongFrom() throws Exception {
    prepareData();
    NumShards test = getNumShards("test");
    logger.info("Start Testing failed search with wrong from");
    // 100 documents exist; from=1000 is past the end of the result set.
    SearchSourceBuilder source = searchSource()
            .query(termQuery("multi", "test"))
            .from(1000).size(20).explain(true);
    SearchResponse response = client().search(searchRequest("test").searchType(DFS_QUERY_THEN_FETCH).source(source)).actionGet();
    assertThat(response.getHits().getHits().length, equalTo(0));
    assertThat(response.getTotalShards(), equalTo(test.numPrimaries));
    assertThat(response.getSuccessfulShards(), equalTo(test.numPrimaries));
    assertThat(response.getFailedShards(), equalTo(0));
    response = client().search(searchRequest("test").searchType(QUERY_THEN_FETCH).source(source)).actionGet();
    assertNoFailures(response);
    assertThat(response.getHits().getHits().length, equalTo(0));
    // NOTE(review): the identical DFS_QUERY_THEN_FETCH search is repeated twice
    // below — possibly deliberate (repeated execution), possibly copy-paste;
    // confirm intent before deduplicating.
    response = client().search(searchRequest("test").searchType(DFS_QUERY_THEN_FETCH).source(source)).actionGet();
    assertNoFailures(response);
    assertThat(response.getHits().getHits().length, equalTo(0));
    response = client().search(searchRequest("test").searchType(DFS_QUERY_THEN_FETCH).source(source)).actionGet();
    assertNoFailures(response);
    assertThat(response.getHits().getHits().length, equalTo(0));
    logger.info("Done Testing failed search");
}
/**
 * In a multi-search, one failing sub-request must not poison its siblings:
 * each response slot independently carries either a failure message or hits.
 */
public void testFailedMultiSearchWithWrongQuery() throws Exception {
    prepareData();
    logger.info("Start Testing failed multi search with a wrong query");
    MultiSearchResponse response = client().prepareMultiSearch()
            // Slot 0: invalid query, expected to fail.
            .add(client().prepareSearch("test").setQuery(new MatchQueryBuilder("foo", "biz")))
            // Slot 1: matches a single document (nid:2).
            .add(client().prepareSearch("test").setQuery(QueryBuilders.termQuery("nid", 2)))
            // Slot 2: match-all, capped at the default page size of 10 hits.
            .add(client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()))
            .get();
    assertThat(response.getResponses().length, equalTo(3));
    assertThat(response.getResponses()[0].getFailureMessage(), notNullValue());
    assertThat(response.getResponses()[1].getFailureMessage(), nullValue());
    assertThat(response.getResponses()[1].getResponse().getHits().getHits().length, equalTo(1));
    assertThat(response.getResponses()[2].getFailureMessage(), nullValue());
    assertThat(response.getResponses()[2].getResponse().getHits().getHits().length, equalTo(10));
    logger.info("Done Testing failed search");
}
/**
 * Same isolation guarantee as {@code testFailedMultiSearchWithWrongQuery}, but
 * the failure is triggered at script-compilation time: a function-score query
 * referencing a nonexistent script language/id must fail only its own slot.
 */
public void testFailedMultiSearchWithWrongQueryWithFunctionScore() throws Exception {
    prepareData();
    logger.info("Start Testing failed multi search with a wrong query");
    MultiSearchResponse response = client().prepareMultiSearch()
            // Add custom score query with bogus script
            .add(client().prepareSearch("test").setQuery(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery("nid", 1),
                    new ScriptScoreFunctionBuilder(new Script(ScriptType.INLINE, "bar", "foo", Collections.emptyMap())))))
            .add(client().prepareSearch("test").setQuery(QueryBuilders.termQuery("nid", 2)))
            .add(client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()))
            .get();
    assertThat(response.getResponses().length, equalTo(3));
    assertThat(response.getResponses()[0].getFailureMessage(), notNullValue());
    assertThat(response.getResponses()[1].getFailureMessage(), nullValue());
    assertThat(response.getResponses()[1].getResponse().getHits().getHits().length, equalTo(1));
    assertThat(response.getResponses()[2].getFailureMessage(), nullValue());
    assertThat(response.getResponses()[2].getResponse().getHits().getHits().length, equalTo(10));
    logger.info("Done Testing failed search");
}
}
| apache-2.0 |
clicktravel-james/Cheddar | cheddar/cheddar-rest/src/main/java/com/clicktravel/cheddar/rest/exception/mapper/cdm1/JsonParseExceptionMapper.java | 1551 | /*
* Copyright 2014 Click Travel Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.clicktravel.cheddar.rest.exception.mapper.cdm1;
import static com.clicktravel.cheddar.rest.exception.mapper.cdm1.JsonProcessingExceptionMapperUtils.buildErrorResponse;
import javax.annotation.Priority;
import javax.ws.rs.core.Response;
import javax.ws.rs.ext.ExceptionMapper;
import javax.ws.rs.ext.Provider;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.fasterxml.jackson.core.JsonParseException;
/**
 * Maps Jackson {@link JsonParseException}s raised while deserializing a request
 * body to an HTTP 400 (Bad Request) response carrying a structured error entity.
 */
@Provider
@Priority(Integer.MAX_VALUE)
public class JsonParseExceptionMapper implements ExceptionMapper<JsonParseException> {

    private final Logger logger = LoggerFactory.getLogger(getClass());

    /**
     * @param exception the parse failure raised by Jackson
     * @return a 400 response whose entity describes the malformed JSON
     */
    @Override
    public Response toResponse(final JsonParseException exception) {
        // A parse failure is a client error, so it is only logged at debug level.
        if (logger.isDebugEnabled()) {
            logger.debug(exception.getMessage(), exception);
        }
        return Response.status(Response.Status.BAD_REQUEST)
                .entity(buildErrorResponse(exception))
                .build();
    }
}
| apache-2.0 |
siosio/intellij-community | platform/core-impl/src/com/intellij/psi/impl/PsiParserFacadeImpl.java | 4617 | // Copyright 2000-2021 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package com.intellij.psi.impl;
import com.intellij.lang.ASTFactory;
import com.intellij.lang.Commenter;
import com.intellij.lang.Language;
import com.intellij.lang.LanguageCommenters;
import com.intellij.openapi.fileTypes.LanguageFileType;
import com.intellij.openapi.project.Project;
import com.intellij.psi.*;
import com.intellij.psi.impl.source.DummyHolderFactory;
import com.intellij.psi.impl.source.SourceTreeToPsiMap;
import com.intellij.psi.impl.source.tree.FileElement;
import com.intellij.psi.impl.source.tree.LeafElement;
import com.intellij.psi.impl.source.tree.TreeElement;
import com.intellij.psi.util.PsiTreeUtil;
import com.intellij.util.IncorrectOperationException;
import org.jetbrains.annotations.NonNls;
import org.jetbrains.annotations.NotNull;
/**
 * Default {@link PsiParserFacade}: fabricates detached PSI elements
 * (whitespace and comments) by parsing small synthetic files.
 */
public final class PsiParserFacadeImpl implements PsiParserFacade {
  private final PsiManagerEx myManager;

  public PsiParserFacadeImpl(@NotNull Project project) {
    myManager = PsiManagerEx.getInstanceEx(project);
  }

  /** Creates a detached whitespace leaf holding exactly {@code text}. */
  @Override
  @NotNull
  public PsiElement createWhiteSpaceFromText(@NotNull @NonNls String text) throws IncorrectOperationException {
    FileElement holder = DummyHolderFactory.createHolder(myManager, null).getTreeElement();
    LeafElement whiteSpace = ASTFactory.leaf(TokenType.WHITE_SPACE, holder.getCharTable().intern(text));
    holder.rawAddChildren(whiteSpace);
    GeneratedMarkerVisitor.markGenerated(whiteSpace.getPsi());
    return whiteSpace.getPsi();
  }

  /** Delegates to the {@link Language}-based overload of the same method. */
  @Override
  @NotNull
  public PsiComment createLineCommentFromText(@NotNull LanguageFileType fileType,
                                              @NotNull String text) throws IncorrectOperationException {
    return createLineCommentFromText(fileType.getLanguage(), text);
  }

  /**
   * Parses {@code text} as a line comment of {@code language}.
   *
   * @throws IncorrectOperationException if the language defines no line comment prefix
   */
  @Override
  @NotNull
  public PsiComment createLineCommentFromText(@NotNull final Language language,
                                              @NotNull final String text) throws IncorrectOperationException {
    Commenter commenter = LanguageCommenters.INSTANCE.forLanguage(language);
    assert commenter != null;
    String linePrefix = commenter.getLineCommentPrefix();
    if (linePrefix == null) {
      throw new IncorrectOperationException("No line comment prefix defined for language " + language.getID());
    }
    return findPsiCommentChild(createDummyFile(language, linePrefix + text));
  }

  /** Parses {@code text} wrapped in the language's block comment delimiters. */
  @NotNull
  @Override
  public PsiComment createBlockCommentFromText(@NotNull Language language,
                                               @NotNull String text) throws IncorrectOperationException {
    Commenter commenter = LanguageCommenters.INSTANCE.forLanguage(language);
    assert commenter != null : language;
    String blockPrefix = commenter.getBlockCommentPrefix();
    String blockSuffix = commenter.getBlockCommentSuffix();
    assert blockPrefix != null && blockSuffix != null;
    return findPsiCommentChild(createDummyFile(language, blockPrefix + text + blockSuffix));
  }

  /** Prefers a line comment when the language has one, otherwise a block comment. */
  @Override
  @NotNull
  public PsiComment createLineOrBlockCommentFromText(@NotNull Language language,
                                                     @NotNull String text) throws IncorrectOperationException {
    Commenter commenter = LanguageCommenters.INSTANCE.forLanguage(language);
    assert commenter != null : language;
    String linePrefix = commenter.getLineCommentPrefix();
    String blockPrefix = commenter.getBlockCommentPrefix();
    String blockSuffix = commenter.getBlockCommentSuffix();
    assert linePrefix != null || (blockPrefix != null && blockSuffix != null);
    String commentText = linePrefix != null ? linePrefix + text : blockPrefix + text + blockSuffix;
    return findPsiCommentChild(createDummyFile(language, commentText));
  }

  /** Extracts the first comment parsed from {@code file}, re-homing it in a dummy holder. */
  private PsiComment findPsiCommentChild(PsiFile file) {
    PsiComment comment = PsiTreeUtil.findChildOfType(file, PsiComment.class);
    if (comment == null) {
      throw new IncorrectOperationException("Incorrect comment \"" + file.getText() + "\".");
    }
    DummyHolderFactory.createHolder(myManager, (TreeElement)SourceTreeToPsiMap.psiElementToTree(comment), null);
    return comment;
  }

  /** Parses {@code text} as a throwaway file of the given language. */
  private PsiFile createDummyFile(Language language, String text) {
    return PsiFileFactory.getInstance(myManager.getProject()).createFileFromText("_Dummy_", language, text);
  }
}
| apache-2.0 |
jbonofre/beam | sdks/java/core/src/test/java/org/apache/beam/sdk/values/TypeDescriptorsTest.java | 5295 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.beam.sdk.values;
import static org.apache.beam.sdk.values.TypeDescriptors.integers;
import static org.apache.beam.sdk.values.TypeDescriptors.iterables;
import static org.apache.beam.sdk.values.TypeDescriptors.kvs;
import static org.apache.beam.sdk.values.TypeDescriptors.lists;
import static org.apache.beam.sdk.values.TypeDescriptors.sets;
import static org.apache.beam.sdk.values.TypeDescriptors.strings;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertThat;
import java.util.List;
import java.util.Set;
import org.hamcrest.CoreMatchers;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
/**
 * Tests for {@link TypeDescriptors}.
 */
@RunWith(JUnit4.class)
public class TypeDescriptorsTest {
    // The anonymous subclasses ("new TypeDescriptor<...>() {}") are essential:
    // they capture the generic type argument via reflection, which a plain
    // instantiation would erase.
    @Test
    public void testTypeDescriptorsIterables() throws Exception {
        TypeDescriptor<Iterable<String>> descriptor = iterables(strings());
        assertEquals(descriptor, new TypeDescriptor<Iterable<String>>() {});
    }
    @Test
    public void testTypeDescriptorsSets() throws Exception {
        TypeDescriptor<Set<String>> descriptor = sets(strings());
        assertEquals(descriptor, new TypeDescriptor<Set<String>>() {});
    }
    @Test
    public void testTypeDescriptorsKV() throws Exception {
        TypeDescriptor<KV<String, Integer>> descriptor =
            kvs(strings(), integers());
        assertEquals(descriptor, new TypeDescriptor<KV<String, Integer>>() {});
    }
    @Test
    public void testTypeDescriptorsLists() throws Exception {
        TypeDescriptor<List<String>> descriptor = lists(strings());
        assertEquals(descriptor, new TypeDescriptor<List<String>>() {});
        // Element type must participate in equality.
        assertNotEquals(descriptor, new TypeDescriptor<List<Boolean>>() {});
    }
    @Test
    public void testTypeDescriptorsListsOfLists() throws Exception {
        TypeDescriptor<List<List<String>>> descriptor = lists(lists(strings()));
        assertEquals(descriptor, new TypeDescriptor<List<List<String>>>() {});
        assertNotEquals(descriptor, new TypeDescriptor<List<String>>() {});
        assertNotEquals(descriptor, new TypeDescriptor<List<Boolean>>() {});
    }
    // Fixture: a two-parameter generic interface plus helpers that recover its
    // type arguments through TypeDescriptors.extractFromTypeParameters.
    private interface Generic<FooT, BarT> {}
    // Returns an instance whose FooT argument is erased (unknown at runtime).
    private static <ActualFooT> Generic<ActualFooT, String> typeErasedGeneric() {
        return new Generic<ActualFooT, String>() {};
    }
    // Recovers the first type argument (FooT) of a Generic instance.
    private static <ActualFooT, ActualBarT> TypeDescriptor<ActualFooT> extractFooT(
        Generic<ActualFooT, ActualBarT> instance) {
        return TypeDescriptors.extractFromTypeParameters(
            instance,
            Generic.class,
            new TypeDescriptors.TypeVariableExtractor<
                Generic<ActualFooT, ActualBarT>, ActualFooT>() {});
    }
    // Recovers the second type argument (BarT) of a Generic instance.
    private static <ActualFooT, ActualBarT> TypeDescriptor<ActualBarT> extractBarT(
        Generic<ActualFooT, ActualBarT> instance) {
        return TypeDescriptors.extractFromTypeParameters(
            instance,
            Generic.class,
            new TypeDescriptors.TypeVariableExtractor<
                Generic<ActualFooT, ActualBarT>, ActualBarT>() {});
    }
    // Recovers both type arguments combined into a KV descriptor.
    private static <ActualFooT, ActualBarT> TypeDescriptor<KV<ActualFooT, ActualBarT>> extractKV(
        Generic<ActualFooT, ActualBarT> instance) {
        return TypeDescriptors.extractFromTypeParameters(
            instance,
            Generic.class,
            new TypeDescriptors.TypeVariableExtractor<
                Generic<ActualFooT, ActualBarT>, KV<ActualFooT, ActualBarT>>() {});
    }
    @Test
    public void testTypeDescriptorsTypeParameterOf() throws Exception {
        assertEquals(strings(), extractFooT(new Generic<String, Integer>() {}));
        assertEquals(integers(), extractBarT(new Generic<String, Integer>() {}));
        assertEquals(kvs(strings(), integers()), extractKV(new Generic<String, Integer>() {}));
    }
    @Test
    public void testTypeDescriptorsTypeParameterOfErased() throws Exception {
        Generic<Integer, String> instance = TypeDescriptorsTest.typeErasedGeneric();
        TypeDescriptor<Integer> fooT = extractFooT(instance);
        assertNotNull(fooT);
        // Using toString() assertions because verifying the contents of a Type is very cumbersome,
        // and the expected types can not be easily constructed directly.
        assertEquals("ActualFooT", fooT.toString());
        assertEquals(strings(), extractBarT(instance));
        TypeDescriptor<KV<Integer, String>> kvT = extractKV(instance);
        assertNotNull(kvT);
        assertThat(kvT.toString(), CoreMatchers.containsString("KV<ActualFooT, java.lang.String>"));
    }
}
| apache-2.0 |
neeraj9/otp | erts/emulator/beam/erl_goodfit_alloc.c | 17740 | /*
* %CopyrightBegin%
*
* Copyright Ericsson AB 2003-2016. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* %CopyrightEnd%
*/
/*
* Description: A "good fit" allocator. Segregated free-lists with a
* maximum search depth are used in order to find a good
* fit fast. Each free-list contains blocks of sizes in a
* specific range. First the free-list
* covering the desired size is searched if it is not empty.
* This search is stopped when the maximum search depth has
* been reached. If no free block was found in the free-list
* covering the desired size, the next non-empty free-list
* covering larger sizes is searched. The maximum search
* depth is by default 3. The insert and delete operations
* are O(1) and the search operation is O(n) where n is the
* maximum search depth, i.e. by default the all operations
* are O(1).
*
* This module is a callback-module for erl_alloc_util.c
*
* Author: Rickard Green
*/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include "global.h"
#define GET_ERL_GF_ALLOC_IMPL
#include "erl_goodfit_alloc.h"
/* Minimum multi-block carrier sizes (bytes). */
#define MIN_MBC_SZ (16*1024)
#define MIN_MBC_FIRST_FREE_SZ (4*1024)

/* A bucket index is split into a sub-mask index (high bits, selecting a word
   of the two-level bucket bit mask) and a sub-bucket index (low bits,
   selecting a bit within that word). */
#define MAX_SUB_MASK_IX \
    ((((UWord)1) << (NO_OF_BKT_IX_BITS - SUB_MASK_IX_SHIFT)) - 1)
#define MAX_SUB_BKT_IX ((((UWord)1) << SUB_MASK_IX_SHIFT) - 1)
#define MAX_BKT_IX (NO_OF_BKTS - 1)

#define MIN_BLK_SZ UNIT_CEILING(sizeof(GFFreeBlock_t) + sizeof(UWord))

/* Split/join between a full bucket index and its (sub-mask, sub-bucket) parts. */
#define IX2SBIX(IX) ((IX) & (~(~((UWord)0) << SUB_MASK_IX_SHIFT)))
#define IX2SMIX(IX) ((IX) >> SUB_MASK_IX_SHIFT)
#define MAKE_BKT_IX(SMIX, SBIX) \
    ((((UWord)(SMIX)) << SUB_MASK_IX_SHIFT) | ((UWord)(SBIX)))

/* Set/clear bucket IX's bit in the two-level mask BM; the main word tracks
   which sub words are non-empty. */
#define SET_BKT_MASK_IX(BM, IX) \
do { \
    int sub_mask_ix__ = IX2SMIX((IX)); \
    (BM).main |= (((UWord) 1) << sub_mask_ix__); \
    (BM).sub[sub_mask_ix__] |= (((UWord)1) << IX2SBIX((IX))); \
} while (0)

#define UNSET_BKT_MASK_IX(BM, IX) \
do { \
    int sub_mask_ix__ = IX2SMIX((IX)); \
    (BM).sub[sub_mask_ix__] &= ~(((UWord)1) << IX2SBIX((IX))); \
    if (!(BM).sub[sub_mask_ix__]) \
        (BM).main &= ~(((UWord)1) << sub_mask_ix__); \
} while (0)

/* Buckets ...
 *
 * The size range is divided into four regions A-D, each owning a quarter of
 * the buckets. Bucket granularity (interval) grows per region; region D's
 * interval is computed at start time from the singleblock carrier threshold. */
#define BKT_INTRVL_A (1*sizeof(Unit_t))
#define BKT_INTRVL_B (16*sizeof(Unit_t))
#define BKT_INTRVL_C (96*sizeof(Unit_t))

#define BKT_MIN_SIZE_A MIN_BLK_SZ
#define BKT_MIN_SIZE_B (BKT_MAX_SIZE_A + 1)
#define BKT_MIN_SIZE_C (BKT_MAX_SIZE_B + 1)
#define BKT_MIN_SIZE_D (BKT_MAX_SIZE_C + 1)

#define BKT_MAX_SIZE_A ((NO_OF_BKTS/4)*BKT_INTRVL_A+BKT_MIN_SIZE_A-1)
#define BKT_MAX_SIZE_B ((NO_OF_BKTS/4)*BKT_INTRVL_B+BKT_MIN_SIZE_B-1)
#define BKT_MAX_SIZE_C ((NO_OF_BKTS/4)*BKT_INTRVL_C+BKT_MIN_SIZE_C-1)

#define BKT_MAX_IX_A ((NO_OF_BKTS*1)/4 - 1)
#define BKT_MAX_IX_B ((NO_OF_BKTS*2)/4 - 1)
#define BKT_MAX_IX_C ((NO_OF_BKTS*3)/4 - 1)
#define BKT_MAX_IX_D ((NO_OF_BKTS*4)/4 - 1)

#define BKT_MIN_IX_A (0)
#define BKT_MIN_IX_B (BKT_MAX_IX_A + 1)
#define BKT_MIN_IX_C (BKT_MAX_IX_B + 1)
#define BKT_MIN_IX_D (BKT_MAX_IX_C + 1)

/* Map a block size to its bucket index; sizes beyond region D collapse into
   the last bucket. */
#define BKT_IX_(BAP, SZ) \
    ((SZ) <= BKT_MAX_SIZE_A \
     ? (((SZ) - BKT_MIN_SIZE_A)/BKT_INTRVL_A + BKT_MIN_IX_A) \
     : ((SZ) <= BKT_MAX_SIZE_B \
        ? (((SZ) - BKT_MIN_SIZE_B)/BKT_INTRVL_B + BKT_MIN_IX_B) \
        : ((SZ) <= BKT_MAX_SIZE_C \
           ? (((SZ) - BKT_MIN_SIZE_C)/BKT_INTRVL_C + BKT_MIN_IX_C) \
           : ((SZ) <= (BAP)->bkt_max_size_d \
              ? (((SZ) - BKT_MIN_SIZE_D)/(BAP)->bkt_intrvl_d + BKT_MIN_IX_D)\
              : (NO_OF_BKTS - 1)))))

/* Inverse mapping: smallest block size belonging to bucket IX. */
#define BKT_MIN_SZ_(BAP, IX) \
    ((IX) <= BKT_MAX_IX_A \
     ? (((IX) - BKT_MIN_IX_A)*BKT_INTRVL_A + BKT_MIN_SIZE_A) \
     : ((IX) <= BKT_MAX_IX_B \
        ? (((IX) - BKT_MIN_IX_B)*BKT_INTRVL_B + BKT_MIN_SIZE_B) \
        : ((IX) <= BKT_MAX_IX_C \
           ? (((IX) - BKT_MIN_IX_C)*BKT_INTRVL_C + BKT_MIN_SIZE_C) \
           : (((IX) - BKT_MIN_IX_D)*(BAP)->bkt_intrvl_d + BKT_MIN_SIZE_D))))
/* In debug builds the raw mapping macros are wrapped in range-checking
   functions; release builds use the macros directly. */
#ifdef DEBUG
static int
BKT_IX(GFAllctr_t *gfallctr, Uint size)
{
    int ix;
    ASSERT(size >= MIN_BLK_SZ);
    ix = BKT_IX_(gfallctr, size);
    ASSERT(0 <= ix && ix <= BKT_MAX_IX_D);
    return ix;
}
static Uint
BKT_MIN_SZ(GFAllctr_t *gfallctr, int ix)
{
    Uint size;
    ASSERT(0 <= ix && ix <= BKT_MAX_IX_D);
    size = BKT_MIN_SZ_(gfallctr, ix);
#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
    /* Round-trip check: the bucket's minimum size maps back to the bucket,
       and one byte less maps to the previous bucket. */
    ASSERT(ix == BKT_IX(gfallctr, size));
    ASSERT(size == MIN_BLK_SZ || ix - 1 == BKT_IX(gfallctr, size - 1));
#endif
    return size;
}
#else
#define BKT_IX BKT_IX_
#define BKT_MIN_SZ BKT_MIN_SZ_
#endif
/* Prototypes of callback functions installed into the generic allocator
   framework (erl_alloc_util) by erts_gfalc_start(). */
static Block_t *	get_free_block		(Allctr_t *, Uint,
						 Block_t *, Uint);
static void		link_free_block		(Allctr_t *, Block_t *);
static void		unlink_free_block	(Allctr_t *, Block_t *);
static void		update_last_aux_mbc	(Allctr_t *, Carrier_t *);
static Eterm		info_options		(Allctr_t *, char *, int *,
						 void *, Uint **, Uint *);
static void		init_atoms		(void);

#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
static void		check_block		(Allctr_t *, Block_t *, int);
static void		check_mbc		(Allctr_t *, Carrier_t *);
#endif

/* Non-zero once init_atoms() has run; reset by erts_gfalc_init(). */
static int atoms_initialized = 0;
/* Module (re)initialization: forces the atom table to be rebuilt by the
   next init_atoms() call. */
void
erts_gfalc_init(void)
{
    atoms_initialized = 0;
}
/* Initializes a goodfit allocator instance in caller-provided storage and
   hands it over to the generic allocator framework.
   Returns the instance as an Allctr_t*, or NULL on failure. */
Allctr_t *
erts_gfalc_start(GFAllctr_t *gfallctr,
		 GFAllctrInit_t *gfinit,
		 AllctrInit_t *init)
{
    struct {
	int dummy;
	GFAllctr_t allctr;
    } zero = {0};
    /* The struct with a dummy element first is used in order to avoid (an
       incorrect) gcc warning. gcc warns if {0} is used as initializer of
       a struct when the first member is a struct (not if, for example,
       the third member is a struct). */

    Allctr_t *allctr = (Allctr_t *) gfallctr;

    /* Zero the whole instance via the zero-initialized template. */
    sys_memcpy((void *) gfallctr, (void *) &zero.allctr, sizeof(GFAllctr_t));

    allctr->mbc_header_size		= sizeof(Carrier_t);
    allctr->min_mbc_size		= MIN_MBC_SZ;
    allctr->min_mbc_first_free_size	= MIN_MBC_FIRST_FREE_SZ;
    allctr->min_block_size		= sizeof(GFFreeBlock_t);

    allctr->vsn_str			= ERTS_ALC_GF_ALLOC_VSN_STR;

    /* Callback functions */

    allctr->get_free_block		= get_free_block;
    allctr->link_free_block		= link_free_block;
    allctr->unlink_free_block		= unlink_free_block;
    allctr->info_options		= info_options;

    allctr->get_next_mbc_size		= NULL;
    allctr->creating_mbc		= update_last_aux_mbc;
    allctr->destroying_mbc		= update_last_aux_mbc;
    allctr->add_mbc			= NULL;
    allctr->remove_mbc			= NULL;
    allctr->largest_fblk_in_mbc		= NULL;
    allctr->init_atoms			= init_atoms;

#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
    allctr->check_block			= check_block;
    allctr->check_mbc			= check_mbc;
#endif

    allctr->atoms_initialized		= 0;

    /* Derive region D's bucket interval from the singleblock carrier
       threshold so the D buckets span sizes up to roughly that limit. */
    if (init->sbct > BKT_MIN_SIZE_D-1)
	gfallctr->bkt_intrvl_d =
	    UNIT_CEILING(((3*(init->sbct - BKT_MIN_SIZE_D - 1)
			   /(NO_OF_BKTS/4 - 1)) + 1)
			 / 2);
    if (gfallctr->bkt_intrvl_d < BKT_INTRVL_C)
	gfallctr->bkt_intrvl_d = BKT_INTRVL_C;
    gfallctr->bkt_max_size_d = ((NO_OF_BKTS/4)*gfallctr->bkt_intrvl_d
				+ BKT_MIN_SIZE_D
				- 1);

    /* Maximum free-list search depth (the "mbsd" parameter); at least 1. */
    gfallctr->max_blk_search		= (Uint) MAX(1, gfinit->mbsd);

    if (!erts_alcu_start(allctr, init))
	return NULL;

    /* Sanity: the framework must agree with our minimum block size. */
    if (allctr->min_block_size != MIN_BLK_SZ)
	return NULL;

    return allctr;
}
/* Returns the index of the first non-empty bucket >= min_index, or -1 if
   none exists. Uses the two-level bit mask: first locate the lowest set bit
   in the main word (one bit per sub word), then the lowest set bit in that
   sub word, each via a binary search over bit positions. */
static int
find_bucket(BucketMask_t *bmask, int min_index)
{
    int min, mid, max;
    int sub_mask_ix, sub_bkt_ix;
    int ix = -1;

/* Binary search for the lowest set bit of BitMask in positions [Min, Max];
   BitMask is known to have a set bit in that range. Result left in MinBit
   (and in `min`). */
#undef  GET_MIN_BIT
#define GET_MIN_BIT(MinBit, BitMask, Min, Max)		\
    min = (Min);					\
    max = (Max);					\
    while(max != min) {					\
	mid = ((max - min) >> 1) + min;			\
	if((BitMask)					\
	   & (~(~((UWord) 0) << (mid + 1)))		\
	   & (~((UWord) 0) << min))			\
	    max = mid;					\
	else						\
	    min = mid + 1;				\
    }							\
    (MinBit) = min

    ASSERT(bmask->main < (((UWord) 1) << (MAX_SUB_MASK_IX+1)));

    sub_mask_ix = IX2SMIX(min_index);

    /* No sub word at or above min_index is non-empty -> nothing to find. */
    if ((bmask->main & (~((UWord) 0) << sub_mask_ix)) == 0)
	return -1;

    /* There exists a non empty bucket; find it... */

    if (bmask->main & (((UWord) 1) << sub_mask_ix)) {
	/* min_index's own sub word is non-empty; try bits >= min_index in it. */
	sub_bkt_ix = IX2SBIX(min_index);
	if ((bmask->sub[sub_mask_ix] & (~((UWord) 0) << sub_bkt_ix)) == 0) {
	    /* Only lower bits set in this sub word; move to the next one. */
	    sub_mask_ix++;
	    sub_bkt_ix = 0;
	    if ((bmask->main & (~((UWord) 0)<< sub_mask_ix)) == 0)
		return -1;
	}
	else
	    goto find_sub_bkt_ix;
    }
    else {
	sub_mask_ix++;
	sub_bkt_ix = 0;
    }

    ASSERT(sub_mask_ix <= MAX_SUB_MASK_IX);
    /* Has to be a bit > sub_mask_ix */
    ASSERT(bmask->main & (~((UWord) 0) << (sub_mask_ix)));
    GET_MIN_BIT(sub_mask_ix, bmask->main, sub_mask_ix, MAX_SUB_MASK_IX);

 find_sub_bkt_ix:
    ASSERT(sub_mask_ix <= MAX_SUB_MASK_IX);
    ASSERT(sub_bkt_ix <= MAX_SUB_BKT_IX);

    if ((bmask->sub[sub_mask_ix] & (((UWord) 1) << sub_bkt_ix)) == 0) {
	/* NOTE(review): the bound below compares against MAX_SUB_BKT_IX while
	   the incremented quantity is a sub *mask* index — looks like it was
	   meant to be MAX_SUB_MASK_IX; confirm upstream before changing. */
	ASSERT(sub_mask_ix + 1 <= MAX_SUB_BKT_IX);
	/* Has to be a bit > sub_bkt_ix */
	ASSERT(bmask->sub[sub_mask_ix] & (~((UWord) 0) << sub_bkt_ix));
	GET_MIN_BIT(sub_bkt_ix,
		    bmask->sub[sub_mask_ix],
		    sub_bkt_ix+1,
		    MAX_SUB_BKT_IX);
	ASSERT(sub_bkt_ix <= MAX_SUB_BKT_IX);
    }

    ix = MAKE_BKT_IX(sub_mask_ix, sub_bkt_ix);

    ASSERT(0 <= ix && ix < NO_OF_BKTS);

    return ix;

#undef GET_MIN_BIT
}
/* Scans bucket ix for a free block of at least `size` bytes, looking at most
   max_blk_search blocks deep. Returns an exact fit immediately; otherwise
   returns the best candidate seen (NULL if none fits). Blocks residing in
   the last auxiliary multi-block carrier ("lambc") are deprioritized so that
   carrier can be emptied and eventually destroyed. */
static Block_t *
search_bucket(Allctr_t *allctr, int ix, Uint size)
{
    int i;
    Uint min_sz;
    Uint blk_sz;
    Uint cand_sz = 0;
    UWord max_blk_search;
    GFFreeBlock_t *blk;
    GFFreeBlock_t *cand = NULL;
    int blk_on_lambc;
    int cand_on_lambc = 0;
    GFAllctr_t *gfallctr = (GFAllctr_t *) allctr;

    ASSERT(0 <= ix && ix <= NO_OF_BKTS - 1);

    if (!gfallctr->buckets[ix])
	return NULL;

    /* The effective minimum is the larger of the request and the bucket's
       lower bound (a later bucket may hold blocks smaller than `size`... no —
       blocks >= its lower bound; the max() guards the same-bucket case). */
    min_sz = BKT_MIN_SZ(gfallctr, ix);
    if (min_sz < size)
	min_sz = size;

    max_blk_search = gfallctr->max_blk_search;
    for (blk = gfallctr->buckets[ix], i = 0;
	 blk && i < max_blk_search;
	 blk = blk->next, i++) {
	blk_sz = MBC_FBLK_SZ(&blk->block_head);
	blk_on_lambc = (((char *) blk) < gfallctr->last_aux_mbc_end
			&& gfallctr->last_aux_mbc_start <= ((char *) blk));

	/* Perfect fit off the deprioritized carrier: stop searching. */
	if (blk_sz == min_sz && !blk_on_lambc)
	    return (Block_t *) blk;

	/* Otherwise keep the smallest fitting block, preferring blocks that
	   are NOT on the last auxiliary carrier. */
	if (blk_sz >= min_sz
	    && (!cand
		|| (!blk_on_lambc && (cand_on_lambc || blk_sz < cand_sz))
		|| (blk_on_lambc && cand_on_lambc && blk_sz < cand_sz))) {
	    cand_sz = blk_sz;
	    cand = blk;
	    cand_on_lambc = blk_on_lambc;
	}
    }
    return (Block_t *) cand;
}
/* Framework callback: find, unlink and return a free block of at least
   `size` bytes, or NULL. If cand_blk is non-NULL it is a candidate found by
   the caller (of size cand_size); NULL is returned when that candidate is at
   least as good as anything found here. */
static Block_t *
get_free_block(Allctr_t *allctr, Uint size,
	       Block_t *cand_blk, Uint cand_size)
{
    GFAllctr_t *gfallctr = (GFAllctr_t *) allctr;
    int unsafe_bi, min_bi;
    Block_t *blk;

    ASSERT(!cand_blk || cand_size >= size);

    /* The bucket that `size` itself maps to may contain blocks smaller than
       the request (hence "unsafe"); any later bucket is guaranteed to fit. */
    unsafe_bi = BKT_IX(gfallctr, size);
    
    min_bi = find_bucket(&gfallctr->bucket_mask, unsafe_bi);
    if (min_bi < 0)
	return NULL;

    if (min_bi == unsafe_bi) {
	/* Bounded-depth search of the unsafe bucket; may miss a fit. */
	blk = search_bucket(allctr, min_bi, size);
	if (blk) {
	    if (cand_blk && cand_size <= MBC_FBLK_SZ(blk))
		return NULL; /* cand_blk was better */
	    unlink_free_block(allctr, blk);
	    return blk;
	}
	if (min_bi < NO_OF_BKTS - 1) {
	    min_bi = find_bucket(&gfallctr->bucket_mask, min_bi + 1);
	    if (min_bi < 0)
		return NULL;
	}
	else
	    return NULL;
    }
    else {
	ASSERT(min_bi > unsafe_bi);
    }

    /* We are guaranteed to find a block that fits in this bucket */
    blk = search_bucket(allctr, min_bi, size);
    ASSERT(blk);
    if (cand_blk && cand_size <= MBC_FBLK_SZ(blk))
	return NULL; /* cand_blk was better */
    unlink_free_block(allctr, blk);
    return blk;
}
/* Framework callback: insert a freed block at the head of the free list of
   the bucket its size maps to, and mark that bucket non-empty in the
   two-level bucket bit mask. */
static void
link_free_block(Allctr_t *allctr, Block_t *block)
{
    GFAllctr_t *gfa = (GFAllctr_t *) allctr;
    GFFreeBlock_t *fblk = (GFFreeBlock_t *) block;
    Uint blk_sz = MBC_FBLK_SZ(&fblk->block_head);
    int bkt_ix = BKT_IX(gfa, blk_sz);
    GFFreeBlock_t *old_head = gfa->buckets[bkt_ix];

    ASSERT(blk_sz >= MIN_BLK_SZ);

    SET_BKT_MASK_IX(gfa->bucket_mask, bkt_ix);

    /* Push onto the doubly linked free list. */
    fblk->prev = NULL;
    fblk->next = old_head;
    if (old_head) {
	ASSERT(!old_head->prev);
	old_head->prev = fblk;
    }
    gfa->buckets[bkt_ix] = fblk;
}
/* Framework callback: remove a block from its bucket's doubly linked free
   list; clear the bucket's bit in the mask if the list becomes empty. */
static void
unlink_free_block(Allctr_t *allctr, Block_t *block)
{
    GFAllctr_t *gfa = (GFAllctr_t *) allctr;
    GFFreeBlock_t *fblk = (GFFreeBlock_t *) block;
    Uint blk_sz = MBC_FBLK_SZ(&fblk->block_head);
    int bkt_ix = BKT_IX(gfa, blk_sz);
    GFFreeBlock_t *pred = fblk->prev;
    GFFreeBlock_t *succ = fblk->next;

    if (pred)
	pred->next = succ;
    else {
	/* No predecessor: fblk must be the list head. */
	ASSERT(gfa->buckets[bkt_ix] == fblk);
	gfa->buckets[bkt_ix] = succ;
    }
    if (succ)
	succ->prev = pred;

    if (!gfa->buckets[bkt_ix])
	UNSET_BKT_MASK_IX(gfa->bucket_mask, bkt_ix);
}
/* Framework callback (invoked on carrier creation/destruction): refresh the
   cached [start, end) address range of the last auxiliary multi-block
   carrier, used by search_bucket() to deprioritize blocks living there.
   The range is cleared when no auxiliary carrier exists (list empty, or the
   last carrier is the main carrier). */
static void
update_last_aux_mbc(Allctr_t *allctr, Carrier_t *mbc)
{
    GFAllctr_t *gfa = (GFAllctr_t *) allctr;
    Carrier_t *last_mbc = allctr->mbc_list.last;

    if (gfa->last_aux_mbc_start == (char *) last_mbc)
	return; /* cached range already up to date */

    if (last_mbc && allctr->main_carrier != last_mbc) {
	gfa->last_aux_mbc_start = (char *) last_mbc;
	gfa->last_aux_mbc_end = ((char *) last_mbc) + CARRIER_SZ(last_mbc);
    }
    else {
	gfa->last_aux_mbc_start = NULL;
	gfa->last_aux_mbc_end = NULL;
    }
}
/* Atoms used when reporting allocator options; in debug builds the sentinel
   member allows iterating over all atom fields. */
static struct {
    Eterm mbsd;
    Eterm as;
    Eterm gf;
#ifdef DEBUG
    Eterm end_of_atoms;
#endif
} am;

/* Interns `name` into the atom table and stores the atom in *atom. */
static void ERTS_INLINE atom_init(Eterm *atom, char *name)
{
    *atom = am_atom_put(name, strlen(name));
}
/* Initializes am.AM with the atom named after the field itself. */
#define AM_INIT(AM) atom_init(&am.AM, #AM)
/* One-shot initialization of the atoms in `am`; subsequent calls return
   immediately. In debug builds all fields are poisoned first and verified
   afterwards so a newly added field cannot be forgotten here. */
static void
init_atoms(void)
{
#ifdef DEBUG
    Eterm *atom;
#endif

    if (atoms_initialized)
	return;

#ifdef DEBUG
    for (atom = (Eterm *) &am; atom <= &am.end_of_atoms; atom++) {
	*atom = THE_NON_VALUE;
    }
#endif

    AM_INIT(mbsd);
    AM_INIT(as);
    AM_INIT(gf);

#ifdef DEBUG
    for (atom = (Eterm *) &am; atom < &am.end_of_atoms; atom++) {
	ASSERT(*atom != THE_NON_VALUE);
    }
#endif

    atoms_initialized = 1;
}
/* Short aliases for the term builders used below. */
#define bld_uint	erts_bld_uint
#define bld_cons	erts_bld_cons
#define bld_tuple	erts_bld_tuple

/* Prepends the 2-tuple {el1, el2} to the list *lp (two-pass build: sizing
   when szp is set, construction when hpp is set). */
static ERTS_INLINE void
add_2tup(Uint **hpp, Uint *szp, Eterm *lp, Eterm el1, Eterm el2)
{
    *lp = bld_cons(hpp, szp, bld_tuple(hpp, szp, 2, el1, el2), *lp);
}
/* Framework callback: report this allocator's options, either printed
   (when print_to_p is set) and/or as an Erlang term list built via the
   two-pass hpp/szp protocol. Returns the term (THE_NON_VALUE if no term
   was requested). */
static Eterm
info_options(Allctr_t *allctr,
	     char *prefix,
	     int *print_to_p,
	     void *print_to_arg,
	     Uint **hpp,
	     Uint *szp)
{
    GFAllctr_t *gfallctr = (GFAllctr_t *) allctr;
    Eterm res = THE_NON_VALUE;

    if (print_to_p) {
	erts_print(*print_to_p,
		   print_to_arg,
		   "%smbsd: %lu\n"
		   "%sas: gf\n",
		   prefix, gfallctr->max_blk_search,
		   prefix);
    }

    if (hpp || szp) {
	/* Term building requires the atoms; the framework must have called
	   the init_atoms callback first. */
	if (!atoms_initialized)
	    erts_exit(ERTS_ERROR_EXIT, "%s:%d: Internal error: Atoms not initialized",
		     __FILE__, __LINE__);

	res = NIL;
	add_2tup(hpp, szp, &res, am.as, am.gf);
	add_2tup(hpp, szp, &res,
		 am.mbsd,
		 bld_uint(hpp, szp, gfallctr->max_blk_search));
    }

    return res;
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
 * NOTE: erts_gfalc_test() is only supposed to be used for testing.           *
 *                                                                            *
 * Keep alloc_SUITE_data/allocator_test.h updated if changes are made         *
 * to erts_gfalc_test()                                                       *
\* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/* Test hook: op selects an internal function/constant to expose; a1/a2 are
   its (type-punned) arguments. */
UWord
erts_gfalc_test(UWord op, UWord a1, UWord a2)
{
    switch (op) {
    case 0x100:	return (UWord) BKT_IX((GFAllctr_t *) a1, (Uint) a2);
    case 0x101:	return (UWord) BKT_MIN_SZ((GFAllctr_t *) a1, (int) a2);
    case 0x102:	return (UWord) NO_OF_BKTS;
    case 0x103:	return (UWord)
		    find_bucket(&((GFAllctr_t *) a1)->bucket_mask, (int) a2);
    default:	ASSERT(0); return ~((UWord) 0);
    }
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
 * Debug functions                                                            *
\* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
/* Invariant check for a single block: a free block must appear exactly once,
   in the bucket its size maps to (with the bucket's mask bits set); an
   allocated block must appear in no free list at all. */
void
check_block(Allctr_t *allctr, Block_t * blk, int free_block)
{
    GFAllctr_t *gfallctr = (GFAllctr_t *) allctr;
    int i;
    int bi;
    int found;
    GFFreeBlock_t *fblk;

    if(free_block) {
	Uint blk_sz = is_sbc_blk(blk) ? SBC_BLK_SZ(blk) : MBC_BLK_SZ(blk);
	bi = BKT_IX(gfallctr, blk_sz);

	ASSERT(gfallctr->bucket_mask.main & (((UWord) 1) << IX2SMIX(bi)));
	ASSERT(gfallctr->bucket_mask.sub[IX2SMIX(bi)]
	       & (((UWord) 1) << IX2SBIX(bi)));

	found = 0;
	for (fblk = gfallctr->buckets[bi]; fblk; fblk = fblk->next)
	    if (blk == (Block_t *) fblk)
		found++;
	ASSERT(found == 1);
    }
    else
	bi = -1;

    /* The block must not occur in any other bucket. */
    found = 0;
    for (i = 0; i < NO_OF_BKTS; i++) {
	if (i == bi)
	    continue; /* Already checked */
	for (fblk = gfallctr->buckets[i]; fblk; fblk = fblk->next)
	    if (blk == (Block_t *) fblk)
		found++;
    }
    ASSERT(found == 0);
}

/* Invariant check for the bucket mask: a bucket's mask bits are set iff its
   free list is non-empty. (The mbc argument is unused here.) */
void
check_mbc(Allctr_t *allctr, Carrier_t *mbc)
{
    GFAllctr_t *gfallctr = (GFAllctr_t *) allctr;
    int bi;

    for(bi = 0; bi < NO_OF_BKTS; bi++) {
	if ((gfallctr->bucket_mask.main & (((UWord) 1) << IX2SMIX(bi)))
	    && (gfallctr->bucket_mask.sub[IX2SMIX(bi)]
		& (((UWord) 1) << IX2SBIX(bi)))) {
	    ASSERT(gfallctr->buckets[bi] != NULL);
	}
	else {
	    ASSERT(gfallctr->buckets[bi] == NULL);
	}
    }
}

#endif
Zhengzl15/onos-securearp | core/api/src/main/java/org/onosproject/net/intent/MplsPathIntent.java | 5350 | /*
* Copyright 2015 Open Networking Laboratory
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.onosproject.net.intent;
import java.util.List;
import java.util.Optional;
import com.google.common.annotations.Beta;
import org.onlab.packet.MplsLabel;
import org.onosproject.core.ApplicationId;
import org.onosproject.net.Path;
import org.onosproject.net.flow.TrafficSelector;
import org.onosproject.net.flow.TrafficTreatment;
import static com.google.common.base.Preconditions.checkNotNull;
/**
* Abstraction of explicit MPLS label-switched path.
*/
@Beta
public final class MplsPathIntent extends PathIntent {
private final Optional<MplsLabel> ingressLabel;
private final Optional<MplsLabel> egressLabel;
/**
 * Creates a new point-to-point intent with the supplied ingress/egress
 * ports and using the specified explicit path.
 *
 * @param appId application identifier
 * @param selector traffic selector
 * @param treatment treatment
 * @param path traversed links
 * @param ingressLabel MPLS ingress label
 * @param egressLabel MPLS egress label
 * @param constraints optional list of constraints
 * @param priority priority to use for flows generated by this intent
 * @throws NullPointerException {@code path} is null
 */
private MplsPathIntent(ApplicationId appId, TrafficSelector selector,
        TrafficTreatment treatment, Path path, Optional<MplsLabel> ingressLabel,
        Optional<MplsLabel> egressLabel, List<Constraint> constraints,
        int priority) {
    super(appId, selector, treatment, path, constraints,
          priority);
    this.ingressLabel = checkNotNull(ingressLabel);
    this.egressLabel = checkNotNull(egressLabel);
}
/**
* Returns a new host to host intent builder.
*
* @return host to host intent builder
*/
public static Builder builder() {
return new Builder();
}
/**
* Builder of a host to host intent.
*/
public static final class Builder extends PathIntent.Builder {
private Optional<MplsLabel> ingressLabel = Optional.empty();
private Optional<MplsLabel> egressLabel = Optional.empty();
private Builder() {
// Hide constructor
}
@Override
public Builder appId(ApplicationId appId) {
return (Builder) super.appId(appId);
}
@Override
public Builder key(Key key) {
return (Builder) super.key(key);
}
@Override
public Builder selector(TrafficSelector selector) {
return (Builder) super.selector(selector);
}
@Override
public Builder treatment(TrafficTreatment treatment) {
return (Builder) super.treatment(treatment);
}
@Override
public Builder constraints(List<Constraint> constraints) {
return (Builder) super.constraints(constraints);
}
@Override
public Builder priority(int priority) {
return (Builder) super.priority(priority);
}
@Override
public Builder path(Path path) {
return (Builder) super.path(path);
}
/**
* Sets the ingress label of the intent that will be built.
*
* @param ingressLabel ingress label
* @return this builder
*/
public Builder ingressLabel(Optional<MplsLabel> ingressLabel) {
this.ingressLabel = ingressLabel;
return this;
}
/**
* Sets the ingress label of the intent that will be built.
*
* @param egressLabel ingress label
* @return this builder
*/
public Builder egressLabel(Optional<MplsLabel> egressLabel) {
this.egressLabel = egressLabel;
return this;
}
/**
* Builds a host to host intent from the accumulated parameters.
*
* @return point to point intent
*/
public MplsPathIntent build() {
return new MplsPathIntent(
appId,
selector,
treatment,
path,
ingressLabel,
egressLabel,
constraints,
priority
);
}
}
/**
* Returns the MPLS label which the ingress traffic should tagged.
*
* @return ingress MPLS label
*/
public Optional<MplsLabel> ingressLabel() {
return ingressLabel;
}
/**
* Returns the MPLS label which the egress traffic should tagged.
*
* @return egress MPLS label
*/
public Optional<MplsLabel> egressLabel() {
return egressLabel;
}
}
| apache-2.0 |
siosio/intellij-community | java/java-tests/testData/codeInsight/slice/forward/OneInterfaceTwoImplementations.java | 304 | interface JavaInterface {
void foo(Object p);
}
class JavaClass1 implements JavaInterface {
@Override
public void foo(Object <caret>p) {
System.out.println(<flown1>p);
}
}
class JavaClass2 implements JavaInterface {
@Override
public void foo(Object p) {
System.err.println(p);
}
} | apache-2.0 |
oujesky/closure-templates | java/src/com/google/template/soy/msgs/restricted/SoyMsgPluralCaseSpec.java | 3688 | /*
* Copyright 2011 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.template.soy.msgs.restricted;
import com.google.template.soy.msgs.SoyMsgException;
import java.util.EnumMap;
import java.util.EnumSet;
import java.util.Locale;
import java.util.Objects;
/**
* Represents a plural case value.
*
* A plural case value can be either a number, or one of {@code ZERO}, {@code ONE}, {@code TWO},
* {@code FEW}, {@code MANY} or {@code OTHER}. Here, a number is represented by the number
* {@code explicitValue} with status set to EXPLICIT and the remaining by an enum value.
*
*/
public class SoyMsgPluralCaseSpec {

  /** The type. EXPLICIT indicating numeric, or one of the others indicating non-numeric. */
  public enum Type { EXPLICIT, ZERO, ONE, TWO, FEW, MANY, OTHER }

  /** Pre-computed lower-case name of every Type, so toString() need not re-lowercase. */
  private static final EnumMap<Type, String> LOWER_CASE_NAME = new EnumMap<>(Type.class);
  static {
    for (Type t : EnumSet.allOf(Type.class)) {
      LOWER_CASE_NAME.put(t, t.name().toLowerCase(Locale.ENGLISH));
    }
  }

  /** The plural keyword, or EXPLICIT when this spec wraps a concrete number. */
  private final Type caseType;

  /** The wrapped number when caseType is EXPLICIT; -1 otherwise. */
  private final int numericValue;

  /**
   * Constructs a non-numeric case spec from its keyword (e.g. "few", "other").
   * The explicit value is set to -1.
   *
   * @param typeStr String representation of the non-numeric value.
   * @throws IllegalArgumentException if typeStr (after converting to upper
   *     case) does not match with any of the enum types.
   */
  public SoyMsgPluralCaseSpec(String typeStr) {
    this.caseType = Type.valueOf(typeStr.toUpperCase(Locale.ENGLISH));
    this.numericValue = -1;
  }

  /**
   * Constructs a numeric case spec; the type becomes EXPLICIT.
   *
   * @param explicitValue The numeric value.
   * @throws SoyMsgException if the value is negative.
   */
  public SoyMsgPluralCaseSpec(int explicitValue) {
    if (explicitValue < 0) {
      throw new SoyMsgException("Negative plural case value.");
    }
    this.caseType = Type.EXPLICIT;
    this.numericValue = explicitValue;
  }

  /**
   * Get the type.
   * @return The type. EXPLICIT if numeric.
   */
  public Type getType() {
    return caseType;
  }

  /**
   * Get the numeric value.
   * @return if numeric, return the numeric value, else -1.
   */
  public int getExplicitValue() {
    return numericValue;
  }

  @Override
  public String toString() {
    if (caseType == Type.EXPLICIT) {
      return "=" + numericValue;
    }
    return LOWER_CASE_NAME.get(caseType);
  }

  @Override
  public boolean equals(Object other) {
    if (!(other instanceof SoyMsgPluralCaseSpec)) {
      return false;
    }
    SoyMsgPluralCaseSpec that = (SoyMsgPluralCaseSpec) other;
    return this.caseType == that.caseType
        && this.numericValue == that.numericValue;
  }

  @Override
  public int hashCode() {
    return Objects.hash(SoyMsgPluralCaseSpec.class, caseType, numericValue);
  }
}
| apache-2.0 |
jomarko/kie-wb-common | kie-wb-common-dmn/kie-wb-common-dmn-client/src/test/java/org/kie/workbench/common/dmn/client/editors/included/IncludedModelsPageStateTest.java | 3116 | /*
* Copyright 2019 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kie.workbench.common.dmn.client.editors.included;
import java.util.List;
import com.google.gwtmockito.GwtMockitoTestRunner;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.kie.workbench.common.dmn.client.editors.included.common.IncludedModelsPageStateProvider;
import org.mockito.Mock;
import static java.util.Arrays.asList;
import static java.util.Collections.emptyList;
import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@RunWith(GwtMockitoTestRunner.class)
public class IncludedModelsPageStateTest {

    @Mock
    private IncludedModelsPageStateProvider pageProvider;

    // Object under test; recreated before each test.
    private IncludedModelsPageState state;

    @Before
    public void setup() {
        state = new IncludedModelsPageState();
    }

    /** Namespace lookups must be delegated to the registered provider. */
    @Test
    public void testGetCurrentDiagramNamespaceWhenPageProviderIsPresent() {
        final String expectedNamespace = "://namespace";
        when(pageProvider.getCurrentDiagramNamespace()).thenReturn(expectedNamespace);
        state.init(pageProvider);
        final String actualNamespace = state.getCurrentDiagramNamespace();
        assertEquals(expectedNamespace, actualNamespace);
    }

    /** Without a provider the namespace falls back to the empty string. */
    @Test
    public void testGetCurrentDiagramNamespaceWhenPageProviderIsNotPresent() {
        final String expectedNamespace = "";
        state.init(null);
        final String actualNamespace = state.getCurrentDiagramNamespace();
        assertEquals(expectedNamespace, actualNamespace);
    }

    /** Without a provider no included models can be generated. */
    @Test
    public void testGenerateIncludedModelsWhenPageProviderIsNotPresent() {
        state.init(null);
        final List<BaseIncludedModelActiveRecord> actualIncludedModels = state.generateIncludedModels();
        final List<BaseIncludedModelActiveRecord> expectedIncludedModels = emptyList();
        assertEquals(expectedIncludedModels, actualIncludedModels);
    }

    /** Included-model generation must be delegated to the registered provider. */
    @Test
    public void testGenerateIncludedModelsWhenPageProviderIsPresent() {
        final List<BaseIncludedModelActiveRecord> expectedIncludedModels = asList(mock(BaseIncludedModelActiveRecord.class), mock(BaseIncludedModelActiveRecord.class));
        when(pageProvider.generateIncludedModels()).thenReturn(expectedIncludedModels);
        state.init(pageProvider);
        final List<BaseIncludedModelActiveRecord> actualIncludedModels = state.generateIncludedModels();
        // Fixed: JUnit's assertEquals signature is (expected, actual); the
        // arguments were swapped here, unlike in the sibling tests above.
        assertEquals(expectedIncludedModels, actualIncludedModels);
    }
}
| apache-2.0 |
BigData-Lab-Frankfurt/HiBench-DSE | common/mahout-distribution-0.7-hadoop1/math/src/test/java/org/apache/mahout/math/jet/random/GammaTest.java | 5887 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.mahout.math.jet.random;
import org.apache.mahout.common.RandomUtils;
import org.apache.mahout.math.MahoutTestCase;
import org.junit.Test;
import java.util.Arrays;
import java.util.Locale;
import java.util.Random;
public final class GammaTest extends MahoutTestCase {

  /**
   * Draws 100k samples for several shape parameters (beta fixed at 1) and
   * checks the empirical CDF of the samples tracks g.cdf to within 1%.
   */
  @Test
  public void testNextDouble() {
    double[] z = new double[100000];
    Random gen = RandomUtils.getRandom();
    for (double alpha : new double[]{1, 2, 10, 0.1, 0.01, 100}) {
      Gamma g = new Gamma(alpha, 1, gen);
      for (int i = 0; i < z.length; i++) {
        z[i] = g.nextDouble();
      }
      Arrays.sort(z);
      // verify that empirical CDF matches theoretical one pretty closely
      for (double q : seq(0.01, 1, 0.01)) {
        double p = z[(int) (q * z.length)];
        assertEquals(q, g.cdf(p), 0.01);
      }
    }
  }

  /**
   * Checks the CDF: the exponential special case (alpha = 1), the rate
   * scaling identity cdf(x; alpha, beta) == cdf(beta*x; alpha, 1), and
   * agreement with reference values computed with R's pgamma (transcripts
   * kept in the comments below).
   */
  @Test
  public void testCdf() {
    Random gen = RandomUtils.getRandom();
    // verify scaling for special case of alpha = 1
    for (double beta : new double[]{1, 0.1, 2, 100}) {
      Gamma g1 = new Gamma(1, beta, gen);
      Gamma g2 = new Gamma(1, 1, gen);
      for (double x : seq(0, 0.99, 0.1)) {
        assertEquals(String.format(Locale.ENGLISH, "Rate invariance: x = %.4f, alpha = 1, beta = %.1f", x, beta),
                     1 - Math.exp(-x * beta), g1.cdf(x), 1.0e-9);
        assertEquals(String.format(Locale.ENGLISH, "Rate invariance: x = %.4f, alpha = 1, beta = %.1f", x, beta),
                     g2.cdf(beta * x), g1.cdf(x), 1.0e-9);
      }
    }

    // now test scaling for a selection of values of alpha
    for (double alpha : new double[]{0.01, 0.1, 1, 2, 10, 100, 1000}) {
      Gamma g = new Gamma(alpha, 1, gen);
      for (double beta : new double[]{0.1, 1, 2, 100}) {
        Gamma g1 = new Gamma(alpha, beta, gen);
        for (double x : seq(0, 0.9999, 0.001)) {
          assertEquals(
            String.format(Locale.ENGLISH, "Rate invariance: x = %.4f, alpha = %.2f, beta = %.1f", x, alpha, beta),
            g.cdf(x * beta), g1.cdf(x), 0);
        }
      }
    }

    // now check against known values computed using R for various values of alpha
    checkGammaCdf(0.01, 1, 0.0000000, 0.9450896, 0.9516444, 0.9554919, 0.9582258, 0.9603474, 0.9620810, 0.9635462, 0.9648148, 0.9659329, 0.9669321);
    checkGammaCdf(0.1, 1, 0.0000000, 0.7095387, 0.7591012, 0.7891072, 0.8107067, 0.8275518, 0.8413180, 0.8529198, 0.8629131, 0.8716623, 0.8794196);
    checkGammaCdf(1, 1, 0.0000000, 0.1812692, 0.3296800, 0.4511884, 0.5506710, 0.6321206, 0.6988058, 0.7534030, 0.7981035, 0.8347011, 0.8646647);
    checkGammaCdf(10, 1, 0.000000e+00, 4.649808e-05, 8.132243e-03, 8.392402e-02, 2.833757e-01, 5.420703e-01, 7.576078e-01, 8.906006e-01, 9.567017e-01, 9.846189e-01, 9.950046e-01);
    checkGammaCdf(100, 1, 0.000000e+00, 3.488879e-37, 1.206254e-15, 1.481528e-06, 1.710831e-02, 5.132988e-01, 9.721363e-01, 9.998389e-01, 9.999999e-01, 1.000000e+00, 1.000000e+00);

    // > pgamma(seq(0,0.02,by=0.002),0.01,1)
    // [1] 0.0000000 0.9450896 0.9516444 0.9554919 0.9582258 0.9603474 0.9620810 0.9635462 0.9648148 0.9659329 0.9669321
    // > pgamma(seq(0,0.2,by=0.02),0.1,1)
    // [1] 0.0000000 0.7095387 0.7591012 0.7891072 0.8107067 0.8275518 0.8413180 0.8529198 0.8629131 0.8716623 0.8794196
    // > pgamma(seq(0,2,by=0.2),1,1)
    // [1] 0.0000000 0.1812692 0.3296800 0.4511884 0.5506710 0.6321206 0.6988058 0.7534030 0.7981035 0.8347011 0.8646647
    // > pgamma(seq(0,20,by=2),10,1)
    // [1] 0.000000e+00 4.649808e-05 8.132243e-03 8.392402e-02 2.833757e-01 5.420703e-01 7.576078e-01 8.906006e-01 9.567017e-01 9.846189e-01 9.950046e-01
    // > pgamma(seq(0,200,by=20),100,1)
    // [1] 0.000000e+00 3.488879e-37 1.206254e-15 1.481528e-06 1.710831e-02 5.132988e-01 9.721363e-01 9.998389e-01 9.999999e-01 1.000000e+00 1.000000e+00
  }

  // Compares g.cdf at 11 evenly spaced points in [0, 2*alpha] against the
  // supplied reference values (computed in R, see transcripts in testCdf).
  private static void checkGammaCdf(double alpha, double beta, double... values) {
    Gamma g = new Gamma(alpha, beta, RandomUtils.getRandom());
    int i = 0;
    for (double x : seq(0, 2 * alpha, 2 * alpha / 10)) {
      assertEquals(String.format(Locale.ENGLISH, "alpha=%.2f, i=%d, x=%.2f", alpha, i, x),
                   values[i], g.cdf(x), 1.0e-7);
      i++;
    }
  }

  // Returns {from, from+by, ...}, stopping just short of 'to' (a small
  // relative tolerance absorbs floating-point rounding of the step).
  private static double[] seq(double from, double to, double by) {
    double[] r = new double[(int) Math.ceil(0.999999 * (to - from) / by)];
    int i = 0;
    for (double x = from; x < to - (to - from) * 1.0e-6; x += by) {
      r[i++] = x;
    }
    return r;
  }

  /**
   * Verifies pdf against the closed-form gamma density
   * beta^alpha * x^(alpha-1) * exp(-beta*x) / Gamma(alpha),
   * evaluated via logGamma.
   */
  @Test
  public void testPdf() {
    Random gen = RandomUtils.getRandom();
    for (double alpha : new double[]{0.01, 0.1, 1, 2, 10, 100}) {
      for (double beta : new double[]{0.1, 1, 2, 100}) {
        Gamma g1 = new Gamma(alpha, beta, gen);
        for (double x : seq(0, 0.99, 0.1)) {
          double p = Math.pow(beta, alpha) * Math.pow(x, alpha - 1) *
            Math.exp(-beta * x - org.apache.mahout.math.jet.stat.Gamma.logGamma(alpha));
          assertEquals(String.format(Locale.ENGLISH, "alpha=%.2f, beta=%.2f, x=%.2f\n", alpha, beta, x),
                       p, g1.pdf(x), 1.0e-9);
        }
      }
    }
  }
}
| apache-2.0 |
barneykim/pinpoint | web/src/main/java/com/navercorp/pinpoint/web/dao/stat/DeadlockDao.java | 825 | /*
* Copyright 2017 NAVER Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.navercorp.pinpoint.web.dao.stat;
import com.navercorp.pinpoint.common.server.bo.stat.DeadlockThreadCountBo;
/**
 * Data-access interface for stored per-agent deadlocked-thread-count
 * statistics ({@link DeadlockThreadCountBo}); all operations are inherited
 * from {@link AgentStatDao}.
 *
 * @author Taejin Koo
 */
public interface DeadlockDao extends AgentStatDao<DeadlockThreadCountBo> {
}
| apache-2.0 |
maxamillion/origin | pkg/network/sdn-cni-plugin/openshift-sdn.go | 5969 | // +build linux
package main
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"math/rand"
"net"
"net/http"
"os"
"strings"
"time"
"github.com/openshift/origin/pkg/network/node/cniserver"
"github.com/containernetworking/cni/pkg/skel"
"github.com/containernetworking/cni/pkg/types"
"github.com/containernetworking/cni/pkg/types/020"
"github.com/containernetworking/cni/pkg/types/current"
"github.com/containernetworking/cni/pkg/version"
"github.com/containernetworking/plugins/pkg/ip"
"github.com/containernetworking/plugins/pkg/ipam"
"github.com/containernetworking/plugins/pkg/ns"
"github.com/vishvananda/netlink"
)
// cniPlugin sends CNI requests to the OpenShift SDN CNI server over the
// unix socket at socketPath, dialing from the hostNS network namespace.
type cniPlugin struct {
	socketPath string
	hostNS     ns.NetNS
}
// NewCNIPlugin returns a cniPlugin that talks to the CNI server listening
// on socketPath, issuing requests from the given host network namespace.
func NewCNIPlugin(socketPath string, hostNS ns.NetNS) *cniPlugin {
	return &cniPlugin{socketPath: socketPath, hostNS: hostNS}
}
// Create and fill a CNIRequest with this plugin's environment and stdin which
// contain the CNI variables and configuration
func newCNIRequest(args *skel.CmdArgs) *cniserver.CNIRequest {
	env := make(map[string]string)
	for _, kv := range os.Environ() {
		// Keep only entries of the form KEY=VALUE with a non-empty key.
		parts := strings.SplitN(kv, "=", 2)
		if len(parts) == 2 && len(parts[0]) > 0 {
			env[strings.TrimSpace(parts[0])] = parts[1]
		}
	}

	return &cniserver.CNIRequest{
		Env:    env,
		Config: args.StdinData,
	}
}
// Send a CNI request to the CNI server via JSON + HTTP over a root-owned unix socket,
// and return the result
func (p *cniPlugin) doCNI(url string, req *cniserver.CNIRequest) ([]byte, error) {
	data, err := json.Marshal(req)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal CNI request %v: %v", req, err)
	}

	// The URL's host part is ignored: every connection is dialed over the
	// server's unix socket rather than TCP.
	client := &http.Client{
		Transport: &http.Transport{
			Dial: func(proto, addr string) (net.Conn, error) {
				return net.Dial("unix", p.socketPath)
			},
		},
	}

	// Perform the POST from the plugin's host network namespace.
	var resp *http.Response
	err = p.hostNS.Do(func(ns.NetNS) error {
		resp, err = client.Post(url, "application/json", bytes.NewReader(data))
		return err
	})
	if err != nil {
		return nil, fmt.Errorf("failed to send CNI request: %v", err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("failed to read CNI result: %v", err)
	}

	// Any non-200 status is an error; the body carries the server's message.
	if resp.StatusCode != 200 {
		return nil, fmt.Errorf("CNI request failed with status %v: '%s'", resp.StatusCode, string(body))
	}

	return body, nil
}
// Send the ADD command environment and config to the CNI server, returning
// the IPAM result to the caller
func (p *cniPlugin) doCNIServerAdd(req *cniserver.CNIRequest, hostVeth string) (types.Result, error) {
	req.HostVeth = hostVeth
	body, err := p.doCNI("http://dummy/", req)
	if err != nil {
		return nil, err
	}

	// We currently expect CNI version 0.2.0 results, because that's the
	// CNIVersion we pass in our config JSON
	result, err := types020.NewResult(body)
	if err != nil {
		return nil, fmt.Errorf("failed to unmarshal response '%s': %v", string(body), err)
	}

	return result, nil
}
// testCmdAdd drives the server ADD path with a placeholder host-veth name;
// it is not registered with skel.PluginMain (see main), so it is only
// reachable from test code.
func (p *cniPlugin) testCmdAdd(args *skel.CmdArgs) (types.Result, error) {
	return p.doCNIServerAdd(newCNIRequest(args), "dummy0")
}
// CmdAdd implements the CNI ADD command: it creates the container-side veth
// pair, obtains the IPAM result from the CNI server, then configures eth0,
// lo and (if present) macvlan0 inside the container network namespace.
func (p *cniPlugin) CmdAdd(args *skel.CmdArgs) error {
	req := newCNIRequest(args)
	config, err := cniserver.ReadConfig(cniserver.CNIServerConfigFilePath)
	if err != nil {
		return err
	}

	// Create the veth pair from inside the container netns; the host end
	// is moved to hostNS by SetupVeth.
	var hostVeth, contVeth net.Interface
	err = ns.WithNetNSPath(args.Netns, func(hostNS ns.NetNS) error {
		hostVeth, contVeth, err = ip.SetupVeth(args.IfName, int(config.MTU), hostNS)
		if err != nil {
			return fmt.Errorf("failed to create container veth: %v", err)
		}
		return nil
	})
	if err != nil {
		return err
	}

	result, err := p.doCNIServerAdd(req, hostVeth.Name)
	if err != nil {
		return err
	}

	// current.NewResultFromResult and ipam.ConfigureIface both think that
	// a route with no gateway specified means to pass the default gateway
	// as the next hop to ip.AddRoute, but that's not what we want; we want
	// to pass nil as the next hop. So we need to clear the default gateway.
	result020, err := types020.GetResult(result)
	if err != nil {
		return fmt.Errorf("failed to convert IPAM result: %v", err)
	}
	result020.IP4.Gateway = nil

	// Exactly one IPv4 address is expected from the server.
	result030, err := current.NewResultFromResult(result020)
	if err != nil || len(result030.IPs) != 1 || result030.IPs[0].Version != "4" {
		return fmt.Errorf("failed to convert IPAM result: %v", err)
	}

	// Add a sandbox interface record which ConfigureInterface expects.
	// The only interface we report is the pod interface.
	result030.Interfaces = []*current.Interface{
		{
			Name:    args.IfName,
			Mac:     contVeth.HardwareAddr.String(),
			Sandbox: args.Netns,
		},
	}
	result030.IPs[0].Interface = current.Int(0)

	err = ns.WithNetNSPath(args.Netns, func(ns.NetNS) error {
		// Set up eth0
		if err := ip.SetHWAddrByIP(args.IfName, result030.IPs[0].Address.IP, nil); err != nil {
			return fmt.Errorf("failed to set pod interface MAC address: %v", err)
		}
		if err := ipam.ConfigureIface(args.IfName, result030); err != nil {
			return fmt.Errorf("failed to configure container IPAM: %v", err)
		}

		// Set up lo
		link, err := netlink.LinkByName("lo")
		if err == nil {
			err = netlink.LinkSetUp(link)
		}
		if err != nil {
			return fmt.Errorf("failed to configure container loopback: %v", err)
		}

		// Set up macvlan0 (if it exists)
		link, err = netlink.LinkByName("macvlan0")
		if err == nil {
			// Lookup failure just means no macvlan device; only a failed
			// LinkSetUp on an existing device is an error.
			err = netlink.LinkSetUp(link)
			if err != nil {
				return fmt.Errorf("failed to configure macvlan device: %v", err)
			}
		}
		return nil
	})
	if err != nil {
		return err
	}

	return result.Print()
}
// CmdDel implements the CNI DEL command by forwarding the request to the
// CNI server; only the server's success/failure is reported back.
func (p *cniPlugin) CmdDel(args *skel.CmdArgs) error {
	_, err := p.doCNI("http://dummy/", newCNIRequest(args))
	return err
}
// main wires the ADD/DEL handlers into the standard CNI plugin skeleton.
func main() {
	rand.Seed(time.Now().UTC().UnixNano())
	// Remember the namespace the plugin starts in; doCNI dials the server
	// socket from this namespace.
	hostNS, err := ns.GetCurrentNS()
	if err != nil {
		panic(fmt.Sprintf("could not get current kernel netns: %v", err))
	}
	defer hostNS.Close()
	p := NewCNIPlugin(cniserver.CNIServerSocketPath, hostNS)
	skel.PluginMain(p.CmdAdd, p.CmdDel, version.Legacy)
}
| apache-2.0 |
microsoft/TypeScript | tests/cases/fourslash/tsxFindAllReferences1.ts | 600 | /// <reference path='fourslash.ts' />
//@Filename: file.tsx
//// declare module JSX {
//// interface Element { }
//// interface IntrinsicElements {
//// [|[|{| "isWriteAccess": true, "isDefinition": true, "contextRangeIndex": 0 |}div|]: {
//// name?: string;
//// isOpen?: boolean;
//// };|]
//// span: { n: string; };
//// }
//// }
//// var x = [|<[|{| "contextRangeIndex": 2 |}div|] />|];
verify.singleReferenceGroup(
`(property) JSX.IntrinsicElements.div: {
name?: string;
isOpen?: boolean;
}`, "div");
| apache-2.0 |
jsdosa/TizenRT | os/logm/logm_process.c | 2907 | /****************************************************************************
*
* Copyright 2016-2017 Samsung Electronics All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific
* language governing permissions and limitations under the License.
*
****************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <sys/types.h>
#include <arch/irq.h>
#include <tinyara/logm.h>
#include <tinyara/config.h>
#include <tinyara/kmalloc.h>
#include "logm.h"
#ifdef CONFIG_LOGM_TEST
#include "logm_test.h"
#endif
uint8_t logm_status;
int logm_bufsize = LOGM_BUFFER_SIZE;
char * g_logm_rsvbuf = NULL;
volatile int logm_print_interval = LOGM_PRINT_INTERVAL * 1000;
/* Resize the LogM reserve buffer to buflen bytes and reset all buffer
 * state. Called from logm_task with interrupts disabled. The
 * LOGM_BUFFER_RESIZE_REQ flag is consumed on every path, so a failed
 * request is not retried forever. Returns OK on success, ERROR otherwise.
 */
static int logm_change_bufsize(int buflen)
{
	/* Keep using the old size if the requested one is invalid. Zero is
	 * rejected too: logm_task indexes with '% logm_bufsize', so a zero
	 * size would divide by zero. */
	if (buflen <= 0) {
		LOGM_STATUS_CLEAR(LOGM_BUFFER_RESIZE_REQ);
		return ERROR;
	}

	/* Realloc new buffer with new length */
	char *new_g_logm_rsvbuf = (char *)kmm_realloc(g_logm_rsvbuf, buflen);
	if (new_g_logm_rsvbuf == NULL) {
		wdbg("Realloc Fail\n");
		/* Drop the request so the task does not retry the same failing
		 * allocation on every wakeup; the old buffer remains valid. */
		LOGM_STATUS_CLEAR(LOGM_BUFFER_RESIZE_REQ);
		return ERROR;
	}
	g_logm_rsvbuf = new_g_logm_rsvbuf;
	memset(g_logm_rsvbuf, 0, buflen);

	/* Reinitialize all */
	g_logm_head = 0;
	g_logm_tail = 0;
	logm_bufsize = buflen;
	g_logm_dropmsg_count = 0;
	g_logm_overflow_offset = -1;
	LOGM_STATUS_CLEAR(LOGM_BUFFER_RESIZE_REQ);

	return OK;
}
/* LogM daemon loop: drains buffered log characters to stdout, reports
 * overflow drops, and applies pending buffer-resize requests. Runs forever
 * under normal operation.
 */
int logm_task(int argc, char *argv[])
{
	irqstate_t flags;

	g_logm_rsvbuf = (char *)kmm_malloc(logm_bufsize);
	memset(g_logm_rsvbuf, 0, logm_bufsize);

	/* Now logm is ready */
	LOGM_STATUS_SET(LOGM_READY);

#ifdef CONFIG_LOGM_TEST
	logmtest_init();
#endif

	while (1) {
		/* Drain every character between head (consumer) and tail (producer). */
		while (g_logm_head != g_logm_tail) {
			fputc(g_logm_rsvbuf[g_logm_head], stdout);
			g_logm_head = (g_logm_head + 1) % logm_bufsize;
			if (LOGM_STATUS(LOGM_BUFFER_OVERFLOW)) {
				LOGM_STATUS_CLEAR(LOGM_BUFFER_OVERFLOW);
			}
			/* When draining reaches the recorded overflow point, report
			 * how many messages were lost there. */
			if (g_logm_overflow_offset >= 0 && g_logm_overflow_offset == g_logm_head) {
				fprintf(stdout, "\n[LOGM BUFFER OVERFLOW] %d messages are dropped\n", g_logm_dropmsg_count);
				g_logm_overflow_offset = -1;
			}
		}

		if (LOGM_STATUS(LOGM_BUFFER_RESIZE_REQ)) {
			/* Resize with interrupts disabled so producers cannot write
			 * into the buffer while it is reallocated. */
			flags = irqsave();
			if (logm_change_bufsize(new_logm_bufsize) != OK) {
				fprintf(stdout, "\n[LOGM] Failed to change buffer size\n");
			}
			irqrestore(flags);
		}

		usleep(logm_print_interval);
	}

	kmm_free(g_logm_rsvbuf);
	return 0; // Just to make compiler happy
}
}
| apache-2.0 |
amyvmiwei/hbase | hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRowProcessorEndpoint.java | 22973 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.coprocessor;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.testclassification.CoprocessorTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.ByteStringer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.IsolationLevel;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.coprocessor.RowProcessorClient;
import org.apache.hadoop.hbase.coprocessor.protobuf.generated.IncrementCounterProcessorTestProtos;
import org.apache.hadoop.hbase.coprocessor.protobuf.generated.IncrementCounterProcessorTestProtos.FriendsOfFriendsProcessorRequest;
import org.apache.hadoop.hbase.coprocessor.protobuf.generated.IncrementCounterProcessorTestProtos.FriendsOfFriendsProcessorResponse;
import org.apache.hadoop.hbase.coprocessor.protobuf.generated.IncrementCounterProcessorTestProtos.IncCounterProcessorRequest;
import org.apache.hadoop.hbase.coprocessor.protobuf.generated.IncrementCounterProcessorTestProtos.IncCounterProcessorResponse;
import org.apache.hadoop.hbase.coprocessor.protobuf.generated.IncrementCounterProcessorTestProtos.RowSwapProcessorRequest;
import org.apache.hadoop.hbase.coprocessor.protobuf.generated.IncrementCounterProcessorTestProtos.RowSwapProcessorResponse;
import org.apache.hadoop.hbase.coprocessor.protobuf.generated.IncrementCounterProcessorTestProtos.TimeoutProcessorRequest;
import org.apache.hadoop.hbase.coprocessor.protobuf.generated.IncrementCounterProcessorTestProtos.TimeoutProcessorResponse;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
import org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.ProcessRequest;
import org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.ProcessResponse;
import org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorService;
import org.apache.hadoop.hbase.regionserver.BaseRowProcessor;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import com.google.protobuf.Message;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
/**
* Verifies ProcessEndpoint works.
* The tested RowProcessor performs two scans and a read-modify-write.
*/
@Category({CoprocessorTests.class, MediumTests.class})
public class TestRowProcessorEndpoint {
static final Log LOG = LogFactory.getLog(TestRowProcessorEndpoint.class);
private static final TableName TABLE = TableName.valueOf("testtable");
private final static byte[] ROW = Bytes.toBytes("testrow");
private final static byte[] ROW2 = Bytes.toBytes("testrow2");
private final static byte[] FAM = Bytes.toBytes("friendlist");
// Column names
private final static byte[] A = Bytes.toBytes("a");
private final static byte[] B = Bytes.toBytes("b");
private final static byte[] C = Bytes.toBytes("c");
private final static byte[] D = Bytes.toBytes("d");
private final static byte[] E = Bytes.toBytes("e");
private final static byte[] F = Bytes.toBytes("f");
private final static byte[] G = Bytes.toBytes("g");
private final static byte[] COUNTER = Bytes.toBytes("counter");
private final static AtomicLong myTimer = new AtomicLong(0);
private final AtomicInteger failures = new AtomicInteger(0);
private static HBaseTestingUtility util = new HBaseTestingUtility();
private static volatile int expectedCounter = 0;
private static int rowSize, row2Size;
private volatile static Table table = null;
private volatile static boolean swapped = false;
private volatile CountDownLatch startSignal;
private volatile CountDownLatch doneSignal;
  @BeforeClass
  public static void setupBeforeClass() throws Exception {
    Configuration conf = util.getConfiguration();
    // Load the RowProcessor endpoint coprocessor on every region.
    conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
        RowProcessorEndpoint.class.getName());
    conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2);
    // Bound how long a single row-processor invocation may run.
    conf.setLong("hbase.hregion.row.processor.timeout", 1000L);
    util.startMiniCluster();
  }
  /** Shuts down the mini cluster started in {@link #setupBeforeClass()}. */
  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    util.shutdownMiniCluster();
  }
  /**
   * Drops and recreates TABLE, then loads two fixture rows: ROW maps each
   * person column to the concatenated single-byte ids of their friends, and
   * ROW2 holds additional friend links.
   */
  public void prepareTestData() throws Exception {
    try {
      util.getHBaseAdmin().disableTable(TABLE);
      util.getHBaseAdmin().deleteTable(TABLE);
    } catch (Exception e) {
      // ignore table not found
    }
    table = util.createTable(TABLE, FAM);
    {
      Put put = new Put(ROW);
      put.add(FAM, A, Bytes.add(B, C)); // B, C are friends of A
      put.add(FAM, B, Bytes.add(D, E, F)); // D, E, F are friends of B
      put.add(FAM, C, G); // G is a friend of C
      table.put(put);
      rowSize = put.size();
    }
    Put put = new Put(ROW2);
    put.add(FAM, D, E);
    put.add(FAM, F, G);
    table.put(put);
    row2Size = put.size();
  }
  /**
   * Invokes the FriendsOfFriendsProcessor endpoint on ROW for person A and
   * verifies the two-level scan returns exactly {d, e, f, g}.
   */
  @Test
  public void testDoubleScan() throws Throwable {
    prepareTestData();
    CoprocessorRpcChannel channel = table.coprocessorService(ROW);
    RowProcessorEndpoint.FriendsOfFriendsProcessor processor =
        new RowProcessorEndpoint.FriendsOfFriendsProcessor(ROW, A);
    RowProcessorService.BlockingInterface service =
        RowProcessorService.newBlockingStub(channel);
    ProcessRequest request = RowProcessorClient.getRowProcessorPB(processor);
    ProcessResponse protoResult = service.process(null, request);
    FriendsOfFriendsProcessorResponse response =
        FriendsOfFriendsProcessorResponse.parseFrom(protoResult.getRowProcessorResult());
    Set<String> result = new HashSet<String>();
    result.addAll(response.getResultList());
    Set<String> expected =
        new HashSet<String>(Arrays.asList(new String[]{"d", "e", "f", "g"}));
    Get get = new Get(ROW);
    LOG.debug("row keyvalues:" + stringifyKvs(table.get(get).listCells()));
    assertEquals(expected, result);
  }
  /**
   * Runs 100 concurrent IncrementCounterProcessor invocations against ROW and
   * verifies each one was serialized by the row lock: the final value equals
   * numThreads plus the extra incrementCounter() call below, with no thread
   * failures.
   */
  @Test
  public void testReadModifyWrite() throws Throwable {
    prepareTestData();
    failures.set(0);
    int numThreads = 100;
    concurrentExec(new IncrementRunner(), numThreads);
    Get get = new Get(ROW);
    LOG.debug("row keyvalues:" + stringifyKvs(table.get(get).listCells()));
    // One more increment; hence the "+ 1" in the assertion below.
    int finalCounter = incrementCounter(table);
    assertEquals(numThreads + 1, finalCounter);
    assertEquals(0, failures.get());
  }
  /** Runnable wrapper performing one counter increment; used by concurrentExec(). */
  class IncrementRunner implements Runnable {
    @Override
    public void run() {
      try {
        incrementCounter(table);
      } catch (Throwable e) {
        e.printStackTrace();
      }
    }
  }
  /**
   * Invokes IncrementCounterProcessor on ROW through the coprocessor RPC
   * channel and returns the post-increment counter value.
   */
  private int incrementCounter(Table table) throws Throwable {
    CoprocessorRpcChannel channel = table.coprocessorService(ROW);
    RowProcessorEndpoint.IncrementCounterProcessor processor =
        new RowProcessorEndpoint.IncrementCounterProcessor(ROW);
    RowProcessorService.BlockingInterface service =
        RowProcessorService.newBlockingStub(channel);
    ProcessRequest request = RowProcessorClient.getRowProcessorPB(processor);
    ProcessResponse protoResult = service.process(null, request);
    IncCounterProcessorResponse response = IncCounterProcessorResponse
        .parseFrom(protoResult.getRowProcessorResult());
    Integer result = response.getResponse();
    return result;
  }
  /**
   * Runs {@code task} on {@code numThreads} freshly created threads and blocks
   * until all finish. {@code startSignal} is used as a barrier (every thread
   * counts down, then awaits) so no thread starts work before all are ready;
   * any Throwable from a task is tallied in {@link #failures}.
   */
  private void concurrentExec(
      final Runnable task, final int numThreads) throws Throwable {
    startSignal = new CountDownLatch(numThreads);
    doneSignal = new CountDownLatch(numThreads);
    for (int i = 0; i < numThreads; ++i) {
      new Thread(new Runnable() {
        @Override
        public void run() {
          try {
            startSignal.countDown();
            startSignal.await();
            task.run();
          } catch (Throwable e) {
            failures.incrementAndGet();
            e.printStackTrace();
          }
          // Counted down on both success and failure paths.
          doneSignal.countDown();
        }
      }).start();
    }
    doneSignal.await();
  }
  /**
   * Runs 100 concurrent row swaps between ROW and ROW2. RowSwapProcessor
   * asserts internally that each swap observes a consistent pair of rows;
   * here we only verify the final cell counts match the originals and that
   * no thread failed.
   */
  @Test
  public void testMultipleRows() throws Throwable {
    prepareTestData();
    failures.set(0);
    int numThreads = 100;
    concurrentExec(new SwapRowsRunner(), numThreads);
    LOG.debug("row keyvalues:" +
        stringifyKvs(table.get(new Get(ROW)).listCells()));
    LOG.debug("row2 keyvalues:" +
        stringifyKvs(table.get(new Get(ROW2)).listCells()));
    // 100 swaps = even number, so each row ends with its original cell count.
    assertEquals(rowSize, table.get(new Get(ROW)).listCells().size());
    assertEquals(row2Size, table.get(new Get(ROW2)).listCells().size());
    assertEquals(0, failures.get());
  }
  /** Runnable wrapper performing one row swap; used by concurrentExec(). */
  class SwapRowsRunner implements Runnable {
    @Override
    public void run() {
      try {
        swapRows(table);
      } catch (Throwable e) {
        e.printStackTrace();
      }
    }
  }
  /** Invokes RowSwapProcessor(ROW, ROW2) through the coprocessor RPC channel. */
  private void swapRows(Table table) throws Throwable {
    CoprocessorRpcChannel channel = table.coprocessorService(ROW);
    RowProcessorEndpoint.RowSwapProcessor processor =
        new RowProcessorEndpoint.RowSwapProcessor(ROW, ROW2);
    RowProcessorService.BlockingInterface service =
        RowProcessorService.newBlockingStub(channel);
    ProcessRequest request = RowProcessorClient.getRowProcessorPB(processor);
    service.process(null, request);
  }
  /**
   * Invokes TimeoutProcessor, which sleeps far longer than the 1-second
   * "hbase.hregion.row.processor.timeout" configured in setupBeforeClass(),
   * and verifies the call surfaces an exception to the client.
   */
  @Test
  public void testTimeout() throws Throwable {
    prepareTestData();
    CoprocessorRpcChannel channel = table.coprocessorService(ROW);
    RowProcessorEndpoint.TimeoutProcessor processor =
        new RowProcessorEndpoint.TimeoutProcessor(ROW);
    RowProcessorService.BlockingInterface service =
        RowProcessorService.newBlockingStub(channel);
    ProcessRequest request = RowProcessorClient.getRowProcessorPB(processor);
    boolean exceptionCaught = false;
    try {
      service.process(null, request);
    } catch (Exception e) {
      exceptionCaught = true;
    }
    assertTrue(exceptionCaught);
  }
  /**
   * This endpoint defines four RowProcessors: IncrementCounterProcessor,
   * FriendsOfFriendsProcessor, RowSwapProcessor and TimeoutProcessor.
   *
   * The RowProcessors are defined as inner classes of the endpoint so that
   * they are loaded together with the endpoint when the coprocessor is
   * installed.
   */
public static class RowProcessorEndpoint<S extends Message,T extends Message>
extends BaseRowProcessorEndpoint<S,T> implements CoprocessorService {
    /**
     * Atomically increments the "counter" column of a single row. process()
     * runs while the row is locked (see getRowsToLock), so the concurrent
     * invocations in testReadModifyWrite must each observe the previous
     * increment — asserted against the static expectedCounter.
     */
    public static class IncrementCounterProcessor extends
        BaseRowProcessor<IncrementCounterProcessorTestProtos.IncCounterProcessorRequest,
        IncrementCounterProcessorTestProtos.IncCounterProcessorResponse> {
      int counter = 0;          // value after the increment; returned to the client
      byte[] row = new byte[0]; // target row
      /**
       * Empty constructor for Writable
       */
      IncrementCounterProcessor() {
      }
      IncrementCounterProcessor(byte[] row) {
        this.row = row;
      }
      @Override
      public Collection<byte[]> getRowsToLock() {
        // Only the single target row is locked for the duration of process().
        return Collections.singleton(row);
      }
      @Override
      public IncCounterProcessorResponse getResult() {
        IncCounterProcessorResponse.Builder i = IncCounterProcessorResponse.newBuilder();
        i.setResponse(counter);
        return i.build();
      }
      @Override
      public boolean readOnly() {
        return false;
      }
      @Override
      public void process(long now, HRegion region,
          List<Mutation> mutations, WALEdit walEdit) throws IOException {
        // Scan current counter
        List<Cell> kvs = new ArrayList<Cell>();
        Scan scan = new Scan(row, row);
        scan.addColumn(FAM, COUNTER);
        doScan(region, scan, kvs);
        counter = kvs.size() == 0 ? 0 :
            Bytes.toInt(CellUtil.cloneValue(kvs.iterator().next()));
        // Assert counter value
        assertEquals(expectedCounter, counter);
        // Increment counter and send it to both memstore and wal edit
        counter += 1;
        expectedCounter += 1;
        Put p = new Put(row);
        KeyValue kv =
            new KeyValue(row, FAM, COUNTER, now, Bytes.toBytes(counter));
        p.add(kv);
        mutations.add(p);
        walEdit.add(kv);
        // We can also inject some meta data to the walEdit
        KeyValue metaKv = new KeyValue(
            row, WALEdit.METAFAMILY,
            Bytes.toBytes("I just increment counter"),
            Bytes.toBytes(counter));
        walEdit.add(metaKv);
      }
      @Override
      public IncCounterProcessorRequest getRequestData() throws IOException {
        // Serialize the processor state for shipping to the region server.
        IncCounterProcessorRequest.Builder builder = IncCounterProcessorRequest.newBuilder();
        builder.setCounter(counter);
        builder.setRow(ByteStringer.wrap(row));
        return builder.build();
      }
      @Override
      public void initialize(IncCounterProcessorRequest msg) {
        // Server-side rehydration from the serialized request.
        this.row = msg.getRow().toByteArray();
        this.counter = msg.getCounter();
      }
    }
    /**
     * Read-only processor that computes the friends-of-friends set of one
     * person stored in a single row: the first scan reads the person's
     * friends column, the second scan reads each friend's column, and every
     * byte of those values is collected as a one-character string.
     */
    public static class FriendsOfFriendsProcessor extends
        BaseRowProcessor<FriendsOfFriendsProcessorRequest, FriendsOfFriendsProcessorResponse> {
      byte[] row = null;    // row holding the friend lists
      byte[] person = null; // column qualifier of the person to start from
      final Set<String> result = new HashSet<String>();
      /**
       * Empty constructor for Writable
       */
      FriendsOfFriendsProcessor() {
      }
      FriendsOfFriendsProcessor(byte[] row, byte[] person) {
        this.row = row;
        this.person = person;
      }
      @Override
      public Collection<byte[]> getRowsToLock() {
        return Collections.singleton(row);
      }
      @Override
      public FriendsOfFriendsProcessorResponse getResult() {
        FriendsOfFriendsProcessorResponse.Builder builder =
            FriendsOfFriendsProcessorResponse.newBuilder();
        builder.addAllResult(result);
        return builder.build();
      }
      @Override
      public boolean readOnly() {
        return true;
      }
      @Override
      public void process(long now, HRegion region,
          List<Mutation> mutations, WALEdit walEdit) throws IOException {
        List<Cell> kvs = new ArrayList<Cell>();
        { // First scan to get friends of the person
          Scan scan = new Scan(row, row);
          scan.addColumn(FAM, person);
          doScan(region, scan, kvs);
        }
        // Second scan to get friends of friends
        Scan scan = new Scan(row, row);
        for (Cell kv : kvs) {
          // Each byte of the value is one friend's column qualifier.
          byte[] friends = CellUtil.cloneValue(kv);
          for (byte f : friends) {
            scan.addColumn(FAM, new byte[]{f});
          }
        }
        // doScan clears kvs before refilling it with the second scan's cells.
        doScan(region, scan, kvs);
        // Collect result
        result.clear();
        for (Cell kv : kvs) {
          for (byte b : CellUtil.cloneValue(kv)) {
            result.add((char)b + "");
          }
        }
      }
      @Override
      public FriendsOfFriendsProcessorRequest getRequestData() throws IOException {
        FriendsOfFriendsProcessorRequest.Builder builder =
            FriendsOfFriendsProcessorRequest.newBuilder();
        builder.setPerson(ByteStringer.wrap(person));
        builder.setRow(ByteStringer.wrap(row));
        builder.addAllResult(result);
        FriendsOfFriendsProcessorRequest f = builder.build();
        return f;
      }
      @Override
      public void initialize(FriendsOfFriendsProcessorRequest request)
          throws IOException {
        this.person = request.getPerson().toByteArray();
        this.row = request.getRow().toByteArray();
        result.clear();
        result.addAll(request.getResultList());
      }
    }
    /**
     * Swaps the full contents of two rows. Both rows are locked for the
     * duration of process(), so every concurrent swap must observe a
     * consistent pair of rows — checked via the static 'swapped' flag and
     * the cell counts recorded when the fixture was loaded.
     */
    public static class RowSwapProcessor extends
        BaseRowProcessor<RowSwapProcessorRequest, RowSwapProcessorResponse> {
      byte[] row1 = new byte[0];
      byte[] row2 = new byte[0];
      /**
       * Empty constructor for Writable
       */
      RowSwapProcessor() {
      }
      RowSwapProcessor(byte[] row1, byte[] row2) {
        this.row1 = row1;
        this.row2 = row2;
      }
      @Override
      public Collection<byte[]> getRowsToLock() {
        // Lock both rows so the swap appears atomic to other processors.
        List<byte[]> rows = new ArrayList<byte[]>();
        rows.add(row1);
        rows.add(row2);
        return rows;
      }
      @Override
      public boolean readOnly() {
        return false;
      }
      @Override
      public RowSwapProcessorResponse getResult() {
        // The swap has no payload to return.
        return RowSwapProcessorResponse.getDefaultInstance();
      }
      @Override
      public void process(long now, HRegion region,
          List<Mutation> mutations, WALEdit walEdit) throws IOException {
        // Override the time to avoid race-condition in the unit test caused by
        // inaccurate timer on some machines
        now = myTimer.getAndIncrement();
        // Scan both rows
        List<Cell> kvs1 = new ArrayList<Cell>();
        List<Cell> kvs2 = new ArrayList<Cell>();
        doScan(region, new Scan(row1, row1), kvs1);
        doScan(region, new Scan(row2, row2), kvs2);
        // Assert swapped
        if (swapped) {
          assertEquals(rowSize, kvs2.size());
          assertEquals(row2Size, kvs1.size());
        } else {
          assertEquals(rowSize, kvs1.size());
          assertEquals(row2Size, kvs2.size());
        }
        swapped = !swapped;
        // Add and delete keyvalues
        List<List<Cell>> kvs = new ArrayList<List<Cell>>();
        kvs.add(kvs1);
        kvs.add(kvs2);
        byte[][] rows = new byte[][]{row1, row2};
        for (int i = 0; i < kvs.size(); ++i) {
          for (Cell kv : kvs.get(i)) {
            // Delete from the current row and add to the other row
            Delete d = new Delete(rows[i]);
            KeyValue kvDelete =
                new KeyValue(rows[i], CellUtil.cloneFamily(kv), CellUtil.cloneQualifier(kv),
                    kv.getTimestamp(), KeyValue.Type.Delete);
            d.addDeleteMarker(kvDelete);
            Put p = new Put(rows[1 - i]);
            KeyValue kvAdd =
                new KeyValue(rows[1 - i], CellUtil.cloneFamily(kv), CellUtil.cloneQualifier(kv),
                    now, CellUtil.cloneValue(kv));
            p.add(kvAdd);
            // Mutations go to the memstore; the same edits go to the WAL.
            mutations.add(d);
            walEdit.add(kvDelete);
            mutations.add(p);
            walEdit.add(kvAdd);
          }
        }
      }
      @Override
      public String getName() {
        return "swap";
      }
      @Override
      public RowSwapProcessorRequest getRequestData() throws IOException {
        RowSwapProcessorRequest.Builder builder = RowSwapProcessorRequest.newBuilder();
        builder.setRow1(ByteStringer.wrap(row1));
        builder.setRow2(ByteStringer.wrap(row2));
        return builder.build();
      }
      @Override
      public void initialize(RowSwapProcessorRequest msg) {
        this.row1 = msg.getRow1().toByteArray();
        this.row2 = msg.getRow2().toByteArray();
      }
    }
    /**
     * Processor that sleeps for 100 seconds inside process() so the region
     * server's row-processor timeout (configured to 1s in this test) fires;
     * exercised by testTimeout().
     */
    public static class TimeoutProcessor extends
        BaseRowProcessor<TimeoutProcessorRequest, TimeoutProcessorResponse> {
      byte[] row = new byte[0];
      /**
       * Empty constructor for Writable
       */
      public TimeoutProcessor() {
      }
      public TimeoutProcessor(byte[] row) {
        this.row = row;
      }
      public Collection<byte[]> getRowsToLock() {
        return Collections.singleton(row);
      }
      @Override
      public TimeoutProcessorResponse getResult() {
        return TimeoutProcessorResponse.getDefaultInstance();
      }
      @Override
      public void process(long now, HRegion region,
          List<Mutation> mutations, WALEdit walEdit) throws IOException {
        try {
          // Sleep for a long time so it timeout
          Thread.sleep(100 * 1000L);
        } catch (Exception e) {
          throw new IOException(e);
        }
      }
      @Override
      public boolean readOnly() {
        return true;
      }
      @Override
      public String getName() {
        return "timeout";
      }
      @Override
      public TimeoutProcessorRequest getRequestData() throws IOException {
        TimeoutProcessorRequest.Builder builder = TimeoutProcessorRequest.newBuilder();
        builder.setRow(ByteStringer.wrap(row));
        return builder.build();
      }
      @Override
      public void initialize(TimeoutProcessorRequest msg) throws IOException {
        this.row = msg.getRow().toByteArray();
      }
    }
    /**
     * Runs the scan against the region and replaces the contents of
     * {@code result} with the first batch of cells returned.
     * NOTE(review): READ_UNCOMMITTED presumably lets a processor read edits
     * on rows it has locked itself — confirm against RowProcessor semantics.
     */
    public static void doScan(
        HRegion region, Scan scan, List<Cell> result) throws IOException {
      InternalScanner scanner = null;
      try {
        scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
        scanner = region.getScanner(scan);
        result.clear();
        scanner.next(result);
      } finally {
        if (scanner != null) scanner.close();
      }
    }
}
static String stringifyKvs(Collection<Cell> kvs) {
StringBuilder out = new StringBuilder();
out.append("[");
if (kvs != null) {
for (Cell kv : kvs) {
byte[] col = CellUtil.cloneQualifier(kv);
byte[] val = CellUtil.cloneValue(kv);
if (Bytes.equals(col, COUNTER)) {
out.append(Bytes.toStringBinary(col) + ":" +
Bytes.toInt(val) + " ");
} else {
out.append(Bytes.toStringBinary(col) + ":" +
Bytes.toStringBinary(val) + " ");
}
}
}
out.append("]");
return out.toString();
}
}
| apache-2.0 |
nysan/alpine | alpine/newmail.h | 697 | /*
* $Id: newmail.h 1266 2009-07-14 18:39:12Z [email protected] $
*
* ========================================================================
* Copyright 2009 University of Washington
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* ========================================================================
*/
#ifndef ALPINE_NEWMAIL_INCLUDED
#define ALPINE_NEWMAIL_INCLUDED
/* exported prototypes */
/*
 * newmail_status_message - presumably reports new-mail status for the given
 * MAILSTREAM; the two long arguments look like message counts/indices.
 * NOTE(review): confirm the exact semantics against newmail.c.
 */
void newmail_status_message(MAILSTREAM *, long, long);
#endif /* ALPINE_NEWMAIL_INCLUDED */
| apache-2.0 |
facetothefate/contrail-controller | src/vnsw/agent/contrail/linux/pkt0_interface.cc | 6144 | /*
* Copyright (c) 2014 Juniper Networks, Inc. All rights reserved.
*/
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <assert.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/if_ether.h>
#include <linux/if_tun.h>
#include <linux/if_packet.h>
#include "base/logging.h"
#include "cmn/agent_cmn.h"
#include "sandesh/sandesh_types.h"
#include "sandesh/sandesh.h"
#include "sandesh/sandesh_trace.h"
#include "pkt/pkt_types.h"
#include "pkt/pkt_init.h"
#include "../pkt0_interface.h"
// Path of the Linux TUN/TAP clone device used to create the pkt0 interface.
#define TUN_INTF_CLONE_DEV "/dev/net/tun"
// Emits a trace message via the token-pasted Tap<obj> trace class.
// NOTE(review): the trailing '\' after "while (false)" splices the following
// source line into this macro definition; harmless while that line is a
// comment, but fragile if code is ever added there.
#define TAP_TRACE(obj, ...)                                              \
do {                                                                     \
    Tap##obj::TraceMsg(PacketTraceBuf, __FILE__, __LINE__, __VA_ARGS__); \
} while (false)                                                          \
///////////////////////////////////////////////////////////////////////////////
// Creates the pkt0 TAP interface used to exchange packets between vrouter and
// the agent: opens the TUN/TAP clone device, configures the tap, reads its
// MAC, brings the interface UP via a temporary raw socket, then hands the fd
// to the asio reactor and starts the async read loop.
// NOTE(review): strncpy() with IF_NAMESIZE may leave ifr_name without a NUL
// terminator when name_ is exactly IF_NAMESIZE bytes long — confirm interface
// name lengths are always shorter.
void Pkt0Interface::InitControlInterface() {
    pkt_handler()->agent()->set_pkt_interface_name(name_);
    // Open the TUN/TAP clone device; tap_fd_ becomes the agent's packet fd.
    if ((tap_fd_ = open(TUN_INTF_CLONE_DEV, O_RDWR)) < 0) {
        LOG(ERROR, "Packet Tap Error <" << errno << ": " <<
            strerror(errno) << "> opening tap-device");
        assert(0);
    }
    // Turn the fd into a TAP device (L2 frames, no packet-info header).
    struct ifreq ifr;
    memset(&ifr, 0, sizeof(ifr));
    ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
    strncpy(ifr.ifr_name, name_.c_str(), IF_NAMESIZE);
    if (ioctl(tap_fd_, TUNSETIFF, (void *)&ifr) < 0) {
        LOG(ERROR, "Packet Tap Error <" << errno << ": " <<
            strerror(errno) << "> creating " << name_ << "tap-device");
        assert(0);
    }
    // We dont want the fd to be inherited by child process such as
    // virsh etc... So, close tap fd on fork.
    if (fcntl(tap_fd_, F_SETFD, FD_CLOEXEC) < 0) {
        LOG(ERROR, "Packet Tap Error <" << errno << ": " <<
            strerror(errno) << "> setting fcntl on " << name_ );
        assert(0);
    }
    // Non-persistent: the tap disappears when the agent closes the fd.
    if (ioctl(tap_fd_, TUNSETPERSIST, 0) < 0) {
        LOG(ERROR, "Packet Tap Error <" << errno << ": " <<
            strerror(errno) << "> making tap interface non-persistent");
        assert(0);
    }
    // Read the tap's MAC address into mac_address_.
    memset(&ifr, 0, sizeof(ifr));
    strncpy(ifr.ifr_name, name_.c_str(), IF_NAMESIZE);
    if (ioctl(tap_fd_, SIOCGIFHWADDR, (void *)&ifr) < 0) {
        LOG(ERROR, "Packet Tap Error <" << errno << ": " << strerror(errno) <<
            "> retrieving MAC address of the tap interface");
        assert(0);
    }
    memcpy(mac_address_, ifr.ifr_hwaddr.sa_data, ETHER_ADDR_LEN);
    // Temporary raw socket, used only to bind to the tap and flip IFF_UP.
    int raw;
    if ((raw = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL))) == -1) {
        LOG(ERROR, "Packet Tap Error <" << errno << ": " <<
            strerror(errno) << "> creating socket");
        assert(0);
    }
    memset(&ifr, 0, sizeof(ifr));
    strncpy(ifr.ifr_name, name_.data(), IF_NAMESIZE);
    if (ioctl(raw, SIOCGIFINDEX, (void *)&ifr) < 0) {
        LOG(ERROR, "Packet Tap Error <" << errno << ": " <<
            strerror(errno) << "> getting ifindex of the tap interface");
        assert(0);
    }
    struct sockaddr_ll sll;
    memset(&sll, 0, sizeof(struct sockaddr_ll));
    sll.sll_family = AF_PACKET;
    sll.sll_ifindex = ifr.ifr_ifindex;
    sll.sll_protocol = htons(ETH_P_ALL);
    if (bind(raw, (struct sockaddr *)&sll,
             sizeof(struct sockaddr_ll)) < 0) {
        LOG(ERROR, "Packet Tap Error <" << errno << ": " <<
            strerror(errno) << "> binding the socket to the tap interface");
        assert(0);
    }
    // Bring the interface administratively UP (read-modify-write of flags).
    memset(&ifr, 0, sizeof(ifr));
    strncpy(ifr.ifr_name, name_.data(), IF_NAMESIZE);
    if (ioctl(raw, SIOCGIFFLAGS, (void *)&ifr) < 0) {
        LOG(ERROR, "Packet Tap Error <" << errno << ": " <<
            strerror(errno) << "> getting socket flags");
        assert(0);
    }
    ifr.ifr_flags |= IFF_UP;
    if (ioctl(raw, SIOCSIFFLAGS, (void *)&ifr) < 0) {
        LOG(ERROR, "Packet Tap Error <" << errno << ": " <<
            strerror(errno) << "> setting socket flags");
        assert(0);
    }
    close(raw);
    // Hand the tap fd to asio and start the receive loop.
    boost::system::error_code ec;
    input_.assign(tap_fd_, ec);
    assert(ec == 0);
    VrouterControlInterface::InitControlInterface();
    AsyncRead();
}
// Variant used when the exception-packet interface already exists: instead of
// creating a tap via /dev/net/tun, it opens an AF_PACKET raw socket, binds it
// to the existing interface, brings the interface UP, and uses the raw socket
// itself as the packet fd.
// NOTE(review): 'raw_' here is a function-local int despite the member-style
// trailing underscore — confirm it is not meant to shadow a class member.
void Pkt0RawInterface::InitControlInterface() {
    pkt_handler()->agent()->set_pkt_interface_name(name_);
    int raw_;
    struct ifreq ifr;
    memset(&ifr, 0, sizeof(ifr));
    if ((raw_ = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL))) == -1) {
        LOG(ERROR, "Packet Tap Error <" << errno << ": " <<
            strerror(errno) << "> creating socket");
        assert(0);
    }
    memset(&ifr, 0, sizeof(ifr));
    strncpy(ifr.ifr_name,
            pkt_handler()->agent()->pkt_interface_name().c_str(), IF_NAMESIZE);
    if (ioctl(raw_, SIOCGIFINDEX, (void *)&ifr) < 0) {
        LOG(ERROR, "Packet Tap Error <" << errno << ": " <<
            strerror(errno) << "> getting ifindex of the " <<
            "expception packet interface");
        assert(0);
    }
    // Bind the raw socket to the interface so reads see only its traffic.
    struct sockaddr_ll sll;
    memset(&sll, 0, sizeof(struct sockaddr_ll));
    sll.sll_family = AF_PACKET;
    sll.sll_ifindex = ifr.ifr_ifindex;
    sll.sll_protocol = htons(ETH_P_ALL);
    if (bind(raw_, (struct sockaddr *)&sll,
             sizeof(struct sockaddr_ll)) < 0) {
        LOG(ERROR, "Packet Tap Error <" << errno << ": " <<
            strerror(errno) << "> binding the socket to the tap interface");
        assert(0);
    }
    // Bring the interface administratively UP (read-modify-write of flags).
    memset(&ifr, 0, sizeof(ifr));
    strncpy(ifr.ifr_name, name_.data(), IF_NAMESIZE);
    if (ioctl(raw_, SIOCGIFFLAGS, (void *)&ifr) < 0) {
        LOG(ERROR, "Packet Tap Error <" << errno << ": " <<
            strerror(errno) << "> getting socket flags");
        assert(0);
    }
    ifr.ifr_flags |= IFF_UP;
    if (ioctl(raw_, SIOCSIFFLAGS, (void *)&ifr) < 0) {
        LOG(ERROR, "Packet Tap Error <" << errno << ": " <<
            strerror(errno) << "> setting socket flags");
        assert(0);
    }
    // Unlike the tap variant, the raw socket itself is the packet fd.
    tap_fd_ = raw_;
    boost::system::error_code ec;
    input_.assign(tap_fd_, ec);
    assert(ec == 0);
    VrouterControlInterface::InitControlInterface();
    AsyncRead();
}
| apache-2.0 |
xuegongzi/rabbitframework | rabbitframework-security-pom/rabbitframework-security/src/main/java/org/apache/shiro/authz/ModularRealmAuthorizer.java | 19390 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.shiro.authz;
import org.apache.shiro.authz.permission.PermissionResolver;
import org.apache.shiro.authz.permission.PermissionResolverAware;
import org.apache.shiro.authz.permission.RolePermissionResolver;
import org.apache.shiro.authz.permission.RolePermissionResolverAware;
import org.apache.shiro.realm.Realm;
import org.apache.shiro.subject.PrincipalCollection;
import java.util.Collection;
import java.util.List;
/**
* A <tt>ModularRealmAuthorizer</tt> is an <tt>Authorizer</tt> implementation that consults one or more configured
* {@link Realm Realm}s during an authorization operation.
*
* @since 0.2
*/
public class ModularRealmAuthorizer implements Authorizer, PermissionResolverAware, RolePermissionResolverAware {
    /**
     * The realms to consult during any authorization check. Remains {@code null}
     * until {@link #setRealms setRealms} is invoked.
     */
    protected Collection<Realm> realms;
    /**
     * A PermissionResolver to be used by <em>all</em> configured realms. Leave
     * <code>null</code> if you wish to configure different resolvers for different realms.
     */
    protected PermissionResolver permissionResolver;
    /**
     * A RolePermissionResolver to be used by <em>all</em> configured realms. Leave
     * <code>null</code> if you wish to configure different resolvers for different realms.
     */
    protected RolePermissionResolver rolePermissionResolver;
/**
* Default no-argument constructor, does nothing.
*/
public ModularRealmAuthorizer() {
}
/**
* Constructor that accepts the <code>Realm</code>s to consult during an authorization check. Immediately calls
* {@link #setRealms setRealms(realms)}.
*
* @param realms the realms to consult during an authorization check.
*/
public ModularRealmAuthorizer(Collection<Realm> realms) {
setRealms(realms);
}
/**
* Returns the realms wrapped by this <code>Authorizer</code> which are consulted during an authorization check.
*
* @return the realms wrapped by this <code>Authorizer</code> which are consulted during an authorization check.
*/
public Collection<Realm> getRealms() {
return this.realms;
}
/**
* Sets the realms wrapped by this <code>Authorizer</code> which are consulted during an authorization check.
*
* @param realms the realms wrapped by this <code>Authorizer</code> which are consulted during an authorization check.
*/
public void setRealms(Collection<Realm> realms) {
this.realms = realms;
applyPermissionResolverToRealms();
applyRolePermissionResolverToRealms();
}
/**
* Returns the PermissionResolver to be used on <em>all</em> configured realms, or <code>null</code (the default)
* if all realm instances will each configure their own permission resolver.
*
* @return the PermissionResolver to be used on <em>all</em> configured realms, or <code>null</code (the default)
* if realm instances will each configure their own permission resolver.
* @since 1.0
*/
public PermissionResolver getPermissionResolver() {
return this.permissionResolver;
}
/**
* Sets the specified {@link PermissionResolver PermissionResolver} on <em>all</em> of the wrapped realms that
* implement the {@link org.apache.shiro.authz.permission.PermissionResolverAware PermissionResolverAware} interface.
* <p/>
* Only call this method if you want the permission resolver to be passed to all realms that implement the
* <code>PermissionResolver</code> interface. If you do not want this to occur, the realms must
* configure themselves individually (or be configured individually).
*
* @param permissionResolver the permissionResolver to set on all of the wrapped realms that implement the
* {@link org.apache.shiro.authz.permission.PermissionResolverAware PermissionResolverAware} interface.
*/
public void setPermissionResolver(PermissionResolver permissionResolver) {
this.permissionResolver = permissionResolver;
applyPermissionResolverToRealms();
}
/**
* Sets the internal {@link #getPermissionResolver} on any internal configured
* {@link #getRealms Realms} that implement the {@link org.apache.shiro.authz.permission.PermissionResolverAware PermissionResolverAware} interface.
* <p/>
* This method is called after setting a permissionResolver on this ModularRealmAuthorizer via the
* {@link #setPermissionResolver(org.apache.shiro.authz.permission.PermissionResolver) setPermissionResolver} method.
* <p/>
* It is also called after setting one or more realms via the {@link #setRealms setRealms} method to allow these
* newly available realms to be given the <code>PermissionResolver</code> already in use.
*
* @since 1.0
*/
protected void applyPermissionResolverToRealms() {
PermissionResolver resolver = getPermissionResolver();
Collection<Realm> realms = getRealms();
if (resolver != null && realms != null && !realms.isEmpty()) {
for (Realm realm : realms) {
if (realm instanceof PermissionResolverAware) {
((PermissionResolverAware) realm).setPermissionResolver(resolver);
}
}
}
}
/**
* Returns the RolePermissionResolver to be used on <em>all</em> configured realms, or <code>null</code (the default)
* if all realm instances will each configure their own permission resolver.
*
* @return the RolePermissionResolver to be used on <em>all</em> configured realms, or <code>null</code (the default)
* if realm instances will each configure their own role permission resolver.
* @since 1.0
*/
public RolePermissionResolver getRolePermissionResolver() {
return this.rolePermissionResolver;
}
/**
* Sets the specified {@link RolePermissionResolver RolePermissionResolver} on <em>all</em> of the wrapped realms that
* implement the {@link org.apache.shiro.authz.permission.RolePermissionResolverAware PermissionResolverAware} interface.
* <p/>
* Only call this method if you want the permission resolver to be passed to all realms that implement the
* <code>RolePermissionResolver</code> interface. If you do not want this to occur, the realms must
* configure themselves individually (or be configured individually).
*
* @param rolePermissionResolver the rolePermissionResolver to set on all of the wrapped realms that implement the
* {@link org.apache.shiro.authz.permission.RolePermissionResolverAware RolePermissionResolverAware} interface.
*/
public void setRolePermissionResolver(RolePermissionResolver rolePermissionResolver) {
this.rolePermissionResolver = rolePermissionResolver;
applyRolePermissionResolverToRealms();
}
/**
* Sets the internal {@link #getRolePermissionResolver} on any internal configured
* {@link #getRealms Realms} that implement the {@link org.apache.shiro.authz.permission.RolePermissionResolverAware RolePermissionResolverAware} interface.
* <p/>
* This method is called after setting a rolePermissionResolver on this ModularRealmAuthorizer via the
* {@link #setRolePermissionResolver(org.apache.shiro.authz.permission.RolePermissionResolver) setRolePermissionResolver} method.
* <p/>
* It is also called after setting one or more realms via the {@link #setRealms setRealms} method to allow these
* newly available realms to be given the <code>RolePermissionResolver</code> already in use.
*
* @since 1.0
*/
protected void applyRolePermissionResolverToRealms() {
RolePermissionResolver resolver = getRolePermissionResolver();
Collection<Realm> realms = getRealms();
if (resolver != null && realms != null && !realms.isEmpty()) {
for (Realm realm : realms) {
if (realm instanceof RolePermissionResolverAware) {
((RolePermissionResolverAware) realm).setRolePermissionResolver(resolver);
}
}
}
}
/**
* Used by the {@link Authorizer Authorizer} implementation methods to ensure that the {@link #setRealms realms}
* has been set. The default implementation ensures the property is not null and not empty.
*
* @throws IllegalStateException if the <tt>realms</tt> property is configured incorrectly.
*/
protected void assertRealmsConfigured() throws IllegalStateException {
Collection<Realm> realms = getRealms();
if (realms == null || realms.isEmpty()) {
String msg = "Configuration error: No realms have been configured! One or more realms must be " +
"present to execute an authorization operation.";
throw new IllegalStateException(msg);
}
}
/**
* Returns <code>true</code> if any of the configured realms'
* {@link #isPermitted(org.apache.shiro.subject.PrincipalCollection, String)} returns <code>true</code>,
* <code>false</code> otherwise.
*/
public boolean isPermitted(PrincipalCollection principals, String permission) {
assertRealmsConfigured();
for (Realm realm : getRealms()) {
if (!(realm instanceof Authorizer)) continue;
if (((Authorizer) realm).isPermitted(principals, permission)) {
return true;
}
}
return false;
}
/**
* Returns <code>true</code> if any of the configured realms'
* {@link #isPermitted(org.apache.shiro.subject.PrincipalCollection, Permission)} call returns <code>true</code>,
* <code>false</code> otherwise.
*/
public boolean isPermitted(PrincipalCollection principals, Permission permission) {
assertRealmsConfigured();
for (Realm realm : getRealms()) {
if (!(realm instanceof Authorizer)) continue;
if (((Authorizer) realm).isPermitted(principals, permission)) {
return true;
}
}
return false;
}
    /**
     * Returns an array whose element at index {@code i} is <code>true</code> if any of
     * the configured realms' {@link #isPermitted(org.apache.shiro.subject.PrincipalCollection, String)}
     * call returns <code>true</code> for {@code permissions[i]}, <code>false</code> otherwise.
     * A null or empty argument yields an empty array.
     */
    public boolean[] isPermitted(PrincipalCollection principals, String... permissions) {
        assertRealmsConfigured();
        if (permissions != null && permissions.length > 0) {
            boolean[] isPermitted = new boolean[permissions.length];
            for (int i = 0; i < permissions.length; i++) {
                isPermitted[i] = isPermitted(principals, permissions[i]);
            }
            return isPermitted;
        }
        return new boolean[0];
    }
    /**
     * Returns an array whose element at index {@code i} is <code>true</code> if any of
     * the configured realms grants the i-th {@link Permission} in the list (per
     * {@link #isPermitted(org.apache.shiro.subject.PrincipalCollection, Permission)}),
     * <code>false</code> otherwise. A null or empty argument yields an empty array.
     */
    public boolean[] isPermitted(PrincipalCollection principals, List<Permission> permissions) {
        assertRealmsConfigured();
        if (permissions != null && !permissions.isEmpty()) {
            boolean[] isPermitted = new boolean[permissions.size()];
            int i = 0;
            for (Permission p : permissions) {
                isPermitted[i++] = isPermitted(principals, p);
            }
            return isPermitted;
        }
        return new boolean[0];
    }
/**
* Returns <code>true</code> if any of the configured realms'
* {@link #isPermitted(org.apache.shiro.subject.PrincipalCollection, String)} call returns <code>true</code>
* for <em>all</em> of the specified string permissions, <code>false</code> otherwise.
*/
public boolean isPermittedAll(PrincipalCollection principals, String... permissions) {
assertRealmsConfigured();
if (permissions != null && permissions.length > 0) {
for (String perm : permissions) {
if (!isPermitted(principals, perm)) {
return false;
}
}
}
return true;
}
/**
* Returns <code>true</code> if any of the configured realms'
* {@link #isPermitted(org.apache.shiro.subject.PrincipalCollection, Permission)} call returns <code>true</code>
* for <em>all</em> of the specified Permissions, <code>false</code> otherwise.
*/
public boolean isPermittedAll(PrincipalCollection principals, Collection<Permission> permissions) {
assertRealmsConfigured();
if (permissions != null && !permissions.isEmpty()) {
for (Permission permission : permissions) {
if (!isPermitted(principals, permission)) {
return false;
}
}
}
return true;
}
/**
* If !{@link #isPermitted(org.apache.shiro.subject.PrincipalCollection, String) isPermitted(permission)}, throws
* an <code>UnauthorizedException</code> otherwise returns quietly.
*/
public void checkPermission(PrincipalCollection principals, String permission) throws AuthorizationException {
assertRealmsConfigured();
if (!isPermitted(principals, permission)) {
throw new UnauthorizedException("Subject does not have permission [" + permission + "]");
}
}
/**
* If !{@link #isPermitted(org.apache.shiro.subject.PrincipalCollection, Permission) isPermitted(permission)}, throws
* an <code>UnauthorizedException</code> otherwise returns quietly.
*/
public void checkPermission(PrincipalCollection principals, Permission permission) throws AuthorizationException {
assertRealmsConfigured();
if (!isPermitted(principals, permission)) {
throw new UnauthorizedException("Subject does not have permission [" + permission + "]");
}
}
/**
* If !{@link #isPermitted(org.apache.shiro.subject.PrincipalCollection, String...) isPermitted(permission)},
* throws an <code>UnauthorizedException</code> otherwise returns quietly.
*/
public void checkPermissions(PrincipalCollection principals, String... permissions) throws AuthorizationException {
assertRealmsConfigured();
if (permissions != null && permissions.length > 0) {
for (String perm : permissions) {
checkPermission(principals, perm);
}
}
}
/**
* If !{@link #isPermitted(org.apache.shiro.subject.PrincipalCollection, Permission) isPermitted(permission)} for
* <em>all</em> the given Permissions, throws
* an <code>UnauthorizedException</code> otherwise returns quietly.
*/
public void checkPermissions(PrincipalCollection principals, Collection<Permission> permissions) throws AuthorizationException {
assertRealmsConfigured();
if (permissions != null) {
for (Permission permission : permissions) {
checkPermission(principals, permission);
}
}
}
/**
* Returns <code>true</code> if any of the configured realms'
* {@link #hasRole(org.apache.shiro.subject.PrincipalCollection, String)} call returns <code>true</code>,
* <code>false</code> otherwise.
*/
public boolean hasRole(PrincipalCollection principals, String roleIdentifier) {
assertRealmsConfigured();
for (Realm realm : getRealms()) {
if (!(realm instanceof Authorizer)) continue;
if (((Authorizer) realm).hasRole(principals, roleIdentifier)) {
return true;
}
}
return false;
}
/**
* Calls {@link #hasRole(org.apache.shiro.subject.PrincipalCollection, String)} for each role name in the specified
* collection and places the return value from each call at the respective location in the returned array.
*/
public boolean[] hasRoles(PrincipalCollection principals, List<String> roleIdentifiers) {
assertRealmsConfigured();
if (roleIdentifiers != null && !roleIdentifiers.isEmpty()) {
boolean[] hasRoles = new boolean[roleIdentifiers.size()];
int i = 0;
for (String roleId : roleIdentifiers) {
hasRoles[i++] = hasRole(principals, roleId);
}
return hasRoles;
}
return new boolean[0];
}
/**
* Returns <code>true</code> iff any of the configured realms'
* {@link #hasRole(org.apache.shiro.subject.PrincipalCollection, String)} call returns <code>true</code> for
* <em>all</em> roles specified, <code>false</code> otherwise.
*/
public boolean hasAllRoles(PrincipalCollection principals, Collection<String> roleIdentifiers) {
assertRealmsConfigured();
for (String roleIdentifier : roleIdentifiers) {
if (!hasRole(principals, roleIdentifier)) {
return false;
}
}
return true;
}
/**
* If !{@link #hasRole(org.apache.shiro.subject.PrincipalCollection, String) hasRole(role)}, throws
* an <code>UnauthorizedException</code> otherwise returns quietly.
*/
public void checkRole(PrincipalCollection principals, String role) throws AuthorizationException {
assertRealmsConfigured();
if (!hasRole(principals, role)) {
throw new UnauthorizedException("Subject does not have role [" + role + "]");
}
}
/**
* Calls {@link #checkRoles(PrincipalCollection principals, String... roles) checkRoles(PrincipalCollection principals, String... roles) }.
*/
public void checkRoles(PrincipalCollection principals, Collection<String> roles) throws AuthorizationException {
//SHIRO-234 - roles.toArray() -> roles.toArray(new String[roles.size()])
if (roles != null && !roles.isEmpty()) checkRoles(principals, roles.toArray(new String[roles.size()]));
}
/**
* Calls {@link #checkRole(org.apache.shiro.subject.PrincipalCollection, String) checkRole} for each role specified.
*/
public void checkRoles(PrincipalCollection principals, String... roles) throws AuthorizationException {
assertRealmsConfigured();
if (roles != null) {
for (String role : roles) {
checkRole(principals, role);
}
}
}
}
| apache-2.0 |
ntt-sic/nova | nova/tests/api/openstack/compute/contrib/test_volumes.py | 32169 | # Copyright 2013 Josh Durgin
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from lxml import etree
from oslo.config import cfg
import webob
from webob import exc
from nova.api.openstack.compute.contrib import assisted_volume_snapshots as \
assisted_snaps
from nova.api.openstack.compute.contrib import volumes
from nova.api.openstack import extensions
from nova.compute import api as compute_api
from nova.compute import flavors
from nova import context
from nova import exception
from nova.openstack.common import jsonutils
from nova.openstack.common import timeutils
from nova import test
from nova.tests.api.openstack import fakes
from nova.volume import cinder
CONF = cfg.CONF
CONF.import_opt('password_length', 'nova.utils')
# Canned identifiers reused across the test cases below: FAKE_UUID is the
# fake server instance id, FAKE_UUID_A..D name volumes/attachments (C is
# used as a "not found" id in several tests), and IMAGE_UUID is the image
# the fake server boots from.
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
FAKE_UUID_A = '00000000-aaaa-aaaa-aaaa-000000000000'
FAKE_UUID_B = 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'
FAKE_UUID_C = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
FAKE_UUID_D = 'dddddddd-dddd-dddd-dddd-dddddddddddd'
IMAGE_UUID = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
def fake_get_instance(self, context, instance_id, want_objects=False):
    """Stub for compute_api.API.get that echoes the requested id back."""
    instance = {'uuid': instance_id}
    return instance
def fake_get_volume(self, context, id):
    """Stub for cinder.API.get returning a fixed fake volume record."""
    volume = {'id': 'woot'}
    return volume
def fake_attach_volume(self, context, instance, volume_id, device):
    # Stub for compute_api.API.attach_volume; attaching is a no-op in tests.
    pass
def fake_detach_volume(self, context, instance, volume):
    # Stub for compute_api.API.detach_volume; detaching is a no-op in tests.
    pass
def fake_swap_volume(self, context, instance,
                     old_volume_id, new_volume_id):
    # Stub for compute_api.API.swap_volume; swapping is a no-op in tests.
    pass
def fake_create_snapshot(self, context, volume, name, description):
    """Stub for cinder snapshot creation; returns a canned snapshot dict
    regardless of the arguments passed in."""
    snapshot = {
        'id': 123,
        'volume_id': 'fakeVolId',
        'status': 'available',
        'volume_size': 123,
        'created_at': '2013-01-01 00:00:01',
        'display_name': 'myVolumeName',
        'display_description': 'myVolumeDescription',
    }
    return snapshot
def fake_delete_snapshot(self, context, snapshot_id):
    # Stub for cinder.API.delete_snapshot; deletion is a no-op in tests.
    pass
def fake_compute_volume_snapshot_delete(self, context, volume_id, snapshot_id,
                                        delete_info):
    # Stub for compute_api.API.volume_snapshot_delete; a no-op in tests.
    pass
def fake_compute_volume_snapshot_create(self, context, volume_id,
                                        create_info):
    # Stub for compute_api.API.volume_snapshot_create; a no-op in tests.
    pass
def fake_get_instance_bdms(self, context, instance):
    """Stub for compute_api.API.get_instance_bdms returning two fake block
    device mappings attached to the given instance."""
    def _bdm(bdm_id, device_name, volume_id):
        # All fields except id/device/volume are identical between the two
        # fake mappings.
        return {'id': bdm_id,
                'instance_uuid': instance['uuid'],
                'device_name': device_name,
                'delete_on_termination': 'False',
                'virtual_name': 'MyNamesVirtual',
                'snapshot_id': None,
                'volume_id': volume_id,
                'volume_size': 1}
    return [_bdm(1, '/dev/fake0', FAKE_UUID_A),
            _bdm(2, '/dev/fake1', FAKE_UUID_B)]
class BootFromVolumeTest(test.TestCase):
    """Tests for the os-volumes_boot resource: booting a server whose root
    disk is a volume, via both the legacy ``block_device_mapping`` format
    and the newer ``block_device_mapping_v2`` format."""
    def setUp(self):
        super(BootFromVolumeTest, self).setUp()
        self.stubs.Set(compute_api.API, 'create',
                       self._get_fake_compute_api_create())
        fakes.stub_out_nw_api(self.stubs)
        # Captured by the fake create() so tests can inspect exactly what
        # the API layer handed down to compute.
        self._block_device_mapping_seen = None
        self._legacy_bdm_seen = True
        self.flags(
            osapi_compute_extension=[
                'nova.api.openstack.compute.contrib.select_extensions'],
            osapi_compute_ext_list=['Volumes', 'Block_device_mapping_v2_boot'])
    def _get_fake_compute_api_create(self):
        # Returns a closure standing in for compute_api.API.create; it
        # records the bdm-related kwargs and returns one canned instance.
        def _fake_compute_api_create(cls, context, instance_type,
                                     image_href, **kwargs):
            self._block_device_mapping_seen = kwargs.get(
                'block_device_mapping')
            self._legacy_bdm_seen = kwargs.get('legacy_bdm')
            inst_type = flavors.get_flavor_by_flavor_id(2)
            resv_id = None
            return ([{'id': 1,
                      'display_name': 'test_server',
                      'uuid': FAKE_UUID,
                      'instance_type': dict(inst_type),
                      'access_ip_v4': '1.2.3.4',
                      'access_ip_v6': 'fead::1234',
                      'image_ref': IMAGE_UUID,
                      'user_id': 'fake',
                      'project_id': 'fake',
                      'created_at': datetime.datetime(2010, 10, 10, 12, 0, 0),
                      'updated_at': datetime.datetime(2010, 11, 11, 11, 0, 0),
                      'progress': 0,
                      'fixed_ips': []
                      }], resv_id)
        return _fake_compute_api_create
    def test_create_root_volume(self):
        # Legacy bdm format: volume_id + device_name, flagged virtual=root.
        body = dict(server=dict(
                name='test_server', imageRef=IMAGE_UUID,
                flavorRef=2, min_count=1, max_count=1,
                block_device_mapping=[dict(
                        volume_id=1,
                        device_name='/dev/vda',
                        virtual='root',
                        delete_on_termination=False,
                        )]
                ))
        req = webob.Request.blank('/v2/fake/os-volumes_boot')
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers['content-type'] = 'application/json'
        res = req.get_response(fakes.wsgi_app(
            init_only=('os-volumes_boot', 'servers')))
        self.assertEqual(res.status_int, 202)
        server = jsonutils.loads(res.body)['server']
        self.assertEqual(FAKE_UUID, server['id'])
        self.assertEqual(CONF.password_length, len(server['adminPass']))
        self.assertEqual(len(self._block_device_mapping_seen), 1)
        self.assertTrue(self._legacy_bdm_seen)
        self.assertEqual(self._block_device_mapping_seen[0]['volume_id'], 1)
        self.assertEqual(self._block_device_mapping_seen[0]['device_name'],
                         '/dev/vda')
    def test_create_root_volume_bdm_v2(self):
        # v2 bdm format: source_type/uuid/boot_index; legacy_bdm must be
        # False on the compute call.
        body = dict(server=dict(
                name='test_server', imageRef=IMAGE_UUID,
                flavorRef=2, min_count=1, max_count=1,
                block_device_mapping_v2=[dict(
                        source_type='volume',
                        uuid=1,
                        device_name='/dev/vda',
                        boot_index=0,
                        delete_on_termination=False,
                        )]
                ))
        req = webob.Request.blank('/v2/fake/os-volumes_boot')
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers['content-type'] = 'application/json'
        res = req.get_response(fakes.wsgi_app(
            init_only=('os-volumes_boot', 'servers')))
        self.assertEqual(res.status_int, 202)
        server = jsonutils.loads(res.body)['server']
        self.assertEqual(FAKE_UUID, server['id'])
        self.assertEqual(CONF.password_length, len(server['adminPass']))
        self.assertEqual(len(self._block_device_mapping_seen), 1)
        self.assertFalse(self._legacy_bdm_seen)
        self.assertEqual(self._block_device_mapping_seen[0]['volume_id'], 1)
        self.assertEqual(self._block_device_mapping_seen[0]['boot_index'],
                         0)
        self.assertEqual(self._block_device_mapping_seen[0]['device_name'],
                         '/dev/vda')
class VolumeApiTest(test.TestCase):
    """CRUD tests for the os-volumes resource, with the cinder API stubbed
    out by the canned fakes from nova.tests.api.openstack.fakes."""
    def setUp(self):
        super(VolumeApiTest, self).setUp()
        fakes.stub_out_networking(self.stubs)
        fakes.stub_out_rate_limiting(self.stubs)
        self.stubs.Set(cinder.API, "delete", fakes.stub_volume_delete)
        self.stubs.Set(cinder.API, "get", fakes.stub_volume_get)
        self.stubs.Set(cinder.API, "get_all", fakes.stub_volume_get_all)
        self.flags(
            osapi_compute_extension=[
                'nova.api.openstack.compute.contrib.select_extensions'],
            osapi_compute_ext_list=['Volumes'])
        self.context = context.get_admin_context()
        self.app = fakes.wsgi_app(init_only=('os-volumes',))
    def test_volume_create(self):
        self.stubs.Set(cinder.API, "create", fakes.stub_volume_create)
        vol = {"size": 100,
               "display_name": "Volume Test Name",
               "display_description": "Volume Test Desc",
               "availability_zone": "zone1:host1"}
        body = {"volume": vol}
        req = webob.Request.blank('/v2/fake/os-volumes')
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers['content-type'] = 'application/json'
        resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 200)
        # The response echoes the request fields back in camelCase.
        resp_dict = jsonutils.loads(resp.body)
        self.assertTrue('volume' in resp_dict)
        self.assertEqual(resp_dict['volume']['size'],
                         vol['size'])
        self.assertEqual(resp_dict['volume']['displayName'],
                         vol['display_name'])
        self.assertEqual(resp_dict['volume']['displayDescription'],
                         vol['display_description'])
        self.assertEqual(resp_dict['volume']['availabilityZone'],
                         vol['availability_zone'])
    def test_volume_create_bad(self):
        # cinder rejecting the input must surface as HTTP 400.
        def fake_volume_create(self, context, size, name, description,
                               snapshot, **param):
            raise exception.InvalidInput(reason="bad request data")
        self.stubs.Set(cinder.API, "create", fake_volume_create)
        vol = {"size": '#$?',
               "display_name": "Volume Test Name",
               "display_description": "Volume Test Desc",
               "availability_zone": "zone1:host1"}
        body = {"volume": vol}
        req = fakes.HTTPRequest.blank('/v2/fake/os-volumes')
        self.assertRaises(webob.exc.HTTPBadRequest,
                          volumes.VolumeController().create, req, body)
    def test_volume_index(self):
        req = webob.Request.blank('/v2/fake/os-volumes')
        resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 200)
    def test_volume_detail(self):
        req = webob.Request.blank('/v2/fake/os-volumes/detail')
        resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 200)
    def test_volume_show(self):
        req = webob.Request.blank('/v2/fake/os-volumes/123')
        resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 200)
    def test_volume_show_no_volume(self):
        self.stubs.Set(cinder.API, "get", fakes.stub_volume_notfound)
        req = webob.Request.blank('/v2/fake/os-volumes/456')
        resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 404)
    def test_volume_delete(self):
        req = webob.Request.blank('/v2/fake/os-volumes/123')
        req.method = 'DELETE'
        resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 202)
    def test_volume_delete_no_volume(self):
        self.stubs.Set(cinder.API, "delete", fakes.stub_volume_notfound)
        req = webob.Request.blank('/v2/fake/os-volumes/456')
        req.method = 'DELETE'
        resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 404)
class VolumeAttachTests(test.TestCase):
    """Tests for the os-volume_attachments sub-resource: show/attach/
    detach/swap of volume attachments on a server, driven directly against
    the VolumeAttachmentController with the compute and cinder APIs
    stubbed by the module-level fakes."""
    def setUp(self):
        super(VolumeAttachTests, self).setUp()
        self.stubs.Set(compute_api.API,
                       'get_instance_bdms',
                       fake_get_instance_bdms)
        self.stubs.Set(compute_api.API, 'get', fake_get_instance)
        self.stubs.Set(cinder.API, 'get', fake_get_volume)
        self.context = context.get_admin_context()
        # Matches the first bdm returned by fake_get_instance_bdms.
        self.expected_show = {'volumeAttachment':
            {'device': '/dev/fake0',
             'serverId': FAKE_UUID,
             'id': FAKE_UUID_A,
             'volumeId': FAKE_UUID_A
            }}
        self.ext_mgr = extensions.ExtensionManager()
        self.ext_mgr.extensions = {}
        self.attachments = volumes.VolumeAttachmentController(self.ext_mgr)
    def test_show(self):
        req = webob.Request.blank('/v2/servers/id/os-volume_attachments/uuid')
        req.method = 'POST'
        req.body = jsonutils.dumps({})
        req.headers['content-type'] = 'application/json'
        req.environ['nova.context'] = self.context
        result = self.attachments.show(req, FAKE_UUID, FAKE_UUID_A)
        self.assertEqual(self.expected_show, result)
    def test_detach(self):
        self.stubs.Set(compute_api.API,
                       'detach_volume',
                       fake_detach_volume)
        req = webob.Request.blank('/v2/servers/id/os-volume_attachments/uuid')
        req.method = 'DELETE'
        req.headers['content-type'] = 'application/json'
        req.environ['nova.context'] = self.context
        result = self.attachments.delete(req, FAKE_UUID, FAKE_UUID_A)
        self.assertEqual('202 Accepted', result.status)
    def test_detach_vol_not_found(self):
        # FAKE_UUID_C is not among the instance's bdms, so delete -> 404.
        self.stubs.Set(compute_api.API,
                       'detach_volume',
                       fake_detach_volume)
        req = webob.Request.blank('/v2/servers/id/os-volume_attachments/uuid')
        req.method = 'DELETE'
        req.headers['content-type'] = 'application/json'
        req.environ['nova.context'] = self.context
        self.assertRaises(exc.HTTPNotFound,
                          self.attachments.delete,
                          req,
                          FAKE_UUID,
                          FAKE_UUID_C)
    def test_attach_volume(self):
        self.stubs.Set(compute_api.API,
                       'attach_volume',
                       fake_attach_volume)
        body = {'volumeAttachment': {'volumeId': FAKE_UUID_A,
                                     'device': '/dev/fake'}}
        req = webob.Request.blank('/v2/servers/id/os-volume_attachments')
        req.method = 'POST'
        req.body = jsonutils.dumps({})
        req.headers['content-type'] = 'application/json'
        req.environ['nova.context'] = self.context
        result = self.attachments.create(req, FAKE_UUID, body)
        self.assertEqual(result['volumeAttachment']['id'],
                         '00000000-aaaa-aaaa-aaaa-000000000000')
    def test_attach_volume_bad_id(self):
        # A non-UUID volumeId must be rejected with HTTP 400.
        self.stubs.Set(compute_api.API,
                       'attach_volume',
                       fake_attach_volume)
        body = {
            'volumeAttachment': {
                'device': None,
                'volumeId': 'TESTVOLUME',
            }
        }
        req = webob.Request.blank('/v2/servers/id/os-volume_attachments')
        req.method = 'POST'
        req.body = jsonutils.dumps({})
        req.headers['content-type'] = 'application/json'
        req.environ['nova.context'] = self.context
        self.assertRaises(webob.exc.HTTPBadRequest, self.attachments.create,
                          req, FAKE_UUID, body)
    def _test_swap(self, uuid=FAKE_UUID_A):
        # Helper issuing a PUT (update/swap) for the given attachment uuid.
        self.stubs.Set(compute_api.API,
                       'swap_volume',
                       fake_swap_volume)
        body = {'volumeAttachment': {'volumeId': FAKE_UUID_B,
                                     'device': '/dev/fake'}}
        req = webob.Request.blank('/v2/servers/id/os-volume_attachments/uuid')
        req.method = 'PUT'
        req.body = jsonutils.dumps({})
        req.headers['content-type'] = 'application/json'
        req.environ['nova.context'] = self.context
        return self.attachments.update(req, FAKE_UUID, uuid, body)
    def test_swap_volume_no_extension(self):
        # Without the os-volume-attachment-update extension, swap -> 400.
        self.assertRaises(webob.exc.HTTPBadRequest, self._test_swap)
    def test_swap_volume(self):
        self.ext_mgr.extensions['os-volume-attachment-update'] = True
        result = self._test_swap()
        self.assertEqual('202 Accepted', result.status)
    def test_swap_volume_no_attachment(self):
        self.ext_mgr.extensions['os-volume-attachment-update'] = True
        self.assertRaises(exc.HTTPNotFound, self._test_swap, FAKE_UUID_C)
class VolumeSerializerTest(test.TestCase):
    """Tests for the XML serializers of volumes and volume attachments:
    serializes canned dicts and verifies the resulting etree structure."""
    def _verify_volume_attachment(self, attach, tree):
        # Every attachment field must appear as an XML attribute.
        for attr in ('id', 'volumeId', 'serverId', 'device'):
            self.assertEqual(str(attach[attr]), tree.get(attr))
    def _verify_volume(self, vol, tree):
        self.assertEqual(tree.tag, 'volume')
        for attr in ('id', 'status', 'size', 'availabilityZone', 'createdAt',
                     'displayName', 'displayDescription', 'volumeType',
                     'snapshotId'):
            self.assertEqual(str(vol[attr]), tree.get(attr))
        for child in tree:
            self.assertTrue(child.tag in ('attachments', 'metadata'))
            if child.tag == 'attachments':
                self.assertEqual(1, len(child))
                self.assertEqual('attachment', child[0].tag)
                self._verify_volume_attachment(vol['attachments'][0], child[0])
            elif child.tag == 'metadata':
                # Check each serialized <meta> element exactly once.
                not_seen = set(vol['metadata'].keys())
                for gr_child in child:
                    self.assertTrue(gr_child.get("key") in not_seen)
                    self.assertEqual(str(vol['metadata'][gr_child.get("key")]),
                                     gr_child.text)
                    not_seen.remove(gr_child.get("key"))
                self.assertEqual(0, len(not_seen))
    def test_attach_show_create_serializer(self):
        serializer = volumes.VolumeAttachmentTemplate()
        raw_attach = dict(
            id='vol_id',
            volumeId='vol_id',
            serverId='instance_uuid',
            device='/foo')
        text = serializer.serialize(dict(volumeAttachment=raw_attach))
        tree = etree.fromstring(text)
        self.assertEqual('volumeAttachment', tree.tag)
        self._verify_volume_attachment(raw_attach, tree)
    def test_attach_index_serializer(self):
        serializer = volumes.VolumeAttachmentsTemplate()
        raw_attaches = [dict(
                id='vol_id1',
                volumeId='vol_id1',
                serverId='instance1_uuid',
                device='/foo1'),
            dict(
                id='vol_id2',
                volumeId='vol_id2',
                serverId='instance2_uuid',
                device='/foo2')]
        text = serializer.serialize(dict(volumeAttachments=raw_attaches))
        tree = etree.fromstring(text)
        self.assertEqual('volumeAttachments', tree.tag)
        self.assertEqual(len(raw_attaches), len(tree))
        for idx, child in enumerate(tree):
            self.assertEqual('volumeAttachment', child.tag)
            self._verify_volume_attachment(raw_attaches[idx], child)
    def test_volume_show_create_serializer(self):
        serializer = volumes.VolumeTemplate()
        raw_volume = dict(
            id='vol_id',
            status='vol_status',
            size=1024,
            availabilityZone='vol_availability',
            createdAt=timeutils.utcnow(),
            attachments=[dict(
                    id='vol_id',
                    volumeId='vol_id',
                    serverId='instance_uuid',
                    device='/foo')],
            displayName='vol_name',
            displayDescription='vol_desc',
            volumeType='vol_type',
            snapshotId='snap_id',
            metadata=dict(
                foo='bar',
                baz='quux',
                ),
            )
        text = serializer.serialize(dict(volume=raw_volume))
        tree = etree.fromstring(text)
        self._verify_volume(raw_volume, tree)
    def test_volume_index_detail_serializer(self):
        serializer = volumes.VolumesTemplate()
        raw_volumes = [dict(
                id='vol1_id',
                status='vol1_status',
                size=1024,
                availabilityZone='vol1_availability',
                createdAt=timeutils.utcnow(),
                attachments=[dict(
                        id='vol1_id',
                        volumeId='vol1_id',
                        serverId='instance_uuid',
                        device='/foo1')],
                displayName='vol1_name',
                displayDescription='vol1_desc',
                volumeType='vol1_type',
                snapshotId='snap1_id',
                metadata=dict(
                    foo='vol1_foo',
                    bar='vol1_bar',
                    ),
                ),
            dict(
                id='vol2_id',
                status='vol2_status',
                size=1024,
                availabilityZone='vol2_availability',
                createdAt=timeutils.utcnow(),
                attachments=[dict(
                        id='vol2_id',
                        volumeId='vol2_id',
                        serverId='instance_uuid',
                        device='/foo2')],
                displayName='vol2_name',
                displayDescription='vol2_desc',
                volumeType='vol2_type',
                snapshotId='snap2_id',
                metadata=dict(
                    foo='vol2_foo',
                    bar='vol2_bar',
                    ),
                )]
        text = serializer.serialize(dict(volumes=raw_volumes))
        tree = etree.fromstring(text)
        self.assertEqual('volumes', tree.tag)
        self.assertEqual(len(raw_volumes), len(tree))
        for idx, child in enumerate(tree):
            self._verify_volume(raw_volumes[idx], child)
class TestVolumeCreateRequestXMLDeserializer(test.TestCase):
    """Tests for the XML deserializer of volume-create requests: each test
    feeds an XML document through CreateDeserializer and compares the
    resulting body dict against the expected structure."""
    def setUp(self):
        super(TestVolumeCreateRequestXMLDeserializer, self).setUp()
        self.deserializer = volumes.CreateDeserializer()
    def test_minimal_volume(self):
        self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
        size="1"></volume>"""
        request = self.deserializer.deserialize(self_request)
        expected = {
            "volume": {
                "size": "1",
            },
        }
        self.assertEquals(request['body'], expected)
    def test_display_name(self):
        self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
        size="1"
        display_name="Volume-xml"></volume>"""
        request = self.deserializer.deserialize(self_request)
        expected = {
            "volume": {
                "size": "1",
                "display_name": "Volume-xml",
            },
        }
        self.assertEquals(request['body'], expected)
    def test_display_description(self):
        self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
        size="1"
        display_name="Volume-xml"
        display_description="description"></volume>"""
        request = self.deserializer.deserialize(self_request)
        expected = {
            "volume": {
                "size": "1",
                "display_name": "Volume-xml",
                "display_description": "description",
            },
        }
        self.assertEquals(request['body'], expected)
    def test_volume_type(self):
        self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
        size="1"
        display_name="Volume-xml"
        display_description="description"
        volume_type="289da7f8-6440-407c-9fb4-7db01ec49164"></volume>"""
        request = self.deserializer.deserialize(self_request)
        # NOTE: the expected dict previously listed "display_name" twice;
        # the duplicate (a no-op, as the later entry wins) was removed.
        expected = {
            "volume": {
                "size": "1",
                "display_name": "Volume-xml",
                "display_description": "description",
                "volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164",
            },
        }
        self.assertEquals(request['body'], expected)
    def test_availability_zone(self):
        self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
        size="1"
        display_name="Volume-xml"
        display_description="description"
        volume_type="289da7f8-6440-407c-9fb4-7db01ec49164"
        availability_zone="us-east1"></volume>"""
        request = self.deserializer.deserialize(self_request)
        expected = {
            "volume": {
                "size": "1",
                "display_name": "Volume-xml",
                "display_description": "description",
                "volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164",
                "availability_zone": "us-east1",
            },
        }
        self.assertEquals(request['body'], expected)
    def test_metadata(self):
        self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
        display_name="Volume-xml"
        size="1">
        <metadata><meta key="Type">work</meta></metadata></volume>"""
        request = self.deserializer.deserialize(self_request)
        expected = {
            "volume": {
                "display_name": "Volume-xml",
                "size": "1",
                "metadata": {
                    "Type": "work",
                },
            },
        }
        self.assertEquals(request['body'], expected)
    def test_full_volume(self):
        self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
        size="1"
        display_name="Volume-xml"
        display_description="description"
        volume_type="289da7f8-6440-407c-9fb4-7db01ec49164"
        availability_zone="us-east1">
        <metadata><meta key="Type">work</meta></metadata></volume>"""
        request = self.deserializer.deserialize(self_request)
        expected = {
            "volume": {
                "size": "1",
                "display_name": "Volume-xml",
                "display_description": "description",
                "volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164",
                "availability_zone": "us-east1",
                "metadata": {
                    "Type": "work",
                },
            },
        }
        # Show the whole dict diff on failure.
        self.maxDiff = None
        self.assertEquals(request['body'], expected)
class CommonUnprocessableEntityTestCase(object):
    """Shared tests of places we throw 422 Unprocessable Entity from.

    Mix this into a test.TestCase subclass and set ``resource``,
    ``entity_name`` and ``controller_cls`` (plus any extra controller
    ``kwargs``) to exercise that controller's create() with malformed
    bodies.
    """
    # NOTE: this docstring previously sat *below* the attributes, where it
    # was a no-op string statement rather than the class docstring; it has
    # been moved to the conventional position.
    resource = None
    entity_name = None
    controller_cls = None
    kwargs = {}
    def setUp(self):
        super(CommonUnprocessableEntityTestCase, self).setUp()
        self.controller = self.controller_cls()
    def _unprocessable_create(self, body):
        # POST the given (malformed) body and expect HTTP 422.
        req = fakes.HTTPRequest.blank('/v2/fake/' + self.resource)
        req.method = 'POST'
        kwargs = self.kwargs.copy()
        kwargs['body'] = body
        self.assertRaises(webob.exc.HTTPUnprocessableEntity,
                          self.controller.create, req, **kwargs)
    def test_create_no_body(self):
        self._unprocessable_create(body=None)
    def test_create_missing_volume(self):
        body = {'foo': {'a': 'b'}}
        self._unprocessable_create(body=body)
    def test_create_malformed_entity(self):
        body = {self.entity_name: 'string'}
        self._unprocessable_create(body=body)
class UnprocessableVolumeTestCase(CommonUnprocessableEntityTestCase,
                                  test.TestCase):
    # 422 tests applied to the os-volumes create endpoint.
    resource = 'os-volumes'
    entity_name = 'volume'
    controller_cls = volumes.VolumeController
class UnprocessableAttachmentTestCase(CommonUnprocessableEntityTestCase,
                                      test.TestCase):
    # 422 tests applied to the per-server os-volume_attachments endpoint;
    # the controller's create() additionally needs the server_id kwarg.
    resource = 'servers/' + FAKE_UUID + '/os-volume_attachments'
    entity_name = 'volumeAttachment'
    controller_cls = volumes.VolumeAttachmentController
    kwargs = {'server_id': FAKE_UUID}
class UnprocessableSnapshotTestCase(CommonUnprocessableEntityTestCase,
                                    test.TestCase):
    # 422 tests applied to the os-snapshots create endpoint.
    resource = 'os-snapshots'
    entity_name = 'snapshot'
    controller_cls = volumes.SnapshotController
class CreateSnapshotTestCase(test.TestCase):
    """Tests for SnapshotController.create, focusing on parsing of the
    boolean 'force' flag in the request body."""
    def setUp(self):
        super(CreateSnapshotTestCase, self).setUp()
        self.controller = volumes.SnapshotController()
        self.stubs.Set(cinder.API, 'get', fake_get_volume)
        self.stubs.Set(cinder.API, 'create_snapshot_force',
                       fake_create_snapshot)
        self.stubs.Set(cinder.API, 'create_snapshot', fake_create_snapshot)
        self.req = fakes.HTTPRequest.blank('/v2/fake/os-snapshots')
        self.req.method = 'POST'
        self.body = {'snapshot': {'volume_id': 1}}
    def test_force_true(self):
        self.body['snapshot']['force'] = 'True'
        self.controller.create(self.req, body=self.body)
    def test_force_false(self):
        # 'f' is an accepted false-ish spelling.
        self.body['snapshot']['force'] = 'f'
        self.controller.create(self.req, body=self.body)
    def test_force_invalid(self):
        # Anything that is not a recognized boolean string must raise.
        self.body['snapshot']['force'] = 'foo'
        self.assertRaises(exception.InvalidParameterValue,
                          self.controller.create, self.req, body=self.body)
class DeleteSnapshotTestCase(test.TestCase):
    """Tests for SnapshotController.delete with cinder stubbed out."""
    def setUp(self):
        super(DeleteSnapshotTestCase, self).setUp()
        self.controller = volumes.SnapshotController()
        self.stubs.Set(cinder.API, 'get', fake_get_volume)
        self.stubs.Set(cinder.API, 'create_snapshot_force',
                       fake_create_snapshot)
        self.stubs.Set(cinder.API, 'create_snapshot', fake_create_snapshot)
        self.stubs.Set(cinder.API, 'delete_snapshot', fake_delete_snapshot)
        self.req = fakes.HTTPRequest.blank('/v2/fake/os-snapshots')
    def test_normal_delete(self):
        # Create a snapshot first, then delete it by the returned id.
        self.req.method = 'POST'
        self.body = {'snapshot': {'volume_id': 1}}
        result = self.controller.create(self.req, body=self.body)
        self.req.method = 'DELETE'
        result = self.controller.delete(self.req, result['snapshot']['id'])
        self.assertEqual(result.status_int, 202)
class AssistedSnapshotCreateTestCase(test.TestCase):
    """Tests for the os-assisted-volume-snapshots create endpoint."""
    def setUp(self):
        super(AssistedSnapshotCreateTestCase, self).setUp()
        self.controller = assisted_snaps.AssistedVolumeSnapshotsController()
        self.stubs.Set(compute_api.API, 'volume_snapshot_create',
                       fake_compute_volume_snapshot_create)
    def test_assisted_create(self):
        req = fakes.HTTPRequest.blank('/v2/fake/os-assisted-volume-snapshots')
        body = {'snapshot': {'volume_id': 1, 'create_info': {}}}
        req.method = 'POST'
        self.controller.create(req, body=body)
    def test_assisted_create_missing_create_info(self):
        # A body without 'create_info' must be rejected with HTTP 400.
        req = fakes.HTTPRequest.blank('/v2/fake/os-assisted-volume-snapshots')
        body = {'snapshot': {'volume_id': 1}}
        req.method = 'POST'
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          req, body=body)
class AssistedSnapshotDeleteTestCase(test.TestCase):
    """Tests for the os-assisted-volume-snapshots delete endpoint."""
    def setUp(self):
        super(AssistedSnapshotDeleteTestCase, self).setUp()
        self.controller = assisted_snaps.AssistedVolumeSnapshotsController()
        self.stubs.Set(compute_api.API, 'volume_snapshot_delete',
                       fake_compute_volume_snapshot_delete)
    def test_assisted_delete(self):
        # delete_info is passed JSON-encoded via the query string.
        params = {
            'delete_info': jsonutils.dumps({'volume_id': 1}),
        }
        req = fakes.HTTPRequest.blank(
                '/v2/fake/os-assisted-volume-snapshots?%s' %
                '&'.join(['%s=%s' % (k, v) for k, v in params.iteritems()]))
        req.method = 'DELETE'
        result = self.controller.delete(req, '5')
        self.assertEqual(result.status_int, 204)
    def test_assisted_delete_missing_delete_info(self):
        # Missing delete_info in the query string must yield HTTP 400.
        req = fakes.HTTPRequest.blank('/v2/fake/os-assisted-volume-snapshots')
        req.method = 'DELETE'
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
                          req, '5')
| apache-2.0 |
tmess567/syncope | core/persistence-api/src/main/java/org/apache/syncope/core/persistence/api/entity/anyobject/ARelationship.java | 1023 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.syncope.core.persistence.api.entity.anyobject;
import org.apache.syncope.core.persistence.api.entity.Relationship;
/**
 * Relationship where both ends are {@link AnyObject} instances.
 */
public interface ARelationship extends Relationship<AnyObject, AnyObject> {
}
| apache-2.0 |
apache/maven-plugins | maven-install-plugin/src/test/java/org/apache/maven/plugins/install/stubs/AttachedArtifactStub0.java | 1311 | package org.apache.maven.plugins.install.stubs;
import java.io.File;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
 * Artifact stub representing an attached artifact used by the
 * basic-install-test-with-attached-artifacts unit test.
 */
public class AttachedArtifactStub0
    extends InstallArtifactStub
{
    /** Fixed artifact id reported by this stub. */
    public String getArtifactId()
    {
        return "attached-artifact-test-0";
    }

    /** Artifact file resolved relative to the {@code basedir} system property. */
    public File getFile()
    {
        String basedir = System.getProperty( "basedir" );
        String relativePath = "target/test-classes/unit/basic-install-test-with-attached-artifacts/"
            + "target/maven-install-test-1.0-SNAPSHOT.jar";
        return new File( basedir, relativePath );
    }
}
| apache-2.0 |
barneykim/pinpoint | commons-server/src/main/java/com/navercorp/pinpoint/common/server/cluster/zookeeper/exception/NoNodeException.java | 1056 | /*
* Copyright 2018 NAVER Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.navercorp.pinpoint.common.server.cluster.zookeeper.exception;
/**
 * Exception raised when a ZooKeeper operation targets a node that does not
 * exist.
 *
 * @author koo.taejin
 */
public class NoNodeException extends PinpointZookeeperException {
    /** Creates the exception without a detail message. */
    public NoNodeException() {
    }
    /** @param message detail message describing the missing node */
    public NoNodeException(String message) {
        super(message);
    }
    /**
     * @param message detail message describing the missing node
     * @param cause underlying error
     */
    public NoNodeException(String message, Throwable cause) {
        super(message, cause);
    }
    /** @param cause underlying error */
    public NoNodeException(Throwable cause) {
        super(cause);
    }
}
| apache-2.0 |
pdion891/cloudstack-www | source/api/apidocs-4.8/root_admin/listEvents.html | 7196 | <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 1.0 Transitional//EN">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<META http-equiv="Content-Type" content="text/html; charset=UTF-8">
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<link rel="stylesheet" href="../includes/main.css" type="text/css">
<link rel="shortcut icon" href="../favicon.ico" type="image/x-icon">
<title>Apache CloudStack | The Power Behind Your Cloud</title>
</head>
<body>
<div id="insidetopbg">
<div id="inside_wrapper">
<div class="uppermenu_panel">
<div class="uppermenu_box"></div>
</div>
<div id="main_master">
<div id="inside_header">
<div class="header_top">
<a class="cloud_logo" href="http://cloudstack.org"></a>
<div class="mainemenu_panel"></div>
</div>
</div>
<div id="main_content">
<div class="inside_apileftpanel">
<div class="inside_contentpanel" style="width:930px;">
<div class="api_titlebox">
<div class="api_titlebox_left">
<span>
Apache CloudStack v4.8.0 Root Admin API Reference
</span>
<p></p>
<h1>listEvents</h1>
<p>A command to list events.</p>
</div>
<div class="api_titlebox_right">
<a class="api_backbutton" href="../TOC_Root_Admin.html"></a>
</div>
</div>
<div class="api_tablepanel">
<h2>Request parameters</h2>
<table class="apitable">
<tr class="hed">
<td style="width:200px;"><strong>Parameter Name</strong></td><td style="width:500px;">Description</td><td style="width:180px;">Required</td>
</tr>
<tr>
<td style="width:200px;"><i>account</i></td><td style="width:500px;"><i>list resources by account. Must be used with the domainId parameter.</i></td><td style="width:180px;"><i>false</i></td>
</tr>
<tr>
<td style="width:200px;"><i>domainid</i></td><td style="width:500px;"><i>list only resources belonging to the domain specified</i></td><td style="width:180px;"><i>false</i></td>
</tr>
<tr>
<td style="width:200px;"><i>duration</i></td><td style="width:500px;"><i>the duration of the event</i></td><td style="width:180px;"><i>false</i></td>
</tr>
<tr>
<td style="width:200px;"><i>enddate</i></td><td style="width:500px;"><i>the end date range of the list you want to retrieve (use format "yyyy-MM-dd" or the new format "yyyy-MM-dd HH:mm:ss")</i></td><td style="width:180px;"><i>false</i></td>
</tr>
<tr>
<td style="width:200px;"><i>entrytime</i></td><td style="width:500px;"><i>the time the event was entered</i></td><td style="width:180px;"><i>false</i></td>
</tr>
<tr>
<td style="width:200px;"><i>id</i></td><td style="width:500px;"><i>the ID of the event</i></td><td style="width:180px;"><i>false</i></td>
</tr>
<tr>
<td style="width:200px;"><i>isrecursive</i></td><td style="width:500px;"><i>defaults to false, but if true, lists all resources from the parent specified by the domainId till leaves.</i></td><td style="width:180px;"><i>false</i></td>
</tr>
<tr>
<td style="width:200px;"><i>keyword</i></td><td style="width:500px;"><i>List by keyword</i></td><td style="width:180px;"><i>false</i></td>
</tr>
<tr>
<td style="width:200px;"><i>level</i></td><td style="width:500px;"><i>the event level (INFO, WARN, ERROR)</i></td><td style="width:180px;"><i>false</i></td>
</tr>
<tr>
<td style="width:200px;"><i>listall</i></td><td style="width:500px;"><i>If set to false, list only resources belonging to the command's caller; if set to true - list resources that the caller is authorized to see. Default value is false</i></td><td style="width:180px;"><i>false</i></td>
</tr>
<tr>
<td style="width:200px;"><i>page</i></td><td style="width:500px;"><i></i></td><td style="width:180px;"><i>false</i></td>
</tr>
<tr>
<td style="width:200px;"><i>pagesize</i></td><td style="width:500px;"><i></i></td><td style="width:180px;"><i>false</i></td>
</tr>
<tr>
<td style="width:200px;"><i>projectid</i></td><td style="width:500px;"><i>list objects by project</i></td><td style="width:180px;"><i>false</i></td>
</tr>
<tr>
<td style="width:200px;"><i>startdate</i></td><td style="width:500px;"><i>the start date range of the list you want to retrieve (use format "yyyy-MM-dd" or the new format "yyyy-MM-dd HH:mm:ss")</i></td><td style="width:180px;"><i>false</i></td>
</tr>
<tr>
<td style="width:200px;"><i>type</i></td><td style="width:500px;"><i>the event type (see event types)</i></td><td style="width:180px;"><i>false</i></td>
</tr>
</table>
</div>
<div class="api_tablepanel">
<h2>Response Tags</h2>
<table class="apitable">
<tr class="hed">
<td style="width:200px;"><strong>Response Name</strong></td><td style="width:500px;">Description</td>
</tr>
<tr>
<td style="width:200px;"><strong>id</strong></td><td style="width:500px;">the ID of the event</td>
</tr>
<tr>
<td style="width:200px;"><strong>account</strong></td><td style="width:500px;">the account name for the account that owns the object being acted on in the event (e.g. the owner of the virtual machine, ip address, or security group)</td>
</tr>
<tr>
<td style="width:200px;"><strong>created</strong></td><td style="width:500px;">the date the event was created</td>
</tr>
<tr>
<td style="width:200px;"><strong>description</strong></td><td style="width:500px;">a brief description of the event</td>
</tr>
<tr>
<td style="width:200px;"><strong>domain</strong></td><td style="width:500px;">the name of the account's domain</td>
</tr>
<tr>
<td style="width:200px;"><strong>domainid</strong></td><td style="width:500px;">the id of the account's domain</td>
</tr>
<tr>
<td style="width:200px;"><strong>level</strong></td><td style="width:500px;">the event level (INFO, WARN, ERROR)</td>
</tr>
<tr>
<td style="width:200px;"><strong>parentid</strong></td><td style="width:500px;">whether the event is parented</td>
</tr>
<tr>
<td style="width:200px;"><strong>project</strong></td><td style="width:500px;">the project name of the address</td>
</tr>
<tr>
<td style="width:200px;"><strong>projectid</strong></td><td style="width:500px;">the project id of the ipaddress</td>
</tr>
<tr>
<td style="width:200px;"><strong>state</strong></td><td style="width:500px;">the state of the event</td>
</tr>
<tr>
<td style="width:200px;"><strong>type</strong></td><td style="width:500px;">the type of the event (see event types)</td>
</tr>
<tr>
<td style="width:200px;"><strong>username</strong></td><td style="width:500px;">the name of the user who performed the action (can be different from the account if an admin is performing an action for a user, e.g. starting/stopping a user's virtual machine)</td>
</tr>
</table>
</div>
</div>
</div>
</div>
</div>
<div id="footer">
<div id="comments_thread">
<script type="text/javascript" src="https://comments.apache.org/show_comments.lua?site=test" async="true"></script>
<noscript>
<iframe width="930" height="500" src="https://comments.apache.org/iframe.lua?site=test&page=4.2.0/rootadmin"></iframe>
</noscript>
</div>
<div id="footer_mainmaster">
<p>Copyright © 2016 The Apache Software Foundation, Licensed under the
<a href="http://www.apache.org/licenses/LICENSE-2.0">Apache License, Version 2.0.</a>
<br>
Apache, CloudStack, Apache CloudStack, the Apache CloudStack logo, the CloudMonkey logo and the Apache feather logo are trademarks of The Apache Software Foundation.</p>
</div>
</div>
</div>
</div>
</body>
</html>
| apache-2.0 |
domchen/typescript-plus | tests/baselines/reference/augmentedTypesExternalModule1.js | 467 | //// [augmentedTypesExternalModule1.ts]
export var a = 1;
class c5 { public foo() { } }
module c5 { } // should be ok everywhere
//// [augmentedTypesExternalModule1.js]
define(["require", "exports"], function (require, exports) {
"use strict";
exports.__esModule = true;
exports.a = 1;
var c5 = /** @class */ (function () {
function c5() {
}
c5.prototype.foo = function () { };
return c5;
}());
});
| apache-2.0 |
RichieSams/thehalflingproject | libs/stlsoft/include/winstl/window/textmetrics_functions.h | 6257 | /* /////////////////////////////////////////////////////////////////////////
* File: winstl/window/textmetrics_functions.h (originally MWGdi.h, ::SynesisWin)
*
* Purpose: TEXTMETRICS functions.
*
* Created: 20th October 1994
* Updated: 10th August 2009
*
* Home: http://stlsoft.org/
*
* Copyright (c) 1994-2009, Matthew Wilson and Synesis Software
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name(s) of Matthew Wilson and Synesis Software nor the names of
* any contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* ////////////////////////////////////////////////////////////////////// */
/** \file winstl/window/textmetrics_functions.h
*
* \brief [C, C++] TEXTMETRICS functions
* (\ref group__library__windows_window "Windows Window" Library).
*/
#ifndef WINSTL_INCL_WINSTL_WINDOW_H_TEXTMETRICS_FUNCTIONS
#define WINSTL_INCL_WINSTL_WINDOW_H_TEXTMETRICS_FUNCTIONS
#ifndef STLSOFT_DOCUMENTATION_SKIP_SECTION
# define WINSTL_VER_WINSTL_WINDOW_H_TEXTMETRICS_FUNCTIONS_MAJOR 3
# define WINSTL_VER_WINSTL_WINDOW_H_TEXTMETRICS_FUNCTIONS_MINOR 0
# define WINSTL_VER_WINSTL_WINDOW_H_TEXTMETRICS_FUNCTIONS_REVISION 1
# define WINSTL_VER_WINSTL_WINDOW_H_TEXTMETRICS_FUNCTIONS_EDIT 37
#endif /* !STLSOFT_DOCUMENTATION_SKIP_SECTION */
/* /////////////////////////////////////////////////////////////////////////
* Includes
*/
#ifndef WINSTL_INCL_WINSTL_H_WINSTL
# include <winstl/winstl.h>
#endif /* !WINSTL_INCL_WINSTL_H_WINSTL */
#ifdef __cplusplus
# ifndef WINSTL_INCL_WINSTL_WINDOW_HPP_HDC_SCOPE
# include <winstl/window/hdc_scope.hpp>
# endif /* !WINSTL_INCL_WINSTL_WINDOW_HPP_HDC_SCOPE */
#endif /* __cplusplus */
/* /////////////////////////////////////////////////////////////////////////
* Namespace
*/
#ifndef _WINSTL_NO_NAMESPACE
# if defined(_STLSOFT_NO_NAMESPACE) || \
defined(STLSOFT_DOCUMENTATION_SKIP_SECTION)
/* There is no stlsoft namespace, so must define ::winstl */
namespace winstl
{
# else
/* Define stlsoft::winstl_project */
namespace stlsoft
{
namespace winstl_project
{
# endif /* _STLSOFT_NO_NAMESPACE */
#endif /* !_WINSTL_NO_NAMESPACE */
/* /////////////////////////////////////////////////////////////////////////
* C functions
*/
/** Retrieves the ANSI text metrics of the given device context.
 *
 * If GetTextMetricsA() fails, a zero-filled TEXTMETRICA is returned (the
 * function-local static is zero-initialised by the language).
 */
STLSOFT_INLINE TEXTMETRICA winstl__get_textmetrics_a(HDC hdc)
{
    static TEXTMETRICA  s_empty;    /* zero-initialised fallback */
    TEXTMETRICA         result;

    if(STLSOFT_NS_GLOBAL(GetTextMetricsA)(hdc, &result))
    {
        return result;
    }

    return s_empty;
}

/** Retrieves the Unicode text metrics of the given device context.
 *
 * If GetTextMetricsW() fails, a zero-filled TEXTMETRICW is returned.
 */
STLSOFT_INLINE TEXTMETRICW winstl__get_textmetrics_w(HDC hdc)
{
    static TEXTMETRICW  s_empty;    /* zero-initialised fallback */
    TEXTMETRICW         result;

    if(STLSOFT_NS_GLOBAL(GetTextMetricsW)(hdc, &result))
    {
        return result;
    }

    return s_empty;
}

/** Retrieves the text metrics of the given device context in the ambient
 * character encoding: TEXTMETRICW when UNICODE is defined, TEXTMETRICA
 * otherwise.
 */
STLSOFT_INLINE TEXTMETRIC winstl__get_textmetrics(HDC hdc)
{
# ifdef UNICODE
    return winstl__get_textmetrics_w(hdc);
# else /* ? UNICODE */
    return winstl__get_textmetrics_a(hdc);
# endif /* UNICODE */
}
/* /////////////////////////////////////////////////////////////////////////
* C++ functions
*/
#ifdef __cplusplus
/** Retrieves the ANSI text metrics of the given device context
 * (C++ wrapper over winstl__get_textmetrics_a()).
 */
inline TEXTMETRICA get_textmetrics_a(HDC hdc)
{
    return winstl__get_textmetrics_a(hdc);
}

/** Retrieves the Unicode text metrics of the given device context
 * (C++ wrapper over winstl__get_textmetrics_w()).
 */
inline TEXTMETRICW get_textmetrics_w(HDC hdc)
{
    return winstl__get_textmetrics_w(hdc);
}

/** Retrieves the text metrics of the given device context in the ambient
 * character encoding (dispatches on UNICODE).
 */
inline TEXTMETRIC get_textmetrics(HDC hdc)
{
    return winstl__get_textmetrics(hdc);
}
/** Retrieves the ANSI text metrics of the given window, using a device
 * context obtained via ::GetWindowDC().
 */
inline TEXTMETRICA get_window_textmetrics_a(HWND hwnd)
{
    // scoped ownership of the window DC (see hdc_scope.hpp)
    HDC_scope   scoper(::GetWindowDC(hwnd), hwnd);

    return winstl__get_textmetrics_a(scoper.get_hdc());
}

/** Retrieves the Unicode text metrics of the given window, using a device
 * context obtained via ::GetWindowDC().
 */
inline TEXTMETRICW get_window_textmetrics_w(HWND hwnd)
{
    // scoped ownership of the window DC (see hdc_scope.hpp)
    HDC_scope   scoper(::GetWindowDC(hwnd), hwnd);

    return winstl__get_textmetrics_w(scoper.get_hdc());
}

/** Retrieves the text metrics of the given window in the ambient character
 * encoding (dispatches on UNICODE).
 */
inline TEXTMETRIC get_window_textmetrics(HWND hwnd)
{
# ifdef UNICODE
    return get_window_textmetrics_w(hwnd);
# else /* ? UNICODE */
    return get_window_textmetrics_a(hwnd);
# endif /* UNICODE */
}
/** Retrieves the ANSI text metrics of the given window's client area, using
 * a device context obtained via ::GetDC().
 */
inline TEXTMETRICA get_client_textmetrics_a(HWND hwnd)
{
    // scoped ownership of the client-area DC (see hdc_scope.hpp)
    HDC_scope   scoper(::GetDC(hwnd), hwnd);

    return winstl__get_textmetrics_a(scoper.get_hdc());
}

/** Retrieves the Unicode text metrics of the given window's client area,
 * using a device context obtained via ::GetDC().
 */
inline TEXTMETRICW get_client_textmetrics_w(HWND hwnd)
{
    // scoped ownership of the client-area DC (see hdc_scope.hpp)
    HDC_scope   scoper(::GetDC(hwnd), hwnd);

    return winstl__get_textmetrics_w(scoper.get_hdc());
}

/** Retrieves the text metrics of the given window's client area in the
 * ambient character encoding (dispatches on UNICODE).
 */
inline TEXTMETRIC get_client_textmetrics(HWND hwnd)
{
# ifdef UNICODE
    return get_client_textmetrics_w(hwnd);
# else /* ? UNICODE */
    return get_client_textmetrics_a(hwnd);
# endif /* UNICODE */
}
/* ////////////////////////////////////////////////////////////////////// */
#ifndef _WINSTL_NO_NAMESPACE
# if defined(_STLSOFT_NO_NAMESPACE) || \
defined(STLSOFT_DOCUMENTATION_SKIP_SECTION)
} /* namespace winstl */
# else
} /* namespace winstl_project */
} /* namespace stlsoft */
# endif /* _STLSOFT_NO_NAMESPACE */
#endif /* !_WINSTL_NO_NAMESPACE */
/* ////////////////////////////////////////////////////////////////////// */
#endif /* WINSTL_INCL_WINSTL_WINDOW_H_TEXTMETRICS_FUNCTIONS */
/* ///////////////////////////// end of file //////////////////////////// */
| apache-2.0 |
AlienQueen/wicket | wicket-core/src/main/java/org/apache/wicket/resource/ResourceUtil.java | 8028 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.wicket.resource;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.Charset;
import java.util.Locale;
import org.apache.wicket.WicketRuntimeException;
import org.apache.wicket.request.Url;
import org.apache.wicket.request.resource.ResourceReference;
import org.apache.wicket.util.io.IOUtils;
import org.apache.wicket.util.lang.Args;
import org.apache.wicket.util.resource.IResourceStream;
import org.apache.wicket.util.resource.ResourceStreamNotFoundException;
import org.apache.wicket.util.string.Strings;
/**
 * Utilities for resources: encoding/decoding of resource reference attributes
 * (locale, style, variation) into a compact URL-safe form, locale parsing, and
 * reading a resource stream into a string.
 *
 * @author Jeremy Thomerson
 */
public class ResourceUtil
{
	/**
	 * Reads resource reference attributes (style, locale, variation) encoded in the given string.
	 * <p>
	 * Expected layout is {@code locale[-style[-variation]]}, where the style and variation
	 * segments were escaped with {@link #escapeAttributesSeparator(String)}.
	 *
	 * @param encodedAttributes
	 *            the string containing the resource attributes
	 * @return the decoded attributes (fields are {@code null} where absent)
	 *
	 * @see ResourceReference.UrlAttributes
	 */
	public static ResourceReference.UrlAttributes decodeResourceReferenceAttributes(String encodedAttributes)
	{
		Locale locale = null;
		String style = null;
		String variation = null;

		if (Strings.isEmpty(encodedAttributes) == false)
		{
			// first segment is always the locale; remaining segments are style / variation
			String split[] = Strings.split(encodedAttributes, '-');
			locale = parseLocale(split[0]);
			if (split.length == 2)
			{
				style = Strings.defaultIfEmpty(unescapeAttributesSeparator(split[1]), null);
			}
			else if (split.length == 3)
			{
				style = Strings.defaultIfEmpty(unescapeAttributesSeparator(split[1]), null);
				variation = Strings.defaultIfEmpty(unescapeAttributesSeparator(split[2]), null);
			}
		}
		return new ResourceReference.UrlAttributes(locale, style, variation);
	}

	/**
	 * Reads resource reference attributes (style, locale, variation) encoded in the given URL.
	 * <p>
	 * The attributes are expected as the name of the first value-less query parameter
	 * (the counterpart of {@link #encodeResourceReferenceAttributes(Url, ResourceReference)}).
	 *
	 * @param url
	 *            the url containing the resource attributes
	 * @return the decoded attributes, or an all-{@code null} instance if none were found
	 *
	 * @see ResourceReference.UrlAttributes
	 */
	public static ResourceReference.UrlAttributes decodeResourceReferenceAttributes(Url url)
	{
		Args.notNull(url, "url");

		if (url.getQueryParameters().size() > 0)
		{
			Url.QueryParameter param = url.getQueryParameters().get(0);
			// only a parameter with an empty value can carry the encoded attributes
			if (Strings.isEmpty(param.getValue()))
			{
				return decodeResourceReferenceAttributes(param.getName());
			}
		}
		return new ResourceReference.UrlAttributes(null, null, null);
	}

	/**
	 * Encodes the given resource reference attributes returning the corresponding textual
	 * representation ({@code locale[-style[-variation]]}).
	 *
	 * @param attributes
	 *            the resource reference attributes to encode
	 * @return the textual representation for the given attributes, or {@code null} if all
	 *         attributes are unset
	 *
	 * @see ResourceReference.UrlAttributes
	 */
	public static String encodeResourceReferenceAttributes(ResourceReference.UrlAttributes attributes)
	{
		if (attributes == null ||
			(attributes.getLocale() == null && attributes.getStyle() == null && attributes.getVariation() == null))
		{
			return null;
		}
		else
		{
			StringBuilder res = new StringBuilder(32);
			if (attributes.getLocale() != null)
			{
				res.append(attributes.getLocale());
			}
			boolean styleEmpty = Strings.isEmpty(attributes.getStyle());
			if (!styleEmpty)
			{
				res.append('-');
				res.append(escapeAttributesSeparator(attributes.getStyle()));
			}
			if (!Strings.isEmpty(attributes.getVariation()))
			{
				if (styleEmpty)
				{
					// no style: emit an empty style segment ("--") so the variation
					// still ends up in the third position on decode
					res.append("--");
				}
				else
				{
					res.append('-');
				}
				res.append(escapeAttributesSeparator(attributes.getVariation()));
			}
			return res.toString();
		}
	}

	/**
	 * Encodes the attributes of the given resource reference in the specified url, as a
	 * value-less query parameter whose name is the encoded attribute string.
	 *
	 * @param url
	 *            the url to receive the encoded attributes
	 * @param reference
	 *            the resource reference whose attributes are encoded
	 *
	 * @see ResourceReference.UrlAttributes
	 * @see Url
	 */
	public static void encodeResourceReferenceAttributes(Url url, ResourceReference reference)
	{
		Args.notNull(url, "url");
		Args.notNull(reference, "reference");

		String encoded = encodeResourceReferenceAttributes(reference.getUrlAttributes());
		if (!Strings.isEmpty(encoded))
		{
			url.getQueryParameters().add(new Url.QueryParameter(encoded, ""));
		}
	}

	/**
	 * Escapes any occurrences of <em>-</em> character in the style and variation
	 * attributes with <em>~</em>. Any occurrence of <em>~</em> is encoded as <em>~~</em>.
	 *
	 * @param attribute
	 *            the attribute to escape
	 * @return the attribute with escaped separator character
	 */
	public static CharSequence escapeAttributesSeparator(String attribute)
	{
		// '~' is the escape character itself, so double it first, then map '-' to '~'
		CharSequence tmp = Strings.replaceAll(attribute, "~", "~~");
		return Strings.replaceAll(tmp, "-", "~");
	}

	/**
	 * Parses the string representation of a {@link java.util.Locale} (for example 'en_GB').
	 * <p>
	 * NOTE(review): the whole string is lower-cased before splitting; java.util.Locale
	 * normalizes the country code back to upper case, but any variant segment is kept
	 * lower-cased as-is — confirm this matches how locales were originally encoded.
	 *
	 * @param locale
	 *            the string representation of a {@link java.util.Locale}
	 * @return the corresponding {@link java.util.Locale} instance, or {@code null} for
	 *         empty input
	 */
	public static Locale parseLocale(String locale)
	{
		if (Strings.isEmpty(locale))
		{
			return null;
		}
		else
		{
			String parts[] = locale.toLowerCase().split("_", 3);
			if (parts.length == 1)
			{
				return new Locale(parts[0]);
			}
			else if (parts.length == 2)
			{
				return new Locale(parts[0], parts[1]);
			}
			else if (parts.length == 3)
			{
				return new Locale(parts[0], parts[1], parts[2]);
			}
			else
			{
				return null;
			}
		}
	}

	/**
	 * read string with platform default encoding from resource stream
	 *
	 * @param resourceStream
	 * @return string read from resource stream
	 *
	 * @see #readString(org.apache.wicket.util.resource.IResourceStream, java.nio.charset.Charset)
	 */
	public static String readString(IResourceStream resourceStream)
	{
		return readString(resourceStream, null);
	}

	/**
	 * read string with specified encoding from resource stream
	 *
	 * @param resourceStream
	 *            string source
	 * @param charset
	 *            charset for the string encoding (use <code>null</code> for platform default)
	 * @return string read from resource stream
	 * @throws WicketRuntimeException
	 *             if the stream cannot be located or read
	 */
	public static String readString(IResourceStream resourceStream, Charset charset)
	{
		try
		{
			InputStream stream = resourceStream.getInputStream();

			try
			{
				byte[] bytes = IOUtils.toByteArray(stream);

				if (charset == null)
				{
					charset = Charset.defaultCharset();
				}

				return new String(bytes, charset.name());
			}
			finally
			{
				// NOTE(review): only the resource stream is closed here; presumably
				// closing it also closes the underlying InputStream — verify.
				resourceStream.close();
			}
		}
		catch (IOException e)
		{
			throw new WicketRuntimeException("failed to read string from " + resourceStream, e);
		}
		catch (ResourceStreamNotFoundException e)
		{
			throw new WicketRuntimeException("failed to locate stream from " + resourceStream, e);
		}
	}

	/**
	 * Reverts the escaping applied by {@linkplain #escapeAttributesSeparator(String)} - unescapes
	 * occurrences of <em>~</em> character in the style and variation attributes with <em>-</em>.
	 * <p>
	 * NOTE(review): the regex only restores '-' between word characters; a '~' adjacent to a
	 * non-word character or at either end of the string is not unescaped — confirm this is
	 * the intended round-trip behavior with {@link #escapeAttributesSeparator(String)}.
	 *
	 * @param attribute
	 *            the attribute to unescape
	 * @return the attribute with escaped separator character
	 */
	public static String unescapeAttributesSeparator(String attribute)
	{
		String tmp = attribute.replaceAll("(\\w)~(\\w)", "$1-$2");
		return Strings.replaceAll(tmp, "~~", "~").toString();
	}

	// utility class: not instantiable
	private ResourceUtil()
	{
		// no-op
	}
}
| apache-2.0 |
NixaSoftware/CVis | venv/bin/libs/context/doc/html/context/overview.html | 5485 | <html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=US-ASCII">
<title>Overview</title>
<link rel="stylesheet" href="../../../../../doc/src/boostbook.css" type="text/css">
<meta name="generator" content="DocBook XSL Stylesheets V1.78.1">
<link rel="home" href="../index.html" title="Context">
<link rel="up" href="../index.html" title="Context">
<link rel="prev" href="../index.html" title="Context">
<link rel="next" href="requirements.html" title="Requirements">
</head>
<body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF">
<table cellpadding="2" width="100%"><tr>
<td valign="top"><img alt="Boost C++ Libraries" width="277" height="86" src="../../../../../boost.png"></td>
<td align="center"><a href="../../../../../index.html">Home</a></td>
<td align="center"><a href="../../../../../libs/libraries.htm">Libraries</a></td>
<td align="center"><a href="http://www.boost.org/users/people.html">People</a></td>
<td align="center"><a href="http://www.boost.org/users/faq.html">FAQ</a></td>
<td align="center"><a href="../../../../../more/index.htm">More</a></td>
</tr></table>
<hr>
<div class="spirit-nav">
<a accesskey="p" href="../index.html"><img src="../../../../../doc/src/images/prev.png" alt="Prev"></a><a accesskey="u" href="../index.html"><img src="../../../../../doc/src/images/up.png" alt="Up"></a><a accesskey="h" href="../index.html"><img src="../../../../../doc/src/images/home.png" alt="Home"></a><a accesskey="n" href="requirements.html"><img src="../../../../../doc/src/images/next.png" alt="Next"></a>
</div>
<div class="section">
<div class="titlepage"><div><div><h2 class="title" style="clear: both">
<a name="context.overview"></a><a class="link" href="overview.html" title="Overview">Overview</a>
</h2></div></div></div>
<p>
<span class="bold"><strong>Boost.Context</strong></span> is a foundational library that
provides a sort of cooperative multitasking on a single thread. By providing
an abstraction of the current execution state in the current thread, including
the stack (with local variables) and stack pointer, all registers and CPU flags,
and the instruction pointer, a <span class="emphasis"><em>fcontext_t</em></span> instance represents
a specific point in the application's execution path. This is useful for building
higher-level abstractions, like <span class="emphasis"><em>coroutines</em></span>, <span class="emphasis"><em>cooperative
threads (userland threads)</em></span> or an equivalent to <a href="http://msdn.microsoft.com/en-us/library/9k7k7cf0%28v=vs.80%29.aspx" target="_top">C#
keyword <span class="emphasis"><em>yield</em></span></a> in C++.
</p>
<p>
A <span class="emphasis"><em>fcontext_t</em></span> provides the means to suspend the current
execution path and to transfer execution control, thereby permitting another
      <span class="emphasis"><em>fcontext_t</em></span> to run on the current thread. This stateful
transfer mechanism enables a <span class="emphasis"><em>fcontext_t</em></span> to suspend execution
from within nested functions and, later, to resume from where it was suspended.
While the execution path represented by a <span class="emphasis"><em>fcontext_t</em></span> only
runs on a single thread, it can be migrated to another thread at any given
time.
</p>
<p>
A context switch between threads requires system calls (involving the OS kernel),
      which can cost more than a thousand CPU cycles on x86 CPUs. By contrast, transferring
      control among contexts requires fewer than a hundred CPU cycles because it does
not involve system calls as it is done within a single thread.
</p>
<p>
In order to use the classes and functions described here, you can either include
the specific headers specified by the descriptions of each class or function,
or include the master library header:
</p>
<pre class="programlisting"><span class="preprocessor">#include</span> <span class="special"><</span><span class="identifier">boost</span><span class="special">/</span><span class="identifier">context</span><span class="special">/</span><span class="identifier">all</span><span class="special">.</span><span class="identifier">hpp</span><span class="special">></span>
</pre>
<p>
which includes all the other headers in turn.
</p>
<p>
All functions and classes are contained in the namespace <span class="emphasis"><em>boost::context</em></span>.
</p>
</div>
<table xmlns:rev="http://www.cs.rpi.edu/~gregod/boost/tools/doc/revision" width="100%"><tr>
<td align="left"></td>
<td align="right"><div class="copyright-footer">Copyright © 2009 Oliver Kowalke<p>
Distributed under the Boost Software License, Version 1.0. (See accompanying
file LICENSE_1_0.txt or copy at <a href="http://www.boost.org/LICENSE_1_0.txt" target="_top">http://www.boost.org/LICENSE_1_0.txt</a>)
</p>
</div></td>
</tr></table>
<hr>
<div class="spirit-nav">
<a accesskey="p" href="../index.html"><img src="../../../../../doc/src/images/prev.png" alt="Prev"></a><a accesskey="u" href="../index.html"><img src="../../../../../doc/src/images/up.png" alt="Up"></a><a accesskey="h" href="../index.html"><img src="../../../../../doc/src/images/home.png" alt="Home"></a><a accesskey="n" href="requirements.html"><img src="../../../../../doc/src/images/next.png" alt="Next"></a>
</div>
</body>
</html>
| apache-2.0 |
rahul27/GearVRf | GVRf/Framework/framework/src/main/jni/vulkan/vulkanCore.cpp | 46740 | /* Copyright 2015 Samsung Electronics Co., LTD
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "vulkanCore.h"
#include "util/gvr_log.h"
#include <assert.h>
#include <cstring>
VulkanCore* VulkanCore::theInstance = NULL;
/**
 * Creates the Vulkan instance (m_instance).
 *
 * Verifies that the VK_KHR_surface and VK_KHR_android_surface instance
 * extensions are available and enables them on the created instance.
 *
 * @return true on success; false if a required extension is missing or the
 *         driver is incompatible with the requested API version.
 */
bool VulkanCore::CreateInstance(){
    VkResult ret = VK_SUCCESS;

    // Discover the number of extensions listed in the instance properties in order to allocate
    // a buffer large enough to hold them.
    uint32_t instanceExtensionCount = 0;
    ret = vkEnumerateInstanceExtensionProperties(nullptr, &instanceExtensionCount, nullptr);
    GVR_VK_CHECK(!ret);

    VkBool32 surfaceExtFound = 0;
    VkBool32 platformSurfaceExtFound = 0;
    VkExtensionProperties* instanceExtensions = new VkExtensionProperties[instanceExtensionCount];

    // Now request instanceExtensionCount VkExtensionProperties elements be read into our buffer
    ret = vkEnumerateInstanceExtensionProperties(nullptr, &instanceExtensionCount, instanceExtensions);
    GVR_VK_CHECK(!ret);

    // We require two extensions, VK_KHR_surface and VK_KHR_android_surface. If they are found,
    // add them to the extensionNames list that we'll use to initialize our instance with later.
    uint32_t enabledExtensionCount = 0;
    const char* extensionNames[16] = {0};
    for (uint32_t i = 0; i < instanceExtensionCount; i++) {
        if (!strcmp(VK_KHR_SURFACE_EXTENSION_NAME, instanceExtensions[i].extensionName)) {
            surfaceExtFound = 1;
            extensionNames[enabledExtensionCount++] = VK_KHR_SURFACE_EXTENSION_NAME;
        }
        if (!strcmp(VK_KHR_ANDROID_SURFACE_EXTENSION_NAME, instanceExtensions[i].extensionName)) {
            platformSurfaceExtFound = 1;
            extensionNames[enabledExtensionCount++] = VK_KHR_ANDROID_SURFACE_EXTENSION_NAME;
        }
        GVR_VK_CHECK(enabledExtensionCount < 16);
    }

    // extensionNames holds the string-literal macros, not pointers into
    // instanceExtensions, so the scratch buffer can be freed as soon as the
    // scan is complete. Freeing it here (instead of after vkCreateInstance)
    // also fixes a leak on the two early-return error paths below.
    delete[] instanceExtensions;

    if (!surfaceExtFound) {
        LOGE("vkEnumerateInstanceExtensionProperties failed to find the " VK_KHR_SURFACE_EXTENSION_NAME" extension.");
        return false;
    }

    if (!platformSurfaceExtFound) {
        LOGE("vkEnumerateInstanceExtensionProperties failed to find the " VK_KHR_ANDROID_SURFACE_EXTENSION_NAME" extension.");
        return false;
    }

    // We specify the Vulkan version our application was built with,
    // as well as names and versions for our application and engine,
    // if applicable. This allows the driver to gain insight to what
    // is utilizing the vulkan driver, and serve appropriate versions.
    VkApplicationInfo applicationInfo = {};
    applicationInfo.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;
    applicationInfo.pNext = nullptr;
    applicationInfo.pApplicationName = GVR_VK_SAMPLE_NAME;
    applicationInfo.applicationVersion = 0;
    applicationInfo.pEngineName = "VkSample";
    applicationInfo.engineVersion = 1;
    applicationInfo.apiVersion = VK_API_VERSION_1_0;

    // Creation information for the instance points to details about
    // the application, and also the list of extensions to enable.
    VkInstanceCreateInfo instanceCreateInfo = {};
    instanceCreateInfo.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
    instanceCreateInfo.pNext = nullptr;
    instanceCreateInfo.pApplicationInfo = &applicationInfo;
    instanceCreateInfo.enabledLayerCount = 0;
    instanceCreateInfo.ppEnabledLayerNames = nullptr;
    instanceCreateInfo.enabledExtensionCount = enabledExtensionCount;
    instanceCreateInfo.ppEnabledExtensionNames = extensionNames;

    // The main Vulkan instance is created with the creation infos above.
    // We do not specify a custom memory allocator for instance creation.
    ret = vkCreateInstance(&instanceCreateInfo, nullptr, &(m_instance));

    // Vulkan API return values can expose further information on a failure.
    // For instance, INCOMPATIBLE_DRIVER may be returned if the API level
    // an application is built with, exposed through VkApplicationInfo, is
    // newer than the driver present on a device.
    if (ret == VK_ERROR_INCOMPATIBLE_DRIVER) {
        LOGE("Cannot find a compatible Vulkan installable client driver: vkCreateInstance Failure");
        return false;
    } else if (ret == VK_ERROR_EXTENSION_NOT_PRESENT) {
        LOGE("Cannot find a specified extension library: vkCreateInstance Failure");
        return false;
    } else {
        GVR_VK_CHECK(!ret);
    }

    return true;
}
/**
 * Enumerates the physical devices (GPUs) visible to m_instance and selects
 * the first one as m_physicalDevice.
 *
 * Also caches the selected device's general properties and memory properties
 * (the latter is required later when allocating buffers).
 *
 * @return true if at least one physical device was found, false otherwise.
 */
bool VulkanCore::GetPhysicalDevices(){
    VkResult ret = VK_SUCCESS;

    // Query number of physical devices available
    ret = vkEnumeratePhysicalDevices(m_instance, &(m_physicalDeviceCount), nullptr);
    GVR_VK_CHECK(!ret);

    if (m_physicalDeviceCount == 0)
    {
        LOGE("No physical devices detected.");
        return false;
    }

    // Allocate space the the correct number of devices, before requesting their data
    // NOTE(review): m_pPhysicalDevices is allocated here and kept as a member;
    // presumably released in the destructor — verify, and note a repeated call
    // would leak the previous array.
    m_pPhysicalDevices = new VkPhysicalDevice[m_physicalDeviceCount];
    ret = vkEnumeratePhysicalDevices(m_instance, &(m_physicalDeviceCount), m_pPhysicalDevices);
    GVR_VK_CHECK(!ret);

    // For purposes of this sample, we simply use the first device.
    m_physicalDevice = m_pPhysicalDevices[0];

    // By querying the device properties, we learn the device name, amongst
    // other details.
    vkGetPhysicalDeviceProperties(m_physicalDevice, &(m_physicalDeviceProperties));

    LOGI("Vulkan Device: %s", m_physicalDeviceProperties.deviceName);

    // Get Memory information and properties - this is required later, when we begin
    // allocating buffers to store data.
    vkGetPhysicalDeviceMemoryProperties(m_physicalDevice, &(m_physicalDeviceMemoryProperties));

    return true;
}
void VulkanCore::InitDevice() {
VkResult ret = VK_SUCCESS;
// Akin to when creating the instance, we can query extensions supported by the physical device
// that we have selected to use.
uint32_t deviceExtensionCount = 0;
VkExtensionProperties *device_extensions = nullptr;
ret = vkEnumerateDeviceExtensionProperties(m_physicalDevice, nullptr, &deviceExtensionCount, nullptr);
GVR_VK_CHECK(!ret);
VkBool32 swapchainExtFound = 0;
VkExtensionProperties* deviceExtensions = new VkExtensionProperties[deviceExtensionCount];
ret = vkEnumerateDeviceExtensionProperties(m_physicalDevice, nullptr, &deviceExtensionCount, deviceExtensions);
GVR_VK_CHECK(!ret);
// For our example, we require the swapchain extension, which is used to present backbuffers efficiently
// to the users screen.
uint32_t enabledExtensionCount = 0;
const char* extensionNames[16] = {0};
for (uint32_t i = 0; i < deviceExtensionCount; i++) {
if (!strcmp(VK_KHR_SWAPCHAIN_EXTENSION_NAME, deviceExtensions[i].extensionName)) {
swapchainExtFound = 1;
extensionNames[enabledExtensionCount++] = VK_KHR_SWAPCHAIN_EXTENSION_NAME;
}
GVR_VK_CHECK(enabledExtensionCount < 16);
}
if (!swapchainExtFound) {
LOGE("vkEnumerateDeviceExtensionProperties failed to find the " VK_KHR_SWAPCHAIN_EXTENSION_NAME " extension: vkCreateInstance Failure");
// Always attempt to enable the swapchain
extensionNames[enabledExtensionCount++] = VK_KHR_SWAPCHAIN_EXTENSION_NAME;
}
//InitSurface();
// Before we create our main Vulkan device, we must ensure our physical device
// has queue families which can perform the actions we require. For this, we request
// the number of queue families, and their properties.
uint32_t queueFamilyCount = 0;
vkGetPhysicalDeviceQueueFamilyProperties(m_physicalDevice, &queueFamilyCount, nullptr);
VkQueueFamilyProperties* queueProperties = new VkQueueFamilyProperties[queueFamilyCount];
vkGetPhysicalDeviceQueueFamilyProperties(m_physicalDevice, &queueFamilyCount, queueProperties);
GVR_VK_CHECK(queueFamilyCount >= 1);
// We query each queue family in turn for the ability to support the android surface
// that was created earlier. We need the device to be able to present its images to
// this surface, so it is important to test for this.
VkBool32* supportsPresent = new VkBool32[queueFamilyCount];
for (uint32_t i = 0; i < queueFamilyCount; i++) {
vkGetPhysicalDeviceSurfaceSupportKHR(m_physicalDevice, i, m_surface, &supportsPresent[i]);
}
// Search for a graphics queue, and ensure it also supports our surface. We want a
// queue which can be used for both, as to simplify operations.
uint32_t queueIndex = queueFamilyCount + 1;
for (uint32_t i = 0; i < queueFamilyCount; i++) {
if ((queueProperties[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) != 0) {
if (supportsPresent[i] == VK_TRUE) {
queueIndex = i;
break;
}
}
}
delete [] supportsPresent;
delete [] queueProperties;
if (queueIndex == (queueFamilyCount + 1)) {
GVR_VK_CHECK("Could not obtain a queue family for both graphics and presentation." && 0);
}
// We have identified a queue family which both supports our android surface,
// and can be used for graphics operations.
m_queueFamilyIndex = queueIndex;
// As we create the device, we state we will be creating a queue of the
// family type required. 1.0 is the highest priority and we use that.
float queuePriorities[1] = { 1.0 };
VkDeviceQueueCreateInfo deviceQueueCreateInfo = {};
deviceQueueCreateInfo.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
deviceQueueCreateInfo.pNext = nullptr;
deviceQueueCreateInfo.queueFamilyIndex = m_queueFamilyIndex;
deviceQueueCreateInfo.queueCount = 1;
deviceQueueCreateInfo.pQueuePriorities = queuePriorities;
// Now we pass the queue create info, as well as our requested extensions,
// into our DeviceCreateInfo structure.
VkDeviceCreateInfo deviceCreateInfo = {};
deviceCreateInfo.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
deviceCreateInfo.pNext = nullptr;
deviceCreateInfo.queueCreateInfoCount = 1;
deviceCreateInfo.pQueueCreateInfos = &deviceQueueCreateInfo;
deviceCreateInfo.enabledLayerCount = 0;
deviceCreateInfo.ppEnabledLayerNames = nullptr;
deviceCreateInfo.enabledExtensionCount = enabledExtensionCount;
deviceCreateInfo.ppEnabledExtensionNames = extensionNames;
// Create the device.
ret = vkCreateDevice(m_physicalDevice, &deviceCreateInfo, nullptr, &m_device);
GVR_VK_CHECK(!ret);
// Obtain the device queue that we requested.
vkGetDeviceQueue(m_device, m_queueFamilyIndex, 0, &m_queue);
}
// Creates m_swapchainImageCount colour images (linear tiling, host-visible so
// the CPU can read rendered pixels back) plus one depth buffer per image.
// This replaces a platform swapchain: images are created manually.
void VulkanCore::InitSwapchain(uint32_t width, uint32_t height){
    VkResult ret = VK_SUCCESS;

    m_width = width;
    m_height = height;

    // Colour image description: 2D, single mip/layer, linear tiling so the
    // host can map the memory, usable as a colour attachment and copy source.
    VkImageCreateInfo imageCreateInfo = {};
    imageCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    imageCreateInfo.pNext = nullptr;
    imageCreateInfo.imageType = VK_IMAGE_TYPE_2D;
    // NOTE(review): a UINT format is used here while the renderpass uses
    // m_surfaceFormat.format — confirm the two agree.
    imageCreateInfo.format = VK_FORMAT_R8G8B8A8_UINT;
    imageCreateInfo.extent = {m_width, m_height, 1};
    imageCreateInfo.mipLevels = 1;
    imageCreateInfo.arrayLayers = 1;
    imageCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
    imageCreateInfo.tiling = VK_IMAGE_TILING_LINEAR;
    imageCreateInfo.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
    imageCreateInfo.flags = 0;
    imageCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;

    m_swapchainImageCount = 2;
    m_swapchainBuffers = new GVR_VK_SwapchainBuffer[m_swapchainImageCount];
    GVR_VK_CHECK(m_swapchainBuffers);

    // Loop counters are uint32_t for consistency with the other
    // m_swapchainImageCount loops in this file (avoids signed/unsigned mixing).
    for (uint32_t i = 0; i < m_swapchainImageCount; i++) {
        VkMemoryRequirements mem_reqs;
        VkResult err;
        bool pass;

        ret = vkCreateImage(m_device, &imageCreateInfo, nullptr, &m_swapchainBuffers[i].image);
        GVR_VK_CHECK(!ret);

        // Discover and record what memory this image requires.
        vkGetImageMemoryRequirements(m_device, m_swapchainBuffers[i].image, &mem_reqs);
        m_swapchainBuffers[i].size = mem_reqs.size;

        // Allocate host-visible memory so the CPU can map and read the pixels.
        VkMemoryAllocateInfo memoryAllocateInfo = {};
        memoryAllocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
        memoryAllocateInfo.pNext = nullptr;
        memoryAllocateInfo.allocationSize = mem_reqs.size;
        memoryAllocateInfo.memoryTypeIndex = 0;
        pass = GetMemoryTypeFromProperties(mem_reqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &memoryAllocateInfo.memoryTypeIndex);
        GVR_VK_CHECK(pass);

        err = vkAllocateMemory(m_device, &memoryAllocateInfo, nullptr, &m_swapchainBuffers[i].mem);
        GVR_VK_CHECK(!err);

        // Bind the memory to the image.
        err = vkBindImageMemory(m_device, m_swapchainBuffers[i].image, m_swapchainBuffers[i].mem, 0);
        GVR_VK_CHECK(!err);

        // A view is needed to attach the image to a framebuffer.
        VkImageViewCreateInfo imageViewCreateInfo = {};
        imageViewCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
        imageViewCreateInfo.pNext = nullptr;
        imageViewCreateInfo.format = VK_FORMAT_R8G8B8A8_UINT;
        imageViewCreateInfo.components.r = VK_COMPONENT_SWIZZLE_R;
        imageViewCreateInfo.components.g = VK_COMPONENT_SWIZZLE_G;
        imageViewCreateInfo.components.b = VK_COMPONENT_SWIZZLE_B;
        imageViewCreateInfo.components.a = VK_COMPONENT_SWIZZLE_A;
        imageViewCreateInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
        imageViewCreateInfo.subresourceRange.baseMipLevel = 0;
        imageViewCreateInfo.subresourceRange.levelCount = 1;
        imageViewCreateInfo.subresourceRange.baseArrayLayer = 0;
        imageViewCreateInfo.subresourceRange.layerCount = 1;
        imageViewCreateInfo.viewType = VK_IMAGE_VIEW_TYPE_2D;
        imageViewCreateInfo.flags = 0;
        imageViewCreateInfo.image = m_swapchainBuffers[i].image;

        err = vkCreateImageView(m_device, &imageViewCreateInfo, nullptr, &m_swapchainBuffers[i].view);
        GVR_VK_CHECK(!err);
    }

    // One depth buffer per colour image.
    m_depthBuffers = new GVR_VK_DepthBuffer[m_swapchainImageCount];
    for (uint32_t i = 0; i < m_swapchainImageCount; i++) {
        const VkFormat depthFormat = VK_FORMAT_D16_UNORM;

        // Depth image: optimal tiling, depth/stencil attachment usage only.
        // (Renamed from the shadowing "imageCreateInfo" in the original.)
        VkImageCreateInfo depthImageCreateInfo = {};
        depthImageCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
        depthImageCreateInfo.pNext = nullptr;
        depthImageCreateInfo.imageType = VK_IMAGE_TYPE_2D;
        depthImageCreateInfo.format = depthFormat;
        depthImageCreateInfo.extent = {m_width, m_height, 1};
        depthImageCreateInfo.mipLevels = 1;
        depthImageCreateInfo.arrayLayers = 1;
        depthImageCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
        depthImageCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
        depthImageCreateInfo.usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
        depthImageCreateInfo.flags = 0;

        VkImageViewCreateInfo depthViewCreateInfo = {};
        depthViewCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
        depthViewCreateInfo.pNext = nullptr;
        depthViewCreateInfo.image = VK_NULL_HANDLE;
        depthViewCreateInfo.format = depthFormat;
        depthViewCreateInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
        depthViewCreateInfo.subresourceRange.baseMipLevel = 0;
        depthViewCreateInfo.subresourceRange.levelCount = 1;
        depthViewCreateInfo.subresourceRange.baseArrayLayer = 0;
        depthViewCreateInfo.subresourceRange.layerCount = 1;
        depthViewCreateInfo.flags = 0;
        depthViewCreateInfo.viewType = VK_IMAGE_VIEW_TYPE_2D;

        VkMemoryRequirements mem_reqs;
        VkResult err;
        bool pass;

        m_depthBuffers[i].format = depthFormat;

        err = vkCreateImage(m_device, &depthImageCreateInfo, nullptr, &m_depthBuffers[i].image);
        GVR_VK_CHECK(!err);

        vkGetImageMemoryRequirements(m_device, m_depthBuffers[i].image, &mem_reqs);

        // Allocate device memory; no special property flags are required here.
        VkMemoryAllocateInfo memoryAllocateInfo = {};
        memoryAllocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
        memoryAllocateInfo.pNext = nullptr;
        memoryAllocateInfo.allocationSize = mem_reqs.size;
        memoryAllocateInfo.memoryTypeIndex = 0;
        pass = GetMemoryTypeFromProperties(mem_reqs.memoryTypeBits, 0, &memoryAllocateInfo.memoryTypeIndex);
        GVR_VK_CHECK(pass);

        err = vkAllocateMemory(m_device, &memoryAllocateInfo, nullptr, &m_depthBuffers[i].mem);
        GVR_VK_CHECK(!err);

        err = vkBindImageMemory(m_device, m_depthBuffers[i].image, m_depthBuffers[i].mem, 0);
        GVR_VK_CHECK(!err);

        // Create the view for this image.
        depthViewCreateInfo.image = m_depthBuffers[i].image;
        err = vkCreateImageView(m_device, &depthViewCreateInfo, nullptr, &m_depthBuffers[i].view);
        GVR_VK_CHECK(!err);
    }
}
// Finds the index of the first device memory type that is allowed by
// typeBits (a bitmask from VkMemoryRequirements::memoryTypeBits) and has all
// of the property flags in requirements_mask set.
// On success writes the index into *typeIndex and returns true; returns
// false if no suitable memory type exists.
bool VulkanCore::GetMemoryTypeFromProperties( uint32_t typeBits, VkFlags requirements_mask, uint32_t* typeIndex)
{
    GVR_VK_CHECK(typeIndex != nullptr);
    // BUGFIX: iterate only over the memoryTypeCount valid entries instead of a
    // hard-coded 32 — per the Vulkan spec, entries past memoryTypeCount in
    // memoryTypes[] are not valid and may contain arbitrary flag values.
    for (uint32_t i = 0; i < m_physicalDeviceMemoryProperties.memoryTypeCount; i++) {
        if ((typeBits & (1u << i)) != 0) {
            // Type is allowed by the caller; does it have the wanted properties?
            if ((m_physicalDeviceMemoryProperties.memoryTypes[i].propertyFlags &
                 requirements_mask) == requirements_mask) {
                *typeIndex = i;
                return true;
            }
        }
    }
    // No memory types matched, return failure
    return false;
}
void VulkanCore::InitCommandbuffers(){
VkResult ret = VK_SUCCESS;
// Command buffers are allocated from a pool; we define that pool here and create it.
VkCommandPoolCreateInfo commandPoolCreateInfo = {};
commandPoolCreateInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
commandPoolCreateInfo.pNext = nullptr;
commandPoolCreateInfo.queueFamilyIndex = m_queueFamilyIndex;
commandPoolCreateInfo.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
ret = vkCreateCommandPool(m_device, &commandPoolCreateInfo, nullptr, &m_commandPool);
GVR_VK_CHECK(!ret);
VkCommandBufferAllocateInfo commandBufferAllocateInfo = {};
commandBufferAllocateInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
commandBufferAllocateInfo.pNext = nullptr;
commandBufferAllocateInfo.commandPool = m_commandPool;
commandBufferAllocateInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
commandBufferAllocateInfo.commandBufferCount = 1;
// Create render command buffers, one per swapchain image
for (int i=0; i < m_swapchainImageCount; i++)
{
ret = vkAllocateCommandBuffers(m_device, &commandBufferAllocateInfo, &m_swapchainBuffers[i].cmdBuffer);
GVR_VK_CHECK(!ret);
}
}
// Creates the triangle vertex buffer, uploads the vertex data through a
// host-visible mapping, and fills in m_vertices.vi* so the pipeline knows
// the vertex layout: float3 position followed by float4 colour per vertex.
void VulkanCore::InitVertexBuffers(){
    // Our vertex buffer data is a simple triangle, with associated vertex colors.
    const float vb[3][7] = {
        //      position                 color
        { -0.9f, -0.9f,  0.9f,     1.0f, 0.0f, 0.0f, 1.0f },
        {  0.9f, -0.9f,  0.9f,     1.0f, 0.0f, 0.0f, 1.0f },
        {  0.0f,  0.9f,  0.9f,     1.0f, 0.0f, 0.0f, 1.0f },
    };

    VkResult err;
    bool pass;

    // m_vertices holds the buffer, its memory, and the pipeline vertex-input
    // description; start from a clean slate.
    memset(&m_vertices, 0, sizeof(m_vertices));

    // Create the buffer object sized for the whole triangle.
    VkBufferCreateInfo bufferCreateInfo = {};
    bufferCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    bufferCreateInfo.pNext = nullptr;
    bufferCreateInfo.size = sizeof(vb);
    bufferCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
    bufferCreateInfo.flags = 0;
    err = vkCreateBuffer(m_device, &bufferCreateInfo, nullptr, &m_vertices.buf);
    GVR_VK_CHECK(!err);

    // Obtain the memory requirements for this buffer.
    // (The stale GVR_VK_CHECK(!err) that followed this void call in the
    // original re-tested the vkCreateBuffer result and has been removed.)
    VkMemoryRequirements mem_reqs;
    vkGetBufferMemoryRequirements(m_device, m_vertices.buf, &mem_reqs);

    // Allocate host-visible memory so the CPU can write the vertex data.
    VkMemoryAllocateInfo memoryAllocateInfo = {};
    memoryAllocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    memoryAllocateInfo.pNext = nullptr;
    memoryAllocateInfo.allocationSize = mem_reqs.size;
    memoryAllocateInfo.memoryTypeIndex = 0;
    pass = GetMemoryTypeFromProperties(mem_reqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &memoryAllocateInfo.memoryTypeIndex);
    GVR_VK_CHECK(pass);

    err = vkAllocateMemory(m_device, &memoryAllocateInfo, nullptr, &m_vertices.mem);
    GVR_VK_CHECK(!err);

    // Map, copy the vertex data in, then unmap.
    void *data;
    err = vkMapMemory(m_device, m_vertices.mem, 0, memoryAllocateInfo.allocationSize, 0, &data);
    GVR_VK_CHECK(!err);
    memcpy(data, vb, sizeof(vb));
    vkUnmapMemory(m_device, m_vertices.mem);

    // Bind our buffer to the memory.
    err = vkBindBufferMemory(m_device, m_vertices.buf, m_vertices.mem, 0);
    GVR_VK_CHECK(!err);

    // Describe the vertex layout for the graphics pipeline.
    m_vertices.vi.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
    m_vertices.vi.pNext = nullptr;
    m_vertices.vi.vertexBindingDescriptionCount = 1;
    m_vertices.vi.pVertexBindingDescriptions = m_vertices.vi_bindings;
    m_vertices.vi.vertexAttributeDescriptionCount = 2;
    m_vertices.vi.pVertexAttributeDescriptions = m_vertices.vi_attrs;

    // One binding covering the whole buffer; stride is one row of vb.
    m_vertices.vi_bindings[0].binding = GVR_VK_VERTEX_BUFFER_BIND_ID;
    m_vertices.vi_bindings[0].stride = sizeof(vb[0]);
    m_vertices.vi_bindings[0].inputRate = VK_VERTEX_INPUT_RATE_VERTEX;

    // Attribute 0: vertex position, float3, at the start of each element.
    m_vertices.vi_attrs[0].binding = GVR_VK_VERTEX_BUFFER_BIND_ID;
    m_vertices.vi_attrs[0].location = 0;
    m_vertices.vi_attrs[0].format = VK_FORMAT_R32G32B32_SFLOAT;
    m_vertices.vi_attrs[0].offset = 0;

    // Attribute 1: vertex colour, float4, immediately after the position.
    m_vertices.vi_attrs[1].binding = GVR_VK_VERTEX_BUFFER_BIND_ID;
    m_vertices.vi_attrs[1].location = 1;
    m_vertices.vi_attrs[1].format = VK_FORMAT_R32G32B32A32_SFLOAT;
    m_vertices.vi_attrs[1].offset = sizeof(float) * 3;
}
void VulkanCore::InitLayouts(){
VkResult ret = VK_SUCCESS;
// This sample has no bindings, so the layout is empty.
VkDescriptorSetLayoutCreateInfo descriptorSetLayoutCreateInfo = {};
descriptorSetLayoutCreateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
descriptorSetLayoutCreateInfo.pNext = nullptr;
descriptorSetLayoutCreateInfo.bindingCount = 0;
descriptorSetLayoutCreateInfo.pBindings = nullptr;
ret = vkCreateDescriptorSetLayout(m_device, &descriptorSetLayoutCreateInfo, nullptr, &m_descriptorLayout);
GVR_VK_CHECK(!ret);
// Our pipeline layout simply points to the empty descriptor layout.
VkPipelineLayoutCreateInfo pipelineLayoutCreateInfo = {};
pipelineLayoutCreateInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
pipelineLayoutCreateInfo.pNext = nullptr;
pipelineLayoutCreateInfo.setLayoutCount = 1;
pipelineLayoutCreateInfo.pSetLayouts = &m_descriptorLayout;
ret = vkCreatePipelineLayout(m_device, &pipelineLayoutCreateInfo, nullptr, &m_pipelineLayout);
GVR_VK_CHECK(!ret);
}
// Builds the renderpass with two attachments (colour + depth). Only the
// colour attachment is actually referenced by the single subpass; the depth
// reference below is currently unused (see note at pDepthStencilAttachment).
void VulkanCore::InitRenderPass(){
// The renderpass defines the attachments to the framebuffer object that gets
// used in the pipeline. We have two attachments, the colour buffer, and the
// depth buffer. The operations and layouts are set to defaults for this type
// of attachment.
VkAttachmentDescription attachmentDescriptions[2] = {};
attachmentDescriptions[0].flags = 0;
// NOTE(review): this format comes from m_surfaceFormat, but the swapchain
// images in InitSwapchain are created with VK_FORMAT_R8G8B8A8_UINT and
// InitSurface() appears to be disabled — confirm the two formats match,
// since framebuffer attachments must match the renderpass format.
attachmentDescriptions[0].format = m_surfaceFormat.format;
attachmentDescriptions[0].samples = VK_SAMPLE_COUNT_1_BIT;
attachmentDescriptions[0].loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
attachmentDescriptions[0].storeOp = VK_ATTACHMENT_STORE_OP_STORE;
attachmentDescriptions[0].stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
attachmentDescriptions[0].stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
// Colour ends in TRANSFER_SRC layout, presumably so the rendered image can
// be copied out afterwards (see the copy code in BuildCmdBuffer) — verify.
attachmentDescriptions[0].initialLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
attachmentDescriptions[0].finalLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
attachmentDescriptions[1].flags = 0;
attachmentDescriptions[1].format = m_depthBuffers[0].format;
attachmentDescriptions[1].samples = VK_SAMPLE_COUNT_1_BIT;
attachmentDescriptions[1].loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
attachmentDescriptions[1].storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
attachmentDescriptions[1].stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
attachmentDescriptions[1].stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
attachmentDescriptions[1].initialLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
attachmentDescriptions[1].finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
// We have references to the attachment offsets, stating the layout type.
VkAttachmentReference colorReference = {};
colorReference.attachment = 0;
colorReference.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
// depthReference is prepared but not wired up (see pDepthStencilAttachment).
VkAttachmentReference depthReference = {};
depthReference.attachment = 1;
depthReference.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
// There can be multiple subpasses in a renderpass, but this example has only one.
// We set the color and depth references at the grahics bind point in the pipeline.
VkSubpassDescription subpassDescription = {};
subpassDescription.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
subpassDescription.flags = 0;
subpassDescription.inputAttachmentCount = 0;
subpassDescription.pInputAttachments = nullptr;
subpassDescription.colorAttachmentCount = 1;
subpassDescription.pColorAttachments = &colorReference;
subpassDescription.pResolveAttachments = nullptr;
// NOTE(review): the depth attachment is deliberately disconnected here even
// though it is declared above; depth testing will not occur in this subpass.
subpassDescription.pDepthStencilAttachment = nullptr;//&depthReference;
subpassDescription.preserveAttachmentCount = 0;
subpassDescription.pPreserveAttachments = nullptr;
// The renderpass itself is created with the number of subpasses, and the
// list of attachments which those subpasses can reference.
VkRenderPassCreateInfo renderPassCreateInfo = {};
renderPassCreateInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
renderPassCreateInfo.pNext = nullptr;
renderPassCreateInfo.attachmentCount = 2;
renderPassCreateInfo.pAttachments = attachmentDescriptions;
renderPassCreateInfo.subpassCount = 1;
renderPassCreateInfo.pSubpasses = &subpassDescription;
renderPassCreateInfo.dependencyCount = 0;
renderPassCreateInfo.pDependencies = nullptr;
VkResult ret;
ret = vkCreateRenderPass(m_device, &renderPassCreateInfo, nullptr, &m_renderPass);
GVR_VK_CHECK(!ret);
}
// NOTE(review): the entire body below is compiled out with "#if 0", so
// InitPipeline() is currently a no-op and m_pipeline is never created here.
// BuildCmdBuffer() still binds m_pipeline — confirm where (or whether) the
// pipeline is created before re-enabling or deleting this code.
void VulkanCore::InitPipeline(){
#if 0
VkResult err;
// The pipeline contains all major state for rendering.
// Our vertex input is a single vertex buffer, and its layout is defined
// in our m_vertices object already. Use this when creating the pipeline.
VkPipelineVertexInputStateCreateInfo vi = {};
vi = m_vertices.vi;
// Our vertex buffer describes a triangle list.
VkPipelineInputAssemblyStateCreateInfo ia = {};
ia.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
ia.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
// State for rasterization, such as polygon fill mode is defined.
VkPipelineRasterizationStateCreateInfo rs = {};
rs.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
rs.polygonMode = VK_POLYGON_MODE_FILL;
rs.cullMode = VK_CULL_MODE_BACK_BIT;
rs.frontFace = VK_FRONT_FACE_CLOCKWISE;
rs.depthClampEnable = VK_FALSE;
rs.rasterizerDiscardEnable = VK_FALSE;
rs.depthBiasEnable = VK_FALSE;
// For this example we do not do blending, so it is disabled.
VkPipelineColorBlendAttachmentState att_state[1] = {};
att_state[0].colorWriteMask = 0xf;
att_state[0].blendEnable = VK_FALSE;
VkPipelineColorBlendStateCreateInfo cb = {};
cb.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
cb.attachmentCount = 1;
cb.pAttachments = &att_state[0];
// We define a simple viewport and scissor. It does not change during rendering
// in this sample.
VkPipelineViewportStateCreateInfo vp = {};
vp.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
vp.viewportCount = 1;
vp.scissorCount = 1;
VkViewport viewport = {};
viewport.height = (float) m_height;
viewport.width = (float) m_width;
viewport.minDepth = (float) 0.0f;
viewport.maxDepth = (float) 1.0f;
vp.pViewports = &viewport;
VkRect2D scissor = {};
scissor.extent.width = m_width;
scissor.extent.height = m_height;
scissor.offset.x = 0;
scissor.offset.y = 0;
vp.pScissors = &scissor;
// Standard depth and stencil state is defined
VkPipelineDepthStencilStateCreateInfo ds = {};
ds.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO;
ds.depthTestEnable = VK_TRUE;
ds.depthWriteEnable = VK_TRUE;
ds.depthCompareOp = VK_COMPARE_OP_LESS_OR_EQUAL;
ds.depthBoundsTestEnable = VK_FALSE;
ds.back.failOp = VK_STENCIL_OP_KEEP;
ds.back.passOp = VK_STENCIL_OP_KEEP;
ds.back.compareOp = VK_COMPARE_OP_ALWAYS;
ds.stencilTestEnable = VK_FALSE;
ds.front = ds.back;
// We do not use multisample
VkPipelineMultisampleStateCreateInfo ms = {};
ms.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
ms.pSampleMask = nullptr;
ms.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;
// We define two shader stages: our vertex and fragment shader.
// they are embedded as SPIR-V into a header file for ease of deployment.
VkPipelineShaderStageCreateInfo shaderStages[2] = {};
shaderStages[0].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
shaderStages[0].stage = VK_SHADER_STAGE_VERTEX_BIT;
shaderStages[0].module = CreateShaderModule( (const uint32_t*)&shader_tri_vert[0], shader_tri_vert_size);
shaderStages[0].pName = "main";
shaderStages[1].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
shaderStages[1].stage = VK_SHADER_STAGE_FRAGMENT_BIT;
shaderStages[1].module = CreateShaderModule( (const uint32_t*)&shader_tri_frag[0], shader_tri_frag_size);
shaderStages[1].pName = "main";
// Pipelines are allocated from pipeline caches.
VkPipelineCacheCreateInfo pipelineCache = {};
pipelineCache.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
pipelineCache.pNext = nullptr;
pipelineCache.flags = 0;
VkPipelineCache piplineCache;
err = vkCreatePipelineCache(m_device, &pipelineCache, nullptr, &piplineCache);
GVR_VK_CHECK(!err);
// Out graphics pipeline records all state information, including our renderpass
// and pipeline layout. We do not have any dynamic state in this example.
VkGraphicsPipelineCreateInfo pipelineCreateInfo = {};
pipelineCreateInfo.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
pipelineCreateInfo.layout = m_pipelineLayout;
pipelineCreateInfo.pVertexInputState = &vi;
pipelineCreateInfo.pInputAssemblyState = &ia;
pipelineCreateInfo.pRasterizationState = &rs;
pipelineCreateInfo.pColorBlendState = &cb;
pipelineCreateInfo.pMultisampleState = &ms;
pipelineCreateInfo.pViewportState = &vp;
// Depth/stencil state is disconnected here, matching the renderpass which
// also leaves its depth attachment unused.
pipelineCreateInfo.pDepthStencilState = nullptr;//&ds;
pipelineCreateInfo.pStages = &shaderStages[0];
pipelineCreateInfo.renderPass = m_renderPass;
pipelineCreateInfo.pDynamicState = nullptr;
pipelineCreateInfo.stageCount = 2; //vertex and fragment
err = vkCreateGraphicsPipelines(m_device, piplineCache, 1, &pipelineCreateInfo, nullptr, &m_pipeline);
GVR_VK_CHECK(!err);
// We can destroy the cache now as we do not need it. The shader modules also
// can be destroyed after the pipeline is created.
vkDestroyPipelineCache(m_device, piplineCache, nullptr);
vkDestroyShaderModule(m_device, shaderStages[0].module, nullptr);
vkDestroyShaderModule(m_device, shaderStages[1].module, nullptr);
#endif
}
void VulkanCore::InitFrameBuffers(){
//The framebuffer objects reference the renderpass, and allow
// the references defined in that renderpass to now attach to views.
// The views in this example are the colour view, which is our swapchain image,
// and the depth buffer created manually earlier.
VkImageView attachments [2] = {};
VkFramebufferCreateInfo framebufferCreateInfo = {};
framebufferCreateInfo.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
framebufferCreateInfo.pNext = nullptr;
framebufferCreateInfo.renderPass = m_renderPass;
framebufferCreateInfo.attachmentCount = 2;
framebufferCreateInfo.pAttachments = attachments;
framebufferCreateInfo.width = m_width;
framebufferCreateInfo.height = m_height;
framebufferCreateInfo.layers = 1;
VkResult ret;
m_frameBuffers = new VkFramebuffer[m_swapchainImageCount];
// Reusing the framebufferCreateInfo to create m_swapchainImageCount framebuffers,
// only the attachments to the relevent image views change each time.
for (uint32_t i = 0; i < m_swapchainImageCount; i++) {
attachments[0] = m_swapchainBuffers[i].view;
//framebufferCreateInfo.pAttachments = &m_swapchainBuffers[i].view;
attachments[1] = m_depthBuffers[i].view;
LOGI("Vulkan view %d created", i);
if((m_swapchainBuffers[i].view == VK_NULL_HANDLE) || (m_renderPass == VK_NULL_HANDLE)){
LOGI("Vulkan image view null");
}
else
LOGI("Vulkan image view not null");
ret = vkCreateFramebuffer(m_device, &framebufferCreateInfo, nullptr, &m_frameBuffers[i]);
GVR_VK_CHECK(!ret);
}
}
void VulkanCore::InitSync(){
VkResult ret = VK_SUCCESS;
// For synchronization, we have semaphores for rendering and backbuffer signalling.
VkSemaphoreCreateInfo semaphoreCreateInfo = {};
semaphoreCreateInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
semaphoreCreateInfo.pNext = nullptr;
semaphoreCreateInfo.flags = 0;
ret = vkCreateSemaphore(m_device, &semaphoreCreateInfo, nullptr, &m_backBufferSemaphore);
GVR_VK_CHECK(!ret);
ret = vkCreateSemaphore(m_device, &semaphoreCreateInfo, nullptr, &m_renderCompleteSemaphore);
GVR_VK_CHECK(!ret);
}
void VulkanCore::BuildCmdBuffer()
{
// For the triangle sample, we pre-record our command buffer, as it is static.
// We have a buffer per swap chain image, so loop over the creation process.
for (uint32_t i = 0; i < m_swapchainImageCount; i++) {
VkCommandBuffer &cmdBuffer = m_swapchainBuffers[i].cmdBuffer;
// vkBeginCommandBuffer should reset the command buffer, but Reset can be called
// to make it more explicit.
VkResult err;
err = vkResetCommandBuffer(cmdBuffer, 0);
GVR_VK_CHECK(!err);
VkCommandBufferInheritanceInfo cmd_buf_hinfo = {};
cmd_buf_hinfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
cmd_buf_hinfo.pNext = nullptr;
cmd_buf_hinfo.renderPass = VK_NULL_HANDLE;
cmd_buf_hinfo.subpass = 0;
cmd_buf_hinfo.framebuffer = VK_NULL_HANDLE;
cmd_buf_hinfo.occlusionQueryEnable = VK_FALSE;
cmd_buf_hinfo.queryFlags = 0;
cmd_buf_hinfo.pipelineStatistics = 0;
VkCommandBufferBeginInfo cmd_buf_info = {};
cmd_buf_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
cmd_buf_info.pNext = nullptr;
cmd_buf_info.flags = 0;
cmd_buf_info.pInheritanceInfo = &cmd_buf_hinfo;
// By calling vkBeginCommandBuffer, cmdBuffer is put into the recording state.
err = vkBeginCommandBuffer(cmdBuffer, &cmd_buf_info);
GVR_VK_CHECK(!err);
// Before we can use the back buffer from the swapchain, we must change the
// image layout from the PRESENT mode to the COLOR_ATTACHMENT mode.
// PRESENT mode is optimal for sending to the screen for users to see, so the
// image will be set back to that mode after we have completed rendering.
VkImageMemoryBarrier preRenderBarrier = {};
preRenderBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
preRenderBarrier.pNext = nullptr;
preRenderBarrier.srcAccessMask = VK_ACCESS_MEMORY_READ_BIT;
preRenderBarrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
preRenderBarrier.oldLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
preRenderBarrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
preRenderBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
preRenderBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
preRenderBarrier.image = m_swapchainBuffers[i].image;
preRenderBarrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
preRenderBarrier.subresourceRange.baseArrayLayer = 0;
preRenderBarrier.subresourceRange.baseMipLevel = 1;
preRenderBarrier.subresourceRange.layerCount = 0;
preRenderBarrier.subresourceRange.levelCount = 1;
// Thie PipelineBarrier function can operate on memoryBarriers,
// bufferMemory and imageMemory buffers. We only provide a single
// imageMemoryBarrier.
vkCmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
0, 0, nullptr, 0, nullptr, 1, &preRenderBarrier);
// When starting the render pass, we can set clear values.
VkClearValue clear_values[2] = {};
clear_values[0].color.float32[0] = 0.3f;
clear_values[0].color.float32[1] = 0.3f;
clear_values[0].color.float32[2] = 0.3f;
clear_values[0].color.float32[3] = 1.0f;
clear_values[1].depthStencil.depth = 1.0f;
clear_values[1].depthStencil.stencil = 0;
VkRenderPassBeginInfo rp_begin = {};
rp_begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
rp_begin.pNext = nullptr;
rp_begin.renderPass = m_renderPass;
rp_begin.framebuffer = m_frameBuffers[i];
rp_begin.renderArea.offset.x = 0;
rp_begin.renderArea.offset.y = 0;
rp_begin.renderArea.extent.width = m_width;
rp_begin.renderArea.extent.height = m_height;
rp_begin.clearValueCount = 2;
rp_begin.pClearValues = clear_values;
vkCmdBeginRenderPass(cmdBuffer, &rp_begin, VK_SUBPASS_CONTENTS_INLINE);
// Set our pipeline. This holds all major state
// the pipeline defines, for example, that the vertex buffer is a triangle list.
vkCmdBindPipeline(cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, m_pipeline);
// Bind our vertex buffer, with a 0 offset.
VkDeviceSize offsets[1] = {0};
vkCmdBindVertexBuffers(cmdBuffer, GVR_VK_VERTEX_BUFFER_BIND_ID, 1, &m_vertices.buf, offsets);
// Issue a draw command, with our 3 vertices.
vkCmdDraw(cmdBuffer, 3, 1, 0, 0);
// Copy Image to Buffer
VkOffset3D off = {};
off.x = 0;
off.y = 0;
off.z = 0;
VkExtent3D extent3D = {};
extent3D.width = 320;
extent3D.height = 240;
VkImageSubresourceLayers subResource = {};
subResource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
subResource.baseArrayLayer = 0;
subResource.mipLevel = 0;
subResource.layerCount = 1;
VkBufferImageCopy someDetails = {};
someDetails.bufferOffset = 0;
someDetails.bufferRowLength = 0;
someDetails.bufferImageHeight = 0;
someDetails.imageSubresource = subResource;
someDetails.imageOffset = off;
someDetails.imageExtent = extent3D;
VkBufferImageCopy region = { 0 };
region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
region.imageSubresource.layerCount = 1;
region.imageExtent.width = m_width;
region.imageExtent.height = m_height;
region.imageExtent.depth = 1;
// Now our render pass has ended.
vkCmdEndRenderPass(cmdBuffer);
//vkCmdCopyImageToBuffer(cmdBuffer, m_swapchainBuffers[i].image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, m_outputBuffers[i].imageOutputBuffer, 1, ®ion);
// As stated earlier, now transition the swapchain image to the PRESENT mode.
VkImageMemoryBarrier prePresentBarrier = {};
prePresentBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
prePresentBarrier.pNext = nullptr;
prePresentBarrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
prePresentBarrier.dstAccessMask = VK_ACCESS_MEMORY_READ_BIT;
prePresentBarrier.oldLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
prePresentBarrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
prePresentBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
prePresentBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
prePresentBarrier.image = m_swapchainBuffers[i].image;
prePresentBarrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
prePresentBarrier.subresourceRange.baseArrayLayer = 0;
prePresentBarrier.subresourceRange.baseMipLevel = 1;
prePresentBarrier.subresourceRange.layerCount = 0;
vkCmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
0, 0, nullptr, 0, nullptr, 1, &prePresentBarrier);
// By ending the command buffer, it is put out of record mode.
err = vkEndCommandBuffer(cmdBuffer);
GVR_VK_CHECK(!err);
}
VkFence nullFence = VK_NULL_HANDLE;
VkSubmitInfo submitInfo = {};
submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submitInfo.pNext = nullptr;
submitInfo.waitSemaphoreCount = 1;
submitInfo.pWaitSemaphores = &m_backBufferSemaphore;
submitInfo.pWaitDstStageMask = nullptr;
submitInfo.commandBufferCount = 1;
submitInfo.pCommandBuffers = &m_swapchainBuffers[m_swapchainCurrentIdx].cmdBuffer;
submitInfo.signalSemaphoreCount = 1;
submitInfo.pSignalSemaphores = &m_renderCompleteSemaphore;
VkResult err;
err = vkQueueSubmit(m_queue, 1, &submitInfo, VK_NULL_HANDLE);
GVR_VK_CHECK(!err);
err = vkQueueWaitIdle(m_queue);
if(err != VK_SUCCESS)
LOGI("Vulkan vkQueueWaitIdle submit failed");
LOGI("Vulkan vkQueueWaitIdle submitted");
uint8_t * data;
static bool printflag = true;
if(printflag){
uint8_t * data;
err = vkMapMemory(m_device, m_swapchainBuffers[m_swapchainCurrentIdx].mem, 0, m_swapchainBuffers[m_swapchainCurrentIdx].size, 0, (void **)&data);
GVR_VK_CHECK(!err);
//void* data;
uint8_t *finaloutput = (uint8_t*)malloc(m_width*m_height*4* sizeof(uint8_t));
for(int i = 0; i < (320); i++)
finaloutput[i] = 0;
LOGI("Vulkna size of %d", sizeof(finaloutput));
//while(1) {
memcpy(finaloutput, data, (m_width*m_height*4* sizeof(uint8_t)));
LOGI("Vulkan memcpy map done");
float tt;
for (int i = 0; i < (m_width*m_height)-4; i++) {
//tt = (float) data[i];
LOGI("Vulkan Data %u, %u %u %u", data[i], data[i+1], data[i+2], data[i+3]);
i+=3;
}
texDataVulkan = data;//finaloutput;
LOGI("Vulkan data reading done");
vkUnmapMemory(m_device,m_swapchainBuffers[m_swapchainCurrentIdx].mem);
printflag = false;
}
}
// Entry point that would bootstrap the entire Vulkan renderer for this core:
// instance/device creation, swapchain, command buffers, vertex data, pipeline
// state, framebuffers and sync primitives, ending with command-buffer
// recording.
// NOTE(review): the whole body is compiled out via `#if 0`, so this function
// is currently a no-op — presumably initialization happens elsewhere; confirm
// before re-enabling any of these calls.
void VulkanCore::initVulkanCore()
{
#if 0
    InitVulkan();               // load Vulkan entry points via the loader
    CreateInstance();
    GetPhysicalDevices();
    InitDevice();
    InitSwapchain(1024 , 1024); // fixed 1024x1024 back buffer
    LOGI("Vulkan after swap chain");
    InitCommandbuffers();
    LOGI("Vulkan after cmd buffers");
    InitVertexBuffers();
    LOGI("Vulkan after vert buf");
    InitLayouts();
    LOGI("Vulkan after layout");
    InitRenderPass();
    LOGI("Vulkan after render pass");
    InitPipeline();
    LOGI("Vulkan after piplen");
    InitFrameBuffers();
    LOGI("Vulkan after FBO");
    InitSync();
    LOGI("Vulkan after synch");
    // Initialize our command buffers
    BuildCmdBuffer();
#endif
}
determinedcheetahs/cheetah_juniper | hadoop/docs/api/org/apache/hadoop/mapred/class-use/InvalidFileTypeException.html | 6104 | <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!--NewPage-->
<HTML>
<HEAD>
<!-- Generated by javadoc (build 1.6.0_31) on Mon Jul 22 15:25:23 PDT 2013 -->
<TITLE>
Uses of Class org.apache.hadoop.mapred.InvalidFileTypeException (Hadoop 1.2.1 API)
</TITLE>
<META NAME="date" CONTENT="2013-07-22">
<LINK REL ="stylesheet" TYPE="text/css" HREF="../../../../../stylesheet.css" TITLE="Style">
<SCRIPT type="text/javascript">
function windowTitle()
{
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="Uses of Class org.apache.hadoop.mapred.InvalidFileTypeException (Hadoop 1.2.1 API)";
}
}
</SCRIPT>
<NOSCRIPT>
</NOSCRIPT>
</HEAD>
<BODY BGCOLOR="white" onload="windowTitle();">
<HR>
<!-- ========= START OF TOP NAVBAR ======= -->
<A NAME="navbar_top"><!-- --></A>
<A HREF="#skip-navbar_top" title="Skip navigation links"></A>
<TABLE BORDER="0" WIDTH="100%" CELLPADDING="1" CELLSPACING="0" SUMMARY="">
<TR>
<TD COLSPAN=2 BGCOLOR="#EEEEFF" CLASS="NavBarCell1">
<A NAME="navbar_top_firstrow"><!-- --></A>
<TABLE BORDER="0" CELLPADDING="0" CELLSPACING="3" SUMMARY="">
<TR ALIGN="center" VALIGN="top">
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../overview-summary.html"><FONT CLASS="NavBarFont1"><B>Overview</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../package-summary.html"><FONT CLASS="NavBarFont1"><B>Package</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../org/apache/hadoop/mapred/InvalidFileTypeException.html" title="class in org.apache.hadoop.mapred"><FONT CLASS="NavBarFont1"><B>Class</B></FONT></A> </TD>
<TD BGCOLOR="#FFFFFF" CLASS="NavBarCell1Rev"> <FONT CLASS="NavBarFont1Rev"><B>Use</B></FONT> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../package-tree.html"><FONT CLASS="NavBarFont1"><B>Tree</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../deprecated-list.html"><FONT CLASS="NavBarFont1"><B>Deprecated</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../index-all.html"><FONT CLASS="NavBarFont1"><B>Index</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../help-doc.html"><FONT CLASS="NavBarFont1"><B>Help</B></FONT></A> </TD>
</TR>
</TABLE>
</TD>
<TD ALIGN="right" VALIGN="top" ROWSPAN=3><EM>
</EM>
</TD>
</TR>
<TR>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
PREV
NEXT</FONT></TD>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
<A HREF="../../../../../index.html?org/apache/hadoop/mapred/class-use/InvalidFileTypeException.html" target="_top"><B>FRAMES</B></A>
<A HREF="InvalidFileTypeException.html" target="_top"><B>NO FRAMES</B></A>
<SCRIPT type="text/javascript">
<!--
if(window==top) {
document.writeln('<A HREF="../../../../../allclasses-noframe.html"><B>All Classes</B></A>');
}
//-->
</SCRIPT>
<NOSCRIPT>
<A HREF="../../../../../allclasses-noframe.html"><B>All Classes</B></A>
</NOSCRIPT>
</FONT></TD>
</TR>
</TABLE>
<A NAME="skip-navbar_top"></A>
<!-- ========= END OF TOP NAVBAR ========= -->
<HR>
<CENTER>
<H2>
<B>Uses of Class<br>org.apache.hadoop.mapred.InvalidFileTypeException</B></H2>
</CENTER>
No usage of org.apache.hadoop.mapred.InvalidFileTypeException
<P>
<HR>
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<A NAME="navbar_bottom"><!-- --></A>
<A HREF="#skip-navbar_bottom" title="Skip navigation links"></A>
<TABLE BORDER="0" WIDTH="100%" CELLPADDING="1" CELLSPACING="0" SUMMARY="">
<TR>
<TD COLSPAN=2 BGCOLOR="#EEEEFF" CLASS="NavBarCell1">
<A NAME="navbar_bottom_firstrow"><!-- --></A>
<TABLE BORDER="0" CELLPADDING="0" CELLSPACING="3" SUMMARY="">
<TR ALIGN="center" VALIGN="top">
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../overview-summary.html"><FONT CLASS="NavBarFont1"><B>Overview</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../package-summary.html"><FONT CLASS="NavBarFont1"><B>Package</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../org/apache/hadoop/mapred/InvalidFileTypeException.html" title="class in org.apache.hadoop.mapred"><FONT CLASS="NavBarFont1"><B>Class</B></FONT></A> </TD>
<TD BGCOLOR="#FFFFFF" CLASS="NavBarCell1Rev"> <FONT CLASS="NavBarFont1Rev"><B>Use</B></FONT> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../package-tree.html"><FONT CLASS="NavBarFont1"><B>Tree</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../deprecated-list.html"><FONT CLASS="NavBarFont1"><B>Deprecated</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../index-all.html"><FONT CLASS="NavBarFont1"><B>Index</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../help-doc.html"><FONT CLASS="NavBarFont1"><B>Help</B></FONT></A> </TD>
</TR>
</TABLE>
</TD>
<TD ALIGN="right" VALIGN="top" ROWSPAN=3><EM>
</EM>
</TD>
</TR>
<TR>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
PREV
NEXT</FONT></TD>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
<A HREF="../../../../../index.html?org/apache/hadoop/mapred/class-use/InvalidFileTypeException.html" target="_top"><B>FRAMES</B></A>
<A HREF="InvalidFileTypeException.html" target="_top"><B>NO FRAMES</B></A>
<SCRIPT type="text/javascript">
<!--
if(window==top) {
document.writeln('<A HREF="../../../../../allclasses-noframe.html"><B>All Classes</B></A>');
}
//-->
</SCRIPT>
<NOSCRIPT>
<A HREF="../../../../../allclasses-noframe.html"><B>All Classes</B></A>
</NOSCRIPT>
</FONT></TD>
</TR>
</TABLE>
<A NAME="skip-navbar_bottom"></A>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
<HR>
Copyright © 2009 The Apache Software Foundation
</BODY>
</HTML>
| apache-2.0 |
newrocknj/horizon | openstack_dashboard/dashboards/identity/projects/tests.py | 82571 | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import datetime
import logging
import os
import django
from django.core.urlresolvers import reverse
from django import http
from django.test.utils import override_settings
from django.utils import timezone
from django.utils import unittest
from mox import IgnoreArg # noqa
from mox import IsA # noqa
from horizon import exceptions
from horizon.workflows import views
from openstack_dashboard import api
from openstack_dashboard.dashboards.identity.projects import workflows
from openstack_dashboard import policy_backend
from openstack_dashboard.test import helpers as test
from openstack_dashboard import usage
from openstack_dashboard.usage import quotas
with_sel = os.environ.get('WITH_SELENIUM', False)
if with_sel:
from selenium.webdriver import ActionChains # noqa
from selenium.webdriver.common import keys
from socket import timeout as socket_timeout # noqa
INDEX_URL = reverse('horizon:identity:projects:index')
USER_ROLE_PREFIX = workflows.PROJECT_USER_MEMBER_SLUG + "_role_"
GROUP_ROLE_PREFIX = workflows.PROJECT_GROUP_MEMBER_SLUG + "_role_"
PROJECT_DETAIL_URL = reverse('horizon:identity:projects:detail', args=[1])
class TenantsViewTests(test.BaseAdminViewTests):
    """Admin-view tests for the identity Projects (tenants) index table."""

    @test.create_stubs({api.keystone: ('tenant_list',)})
    def test_index(self):
        # With no domain context, the index lists every tenant, paginated
        # from the first page (marker=None).
        api.keystone.tenant_list(IsA(http.HttpRequest),
                                 domain=None,
                                 paginate=True,
                                 marker=None) \
            .AndReturn([self.tenants.list(), False])
        self.mox.ReplayAll()
        res = self.client.get(INDEX_URL)
        self.assertTemplateUsed(res, 'identity/projects/index.html')
        self.assertItemsEqual(res.context['table'].data, self.tenants.list())

    @test.create_stubs({api.keystone: ('tenant_list', )})
    def test_index_with_domain_context(self):
        # With a domain context in the session, only tenants belonging to
        # that domain are listed, and the domain name is shown in the page.
        domain = self.domains.get(id="1")
        self.setSessionValues(domain_context=domain.id,
                              domain_context_name=domain.name)
        domain_tenants = [tenant for tenant in self.tenants.list()
                          if tenant.domain_id == domain.id]
        api.keystone.tenant_list(IsA(http.HttpRequest),
                                 domain=domain.id,
                                 paginate=True,
                                 marker=None) \
            .AndReturn([domain_tenants, False])
        self.mox.ReplayAll()
        res = self.client.get(INDEX_URL)
        self.assertTemplateUsed(res, 'identity/projects/index.html')
        self.assertItemsEqual(res.context['table'].data, domain_tenants)
        self.assertContains(res, "<em>test_domain:</em>")
class ProjectsViewNonAdminTests(test.TestCase):
    """Non-admin tests: the index must list only the user's own projects."""

    @override_settings(POLICY_CHECK_FUNCTION=policy_backend.check)
    @test.create_stubs({api.keystone: ('tenant_list',)})
    def test_index(self):
        # For a non-admin, tenant_list is scoped to the current user and
        # called with admin=False.
        api.keystone.tenant_list(IsA(http.HttpRequest),
                                 user=self.user.id,
                                 paginate=True,
                                 marker=None,
                                 admin=False) \
            .AndReturn([self.tenants.list(), False])
        self.mox.ReplayAll()
        res = self.client.get(INDEX_URL)
        self.assertTemplateUsed(res, 'identity/projects/index.html')
        self.assertItemsEqual(res.context['table'].data, self.tenants.list())
class CreateProjectWorkflowTests(test.BaseAdminViewTests):
def _get_project_info(self, project):
    """Build the kwargs dict expected by api.keystone.tenant_create()."""
    domain = self._get_default_domain()
    project_info = {"name": project.name,
                    "description": project.description,
                    "enabled": project.enabled,
                    "domain": domain.id}
    return project_info
def _get_workflow_fields(self, project):
    """Return the project-info form fields posted to the workflow."""
    domain = self._get_default_domain()
    return {
        "domain_id": domain.id,
        "domain_name": domain.name,
        "name": project.name,
        "description": project.description,
        "enabled": project.enabled,
    }
def _get_quota_info(self, quota):
    """Flatten nova/cinder/neutron quota objects into {field: int(limit)}."""
    cinder_quota = self.cinder_quotas.first()
    neutron_quota = self.neutron_quotas.first()
    # Start from the nova fields, then fold in cinder and neutron values.
    quota_data = dict((field, int(quota.get(field).limit))
                      for field in quotas.NOVA_QUOTA_FIELDS)
    quota_data.update((field, int(cinder_quota.get(field).limit))
                      for field in quotas.CINDER_QUOTA_FIELDS)
    quota_data.update((field, int(neutron_quota.get(field).limit))
                      for field in quotas.NEUTRON_QUOTA_FIELDS)
    return quota_data
def _get_workflow_data(self, project, quota):
    """Combine project form fields and quota fields into one POST payload."""
    data = self._get_workflow_fields(project)
    data.update(self._get_quota_info(quota))
    return data
def _get_default_domain(self):
    """Return the effective domain for the test.

    Uses the session's domain context when one is set, otherwise falls
    back to the fixture's default domain; the result is wrapped so it
    exposes .id/.name attributes like a keystone domain object.
    """
    default_domain = self.domain
    domain = {"id": self.request.session.get('domain_context',
                                             default_domain.id),
              "name": self.request.session.get('domain_context_name',
                                               default_domain.name)}
    return api.base.APIDictWrapper(domain)
def _get_all_users(self, domain_id):
    """Return fixture users, filtered to ``domain_id`` when one is given."""
    users = self.users.list()
    if domain_id:
        users = [u for u in users if u.domain_id == domain_id]
    return users
def _get_all_groups(self, domain_id):
    """Return fixture groups, filtered to ``domain_id`` when one is given."""
    groups = self.groups.list()
    if domain_id:
        groups = [g for g in groups if g.domain_id == domain_id]
    return groups
@test.create_stubs({api.keystone: ('get_default_domain',
                                   'get_default_role',
                                   'user_list',
                                   'group_list',
                                   'role_list'),
                    api.base: ('is_service_enabled',),
                    api.neutron: ('is_extension_supported',),
                    quotas: ('get_default_quota_data',)})
def test_add_project_get(self):
    """GET of the create-project workflow renders all four steps with the
    default quota values pre-filled."""
    quota = self.quotas.first()
    default_role = self.roles.first()
    default_domain = self._get_default_domain()
    domain_id = default_domain.id
    users = self._get_all_users(domain_id)
    groups = self._get_all_groups(domain_id)
    roles = self.roles.list()
    # init
    api.base.is_service_enabled(IsA(http.HttpRequest), 'network') \
        .MultipleTimes().AndReturn(True)
    api.base.is_service_enabled(IsA(http.HttpRequest), 'volume') \
        .MultipleTimes().AndReturn(True)
    api.keystone.get_default_domain(IsA(http.HttpRequest)) \
        .AndReturn(default_domain)
    api.neutron.is_extension_supported(
        IsA(http.HttpRequest), 'security-group').AndReturn(True)
    quotas.get_default_quota_data(IsA(http.HttpRequest)).AndReturn(quota)
    api.keystone.get_default_role(IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(default_role)
    api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
        .AndReturn(users)
    api.keystone.role_list(IsA(http.HttpRequest)).AndReturn(roles)
    api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
        .AndReturn(groups)
    api.keystone.role_list(IsA(http.HttpRequest)).AndReturn(roles)
    self.mox.ReplayAll()
    url = reverse('horizon:identity:projects:create')
    res = self.client.get(url)
    self.assertTemplateUsed(res, views.WorkflowView.template_name)
    # Neutron quotas are disabled here, so the subnet quota is rendered
    # as a plain hidden input.
    self.assertContains(res, '<input type="hidden" name="subnet" '
                             'id="id_subnet" />', html=True)
    workflow = res.context['workflow']
    self.assertEqual(res.context['workflow'].name,
                     workflows.CreateProject.name)
    # The quota defaults must seed the project-info step's initial data.
    step = workflow.get_step("createprojectinfoaction")
    self.assertEqual(step.action.initial['ram'], quota.get('ram').limit)
    self.assertEqual(step.action.initial['injected_files'],
                     quota.get('injected_files').limit)
    self.assertQuerysetEqual(
        workflow.steps,
        ['<CreateProjectInfo: createprojectinfoaction>',
         '<UpdateProjectMembers: update_members>',
         '<UpdateProjectGroups: update_group_members>',
         '<CreateProjectQuota: create_quotas>'])
def test_add_project_get_domain(self):
    # Same as test_add_project_get, but with a domain context set in the
    # session, exercising the domain-scoped code paths.
    domain = self.domains.get(id="1")
    self.setSessionValues(domain_context=domain.id,
                          domain_context_name=domain.name)
    self.test_add_project_get()
@test.create_stubs({api.keystone: ('get_default_role',
                                   'user_list',
                                   'group_list',
                                   'role_list',
                                   'domain_get'),
                    api.neutron: ('is_extension_supported',
                                  'tenant_quota_get'),
                    quotas: ('get_default_quota_data',)})
@test.update_settings(OPENSTACK_NEUTRON_NETWORK={'enable_quotas': True})
def test_add_project_get_with_neutron(self):
    """With neutron quotas enabled, the create form shows an editable
    subnet quota field seeded from the tenant's neutron quota."""
    quota = self.quotas.first()
    neutron_quotas = self.neutron_quotas.first()
    quotas.get_default_quota_data(IsA(http.HttpRequest)) \
        .AndReturn(quota)
    api.neutron.is_extension_supported(IsA(http.HttpRequest), 'quotas') \
        .MultipleTimes().AndReturn(True)
    api.neutron.is_extension_supported(
        IsA(http.HttpRequest), 'security-group').AndReturn(True)
    api.neutron.tenant_quota_get(IsA(http.HttpRequest),
                                 tenant_id=self.tenant.id) \
        .AndReturn(neutron_quotas)
    api.keystone.get_default_role(IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(self.roles.first())
    api.keystone.user_list(IsA(http.HttpRequest), domain=None) \
        .AndReturn(self.users.list())
    api.keystone.role_list(IsA(http.HttpRequest)) \
        .AndReturn(self.roles.list())
    api.keystone.group_list(IsA(http.HttpRequest), domain=None) \
        .AndReturn(self.groups.list())
    api.keystone.role_list(IsA(http.HttpRequest)) \
        .AndReturn(self.roles.list())
    self.mox.ReplayAll()
    res = self.client.get(reverse('horizon:identity:projects:create'))
    self.assertTemplateUsed(res, views.WorkflowView.template_name)
    # Django >= 1.6 renders IntegerField as a number input; older
    # versions render a text input, hence the version switch.
    if django.VERSION >= (1, 6):
        self.assertContains(res, '''
            <input class="form-control"
                   id="id_subnet" min="-1"
                   name="subnet" type="number" value="10" />
            ''', html=True)
    else:
        self.assertContains(res, '''
            <input class="form-control"
                   name="subnet" id="id_subnet"
                   value="10" type="text" />
            ''', html=True)
    workflow = res.context['workflow']
    self.assertEqual(res.context['workflow'].name,
                     workflows.CreateProject.name)
    step = workflow.get_step("createprojectinfoaction")
    self.assertEqual(step.action.initial['ram'], quota.get('ram').limit)
    self.assertEqual(step.action.initial['subnet'],
                     neutron_quotas.get('subnet').limit)
@test.create_stubs({api.keystone: ('get_default_role',
                                   'add_tenant_user_role',
                                   'tenant_create',
                                   'user_list',
                                   'group_list',
                                   'role_list',
                                   'domain_get'),
                    quotas: ('get_default_quota_data',
                             'get_disabled_quotas',
                             'tenant_quota_usages',),
                    api.cinder: ('tenant_quota_update',),
                    api.nova: ('tenant_quota_update',)})
def test_add_project_post(self, neutron=False):
    """POSTing the create-project workflow creates the tenant, assigns
    member/group roles and pushes nova and cinder quota updates.

    ``neutron=True`` is used by test_add_project_post_with_neutron to add
    the extra neutron expectations on top of this base flow.
    """
    project = self.tenants.first()
    quota = self.quotas.first()
    default_role = self.roles.first()
    default_domain = self._get_default_domain()
    domain_id = default_domain.id
    users = self._get_all_users(domain_id)
    groups = self._get_all_groups(domain_id)
    roles = self.roles.list()
    # init
    quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
        .AndReturn(self.disabled_quotas.first())
    if neutron:
        # The neutron variant triggers a second disabled-quotas lookup.
        quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
            .AndReturn(self.disabled_quotas.first())
    quotas.get_default_quota_data(IsA(http.HttpRequest)).AndReturn(quota)
    api.keystone.get_default_role(IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(default_role)
    api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
        .AndReturn(users)
    api.keystone.role_list(IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(roles)
    api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
        .AndReturn(groups)
    # handle
    project_details = self._get_project_info(project)
    quota_data = self._get_quota_info(quota)
    api.keystone.tenant_create(IsA(http.HttpRequest), **project_details) \
        .AndReturn(project)
    workflow_data = {}
    # NOTE(review): workflow_data is empty at this point, so these two
    # loops record no role-assignment expectations; they only take effect
    # if role data is added to workflow_data above.
    for role in roles:
        if USER_ROLE_PREFIX + role.id in workflow_data:
            ulist = workflow_data[USER_ROLE_PREFIX + role.id]
            for user_id in ulist:
                api.keystone.add_tenant_user_role(IsA(http.HttpRequest),
                                                  project=self.tenant.id,
                                                  user=user_id,
                                                  role=role.id)
    for role in roles:
        if GROUP_ROLE_PREFIX + role.id in workflow_data:
            ulist = workflow_data[GROUP_ROLE_PREFIX + role.id]
            for group_id in ulist:
                api.keystone.add_group_role(IsA(http.HttpRequest),
                                            role=role.id,
                                            group=group_id,
                                            project=self.tenant.id)
    nova_updated_quota = dict([(key, quota_data[key]) for key in
                               quotas.NOVA_QUOTA_FIELDS])
    api.nova.tenant_quota_update(IsA(http.HttpRequest),
                                 project.id,
                                 **nova_updated_quota)
    cinder_updated_quota = dict([(key, quota_data[key]) for key in
                                 quotas.CINDER_QUOTA_FIELDS])
    api.cinder.tenant_quota_update(IsA(http.HttpRequest),
                                   project.id,
                                   **cinder_updated_quota)
    self.mox.ReplayAll()
    workflow_data.update(self._get_workflow_data(project, quota))
    url = reverse('horizon:identity:projects:create')
    res = self.client.post(url, workflow_data)
    self.assertNoFormErrors(res)
    self.assertRedirectsNoFollow(res, INDEX_URL)
def test_add_project_post_domain(self):
    # Re-run the POST test with a domain context set in the session.
    domain = self.domains.get(id="1")
    self.setSessionValues(domain_context=domain.id,
                          domain_context_name=domain.name)
    self.test_add_project_post()
@test.create_stubs({api.neutron: ('is_extension_supported',
                                  'tenant_quota_update')})
@test.update_settings(OPENSTACK_NEUTRON_NETWORK={'enable_quotas': True})
def test_add_project_post_with_neutron(self):
    # Layer the neutron quota expectations on top of the base POST test,
    # then delegate to it with neutron=True.
    quota_data = self.neutron_quotas.first()
    neutron_updated_quota = dict([(key, quota_data.get(key).limit)
                                  for key in quotas.NEUTRON_QUOTA_FIELDS])
    api.neutron.is_extension_supported(
        IsA(http.HttpRequest), 'security-group').AndReturn(True)
    api.neutron.is_extension_supported(IsA(http.HttpRequest), 'quotas') \
        .MultipleTimes().AndReturn(True)
    api.neutron.tenant_quota_update(IsA(http.HttpRequest),
                                    self.tenant.id,
                                    **neutron_updated_quota)
    self.test_add_project_post(neutron=True)
@test.create_stubs({api.keystone: ('user_list',
                                   'role_list',
                                   'group_list',
                                   'get_default_domain',
                                   'get_default_role'),
                    quotas: ('get_default_quota_data',
                             'get_disabled_quotas')})
def test_add_project_quota_defaults_error(self):
    """If fetching default quotas fails, the form still renders and shows
    an error message instead of quota values."""
    default_role = self.roles.first()
    default_domain = self._get_default_domain()
    domain_id = default_domain.id
    users = self._get_all_users(domain_id)
    groups = self._get_all_groups(domain_id)
    roles = self.roles.list()
    # init
    api.keystone.get_default_domain(IsA(http.HttpRequest)) \
        .AndReturn(default_domain)
    quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
        .AndReturn(self.disabled_quotas.first())
    quotas.get_default_quota_data(IsA(http.HttpRequest)) \
        .AndRaise(self.exceptions.nova)
    api.keystone.get_default_role(IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(default_role)
    api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
        .AndReturn(users)
    api.keystone.role_list(IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(roles)
    api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
        .AndReturn(groups)
    self.mox.ReplayAll()
    url = reverse('horizon:identity:projects:create')
    res = self.client.get(url)
    self.assertTemplateUsed(res, views.WorkflowView.template_name)
    self.assertContains(res, "Unable to retrieve default quota values")
def test_add_project_quota_defaults_error_domain(self):
    # Quota-defaults failure path, with a domain context set.
    domain = self.domains.get(id="1")
    self.setSessionValues(domain_context=domain.id,
                          domain_context_name=domain.name)
    self.test_add_project_quota_defaults_error()
@test.create_stubs({api.keystone: ('tenant_create',
                                   'user_list',
                                   'role_list',
                                   'group_list',
                                   'get_default_domain',
                                   'get_default_role'),
                    quotas: ('get_default_quota_data',
                             'get_disabled_quotas',
                             'tenant_quota_usages')})
def test_add_project_tenant_create_error(self):
    """A keystone failure in tenant_create aborts the workflow and
    redirects back to the index without a form error."""
    project = self.tenants.first()
    quota = self.quotas.first()
    default_role = self.roles.first()
    default_domain = self._get_default_domain()
    domain_id = default_domain.id
    users = self._get_all_users(domain_id)
    groups = self._get_all_groups(domain_id)
    roles = self.roles.list()
    # init
    api.keystone.get_default_domain(IsA(http.HttpRequest)) \
        .AndReturn(default_domain)
    quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
        .AndReturn(self.disabled_quotas.first())
    quotas.get_default_quota_data(IsA(http.HttpRequest)).AndReturn(quota)
    api.keystone.get_default_role(IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(default_role)
    api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
        .AndReturn(users)
    api.keystone.role_list(IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(roles)
    api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
        .AndReturn(groups)
    # handle
    project_details = self._get_project_info(project)
    api.keystone.tenant_create(IsA(http.HttpRequest), **project_details) \
        .AndRaise(self.exceptions.keystone)
    self.mox.ReplayAll()
    workflow_data = self._get_workflow_data(project, quota)
    url = reverse('horizon:identity:projects:create')
    res = self.client.post(url, workflow_data)
    self.assertNoFormErrors(res)
    self.assertRedirectsNoFollow(res, INDEX_URL)
def test_add_project_tenant_create_error_domain(self):
    # tenant_create failure path, with a domain context set.
    domain = self.domains.get(id="1")
    self.setSessionValues(domain_context=domain.id,
                          domain_context_name=domain.name)
    self.test_add_project_tenant_create_error()
@test.create_stubs({api.keystone: ('tenant_create',
                                   'user_list',
                                   'role_list',
                                   'group_list',
                                   'get_default_domain',
                                   'get_default_role',
                                   'add_tenant_user_role'),
                    quotas: ('get_default_quota_data',
                             'get_disabled_quotas',
                             'tenant_quota_usages'),
                    api.nova: ('tenant_quota_update',)})
def test_add_project_quota_update_error(self):
    """A nova quota-update failure after the tenant is created still ends
    with a redirect to the index (the error is reported, not fatal)."""
    project = self.tenants.first()
    quota = self.quotas.first()
    default_role = self.roles.first()
    default_domain = self._get_default_domain()
    domain_id = default_domain.id
    users = self._get_all_users(domain_id)
    groups = self._get_all_groups(domain_id)
    roles = self.roles.list()
    # init
    api.keystone.get_default_domain(IsA(http.HttpRequest)) \
        .AndReturn(default_domain)
    quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
        .AndReturn(self.disabled_quotas.first())
    quotas.get_default_quota_data(IsA(http.HttpRequest)).AndReturn(quota)
    api.keystone.get_default_role(IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(default_role)
    api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
        .AndReturn(users)
    api.keystone.role_list(IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(roles)
    api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
        .AndReturn(groups)
    # handle
    project_details = self._get_project_info(project)
    quota_data = self._get_quota_info(quota)
    api.keystone.tenant_create(IsA(http.HttpRequest), **project_details) \
        .AndReturn(project)
    workflow_data = {}
    # NOTE(review): workflow_data is empty, so these two loops record no
    # role-assignment expectations.
    for role in roles:
        if USER_ROLE_PREFIX + role.id in workflow_data:
            ulist = workflow_data[USER_ROLE_PREFIX + role.id]
            for user_id in ulist:
                api.keystone.add_tenant_user_role(IsA(http.HttpRequest),
                                                  project=self.tenant.id,
                                                  user=user_id,
                                                  role=role.id)
    for role in roles:
        if GROUP_ROLE_PREFIX + role.id in workflow_data:
            ulist = workflow_data[GROUP_ROLE_PREFIX + role.id]
            for group_id in ulist:
                api.keystone.add_group_role(IsA(http.HttpRequest),
                                            role=role.id,
                                            group=group_id,
                                            project=self.tenant.id)
    nova_updated_quota = dict([(key, quota_data[key]) for key in
                               quotas.NOVA_QUOTA_FIELDS])
    api.nova.tenant_quota_update(IsA(http.HttpRequest),
                                 project.id,
                                 **nova_updated_quota) \
        .AndRaise(self.exceptions.nova)
    self.mox.ReplayAll()
    workflow_data.update(self._get_workflow_data(project, quota))
    url = reverse('horizon:identity:projects:create')
    res = self.client.post(url, workflow_data)
    self.assertNoFormErrors(res)
    self.assertRedirectsNoFollow(res, INDEX_URL)
def test_add_project_quota_update_error_domain(self):
    # Quota-update failure path, with a domain context set.
    domain = self.domains.get(id="1")
    self.setSessionValues(domain_context=domain.id,
                          domain_context_name=domain.name)
    self.test_add_project_quota_update_error()
@test.create_stubs({api.keystone: ('tenant_create',
                                   'user_list',
                                   'role_list',
                                   'group_list',
                                   'get_default_domain',
                                   'get_default_role',
                                   'add_tenant_user_role'),
                    quotas: ('get_default_quota_data',
                             'get_disabled_quotas',
                             'tenant_quota_usages'),
                    api.cinder: ('tenant_quota_update',),
                    api.nova: ('tenant_quota_update',)})
def test_add_project_user_update_error(self):
    """A failure while assigning a user role does not stop the workflow:
    quotas are still pushed and the view redirects to the index."""
    project = self.tenants.first()
    quota = self.quotas.first()
    default_role = self.roles.first()
    default_domain = self._get_default_domain()
    domain_id = default_domain.id
    users = self._get_all_users(domain_id)
    groups = self._get_all_groups(domain_id)
    roles = self.roles.list()
    # init
    api.keystone.get_default_domain(IsA(http.HttpRequest)) \
        .AndReturn(default_domain)
    quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
        .AndReturn(self.disabled_quotas.first())
    quotas.get_default_quota_data(IsA(http.HttpRequest)).AndReturn(quota)
    api.keystone.get_default_role(IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(default_role)
    api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
        .AndReturn(users)
    api.keystone.role_list(IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(roles)
    api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
        .AndReturn(groups)
    # handle
    project_details = self._get_project_info(project)
    quota_data = self._get_quota_info(quota)
    api.keystone.tenant_create(IsA(http.HttpRequest), **project_details) \
        .AndReturn(project)
    workflow_data = {}
    # NOTE(review): workflow_data is empty, so the AndRaise expectation
    # below is never actually recorded; the loop exits immediately.
    for role in roles:
        if USER_ROLE_PREFIX + role.id in workflow_data:
            ulist = workflow_data[USER_ROLE_PREFIX + role.id]
            for user_id in ulist:
                api.keystone.add_tenant_user_role(IsA(http.HttpRequest),
                                                  project=self.tenant.id,
                                                  user=user_id,
                                                  role=role.id) \
                    .AndRaise(self.exceptions.keystone)
                break
            break
    nova_updated_quota = dict([(key, quota_data[key]) for key in
                               quotas.NOVA_QUOTA_FIELDS])
    api.nova.tenant_quota_update(IsA(http.HttpRequest),
                                 project.id,
                                 **nova_updated_quota)
    cinder_updated_quota = dict([(key, quota_data[key]) for key in
                                 quotas.CINDER_QUOTA_FIELDS])
    api.cinder.tenant_quota_update(IsA(http.HttpRequest),
                                   project.id,
                                   **cinder_updated_quota)
    self.mox.ReplayAll()
    workflow_data.update(self._get_workflow_data(project, quota))
    url = reverse('horizon:identity:projects:create')
    res = self.client.post(url, workflow_data)
    self.assertNoFormErrors(res)
    self.assertRedirectsNoFollow(res, INDEX_URL)
def test_add_project_user_update_error_domain(self):
    # User-role-assignment failure path, with a domain context set.
    domain = self.domains.get(id="1")
    self.setSessionValues(domain_context=domain.id,
                          domain_context_name=domain.name)
    self.test_add_project_user_update_error()
@test.create_stubs({api.keystone: ('user_list',
                                   'role_list',
                                   'group_list',
                                   'get_default_domain',
                                   'get_default_role'),
                    quotas: ('get_default_quota_data',
                             'get_disabled_quotas',
                             'tenant_quota_usages')})
def test_add_project_missing_field_error(self):
    """Submitting the workflow with an empty project name re-renders the
    form with a 'field is required' validation error."""
    project = self.tenants.first()
    quota = self.quotas.first()
    default_role = self.roles.first()
    default_domain = self._get_default_domain()
    domain_id = default_domain.id
    users = self._get_all_users(domain_id)
    groups = self._get_all_groups(domain_id)
    roles = self.roles.list()
    # init
    api.keystone.get_default_domain(IsA(http.HttpRequest)) \
        .AndReturn(default_domain)
    quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
        .AndReturn(self.disabled_quotas.first())
    quotas.get_default_quota_data(IsA(http.HttpRequest)).AndReturn(quota)
    api.keystone.get_default_role(IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(default_role)
    api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
        .AndReturn(users)
    api.keystone.role_list(IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(roles)
    api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
        .AndReturn(groups)
    self.mox.ReplayAll()
    workflow_data = self._get_workflow_data(project, quota)
    workflow_data["name"] = ""
    url = reverse('horizon:identity:projects:create')
    res = self.client.post(url, workflow_data)
    self.assertContains(res, "field is required")
def test_add_project_missing_field_error_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_add_project_missing_field_error()
    @test.create_stubs({api.keystone: ('user_list',
                                       'role_list',
                                       'group_list',
                                       'get_default_domain',
                                       'get_default_role',
                                       'tenant_list'),
                        quotas: ('get_default_quota_data',
                                 'get_disabled_quotas',
                                 'tenant_quota_usages')})
    def test_add_project_name_already_in_use_error(self):
        """Creating a project whose name already exists must show an
        'already in use' error.

        Duplicate-name validation is only performed against keystone v3
        (tenant_list with a name filter), so the test is a no-op on v2.
        """
        keystone_api_version = api.keystone.VERSIONS.active
        if keystone_api_version < 3:
            return

        project = self.tenants.first()
        quota = self.quotas.first()
        default_role = self.roles.first()
        default_domain = self._get_default_domain()
        domain_id = default_domain.id
        users = self._get_all_users(domain_id)
        groups = self._get_all_groups(domain_id)
        roles = self.roles.list()

        # init
        # Returning a truthy project from the filtered tenant_list is what
        # makes the form's uniqueness check fail.
        api.keystone.tenant_list(IgnoreArg(),
                                 domain=domain_id,
                                 filters={"name": project.name})\
            .AndReturn(project)
        api.keystone.get_default_domain(IsA(http.HttpRequest)) \
            .AndReturn(default_domain)
        quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
            .AndReturn(self.disabled_quotas.first())
        quotas.get_default_quota_data(IsA(http.HttpRequest)).AndReturn(quota)
        api.keystone.get_default_role(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(default_role)
        api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
            .AndReturn(users)
        api.keystone.role_list(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(roles)
        api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
            .AndReturn(groups)

        self.mox.ReplayAll()

        workflow_data = self._get_workflow_data(project, quota)

        url = reverse('horizon:identity:projects:create')
        res = self.client.post(url, workflow_data)

        self.assertContains(res, 'already in use')
class UpdateProjectWorkflowTests(test.BaseAdminViewTests):
    """Tests for the identity "update project" workflow.

    Each test records an exact sequence of mox expectations against the
    keystone/nova/cinder/quotas API wrappers, replays them, drives the
    workflow through the Django test client, and verifies redirects and
    message counts.  The expectation order mirrors the workflow's call
    order, so statements here must not be reordered.
    """

    def _get_quota_info(self, quota):
        """Flatten nova/cinder/neutron quota fixtures into a single
        {field: int limit} dict, as the quota form would submit it."""
        cinder_quota = self.cinder_quotas.first()
        neutron_quota = self.neutron_quotas.first()
        quota_data = {}
        for field in quotas.NOVA_QUOTA_FIELDS:
            quota_data[field] = int(quota.get(field).limit)
        for field in quotas.CINDER_QUOTA_FIELDS:
            quota_data[field] = int(cinder_quota.get(field).limit)
        for field in quotas.NEUTRON_QUOTA_FIELDS:
            quota_data[field] = int(neutron_quota.get(field).limit)
        return quota_data

    def _get_all_users(self, domain_id):
        """Return user fixtures, filtered to ``domain_id`` when given."""
        if not domain_id:
            users = self.users.list()
        else:
            users = [user for user in self.users.list()
                     if user.domain_id == domain_id]
        return users

    def _get_all_groups(self, domain_id):
        """Return group fixtures, filtered to ``domain_id`` when given."""
        if not domain_id:
            groups = self.groups.list()
        else:
            groups = [group for group in self.groups.list()
                      if group.domain_id == domain_id]
        return groups

    def _get_proj_users(self, project_id):
        """Users whose fixture ``project_id`` matches the given project."""
        return [user for user in self.users.list()
                if user.project_id == project_id]

    def _get_proj_groups(self, project_id):
        """Groups whose fixture ``project_id`` matches the given project."""
        return [group for group in self.groups.list()
                if group.project_id == project_id]

    def _get_proj_role_assignment(self, project_id):
        """Role-assignment fixtures scoped to the given project."""
        project_scope = {'project': {'id': project_id}}
        return self.role_assignments.filter(scope=project_scope)

    def _check_role_list(self, keystone_api_version, role_assignments, groups,
                         proj_users, roles, workflow_data):
        """Record the member/group role add/remove expectations shared by
        several tests, and fill ``workflow_data`` with the matching form
        input.  Branches on keystone v3 (role_assignments_list + group
        roles) vs. v2 (per-user roles_for_user).
        """
        if keystone_api_version >= 3:
            # admin role with attempt to remove current admin, results in
            # warning message
            workflow_data[USER_ROLE_PREFIX + "1"] = ['3']
            # member role
            workflow_data[USER_ROLE_PREFIX + "2"] = ['1', '3']
            # admin role
            workflow_data[GROUP_ROLE_PREFIX + "1"] = ['2', '3']
            # member role
            workflow_data[GROUP_ROLE_PREFIX + "2"] = ['1', '2', '3']
            api.keystone.role_assignments_list(IsA(http.HttpRequest),
                                               project=self.tenant.id) \
                .AndReturn(role_assignments)

            # Give user 1 role 2
            api.keystone.add_tenant_user_role(IsA(http.HttpRequest),
                                              project=self.tenant.id,
                                              user='1',
                                              role='2',)
            # remove role 2 from user 2
            api.keystone.remove_tenant_user_role(IsA(http.HttpRequest),
                                                 project=self.tenant.id,
                                                 user='2',
                                                 role='2')

            # Give user 3 role 1
            api.keystone.add_tenant_user_role(IsA(http.HttpRequest),
                                              project=self.tenant.id,
                                              user='3',
                                              role='1',)
            api.keystone.group_list(IsA(http.HttpRequest),
                                    domain=self.domain.id,
                                    project=self.tenant.id) \
                .AndReturn(groups)

            api.keystone.roles_for_group(IsA(http.HttpRequest),
                                         group='1',
                                         project=self.tenant.id) \
                .AndReturn(roles)
            api.keystone.remove_group_role(IsA(http.HttpRequest),
                                           project=self.tenant.id,
                                           group='1',
                                           role='1')
            api.keystone.roles_for_group(IsA(http.HttpRequest),
                                         group='2',
                                         project=self.tenant.id) \
                .AndReturn(roles)
            api.keystone.roles_for_group(IsA(http.HttpRequest),
                                         group='3',
                                         project=self.tenant.id) \
                .AndReturn(roles)
        else:
            api.keystone.user_list(IsA(http.HttpRequest),
                                   project=self.tenant.id) \
                .AndReturn(proj_users)

            # admin user - try to remove all roles on current project, warning
            api.keystone.roles_for_user(IsA(http.HttpRequest), '1',
                                        self.tenant.id).AndReturn(roles)

            # member user 1 - has role 1, will remove it
            api.keystone.roles_for_user(IsA(http.HttpRequest), '2',
                                        self.tenant.id).AndReturn((roles[1],))

            # member user 3 - has role 2
            api.keystone.roles_for_user(IsA(http.HttpRequest), '3',
                                        self.tenant.id).AndReturn((roles[0],))

            # add role 2
            api.keystone.add_tenant_user_role(IsA(http.HttpRequest),
                                              project=self.tenant.id,
                                              user='3',
                                              role='2')\
                .AndRaise(self.exceptions.keystone)

    @test.create_stubs({api.keystone: ('get_default_role',
                                       'roles_for_user',
                                       'tenant_get',
                                       'domain_get',
                                       'user_list',
                                       'roles_for_group',
                                       'group_list',
                                       'role_list',
                                       'role_assignments_list'),
                        quotas: ('get_tenant_quota_data',
                                 'get_disabled_quotas')})
    def test_update_project_get(self):
        """GET on the update-project workflow renders all four steps with
        the project's current name/description and quota limits as the
        form's initial data."""
        keystone_api_version = api.keystone.VERSIONS.active

        project = self.tenants.first()
        quota = self.quotas.first()
        default_role = self.roles.first()
        domain_id = project.domain_id
        users = self._get_all_users(domain_id)
        groups = self._get_all_groups(domain_id)
        roles = self.roles.list()
        proj_users = self._get_proj_users(project.id)
        role_assignments = self._get_proj_role_assignment(project.id)

        api.keystone.tenant_get(IsA(http.HttpRequest),
                                self.tenant.id, admin=True) \
            .AndReturn(project)
        api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \
            .AndReturn(self.domain)
        quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
            .AndReturn(self.disabled_quotas.first())
        quotas.get_tenant_quota_data(IsA(http.HttpRequest),
                                     tenant_id=self.tenant.id) \
            .AndReturn(quota)
        api.keystone.get_default_role(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(default_role)
        api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
            .AndReturn(users)
        api.keystone.role_list(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(roles)
        api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
            .AndReturn(groups)

        if keystone_api_version >= 3:
            api.keystone.role_assignments_list(IsA(http.HttpRequest),
                                               project=self.tenant.id) \
                .AndReturn(role_assignments)
        else:
            api.keystone.user_list(IsA(http.HttpRequest),
                                   project=self.tenant.id) \
                .AndReturn(proj_users)
            for user in proj_users:
                api.keystone.roles_for_user(IsA(http.HttpRequest),
                                            user.id,
                                            self.tenant.id).AndReturn(roles)
            api.keystone.role_assignments_list(IsA(http.HttpRequest),
                                               project=self.tenant.id) \
                .AndReturn(role_assignments)
        self.mox.ReplayAll()

        url = reverse('horizon:identity:projects:update',
                      args=[self.tenant.id])
        res = self.client.get(url)

        self.assertTemplateUsed(res, views.WorkflowView.template_name)
        workflow = res.context['workflow']
        self.assertEqual(res.context['workflow'].name,
                         workflows.UpdateProject.name)

        step = workflow.get_step("update_info")
        self.assertEqual(step.action.initial['ram'], quota.get('ram').limit)
        self.assertEqual(step.action.initial['injected_files'],
                         quota.get('injected_files').limit)
        self.assertEqual(step.action.initial['name'], project.name)
        self.assertEqual(step.action.initial['description'],
                         project.description)
        self.assertQuerysetEqual(
            workflow.steps,
            ['<UpdateProjectInfo: update_info>',
             '<UpdateProjectMembers: update_members>',
             '<UpdateProjectGroups: update_group_members>',
             '<UpdateProjectQuota: update_quotas>'])

    @test.create_stubs({api.keystone: ('tenant_get',
                                       'domain_get',
                                       'tenant_update',
                                       'get_default_role',
                                       'roles_for_user',
                                       'remove_tenant_user_role',
                                       'add_tenant_user_role',
                                       'user_list',
                                       'roles_for_group',
                                       'remove_group_role',
                                       'add_group_role',
                                       'group_list',
                                       'role_list',
                                       'role_assignments_list'),
                        api.nova: ('tenant_quota_update',),
                        api.cinder: ('tenant_quota_update',),
                        quotas: ('get_tenant_quota_data',
                                 'get_disabled_quotas',
                                 'tenant_quota_usages')})
    def test_update_project_save(self, neutron=False):
        """Successful save path: project info, member/group roles, and
        nova/cinder quotas are all updated; exactly one warning (from the
        attempted removal of the current admin's role) is expected.

        ``neutron=True`` is only passed by the neutron wrapper test below,
        which records the extra neutron expectations itself.
        """
        keystone_api_version = api.keystone.VERSIONS.active

        project = self.tenants.first()
        quota = self.quotas.first()
        default_role = self.roles.first()
        domain_id = project.domain_id
        users = self._get_all_users(domain_id)
        proj_users = self._get_proj_users(project.id)
        groups = self._get_all_groups(domain_id)
        roles = self.roles.list()
        role_assignments = self._get_proj_role_assignment(project.id)
        quota_usages = self.quota_usages.first()

        # get/init
        api.keystone.tenant_get(IsA(http.HttpRequest),
                                self.tenant.id, admin=True) \
            .AndReturn(project)
        api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \
            .AndReturn(self.domain)

        quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
            .AndReturn(self.disabled_quotas.first())
        if neutron:
            quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
                .AndReturn(self.disabled_quotas.first())
        quotas.get_tenant_quota_data(IsA(http.HttpRequest),
                                     tenant_id=self.tenant.id) \
            .AndReturn(quota)

        api.keystone.get_default_role(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(default_role)
        api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
            .AndReturn(users)
        api.keystone.role_list(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(roles)
        api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
            .AndReturn(groups)

        workflow_data = {}

        if keystone_api_version >= 3:
            api.keystone.role_assignments_list(IsA(http.HttpRequest),
                                               project=self.tenant.id) \
                .AndReturn(role_assignments)
        else:
            api.keystone.user_list(IsA(http.HttpRequest),
                                   project=self.tenant.id) \
                .AndReturn(proj_users)

            for user in proj_users:
                api.keystone.roles_for_user(IsA(http.HttpRequest),
                                            user.id,
                                            self.tenant.id).AndReturn(roles)

            api.keystone.role_assignments_list(IsA(http.HttpRequest),
                                               project=self.tenant.id) \
                .AndReturn(role_assignments)

        workflow_data[USER_ROLE_PREFIX + "1"] = ['3']  # admin role
        workflow_data[USER_ROLE_PREFIX + "2"] = ['2']  # member role

        # Group assignment form data
        workflow_data[GROUP_ROLE_PREFIX + "1"] = ['3']  # admin role
        workflow_data[GROUP_ROLE_PREFIX + "2"] = ['2']  # member role

        # update some fields
        project._info["domain_id"] = domain_id
        project._info["name"] = "updated name"
        project._info["description"] = "updated description"
        quota.metadata_items = 444
        quota.volumes = 444

        updated_project = {"name": project._info["name"],
                           "description": project._info["description"],
                           "enabled": project.enabled}
        updated_quota = self._get_quota_info(quota)

        # handle
        api.keystone.tenant_update(IsA(http.HttpRequest),
                                   project.id,
                                   **updated_project) \
            .AndReturn(project)

        self._check_role_list(keystone_api_version, role_assignments, groups,
                              proj_users, roles, workflow_data)

        quotas.tenant_quota_usages(IsA(http.HttpRequest), tenant_id=project.id) \
            .AndReturn(quota_usages)

        nova_updated_quota = dict([(key, updated_quota[key]) for key in
                                   quotas.NOVA_QUOTA_FIELDS])
        api.nova.tenant_quota_update(IsA(http.HttpRequest),
                                     project.id,
                                     **nova_updated_quota)

        cinder_updated_quota = dict([(key, updated_quota[key]) for key in
                                     quotas.CINDER_QUOTA_FIELDS])
        api.cinder.tenant_quota_update(IsA(http.HttpRequest),
                                       project.id,
                                       **cinder_updated_quota)

        self.mox.ReplayAll()

        # submit form data
        project_data = {"domain_id": project._info["domain_id"],
                        "name": project._info["name"],
                        "id": project.id,
                        "description": project._info["description"],
                        "enabled": project.enabled}
        workflow_data.update(project_data)
        workflow_data.update(updated_quota)
        url = reverse('horizon:identity:projects:update',
                      args=[self.tenant.id])
        res = self.client.post(url, workflow_data)

        self.assertNoFormErrors(res)
        self.assertMessageCount(error=0, warning=1)
        self.assertRedirectsNoFollow(res, INDEX_URL)

    @test.create_stubs({api.neutron: ('is_extension_supported',
                                      'tenant_quota_get',
                                      'tenant_quota_update')})
    @test.update_settings(OPENSTACK_NEUTRON_NETWORK={'enable_quotas': True})
    def test_update_project_save_with_neutron(self):
        """Same as test_update_project_save, but with neutron quotas
        enabled: records the neutron quota get/update expectations and
        delegates to the base test with neutron=True."""
        quota_data = self.neutron_quotas.first()
        neutron_updated_quota = dict([(key, quota_data.get(key).limit)
                                      for key in quotas.NEUTRON_QUOTA_FIELDS])

        api.neutron.is_extension_supported(IsA(http.HttpRequest), 'quotas') \
            .MultipleTimes().AndReturn(True)
        api.neutron.tenant_quota_get(IsA(http.HttpRequest),
                                     tenant_id=self.tenant.id) \
            .AndReturn(quota_data)
        api.neutron.tenant_quota_update(IsA(http.HttpRequest),
                                        self.tenant.id,
                                        **neutron_updated_quota)
        self.test_update_project_save(neutron=True)

    @test.create_stubs({api.keystone: ('tenant_get',)})
    def test_update_project_get_error(self):
        """If fetching the project raises, the GET redirects back to the
        project index instead of rendering the workflow."""
        api.keystone.tenant_get(IsA(http.HttpRequest), self.tenant.id,
                                admin=True) \
            .AndRaise(self.exceptions.nova)

        self.mox.ReplayAll()

        url = reverse('horizon:identity:projects:update',
                      args=[self.tenant.id])
        res = self.client.get(url)

        self.assertRedirectsNoFollow(res, INDEX_URL)

    @test.create_stubs({api.keystone: ('tenant_get',
                                       'domain_get',
                                       'tenant_update',
                                       'get_default_role',
                                       'roles_for_user',
                                       'remove_tenant_user',
                                       'add_tenant_user_role',
                                       'user_list',
                                       'roles_for_group',
                                       'remove_group_role',
                                       'add_group_role',
                                       'group_list',
                                       'role_list',
                                       'role_assignments_list'),
                        quotas: ('get_tenant_quota_data',
                                 'get_disabled_quotas',
                                 'tenant_quota_usages',),
                        api.nova: ('tenant_quota_update',)})
    def test_update_project_tenant_update_error(self):
        """If tenant_update itself raises, the workflow aborts (no role or
        quota calls are recorded after it) and redirects to the index."""
        keystone_api_version = api.keystone.VERSIONS.active

        project = self.tenants.first()
        quota = self.quotas.first()
        default_role = self.roles.first()
        domain_id = project.domain_id
        users = self._get_all_users(domain_id)
        groups = self._get_all_groups(domain_id)
        roles = self.roles.list()
        proj_users = self._get_proj_users(project.id)
        # NOTE(review): uses the unfiltered assignment list here, unlike the
        # other tests which use _get_proj_role_assignment — presumably
        # acceptable because the update fails before assignments are used.
        role_assignments = self.role_assignments.list()
        quota_usages = self.quota_usages.first()

        # get/init
        api.keystone.tenant_get(IsA(http.HttpRequest), self.tenant.id,
                                admin=True) \
            .AndReturn(project)
        api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \
            .AndReturn(self.domain)
        quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
            .AndReturn(self.disabled_quotas.first())
        quotas.get_tenant_quota_data(IsA(http.HttpRequest),
                                     tenant_id=self.tenant.id) \
            .AndReturn(quota)

        api.keystone.get_default_role(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(default_role)
        api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
            .AndReturn(users)
        api.keystone.role_list(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(roles)
        api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
            .AndReturn(groups)

        workflow_data = {}

        if keystone_api_version >= 3:
            api.keystone.role_assignments_list(IsA(http.HttpRequest),
                                               project=self.tenant.id) \
                .AndReturn(role_assignments)
        else:
            api.keystone.user_list(IsA(http.HttpRequest),
                                   project=self.tenant.id) \
                .AndReturn(proj_users)

            for user in proj_users:
                api.keystone.roles_for_user(IsA(http.HttpRequest),
                                            user.id,
                                            self.tenant.id).AndReturn(roles)

            role_ids = [role.id for role in roles]
            for user in proj_users:
                if role_ids:
                    workflow_data.setdefault(USER_ROLE_PREFIX + role_ids[0], []) \
                                 .append(user.id)

            api.keystone.role_assignments_list(IsA(http.HttpRequest),
                                               project=self.tenant.id) \
                .AndReturn(role_assignments)

        role_ids = [role.id for role in roles]
        for group in groups:
            if role_ids:
                workflow_data.setdefault(GROUP_ROLE_PREFIX + role_ids[0], []) \
                             .append(group.id)

        # update some fields
        project._info["domain_id"] = domain_id
        project._info["name"] = "updated name"
        project._info["description"] = "updated description"
        quota.metadata_items = 444
        quota.volumes = 444

        updated_project = {"name": project._info["name"],
                           "description": project._info["description"],
                           "enabled": project.enabled}
        updated_quota = self._get_quota_info(quota)

        # handle
        quotas.tenant_quota_usages(IsA(http.HttpRequest), tenant_id=project.id) \
            .AndReturn(quota_usages)
        api.keystone.tenant_update(IsA(http.HttpRequest),
                                   project.id,
                                   **updated_project) \
            .AndRaise(self.exceptions.keystone)

        self.mox.ReplayAll()

        # submit form data
        project_data = {"domain_id": project._info["domain_id"],
                        "name": project._info["name"],
                        "id": project.id,
                        "description": project._info["description"],
                        "enabled": project.enabled}
        workflow_data.update(project_data)
        workflow_data.update(updated_quota)
        url = reverse('horizon:identity:projects:update',
                      args=[self.tenant.id])
        res = self.client.post(url, workflow_data)

        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)

    @test.create_stubs({api.keystone: ('tenant_get',
                                       'domain_get',
                                       'tenant_update',
                                       'get_default_role',
                                       'roles_for_user',
                                       'remove_tenant_user_role',
                                       'add_tenant_user_role',
                                       'user_list',
                                       'roles_for_group',
                                       'remove_group_role',
                                       'add_group_role',
                                       'group_list',
                                       'role_list',
                                       'role_assignments_list'),
                        quotas: ('get_tenant_quota_data',
                                 'get_disabled_quotas',
                                 'tenant_quota_usages',),
                        api.nova: ('tenant_quota_update',)})
    def test_update_project_quota_update_error(self):
        """If the nova quota update raises after a successful tenant and
        role update, the workflow reports errors (2) plus the usual role
        warning (1) and redirects to the index."""
        keystone_api_version = api.keystone.VERSIONS.active

        project = self.tenants.first()
        quota = self.quotas.first()
        default_role = self.roles.first()
        domain_id = project.domain_id
        users = self._get_all_users(domain_id)
        proj_users = self._get_proj_users(project.id)
        groups = self._get_all_groups(domain_id)
        roles = self.roles.list()
        role_assignments = self._get_proj_role_assignment(project.id)
        quota_usages = self.quota_usages.first()

        # get/init
        api.keystone.tenant_get(IsA(http.HttpRequest), self.tenant.id,
                                admin=True) \
            .AndReturn(project)
        api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \
            .AndReturn(self.domain)

        quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
            .AndReturn(self.disabled_quotas.first())
        quotas.get_tenant_quota_data(IsA(http.HttpRequest),
                                     tenant_id=self.tenant.id) \
            .AndReturn(quota)

        api.keystone.get_default_role(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(default_role)
        api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
            .AndReturn(users)
        api.keystone.role_list(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(roles)
        api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
            .AndReturn(groups)

        workflow_data = {}

        if keystone_api_version >= 3:
            api.keystone.role_assignments_list(IsA(http.HttpRequest),
                                               project=self.tenant.id) \
                .AndReturn(role_assignments)
        else:
            api.keystone.user_list(IsA(http.HttpRequest),
                                   project=self.tenant.id) \
                .AndReturn(proj_users)

            for user in proj_users:
                api.keystone.roles_for_user(IsA(http.HttpRequest),
                                            user.id,
                                            self.tenant.id).AndReturn(roles)

            api.keystone.role_assignments_list(IsA(http.HttpRequest),
                                               project=self.tenant.id) \
                .AndReturn(role_assignments)

        workflow_data[USER_ROLE_PREFIX + "1"] = ['1', '3']  # admin role
        workflow_data[USER_ROLE_PREFIX + "2"] = ['1', '2', '3']  # member role
        # Group role assignment data
        workflow_data[GROUP_ROLE_PREFIX + "1"] = ['1', '3']  # admin role
        workflow_data[GROUP_ROLE_PREFIX + "2"] = ['1', '2', '3']  # member role

        # update some fields
        project._info["domain_id"] = domain_id
        project._info["name"] = "updated name"
        project._info["description"] = "updated description"
        quota[0].limit = 444
        quota[1].limit = -1

        updated_project = {"name": project._info["name"],
                           "description": project._info["description"],
                           "enabled": project.enabled}
        updated_quota = self._get_quota_info(quota)

        # handle
        api.keystone.tenant_update(IsA(http.HttpRequest),
                                   project.id,
                                   **updated_project) \
            .AndReturn(project)

        self._check_role_list(keystone_api_version, role_assignments, groups,
                              proj_users, roles, workflow_data)

        quotas.tenant_quota_usages(IsA(http.HttpRequest), tenant_id=project.id) \
            .AndReturn(quota_usages)

        nova_updated_quota = dict([(key, updated_quota[key]) for key in
                                   quotas.NOVA_QUOTA_FIELDS])
        api.nova.tenant_quota_update(IsA(http.HttpRequest),
                                     project.id,
                                     **nova_updated_quota) \
            .AndRaise(self.exceptions.nova)

        self.mox.ReplayAll()

        # submit form data
        project_data = {"domain_id": project._info["domain_id"],
                        "name": project._info["name"],
                        "id": project.id,
                        "description": project._info["description"],
                        "enabled": project.enabled}
        workflow_data.update(project_data)
        workflow_data.update(updated_quota)
        url = reverse('horizon:identity:projects:update',
                      args=[self.tenant.id])
        res = self.client.post(url, workflow_data)

        self.assertNoFormErrors(res)
        self.assertMessageCount(error=2, warning=1)
        self.assertRedirectsNoFollow(res, INDEX_URL)

    @test.create_stubs({api.keystone: ('tenant_get',
                                       'domain_get',
                                       'tenant_update',
                                       'get_default_role',
                                       'roles_for_user',
                                       'remove_tenant_user_role',
                                       'add_tenant_user_role',
                                       'user_list',
                                       'roles_for_group',
                                       'remove_group_role',
                                       'add_group_role',
                                       'group_list',
                                       'role_list',
                                       'role_assignments_list'),
                        quotas: ('get_tenant_quota_data',
                                 'get_disabled_quotas',
                                 'tenant_quota_usages')})
    def test_update_project_member_update_error(self):
        """If a member-role update raises (recorded inside
        _check_role_list), the workflow stops before quota updates and
        reports 2 errors and 1 warning, then redirects to the index."""
        keystone_api_version = api.keystone.VERSIONS.active

        project = self.tenants.first()
        quota = self.quotas.first()
        default_role = self.roles.first()
        domain_id = project.domain_id
        users = self._get_all_users(domain_id)
        proj_users = self._get_proj_users(project.id)
        groups = self._get_all_groups(domain_id)
        roles = self.roles.list()
        role_assignments = self._get_proj_role_assignment(project.id)
        quota_usages = self.quota_usages.first()

        # get/init
        api.keystone.tenant_get(IsA(http.HttpRequest), self.tenant.id,
                                admin=True) \
            .AndReturn(project)
        api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \
            .AndReturn(self.domain)
        quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
            .AndReturn(self.disabled_quotas.first())
        quotas.get_tenant_quota_data(IsA(http.HttpRequest),
                                     tenant_id=self.tenant.id) \
            .AndReturn(quota)

        api.keystone.get_default_role(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(default_role)
        api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
            .AndReturn(users)
        api.keystone.role_list(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(roles)
        api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
            .AndReturn(groups)

        workflow_data = {}

        if keystone_api_version >= 3:
            api.keystone.role_assignments_list(IsA(http.HttpRequest),
                                               project=self.tenant.id) \
                .AndReturn(role_assignments)
        else:
            api.keystone.user_list(IsA(http.HttpRequest),
                                   project=self.tenant.id) \
                .AndReturn(proj_users)

            for user in proj_users:
                api.keystone.roles_for_user(IsA(http.HttpRequest),
                                            user.id,
                                            self.tenant.id).AndReturn(roles)

            api.keystone.role_assignments_list(IsA(http.HttpRequest),
                                               project=self.tenant.id) \
                .AndReturn(role_assignments)

        workflow_data[USER_ROLE_PREFIX + "1"] = ['1', '3']  # admin role
        workflow_data[USER_ROLE_PREFIX + "2"] = ['1', '2', '3']  # member role
        workflow_data[GROUP_ROLE_PREFIX + "1"] = ['1', '3']  # admin role
        workflow_data[GROUP_ROLE_PREFIX + "2"] = ['1', '2', '3']  # member role

        # update some fields
        project._info["domain_id"] = domain_id
        project._info["name"] = "updated name"
        project._info["description"] = "updated description"
        quota.metadata_items = 444
        quota.volumes = 444

        updated_project = {"name": project._info["name"],
                           "description": project._info["description"],
                           "enabled": project.enabled}
        updated_quota = self._get_quota_info(quota)

        # handle
        quotas.tenant_quota_usages(IsA(http.HttpRequest), tenant_id=project.id) \
            .AndReturn(quota_usages)
        api.keystone.tenant_update(IsA(http.HttpRequest),
                                   project.id,
                                   **updated_project) \
            .AndReturn(project)

        self._check_role_list(keystone_api_version, role_assignments, groups,
                              proj_users, roles, workflow_data)
        self.mox.ReplayAll()

        # submit form data
        project_data = {"domain_id": project._info["domain_id"],
                        "name": project._info["name"],
                        "id": project.id,
                        "description": project._info["description"],
                        "enabled": project.enabled}
        workflow_data.update(project_data)
        workflow_data.update(updated_quota)
        url = reverse('horizon:identity:projects:update',
                      args=[self.tenant.id])
        res = self.client.post(url, workflow_data)

        self.assertNoFormErrors(res)
        self.assertMessageCount(error=2, warning=1)
        self.assertRedirectsNoFollow(res, INDEX_URL)

    # django 1.7 and later does not handle the thrown keystoneclient
    # exception well enough.
    # TODO(mrunge): re-check when django-1.8 is stable
    @unittest.skipIf(django.VERSION >= (1, 7, 0),
                     'Currently skipped with Django >= 1.7')
    @test.create_stubs({api.keystone: ('get_default_role',
                                       'tenant_get',
                                       'domain_get'),
                        quotas: ('get_tenant_quota_data',
                                 'get_disabled_quotas')})
    def test_update_project_when_default_role_does_not_exist(self):
        """When keystone reports no default role, instantiating the
        membership step must raise NotFound for the GET request."""
        project = self.tenants.first()
        domain_id = project.domain_id
        quota = self.quotas.first()

        api.keystone.get_default_role(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(None)  # Default role doesn't exist
        api.keystone.tenant_get(IsA(http.HttpRequest), self.tenant.id,
                                admin=True) \
            .AndReturn(project)
        api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \
            .AndReturn(self.domain)
        quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
            .AndReturn(self.disabled_quotas.first())
        quotas.get_tenant_quota_data(IsA(http.HttpRequest),
                                     tenant_id=self.tenant.id) \
            .AndReturn(quota)
        self.mox.ReplayAll()

        url = reverse('horizon:identity:projects:update',
                      args=[self.tenant.id])

        try:
            # Avoid the log message in the test output when the workflow's
            # step action cannot be instantiated
            logging.disable(logging.ERROR)
            with self.assertRaises(exceptions.NotFound):
                self.client.get(url)
        finally:
            logging.disable(logging.NOTSET)
class UsageViewTests(test.BaseAdminViewTests):
    """Tests for the per-project usage view, focusing on CSV export."""

    def _stub_nova_api_calls(self, nova_stu_enabled=True):
        """Stub nova usage/limits APIs and record whether the
        SimpleTenantUsage extension is reported as supported."""
        self.mox.StubOutWithMock(api.nova, 'usage_get')
        self.mox.StubOutWithMock(api.nova, 'tenant_absolute_limits')
        self.mox.StubOutWithMock(api.nova, 'extension_supported')
        self.mox.StubOutWithMock(api.cinder, 'tenant_absolute_limits')

        api.nova.extension_supported(
            'SimpleTenantUsage', IsA(http.HttpRequest)) \
            .AndReturn(nova_stu_enabled)

    def _stub_neutron_api_calls(self, neutron_sg_enabled=True):
        """Stub the neutron/network calls the usage view makes
        (security-group support, floating IPs, security groups)."""
        self.mox.StubOutWithMock(api.neutron, 'is_extension_supported')
        self.mox.StubOutWithMock(api.network, 'floating_ip_supported')
        self.mox.StubOutWithMock(api.network, 'tenant_floating_ip_list')
        if neutron_sg_enabled:
            self.mox.StubOutWithMock(api.network, 'security_group_list')
        api.neutron.is_extension_supported(
            IsA(http.HttpRequest),
            'security-group').AndReturn(neutron_sg_enabled)
        api.network.floating_ip_supported(IsA(http.HttpRequest)) \
            .AndReturn(True)
        api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
            .AndReturn(self.floating_ips.list())
        if neutron_sg_enabled:
            api.network.security_group_list(IsA(http.HttpRequest)) \
                .AndReturn(self.q_secgroups.list())

    def test_usage_csv(self):
        self._test_usage_csv(nova_stu_enabled=True)

    def test_usage_csv_disabled(self):
        self._test_usage_csv(nova_stu_enabled=False)

    def _test_usage_csv(self, nova_stu_enabled=True):
        """Request the usage page as CSV and verify the template and the
        CSV header row; nova usage_get is only expected when the
        SimpleTenantUsage extension is enabled."""
        now = timezone.now()
        usage_obj = api.nova.NovaUsage(self.usages.first())
        self._stub_nova_api_calls(nova_stu_enabled)
        # The extension check is queried a second time by the view.
        api.nova.extension_supported(
            'SimpleTenantUsage', IsA(http.HttpRequest)) \
            .AndReturn(nova_stu_enabled)
        start = datetime.datetime(now.year, now.month, 1, 0, 0, 0, 0)
        end = datetime.datetime(now.year, now.month, now.day, 23, 59, 59, 0)

        if nova_stu_enabled:
            api.nova.usage_get(IsA(http.HttpRequest),
                               self.tenant.id,
                               start, end).AndReturn(usage_obj)
        api.nova.tenant_absolute_limits(IsA(http.HttpRequest))\
            .AndReturn(self.limits['absolute'])
        api.cinder.tenant_absolute_limits(IsA(http.HttpRequest)) \
            .AndReturn(self.cinder_limits['absolute'])
        self._stub_neutron_api_calls()
        self.mox.ReplayAll()

        project_id = self.tenants.first().id
        csv_url = reverse('horizon:identity:projects:usage',
                          args=[project_id]) + "?format=csv"
        res = self.client.get(csv_url)
        self.assertTemplateUsed(res, 'project/overview/usage.csv')

        self.assertTrue(isinstance(res.context['usage'], usage.ProjectUsage))
        hdr = ('Instance Name,VCPUs,RAM (MB),Disk (GB),Usage (Hours),'
               'Time since created (Seconds),State')
        self.assertContains(res, '%s\r\n' % hdr)
class DetailProjectViewTests(test.BaseAdminViewTests):
    """Tests for the project detail page."""

    @test.create_stubs({api.keystone: ('tenant_get',)})
    def test_detail_view(self):
        """Detail page renders the project's name/id and uses the
        identity detail template."""
        project = self.tenants.first()

        api.keystone.tenant_get(IsA(http.HttpRequest), self.tenant.id) \
            .AndReturn(project)
        self.mox.ReplayAll()

        res = self.client.get(PROJECT_DETAIL_URL, args=[project.id])

        self.assertTemplateUsed(res, 'identity/projects/detail.html')
        self.assertEqual(res.context['project'].name, project.name)
        self.assertEqual(res.context['project'].id, project.id)
        self.assertContains(res, "Project Details: %s" % project.name,
                            1, 200)

    @test.create_stubs({api.keystone: ('tenant_get',)})
    def test_detail_view_with_exception(self):
        """If the tenant lookup raises, the view redirects to the
        project index."""
        project = self.tenants.first()

        api.keystone.tenant_get(IsA(http.HttpRequest), self.tenant.id) \
            .AndRaise(self.exceptions.keystone)
        self.mox.ReplayAll()

        res = self.client.get(PROJECT_DETAIL_URL, args=[project.id])

        self.assertRedirectsNoFollow(res, INDEX_URL)
@unittest.skipUnless(os.environ.get('WITH_SELENIUM', False),
                     "The WITH_SELENIUM env variable is not set.")
class SeleniumTests(test.SeleniumAdminTestCase):
    """Browser-driven tests for inline editing of the project name in the
    tenants table.  Requires a live Selenium setup (WITH_SELENIUM env var).
    """

    @test.create_stubs(
        {api.keystone: ('tenant_list', 'tenant_get', 'tenant_update')})
    def test_inline_editing_update(self):
        """Edit a tenant name inline, submit via AJAX, and verify the cell
        refreshes with the new name.  Records one tenant_get per AJAX
        round-trip (edit mode, update, refreshed cell)."""
        # Tenant List
        api.keystone.tenant_list(IgnoreArg(),
                                 domain=None,
                                 marker=None,
                                 paginate=True) \
            .AndReturn([self.tenants.list(), False])
        # Edit mod
        api.keystone.tenant_get(IgnoreArg(),
                                u'1',
                                admin=True) \
            .AndReturn(self.tenants.list()[0])
        # Update - requires get and update
        api.keystone.tenant_get(IgnoreArg(),
                                u'1',
                                admin=True) \
            .AndReturn(self.tenants.list()[0])
        api.keystone.tenant_update(
            IgnoreArg(),
            u'1',
            description='a test tenant.',
            enabled=True,
            name=u'Changed test_tenant')
        # Refreshing cell with changed name
        changed_tenant = copy.copy(self.tenants.list()[0])
        changed_tenant.name = u'Changed test_tenant'

        api.keystone.tenant_get(IgnoreArg(),
                                u'1',
                                admin=True) \
            .AndReturn(changed_tenant)
        self.mox.ReplayAll()

        self.selenium.get("%s%s" % (self.live_server_url, INDEX_URL))
        # Check the presence of the important elements
        td_element = self.selenium.find_element_by_xpath(
            "//td[@data-update-url='/identity/?action=cell_update"
            "&table=tenants&cell_name=name&obj_id=1']")
        cell_wrapper = td_element.find_element_by_class_name(
            'table_cell_wrapper')
        edit_button_wrapper = td_element.find_element_by_class_name(
            'table_cell_action')
        edit_button = edit_button_wrapper.find_element_by_tag_name('button')
        # Hovering over td and clicking on edit button
        action_chains = ActionChains(self.selenium)
        action_chains.move_to_element(cell_wrapper).click(edit_button)
        action_chains.perform()
        # Waiting for the AJAX response for switching to editing mod
        wait = self.ui.WebDriverWait(self.selenium, 10,
                                     ignored_exceptions=[socket_timeout])
        wait.until(lambda x: self.selenium.find_element_by_name("name__1"))
        # Changing project name in cell form
        td_element = self.selenium.find_element_by_xpath(
            "//td[@data-update-url='/identity/?action=cell_update"
            "&table=tenants&cell_name=name&obj_id=1']")
        name_input = td_element.find_element_by_tag_name('input')
        name_input.send_keys(keys.Keys.HOME)
        name_input.send_keys("Changed ")
        # Saving new project name by AJAX
        td_element.find_element_by_class_name('inline-edit-submit').click()
        # Waiting for the AJAX response of cell refresh
        wait = self.ui.WebDriverWait(self.selenium, 10,
                                     ignored_exceptions=[socket_timeout])
        wait.until(lambda x: self.selenium.find_element_by_xpath(
            "//td[@data-update-url='/identity/?action=cell_update"
            "&table=tenants&cell_name=name&obj_id=1']"
            "/div[@class='table_cell_wrapper']"
            "/div[@class='table_cell_data_wrapper']"))
        # Checking new project name after cell refresh
        data_wrapper = self.selenium.find_element_by_xpath(
            "//td[@data-update-url='/identity/?action=cell_update"
            "&table=tenants&cell_name=name&obj_id=1']"
            "/div[@class='table_cell_wrapper']"
            "/div[@class='table_cell_data_wrapper']")
        self.assertTrue(data_wrapper.text == u'Changed test_tenant',
                        "Error: saved tenant name is expected to be "
                        "'Changed test_tenant'")

    @test.create_stubs(
        {api.keystone: ('tenant_list', 'tenant_get')})
    def test_inline_editing_cancel(self):
        """Enter inline-edit mode, cancel it, and verify the original
        tenant name is still displayed; cancel is purely client-side so
        no update API call is recorded."""
        # Tenant List
        api.keystone.tenant_list(IgnoreArg(),
                                 domain=None,
                                 marker=None,
                                 paginate=True) \
            .AndReturn([self.tenants.list(), False])
        # Edit mod
        api.keystone.tenant_get(IgnoreArg(),
                                u'1',
                                admin=True) \
            .AndReturn(self.tenants.list()[0])
        # Cancel edit mod is without the request
        self.mox.ReplayAll()

        self.selenium.get("%s%s" % (self.live_server_url, INDEX_URL))
        # Check the presence of the important elements
        td_element = self.selenium.find_element_by_xpath(
            "//td[@data-update-url='/identity/?action=cell_update"
            "&table=tenants&cell_name=name&obj_id=1']")
        cell_wrapper = td_element.find_element_by_class_name(
            'table_cell_wrapper')
        edit_button_wrapper = td_element.find_element_by_class_name(
            'table_cell_action')
        edit_button = edit_button_wrapper.find_element_by_tag_name('button')
        # Hovering over td and clicking on edit
        action_chains = ActionChains(self.selenium)
        action_chains.move_to_element(cell_wrapper).click(edit_button)
        action_chains.perform()
        # Waiting for the AJAX response for switching to editing mod
        wait = self.ui.WebDriverWait(self.selenium, 10,
                                     ignored_exceptions=[socket_timeout])
        wait.until(lambda x: self.selenium.find_element_by_name("name__1"))
        # Click on cancel button
        td_element = self.selenium.find_element_by_xpath(
            "//td[@data-update-url='/identity/?action=cell_update"
            "&table=tenants&cell_name=name&obj_id=1']")
        td_element.find_element_by_class_name('inline-edit-cancel').click()
        # Cancel is via javascript, so it should be immediate
        # Checking that tenant name is not changed
        data_wrapper = self.selenium.find_element_by_xpath(
            "//td[@data-update-url='/identity/?action=cell_update"
            "&table=tenants&cell_name=name&obj_id=1']"
            "/div[@class='table_cell_wrapper']"
            "/div[@class='table_cell_data_wrapper']")
        self.assertTrue(data_wrapper.text == u'test_tenant',
                        "Error: saved tenant name is expected to be "
                        "'test_tenant'")
    @test.create_stubs({api.keystone: ('get_default_domain',
                                       'get_default_role',
                                       'user_list',
                                       'group_list',
                                       'role_list'),
                        api.base: ('is_service_enabled',),
                        quotas: ('get_default_quota_data',)})
    def test_membership_list_loads_correctly(self):
        """The project-create membership step must list every keystone user.

        NOTE: mox verifies expectations in recording order, so the stub
        calls below must stay in the sequence the view issues them.
        """
        # CSS hook for the "available members" column of the membership widget.
        member_css_class = ".available_members"
        users = self.users.list()

        # Neutron/cinder disabled so the workflow skips network/volume quotas.
        api.base.is_service_enabled(IsA(http.HttpRequest), 'network') \
            .MultipleTimes().AndReturn(False)
        api.base.is_service_enabled(IsA(http.HttpRequest), 'volume') \
            .MultipleTimes().AndReturn(False)
        api.keystone.get_default_domain(IsA(http.HttpRequest)) \
            .AndReturn(self.domain)
        quotas.get_default_quota_data(IsA(http.HttpRequest)) \
            .AndReturn(self.quotas.first())
        api.keystone.get_default_role(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(self.roles.first())
        api.keystone.user_list(IsA(http.HttpRequest), domain=self.domain.id) \
            .AndReturn(users)
        # role_list is recorded twice: once for the user step, once for the
        # group step of the workflow.
        api.keystone.role_list(IsA(http.HttpRequest)) \
            .AndReturn(self.roles.list())
        api.keystone.group_list(IsA(http.HttpRequest), domain=self.domain.id) \
            .AndReturn(self.groups.list())
        api.keystone.role_list(IsA(http.HttpRequest)) \
            .AndReturn(self.roles.list())

        self.mox.ReplayAll()

        self.selenium.get("%s%s" %
                          (self.live_server_url,
                           reverse('horizon:identity:projects:create')))

        # Every stubbed user name must appear in the "available" column.
        members = self.selenium.find_element_by_css_selector(member_css_class)
        for user in users:
            self.assertIn(user.name, members.text)
| apache-2.0 |
jk1/intellij-community | platform/vcs-impl/src/com/intellij/openapi/vcs/configurable/VcsGeneralConfigurationPanel.java | 10031 | /*
* Copyright 2000-2013 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.openapi.vcs.configurable;
import com.intellij.ide.actions.ShowFilePathAction;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.util.Comparing;
import com.intellij.openapi.util.SystemInfo;
import com.intellij.openapi.util.text.StringUtil;
import com.intellij.openapi.vcs.*;
import com.intellij.openapi.vcs.ex.ProjectLevelVcsManagerEx;
import com.intellij.openapi.vcs.readOnlyHandler.ReadonlyStatusHandlerImpl;
import com.intellij.openapi.vfs.ReadonlyStatusHandler;
import com.intellij.util.ui.UIUtil;
import org.jetbrains.annotations.Nullable;
import javax.swing.*;
import java.awt.*;
import java.util.*;
import java.util.List;
/**
 * Backing logic for the "General" VCS settings form: confirmation prompts
 * for adding/removing files, per-VCS prompt options, read-only status
 * dialog, empty-changelist removal, and where to show a created patch.
 * <p>
 * The Swing fields are bound by the IDE GUI designer; this class only
 * reads/writes their state against {@link VcsConfiguration} and the
 * project-level confirmation options.
 */
public class VcsGeneralConfigurationPanel {

  private JCheckBox myShowReadOnlyStatusDialog;

  private JRadioButton myShowDialogOnAddingFile;
  private JRadioButton myPerformActionOnAddingFile;
  private JRadioButton myDoNothingOnAddingFile;

  private JRadioButton myShowDialogOnRemovingFile;
  private JRadioButton myPerformActionOnRemovingFile;
  private JRadioButton myDoNothingOnRemovingFile;

  private JPanel myPanel;

  // Radio groups are ordered: [0] show dialog, [1] do silently, [2] do nothing.
  // getSelected()/selectInGroup() below rely on this index convention.
  private final JRadioButton[] myOnFileAddingGroup;
  private final JRadioButton[] myOnFileRemovingGroup;
  private final Project myProject;
  private JPanel myPromptsPanel;

  // One checkbox per registered "show options" setting, in registration order.
  Map<VcsShowOptionsSettingImpl, JCheckBox> myPromptOptions = new LinkedHashMap<>();
  private JPanel myRemoveConfirmationPanel;
  private JPanel myAddConfirmationPanel;
  private JComboBox myOnPatchCreation;
  private JCheckBox myReloadContext;
  private ButtonGroup myEmptyChangelistRemovingGroup;

  /**
   * Builds the panel model for the given project and populates the prompt
   * checkboxes from the VCS manager's registered options. For the default
   * (template) project all options are shown regardless of applicability.
   */
  public VcsGeneralConfigurationPanel(final Project project) {

    myProject = project;

    myOnFileAddingGroup = new JRadioButton[]{
      myShowDialogOnAddingFile,
      myPerformActionOnAddingFile,
      myDoNothingOnAddingFile
    };

    myOnFileRemovingGroup = new JRadioButton[]{
      myShowDialogOnRemovingFile,
      myPerformActionOnRemovingFile,
      myDoNothingOnRemovingFile
    };

    myPromptsPanel.setLayout(new GridLayout(3, 0));

    List<VcsShowOptionsSettingImpl> options = ProjectLevelVcsManagerEx.getInstanceEx(project).getAllOptions();

    for (VcsShowOptionsSettingImpl setting : options) {
      if (!setting.getApplicableVcses().isEmpty() || project.isDefault()) {
        final JCheckBox checkBox = new JCheckBox(setting.getDisplayName());
        myPromptsPanel.add(checkBox);
        myPromptOptions.put(setting, checkBox);
      }
    }

    myPromptsPanel.setSize(myPromptsPanel.getPreferredSize()); // todo check text!

    myOnPatchCreation.setName((SystemInfo.isMac ? "Reveal patch in" : "Show patch in ") +
                              ShowFilePathAction.getFileManagerName() + " after creation:");
  }

  /** Writes the current UI state back into the project's VCS settings. */
  public void apply() {

    VcsConfiguration settings = VcsConfiguration.getInstance(myProject);

    settings.REMOVE_EMPTY_INACTIVE_CHANGELISTS = getSelected(myEmptyChangelistRemovingGroup);
    settings.RELOAD_CONTEXT = myReloadContext.isSelected();

    for (VcsShowOptionsSettingImpl setting : myPromptOptions.keySet()) {
      setting.setValue(myPromptOptions.get(setting).isSelected());
    }

    getAddConfirmation().setValue(getSelected(myOnFileAddingGroup));
    getRemoveConfirmation().setValue(getSelected(myOnFileRemovingGroup));
    applyPatchOption(settings);

    getReadOnlyStatusHandler().getState().SHOW_DIALOG = myShowReadOnlyStatusDialog.isSelected();
  }

  private void applyPatchOption(VcsConfiguration settings) {
    settings.SHOW_PATCH_IN_EXPLORER = getShowPatchValue();
  }

  /**
   * Maps the patch-creation combo index to the tri-state setting:
   * index 0 -> null (ask/default), 1 -> true (show), 2 -> false (don't).
   */
  @Nullable
  private Boolean getShowPatchValue() {
    final int index = myOnPatchCreation.getSelectedIndex();
    if (index == 0) {
      return null;
    } else if (index == 1) {
      return true;
    } else {
      return false;
    }
  }

  private VcsShowConfirmationOption getAddConfirmation() {
    return ProjectLevelVcsManagerEx.getInstanceEx(myProject)
      .getConfirmation(VcsConfiguration.StandardConfirmation.ADD);
  }

  private VcsShowConfirmationOption getRemoveConfirmation() {
    return ProjectLevelVcsManagerEx.getInstanceEx(myProject)
      .getConfirmation(VcsConfiguration.StandardConfirmation.REMOVE);
  }

  // Index convention: [0] show confirmation, [1] act silently, otherwise do nothing.
  private static VcsShowConfirmationOption.Value getSelected(JRadioButton[] group) {
    if (group[0].isSelected()) return VcsShowConfirmationOption.Value.SHOW_CONFIRMATION;
    if (group[1].isSelected()) return VcsShowConfirmationOption.Value.DO_ACTION_SILENTLY;
    return VcsShowConfirmationOption.Value.DO_NOTHING_SILENTLY;
  }

  // Same index convention as above, but for a ButtonGroup.
  private static VcsShowConfirmationOption.Value getSelected(ButtonGroup group) {
    switch (UIUtil.getSelectedButton(group)) {
      case 0:
        return VcsShowConfirmationOption.Value.SHOW_CONFIRMATION;
      case 1:
        return VcsShowConfirmationOption.Value.DO_ACTION_SILENTLY;
    }
    return VcsShowConfirmationOption.Value.DO_NOTHING_SILENTLY;
  }

  private ReadonlyStatusHandlerImpl getReadOnlyStatusHandler() {
    return ((ReadonlyStatusHandlerImpl)ReadonlyStatusHandler.getInstance(myProject));
  }

  /** @return true when any UI control differs from the persisted settings. */
  public boolean isModified() {

    VcsConfiguration settings = VcsConfiguration.getInstance(myProject);
    if (settings.REMOVE_EMPTY_INACTIVE_CHANGELISTS != getSelected(myEmptyChangelistRemovingGroup)){
      return true;
    }
    if (settings.RELOAD_CONTEXT != myReloadContext.isSelected()) return true;

    if (getReadOnlyStatusHandler().getState().SHOW_DIALOG != myShowReadOnlyStatusDialog.isSelected()) {
      return true;
    }

    for (VcsShowOptionsSettingImpl setting : myPromptOptions.keySet()) {
      if (setting.getValue() != myPromptOptions.get(setting).isSelected()) return true;
    }

    if (getSelected(myOnFileAddingGroup) != getAddConfirmation().getValue()) return true;
    if (getSelected(myOnFileRemovingGroup) != getRemoveConfirmation().getValue()) return true;

    if (! Comparing.equal(settings.SHOW_PATCH_IN_EXPLORER, getShowPatchValue())) return true;

    return false;
  }

  /** Loads persisted settings into the UI controls. */
  public void reset() {
    VcsConfiguration settings = VcsConfiguration.getInstance(myProject);
    myReloadContext.setSelected(settings.RELOAD_CONTEXT);
    VcsShowConfirmationOption.Value value = settings.REMOVE_EMPTY_INACTIVE_CHANGELISTS;
    // Button index: 0 = show confirmation, 1 = remove silently, 2 = do nothing.
    UIUtil.setSelectedButton(myEmptyChangelistRemovingGroup, value == VcsShowConfirmationOption.Value.SHOW_CONFIRMATION
                                                             ? 0
                                                             : value == VcsShowConfirmationOption.Value.DO_NOTHING_SILENTLY ? 2 : 1);
    myShowReadOnlyStatusDialog.setSelected(getReadOnlyStatusHandler().getState().SHOW_DIALOG);
    for (VcsShowOptionsSettingImpl setting : myPromptOptions.keySet()) {
      myPromptOptions.get(setting).setSelected(setting.getValue());
    }

    selectInGroup(myOnFileAddingGroup, getAddConfirmation());
    selectInGroup(myOnFileRemovingGroup, getRemoveConfirmation());
    // Tri-state patch option: null -> index 0, true -> 1, false -> 2
    // (inverse of getShowPatchValue()).
    if (settings.SHOW_PATCH_IN_EXPLORER == null) {
      myOnPatchCreation.setSelectedIndex(0);
    } else if (Boolean.TRUE.equals(settings.SHOW_PATCH_IN_EXPLORER)) {
      myOnPatchCreation.setSelectedIndex(1);
    } else {
      myOnPatchCreation.setSelectedIndex(2);
    }
  }

  private static void selectInGroup(final JRadioButton[] group, final VcsShowConfirmationOption confirmation) {
    final VcsShowConfirmationOption.Value value = confirmation.getValue();
    final int index;
    //noinspection EnumSwitchStatementWhichMissesCases
    switch(value) {
      case SHOW_CONFIRMATION: index = 0; break;
      case DO_ACTION_SILENTLY: index = 1; break;
      default: index = 2;
    }
    group[index].setSelected(true);
  }


  public JComponent getPanel() {
    return myPanel;
  }

  /**
   * Enables/disables prompt checkboxes and confirmation panels depending on
   * which VCSes are active; for the default project everything stays enabled.
   * Tooltips list the VCSes each option applies to.
   */
  public void updateAvailableOptions(final Collection<AbstractVcs> activeVcses) {
    for (VcsShowOptionsSettingImpl setting : myPromptOptions.keySet()) {
      final JCheckBox checkBox = myPromptOptions.get(setting);
      checkBox.setEnabled(setting.isApplicableTo(activeVcses) || myProject.isDefault());
      if (!myProject.isDefault()) {
        checkBox.setToolTipText(VcsBundle.message("tooltip.text.action.applicable.to.vcses", composeText(setting.getApplicableVcses())));
      }
    }

    if (!myProject.isDefault()) {
      final ProjectLevelVcsManagerEx vcsManager = ProjectLevelVcsManagerEx.getInstanceEx(myProject);
      final VcsShowConfirmationOptionImpl addConfirmation = vcsManager.getConfirmation(VcsConfiguration.StandardConfirmation.ADD);
      UIUtil.setEnabled(myAddConfirmationPanel, addConfirmation.isApplicableTo(activeVcses), true);
      myAddConfirmationPanel.setToolTipText(
        VcsBundle.message("tooltip.text.action.applicable.to.vcses", composeText(addConfirmation.getApplicableVcses())));

      final VcsShowConfirmationOptionImpl removeConfirmation = vcsManager.getConfirmation(VcsConfiguration.StandardConfirmation.REMOVE);
      UIUtil.setEnabled(myRemoveConfirmationPanel, removeConfirmation.isApplicableTo(activeVcses), true);
      myRemoveConfirmationPanel.setToolTipText(
        VcsBundle.message("tooltip.text.action.applicable.to.vcses", composeText(removeConfirmation.getApplicableVcses())));
    }
  }

  // Sorted, comma-separated display names of the given VCSes (for tooltips).
  private static String composeText(final List<AbstractVcs> applicableVcses) {
    final TreeSet<String> result = new TreeSet<>();
    for (AbstractVcs abstractVcs : applicableVcses) {
      result.add(abstractVcs.getDisplayName());
    }
    return StringUtil.join(result, ", ");
  }
}
| apache-2.0 |
cpcloud/arrow | csharp/test/Apache.Arrow.Tests/BitUtilityTests.cs | 7010 | // Licensed to the Apache Software Foundation (ASF) under one or more
// contributor license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright ownership.
// The ASF licenses this file to You under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
using System;
using Xunit;
namespace Apache.Arrow.Tests
{
    /// <summary>
    /// Unit tests for <c>BitUtility</c>: byte counting, bit counting,
    /// get/set/clear of individual bits, and rounding to 64-byte multiples.
    /// Bits are addressed LSB-first within each byte (index 0 = 0b00000001).
    /// </summary>
    public class BitUtilityTests
    {
        /// <summary>Tests for BitUtility.ByteCount (bits -> bytes, rounded up).</summary>
        public class ByteCount
        {
            [Theory]
            [InlineData(0, 0)]
            [InlineData(1, 1)]
            [InlineData(8, 1)]
            [InlineData(9, 2)]
            [InlineData(32, 4)]
            public void HasExpectedResult(int n, int expected)
            {
                var count = BitUtility.ByteCount(n);
                Assert.Equal(expected, count);
            }
        }

        /// <summary>Tests for BitUtility.CountBits (population count over a span).</summary>
        public class CountBits
        {
            [Theory]
            [InlineData(new byte[] { 0b00000000 }, 0)]
            [InlineData(new byte[] { 0b00000001 }, 1)]
            [InlineData(new byte[] { 0b11111111 }, 8)]
            [InlineData(new byte[] { 0b01001001, 0b01010010 }, 6)]
            public void CountsAllOneBits(byte[] data, int expectedCount)
            {
                Assert.Equal(expectedCount,
                    BitUtility.CountBits(data));
            }

            // Negative offsets are expected to yield a count of 0, not throw.
            [Theory]
            [InlineData(new byte[] { 0b11111111 }, 0, 8)]
            [InlineData(new byte[] { 0b11111111 }, 3, 5)]
            [InlineData(new byte[] { 0b11111111, 0b11111111 }, 9, 7)]
            [InlineData(new byte[] { 0b11111111 }, -1, 0)]
            public void CountsAllOneBitsFromAnOffset(byte[] data, int offset, int expectedCount)
            {
                Assert.Equal(expectedCount,
                    BitUtility.CountBits(data, offset));
            }

            [Theory]
            [InlineData(new byte[] { 0b11111111 }, 0, 8, 8)]
            [InlineData(new byte[] { 0b11111111 }, 0, 4, 4)]
            [InlineData(new byte[] { 0b11111111 }, 3, 2, 2)]
            [InlineData(new byte[] { 0b11111111 }, 3, 5, 5)]
            [InlineData(new byte[] { 0b11111111, 0b11111111 }, 9, 7, 7)]
            [InlineData(new byte[] { 0b11111111, 0b11111111 }, 7, 2, 2)]
            [InlineData(new byte[] { 0b11111111, 0b11111111, 0b11111111 }, 0, 24, 24)]
            [InlineData(new byte[] { 0b11111111, 0b11111111, 0b11111111 }, 8, 16, 16)]
            [InlineData(new byte[] { 0b11111111, 0b11111111, 0b11111111 }, 0, 16, 16)]
            [InlineData(new byte[] { 0b11111111, 0b11111111, 0b11111111 }, 3, 18, 18)]
            [InlineData(new byte[] { 0b11111111 }, -1, 0, 0)]
            public void CountsAllOneBitsFromOffsetWithinLength(byte[] data, int offset, int length, int expectedCount)
            {
                var actualCount = BitUtility.CountBits(data, offset, length);
                Assert.Equal(expectedCount, actualCount);
            }

            // A null span counts as zero bits rather than throwing.
            [Fact]
            public void CountsZeroBitsWhenDataIsEmpty()
            {
                Assert.Equal(0,
                    BitUtility.CountBits(null));
            }
        }

        /// <summary>Tests for BitUtility.GetBit (read a single bit, LSB-first).</summary>
        public class GetBit
        {
            [Theory]
            [InlineData(new byte[] { 0b01001001 }, 0, true)]
            [InlineData(new byte[] { 0b01001001 }, 1, false)]
            [InlineData(new byte[] { 0b01001001 }, 2, false)]
            [InlineData(new byte[] { 0b01001001 }, 3, true)]
            [InlineData(new byte[] { 0b01001001 }, 4, false)]
            [InlineData(new byte[] { 0b01001001 }, 5, false)]
            [InlineData(new byte[] { 0b01001001 }, 6, true)]
            [InlineData(new byte[] { 0b01001001 }, 7, false)]
            [InlineData(new byte[] { 0b01001001, 0b01010010 }, 8, false)]
            [InlineData(new byte[] { 0b01001001, 0b01010010 }, 14, true)]
            public void GetsCorrectBitForIndex(byte[] data, int index, bool expectedValue)
            {
                Assert.Equal(expectedValue,
                    BitUtility.GetBit(data, index));
            }

            // Unlike CountBits, out-of-range access here is expected to throw.
            [Theory]
            [InlineData(null, 0)]
            [InlineData(new byte[] { 0b00000000 }, -1)]
            public void ThrowsWhenBitIndexOutOfRange(byte[] data, int index)
            {
                Assert.Throws<IndexOutOfRangeException>(() =>
                    BitUtility.GetBit(data, index));
            }
        }

        /// <summary>Tests for BitUtility.SetBit (set a single bit in place).</summary>
        public class SetBit
        {
            [Theory]
            [InlineData(new byte[] { 0b00000000 }, 0, new byte[] { 0b00000001 })]
            [InlineData(new byte[] { 0b00000000 }, 2, new byte[] { 0b00000100 })]
            [InlineData(new byte[] { 0b00000000 }, 7, new byte[] { 0b10000000 })]
            [InlineData(new byte[] { 0b00000000, 0b00000000 }, 8, new byte[] { 0b00000000, 0b00000001 })]
            [InlineData(new byte[] { 0b00000000, 0b00000000 }, 15, new byte[] { 0b00000000, 0b10000000 })]
            public void SetsBitAtIndex(byte[] data, int index, byte[] expectedValue)
            {
                BitUtility.SetBit(data, index);
                Assert.Equal(expectedValue, data);
            }
        }

        /// <summary>Tests for BitUtility.ClearBit (clear a single bit in place).</summary>
        public class ClearBit
        {
            [Theory]
            [InlineData(new byte[] { 0b00000001 }, 0, new byte[] { 0b00000000 })]
            [InlineData(new byte[] { 0b00000010 }, 1, new byte[] { 0b00000000 })]
            [InlineData(new byte[] { 0b10000001 }, 7, new byte[] { 0b00000001 })]
            [InlineData(new byte[] { 0b11111111, 0b11111111 }, 15, new byte[] { 0b11111111, 0b01111111 })]
            public void ClearsBitAtIndex(byte[] data, int index, byte[] expectedValue)
            {
                BitUtility.ClearBit(data, index);
                Assert.Equal(expectedValue, data);
            }
        }

        /// <summary>Tests for BitUtility.RoundUpToMultipleOf64.</summary>
        public class RoundUpToMultipleOf64
        {
            [Theory]
            [InlineData(0, 0)]
            [InlineData(1, 64)]
            [InlineData(63, 64)]
            [InlineData(64, 64)]
            [InlineData(65, 128)]
            [InlineData(129, 192)]
            public void ReturnsNextMultiple(int size, int expectedSize)
            {
                Assert.Equal(expectedSize,
                    BitUtility.RoundUpToMultipleOf64(size));
            }

            // Non-positive sizes clamp to zero instead of rounding.
            [Theory]
            [InlineData(0)]
            [InlineData(-1)]
            public void ReturnsZeroWhenSizeIsLessThanOrEqualToZero(int size)
            {
                Assert.Equal(0,
                    BitUtility.RoundUpToMultipleOf64(size));
            }
        }
    }
}
| apache-2.0 |
dillia23/code-dot-org | pegasus/sites/virtual/curriculum-course1/8/Teacher.md | 6516 | ---
title: "Artist: Sequence"
view: page_curriculum
theme: none
---
<%= partial('curriculum_header', :unittitle=>'Course 1', :lesson=>8, :title=> 'Artist: Sequence', :unplugged=>false, :time=>30) %>
[content]
[together]
## Lesson Overview
In this lesson students will take control of the Artist to complete simple drawings on the screen.
[summary]
## Teaching Summary
### **Getting Started**
[Introduction](#GetStarted) <br/>
### **Activity: Artist Sequence**
[Artist: Sequence](#Activity)
### **Extended Learning**
[Extension Activities](#Extended)
[/summary]
## Lesson Objectives
### Students will:
- Create a program to complete an image using sequential steps
- Select an argument for a given command
- Choose the appropriate blocks to draw images with non-continuous lines
[/together]
[together]
## Getting Started
### <a name="GetStarted"></a> Introduction
Brainstorm with students ways to tell someone else how to draw a picture:
- How would you do that with a computer?
- In these puzzles you will be moving a character who leaves a line everywhere it goes.
- You'll use the cardinal directions to do this, just like we've been doing to move the bird and bee.
[/together]
[together]
## Activity
### <a name="Activity"></a> [Artist: Sequence](http://learn.code.org/s/course1/stage/8/puzzle/1)
If students struggle to use the correct number of blocks to draw a line, point out that each line segment has a dot on both ends.
[/together]
<!--(this is left in here as an example of how to include an image in Markdown)
 -->
[together]
## Extended Learning
### <a name="Extended"></a>Use these activities to enhance student learning. They can be used as outside-of-class activities or as other enrichment.
### The Copy Machine
- Give students two pieces of paper.
- On one sheet, draw a simple image (right angles and straight lines only).
- On the second sheet, draw instructions for recreating that image using a series of arrows.
- Trade instruction sheets and attempt to recreate the image using only the provided instructions.
[/together]
[standards]
## Connections and Background Information
### PARCC / Smarter Balanced Assessment Skills
- Click / tap
- Drag and drop
- Select object
- Use video player
### ISTE Standards (formerly NETS)
- 1.a - Apply existing knowledge to generate new ideas, products, or processes.
- 1.c - Use models and simulation to explore complex systems and issues.
- 4.b - Plan and manage activities to develop a solution or complete a project.
- 6.a - Understand and use technology systems.
- 6.c - Troubleshoot systems and applications.
- 6.d - Transfer current knowledge to learning of new technologies.
### CSTA K-12 Computer Science Standards
- CT.L1:3-01. Use technology resources (e.g., puzzles, logical thinking programs) to solve age appropriate problems.
- CL.L1:3-02. Work cooperatively and collaboratively with peers teachers, and others using technology.
- CPP.L1:6-05. Construct a program as a set of step-by-step instructions to be acted out.
- CPP.L1:6-06. Implement problem solutions using a block-based visual programming language.
- CT.L2-01. Use the basic steps in algorithmic problem solving to design solutions.
- CT.L2-06. Describe and analyze a sequence of instructions being followed.
- CT.L2-08. Use visual representations of problem states, structures, and data.
- CT.L2-12. Use abstraction to decompose a problem into sub problems.
### Next-Gen Science Standards
- K-2-PS3-2. Use tools and materials provided to design and build a device that solves a specific problem or a solution to a specific problem.
### Common Core Mathematical Practices
- 1. Make sense of problems and persevere in solving them.
- 2. Reason abstractly and quantitatively.
- 4. Model with mathematics.
- 5. Use appropriate tools strategically.
- 6. Attend to precision.
- 7. Look for and make use of structure.
- 8. Look for and express regularity in repeated reasoning.
### Common Core Math Standards
- K.G.A.1 - Describe objects in the environment using names of shapes, and describe the relative positions of these objects using terms such as above, below, beside, in front of, behind, and next to.
- K.G.A.2 - Correctly name shapes regardless of their orientations or overall size.
- K.G.B.6 - Compose simple shapes to form larger shapes. For example, "Can you join these two triangles with full sides touching to make a rectangle?"
- 1.G.A.1 - Distinguish between defining attributes (e.g., triangles are closed and three-sided) versus non-defining attributes (e.g., color, orientation, overall size); build and draw shapes to possess defining attributes.
- 1.G.A.2 - Compose two-dimensional shapes (rectangles, squares, trapezoids, triangles, half-circles, and quarter-circles) or three-dimensional shapes (cubes, right rectangular prisms, right circular cones, and right circular cylinders) to create a composite shape, and compose new shapes from the composite shape.
- 2.G.A.1 - Recognize and draw shapes having specified attributes, such as a given number of angles or a given number of equal faces. Identify triangles, quadrilaterals, pentagons, hexagons, and cubes.
### Common Core Language Arts Standards
- SL.K.1 - Participate in collaborative conversations with diverse partners about kindergarten topics and texts with peers and adults in small and larger groups.
- SL.K.5 - Add drawings or other visual displays to descriptions as desired to provide additional detail.
- L.K.6 - Use words and phrases acquired through conversations, reading and being read to, and responding to texts.
- SL.1.1 - Participate in collaborative conversations with diverse partners about grade 1 topics and texts with peers and adults in small and larger groups.
- SL.1.5 - Add drawings or other visual displays to descriptions when appropriate to clarify ideas, thoughts, and feelings.
- L.1.6 - Use words and phrases acquired through conversations, reading and being read to, and responding to texts, including using frequently occurring conjunctions to signal simple relationships.
- SL.2.1 - Participate in collaborative conversations with diverse partners about grade 2 topics and texts with peers and adults in small and larger groups.
- L.2.6 - Use words and phrases acquired through conversations, reading and being read to, and responding to texts, including using adjectives and adverbs to describe.
[/standards]
[/content]
<link rel="stylesheet" type="text/css" href="../docs/morestyle.css"/>
| apache-2.0 |
Subsets and Splits