repo_name | path | size | content | license
---|---|---|---|---|
jtux270/translate | ovirt/3.6_source/frontend/webadmin/modules/uicommonweb/src/main/java/org/ovirt/engine/ui/uicommonweb/models/providers/NewExternalSubnetModel.java | 3910 | package org.ovirt.engine.ui.uicommonweb.models.providers;
import org.ovirt.engine.core.common.action.AddExternalSubnetParameters;
import org.ovirt.engine.core.common.action.VdcActionType;
import org.ovirt.engine.core.common.action.VdcReturnValueBase;
import org.ovirt.engine.core.common.businessentities.network.NetworkView;
import org.ovirt.engine.core.common.businessentities.network.ProviderNetwork;
import org.ovirt.engine.ui.frontend.Frontend;
import org.ovirt.engine.ui.uicommonweb.UICommand;
import org.ovirt.engine.ui.uicommonweb.help.HelpTag;
import org.ovirt.engine.ui.uicommonweb.models.EntityModel;
import org.ovirt.engine.ui.uicommonweb.models.Model;
import org.ovirt.engine.ui.uicommonweb.models.SearchableListModel;
import org.ovirt.engine.ui.uicompat.ConstantsManager;
import org.ovirt.engine.ui.uicompat.FrontendActionAsyncResult;
import org.ovirt.engine.ui.uicompat.IFrontendActionAsyncCallback;
public class NewExternalSubnetModel extends Model {
private EntityModel<NetworkView> network;
private ExternalSubnetModel subnetModel;
private final SearchableListModel sourceModel;
public NewExternalSubnetModel(NetworkView network, SearchableListModel sourceModel) {
this.sourceModel = sourceModel;
setNetwork(new EntityModel<NetworkView>());
getNetwork().setEntity(network);
setSubnetModel(new ExternalSubnetModel());
getSubnetModel().setExternalNetwork(network.getProvidedBy());
setTitle(ConstantsManager.getInstance().getConstants().newExternalSubnetTitle());
setHelpTag(HelpTag.new_external_subnet);
setHashName("new_external_subnet"); //$NON-NLS-1$
initCommands();
}
protected void initCommands() {
UICommand okCommand = UICommand.createDefaultOkUiCommand("OnSave", this); //$NON-NLS-1$
getCommands().add(okCommand);
UICommand cancelCommand = UICommand.createCancelUiCommand("Cancel", this); //$NON-NLS-1$
getCommands().add(cancelCommand);
}
public EntityModel<NetworkView> getNetwork() {
return network;
}
private void setNetwork(EntityModel<NetworkView> network) {
this.network = network;
}
public ExternalSubnetModel getSubnetModel() {
return subnetModel;
}
private void setSubnetModel(ExternalSubnetModel subnetModel) {
this.subnetModel = subnetModel;
}
private void onSave() {
if (!validate()) {
return;
}
// Save changes.
flush();
startProgress(null);
ProviderNetwork providedBy = getNetwork().getEntity().getProvidedBy();
Frontend.getInstance().runAction(VdcActionType.AddSubnetToProvider,
new AddExternalSubnetParameters(getSubnetModel().getSubnet(),
providedBy.getProviderId(), providedBy.getExternalId()),
new IFrontendActionAsyncCallback() {
@Override
public void executed(FrontendActionAsyncResult result) {
VdcReturnValueBase returnValue = result.getReturnValue();
stopProgress();
if (returnValue != null && returnValue.getSucceeded()) {
cancel();
}
}
},
this,
true);
}
public void flush() {
getSubnetModel().flush();
}
private void cancel() {
sourceModel.setWindow(null);
}
@Override
public void executeCommand(UICommand command) {
super.executeCommand(command);
if ("OnSave".equals(command.getName())) { //$NON-NLS-1$
onSave();
} else if ("Cancel".equals(command.getName())) { //$NON-NLS-1$
cancel();
}
}
public boolean validate() {
return getSubnetModel().validate();
}
}
| gpl-3.0 |
Toshiro90/rathena | .github/ISSUE_TEMPLATE/bug_report.md | 1633 | ---
name: Bug report
about: Create a report to help us improve
title: ''
labels: type:bug
assignees: ''
---
<!-- NOTE: Anything within these brackets will be hidden on the preview of the Issue. -->
* **rAthena Hash**:
<!-- Please specify the rAthena [GitHub hash](https://help.github.com/articles/autolinked-references-and-urls/#commit-shas) on which you encountered this issue.
How to get your GitHub Hash:
1. cd your/rAthena/directory/
2. git rev-parse --short HEAD
3. Copy the resulting hash.
-->
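<!-- Example session (sketch only; the hash shown is illustrative, yours will differ):
$ cd your/rAthena/directory/
$ git rev-parse --short HEAD
f06e5b3
-->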
* **Client Date**:
<!-- Please specify the client date you used. -->
* **Server Mode**:
<!-- Which mode does your server use: Pre-Renewal or Renewal? -->
* **Description of Issue**:
* Result: <!-- Describe the issue that you experienced in detail. -->
* Expected Result: <!-- Describe what you would expect to happen in detail. -->
* How to Reproduce: <!-- If you have not stated in the description of the result already, please give us a short guide how we can reproduce your issue. -->
* Official Information: <!-- If possible, provide information from official servers (kRO or other sources) which prove that the result is wrong. Please take into account that iRO (especially iRO Wiki) is not always the same as kRO. -->
<!-- * _NOTE: Make sure you quote ``` `@atcommands` ``` just like this so that you do not tag uninvolved GitHub users!_ -->
* **Modifications that may affect results**:
<!-- * Please provide any information that could influence the expected result. -->
<!-- * This can be either configurations you changed, database values you changed, or even external source modifications. -->
| gpl-3.0 |
nmaillat/Phraseanet | lib/classes/media/adapter.php | 556 | <?php
/*
* This file is part of Phraseanet
*
* (c) 2005-2016 Alchemy
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
use Guzzle\Http\Url;
class media_adapter extends media_abstract
{
/**
* Constructor
*
* Enforces Url to be defined
*
* @param int $width
* @param int $height
* @param Url $url
*/
public function __construct($width, $height, Url $url)
{
parent::__construct($width, $height, $url);
}
}
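/*
 * Minimal usage sketch (hypothetical values; assumes Guzzle 3's
 * Url::factory() helper for building the required Url instance):
 *
 * $url = Guzzle\Http\Url::factory('http://example.com/media/42.jpg');
 * $media = new media_adapter(640, 480, $url);
 * // Passing a plain string instead of a Url object triggers a type error,
 * // which is how the constructor enforces a well-formed URL.
 */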
| gpl-3.0 |
TR-Host/easy-wi-mirror | web/stuff/data/table_easywi_statistics.php | 8010 | <?php
/**
* File: table_easywi_statistics.php.
* Author: Ulrich Block
* Date: 17.10.15
* Contact: <[email protected]>
*
* This file is part of Easy-WI.
*
* Easy-WI is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Easy-WI is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Easy-WI. If not, see <http://www.gnu.org/licenses/>.
*
* This file is part of Easy-WI.
*
* Easy-WI is free software: you may redistribute and/or modify it
* under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Easy-WI is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
$defined['easywi_statistics'] = array(
'gameMasterInstalled' => array("Type"=>"int(10) unsigned","Null"=>"YES","Key"=>"","Default"=>"0","Extra"=>""),
'gameMasterActive' => array("Type"=>"int(10) unsigned","Null"=>"YES","Key"=>"","Default"=>"0","Extra"=>""),
'gameMasterServerAvailable' => array("Type"=>"int(10) unsigned","Null"=>"YES","Key"=>"","Default"=>"0","Extra"=>""),
'gameMasterSlotsAvailable' => array("Type"=>"int(10) unsigned","Null"=>"YES","Key"=>"","Default"=>"0","Extra"=>""),
'gameMasterCrashed' => array("Type"=>"int(10) unsigned","Null"=>"YES","Key"=>"","Default"=>"0","Extra"=>""),
'gameserverInstalled' => array("Type"=>"int(10) unsigned","Null"=>"YES","Key"=>"","Default"=>"0","Extra"=>""),
'gameserverActive' => array("Type"=>"int(10) unsigned","Null"=>"YES","Key"=>"","Default"=>"0","Extra"=>""),
'gameserverSlotsInstalled' => array("Type"=>"int(10) unsigned","Null"=>"YES","Key"=>"","Default"=>"0","Extra"=>""),
'gameserverSlotsActive' => array("Type"=>"int(10) unsigned","Null"=>"YES","Key"=>"","Default"=>"0","Extra"=>""),
'gameserverSlotsUsed' => array("Type"=>"int(10) unsigned","Null"=>"YES","Key"=>"","Default"=>"0","Extra"=>""),
'gameserverNoPassword' => array("Type"=>"int(10) unsigned","Null"=>"YES","Key"=>"","Default"=>"0","Extra"=>""),
'gameserverNoTag' => array("Type"=>"int(10) unsigned","Null"=>"YES","Key"=>"","Default"=>"0","Extra"=>""),
'gameserverNotRunning' => array("Type"=>"int(10) unsigned","Null"=>"YES","Key"=>"","Default"=>"0","Extra"=>""),
'mysqlMasterInstalled' => array("Type"=>"int(10) unsigned","Null"=>"YES","Key"=>"","Default"=>"0","Extra"=>""),
'mysqlMasterActive' => array("Type"=>"int(10) unsigned","Null"=>"YES","Key"=>"","Default"=>"0","Extra"=>""),
'mysqlMasterDBAvailable' => array("Type"=>"int(10) unsigned","Null"=>"YES","Key"=>"","Default"=>"0","Extra"=>""),
'mysqlMasterCrashed' => array("Type"=>"int(10) unsigned","Null"=>"YES","Key"=>"","Default"=>"0","Extra"=>""),
'mysqlDBInstalled' => array("Type"=>"int(10) unsigned","Null"=>"YES","Key"=>"","Default"=>"0","Extra"=>""),
'mysqlDBActive' => array("Type"=>"int(10) unsigned","Null"=>"YES","Key"=>"","Default"=>"0","Extra"=>""),
'mysqlDBSpaceUsed' => array("Type"=>"int(10) unsigned","Null"=>"YES","Key"=>"","Default"=>"0","Extra"=>""),
'ticketsCompleted' => array("Type"=>"int(10) unsigned","Null"=>"YES","Key"=>"","Default"=>"0","Extra"=>""),
'ticketsInProcess' => array("Type"=>"int(10) unsigned","Null"=>"YES","Key"=>"","Default"=>"0","Extra"=>""),
'ticketsNew' => array("Type"=>"int(10) unsigned","Null"=>"YES","Key"=>"","Default"=>"0","Extra"=>""),
'userAmount' => array("Type"=>"int(10) unsigned","Null"=>"YES","Key"=>"","Default"=>"0","Extra"=>""),
'userAmountActive' => array("Type"=>"int(10) unsigned","Null"=>"YES","Key"=>"","Default"=>"0","Extra"=>""),
'virtualMasterInstalled' => array("Type"=>"int(10) unsigned","Null"=>"YES","Key"=>"","Default"=>"0","Extra"=>""),
'virtualMasterActive' => array("Type"=>"int(10) unsigned","Null"=>"YES","Key"=>"","Default"=>"0","Extra"=>""),
'virtualMasterVserverAvailable' => array("Type"=>"int(10) unsigned","Null"=>"YES","Key"=>"","Default"=>"0","Extra"=>""),
'virtualInstalled' => array("Type"=>"int(10) unsigned","Null"=>"YES","Key"=>"","Default"=>"0","Extra"=>""),
'virtualActive' => array("Type"=>"int(10) unsigned","Null"=>"YES","Key"=>"","Default"=>"0","Extra"=>""),
'voiceMasterInstalled' => array("Type"=>"int(10) unsigned","Null"=>"YES","Key"=>"","Default"=>"0","Extra"=>""),
'voiceMasterActive' => array("Type"=>"int(10) unsigned","Null"=>"YES","Key"=>"","Default"=>"0","Extra"=>""),
'voiceMasterServerAvailable' => array("Type"=>"int(10) unsigned","Null"=>"YES","Key"=>"","Default"=>"0","Extra"=>""),
'voiceMasterSlotsAvailable' => array("Type"=>"int(10) unsigned","Null"=>"YES","Key"=>"","Default"=>"0","Extra"=>""),
'voiceMasterCrashed' => array("Type"=>"int(10) unsigned","Null"=>"YES","Key"=>"","Default"=>"0","Extra"=>""),
'voiceserverInstalled' => array("Type"=>"int(10) unsigned","Null"=>"YES","Key"=>"","Default"=>"0","Extra"=>""),
'voiceserverActive' => array("Type"=>"int(10) unsigned","Null"=>"YES","Key"=>"","Default"=>"0","Extra"=>""),
'voiceserverSlotsInstalled' => array("Type"=>"int(10) unsigned","Null"=>"YES","Key"=>"","Default"=>"0","Extra"=>""),
'voiceserverSlotsActive' => array("Type"=>"int(10) unsigned","Null"=>"YES","Key"=>"","Default"=>"0","Extra"=>""),
'voiceserverSlotsUsed' => array("Type"=>"int(10) unsigned","Null"=>"YES","Key"=>"","Default"=>"0","Extra"=>""),
'voiceserverTrafficAllowed' => array("Type"=>"int(10) unsigned","Null"=>"YES","Key"=>"","Default"=>"0","Extra"=>""),
'voiceserverTrafficUsed' => array("Type"=>"int(10) unsigned","Null"=>"YES","Key"=>"","Default"=>"0","Extra"=>""),
'voiceserverCrashed' => array("Type"=>"int(10) unsigned","Null"=>"YES","Key"=>"","Default"=>"0","Extra"=>""),
'webMasterInstalled' => array("Type"=>"int(10) unsigned","Null"=>"YES","Key"=>"","Default"=>"0","Extra"=>""),
'webMasterActive' => array("Type"=>"int(10) unsigned","Null"=>"YES","Key"=>"","Default"=>"0","Extra"=>""),
'webMasterCrashed' => array("Type"=>"int(10) unsigned","Null"=>"YES","Key"=>"","Default"=>"0","Extra"=>""),
'webMasterSpaceAvailable' => array("Type"=>"int(10) unsigned","Null"=>"YES","Key"=>"","Default"=>"0","Extra"=>""),
'webMasterVhostAvailable' => array("Type"=>"int(10) unsigned","Null"=>"YES","Key"=>"","Default"=>"0","Extra"=>""),
'webspaceInstalled' => array("Type"=>"int(10) unsigned","Null"=>"YES","Key"=>"","Default"=>"0","Extra"=>""),
'webspaceActive' => array("Type"=>"int(10) unsigned","Null"=>"YES","Key"=>"","Default"=>"0","Extra"=>""),
'webspaceSpaceGiven' => array("Type"=>"int(10) unsigned","Null"=>"YES","Key"=>"","Default"=>"0","Extra"=>""),
'webspaceSpaceGivenActive' => array("Type"=>"int(10) unsigned","Null"=>"YES","Key"=>"","Default"=>"0","Extra"=>""),
'webspaceSpaceUsed' => array("Type"=>"int(10) unsigned","Null"=>"YES","Key"=>"","Default"=>"0","Extra"=>""),
'userID' => array("Type"=>"int(10) unsigned","Null"=>"NO","Key"=>"PRI","Default"=>"0","Extra"=>""),
'statDate' => array("Type"=>"date","Null"=>"NO","Key"=>"PRI","Default"=>"2015-01-01","Extra"=>""),
'countUpdates' => array("Type"=>"int(10) unsigned","Null"=>"NO","Key"=>"","Default"=>"0","Extra"=>"")
); | gpl-3.0 |
xinhuang327/bugtv | Bugzilla/BugUrl/Bugzilla/Local.pm | 2669 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# This Source Code Form is "Incompatible With Secondary Licenses", as
# defined by the Mozilla Public License, v. 2.0.
package Bugzilla::BugUrl::Bugzilla::Local;
use 5.10.1;
use strict;
use warnings;
use parent qw(Bugzilla::BugUrl::Bugzilla);
use Bugzilla::Error;
use Bugzilla::Util;
###############################
#### Initialization ####
###############################
use constant VALIDATOR_DEPENDENCIES => {
value => ['bug_id'],
};
###############################
#### Methods ####
###############################
sub ref_bug_url {
my $self = shift;
if (!exists $self->{ref_bug_url}) {
my $ref_bug_id = new URI($self->name)->query_param('id');
my $ref_bug = Bugzilla::Bug->check($ref_bug_id);
my $ref_value = $self->local_uri($self->bug_id);
$self->{ref_bug_url} =
new Bugzilla::BugUrl::Bugzilla::Local({ bug_id => $ref_bug->id,
value => $ref_value });
}
return $self->{ref_bug_url};
}
sub should_handle {
my ($class, $uri) = @_;
# Check if it is either a bug id number or an alias.
return 1 if $uri->as_string =~ m/^\w+$/;
# Check if it is a local Bugzilla uri and call
# Bugzilla::BugUrl::Bugzilla to check if it's a valid Bugzilla
# see also url.
my $canonical_local = URI->new($class->local_uri)->canonical;
if ($canonical_local->authority eq $uri->canonical->authority
and $canonical_local->path eq $uri->canonical->path)
{
return $class->SUPER::should_handle($uri);
}
return 0;
}
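# Dispatch sketch (hypothetical values): a bare id or alias is always
# claimed by this class, while a full URL is only claimed when it points
# at this installation's own urlbase.
#
# Bugzilla::BugUrl::Bugzilla::Local->should_handle(URI->new('12345')); # 1
# Bugzilla::BugUrl::Bugzilla::Local->should_handle(URI->new('http://other.example/show_bug.cgi?id=1')); # 0, foreign host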
sub _check_value {
my ($class, $uri, undef, $params) = @_;
# At this point we are going to treat any word as a
# bug id/alias to the local Bugzilla.
my $value = $uri->as_string;
if ($value =~ m/^\w+$/) {
$uri = new URI($class->local_uri($value));
} else {
# It's not a word, then we have to check
# if it's a valid Bugzilla url.
$uri = $class->SUPER::_check_value($uri);
}
my $ref_bug_id = $uri->query_param('id');
my $ref_bug = Bugzilla::Bug->check($ref_bug_id);
my $self_bug_id = $params->{bug_id};
$params->{ref_bug} = $ref_bug;
if ($ref_bug->id == $self_bug_id) {
ThrowUserError('see_also_self_reference');
}
return $uri;
}
sub local_uri {
my ($self, $bug_id) = @_;
$bug_id ||= '';
return correct_urlbase() . "show_bug.cgi?id=$bug_id";
}
1;
| mpl-2.0 |
UK992/servo | tests/wpt/web-platform-tests/html/rendering/replaced-elements/attributes-for-embedded-content-and-images/canvas-aspect-ratio.html | 963 | <!doctype html>
<title>Canvas width and height attributes are used as the surface size</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<style>
canvas {
width: 100%;
max-width: 100px;
height: auto;
}
</style>
<body>
<script>
let t = async_test("Canvas width and height attributes are used as the surface size");
function assert_ratio(img, expected) {
let epsilon = 0.001;
assert_approx_equals(parseInt(getComputedStyle(img).width, 10) / parseInt(getComputedStyle(img).height, 10), expected, epsilon);
}
// Create and append a new canvas and immediately check the ratio.
t.step(function() {
var canvas = document.createElement("canvas");
canvas.setAttribute("width", "250");
canvas.setAttribute("height", "100");
document.body.appendChild(canvas);
// Canvases always use the aspect ratio from their surface size.
assert_ratio(canvas, 2.5);
t.done();
});
</script>
| mpl-2.0 |
TeXitoi/navitia | source/calendar/calendar.cpp | 2441 | /* Copyright © 2001-2014, Canal TP and/or its affiliates. All rights reserved.
This file is part of Navitia,
the software to build cool stuff with public transport.
Hope you'll enjoy and contribute to this project,
powered by Canal TP (www.canaltp.fr).
Help us simplify mobility and open public transport:
a non ending quest to the responsive locomotion way of traveling!
LICENCE: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Stay tuned using
twitter @navitia
IRC #navitia on freenode
https://groups.google.com/d/forum/navitia
www.navitia.io
*/
#include "calendar.h"
#include "ptreferential/ptreferential.h"
#include "type/data.h"
namespace navitia { namespace calendar {
type::Indexes Calendar::get_calendars(const std::string& filter,
const std::vector<std::string>& forbidden_uris,
const type::Data &d,
const boost::gregorian::date_period filter_period,
const boost::posix_time::ptime){
type::Indexes to_return;
to_return = ptref::make_query(type::Type_e::Calendar, filter, forbidden_uris, d);
if (to_return.empty() || (filter_period.begin().is_not_a_date())) {
return to_return;
}
type::Indexes indexes;
for (type::idx_t idx : to_return) {
navitia::type::Calendar* cal = d.pt_data->calendars[idx];
for (const boost::gregorian::date_period& per : cal->active_periods) {
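// A single-day filter (begin == end) is tested by containment; a longer period by overlap.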
if (filter_period.begin() == filter_period.end()) {
if (per.contains(filter_period.begin())) {
indexes.insert(cal->idx);
break;
}
} else {
if (filter_period.intersects(per)) {
indexes.insert(cal->idx);
break;
}
}
}
}
return indexes;
}
}
}
| agpl-3.0 |
poiesisconsulting/openerp-restaurant | portal/__openerp__.py | 2288 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2012 OpenERP S.A (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name' : 'Portal',
'version': '1.0',
'depends': [
'base',
'share',
'auth_signup',
],
'author': 'OpenERP SA',
'category': 'Portal',
'description': """
Customize access to your OpenERP database to external users by creating portals.
================================================================================
A portal defines a specific user menu and access rights for its members. This
menu can be seen by portal members, public users and any other user that
has access to technical features (e.g. the administrator).
Also, each portal member is linked to a specific partner.
The module also associates user groups with the portal users (adding a group in
the portal automatically adds it to the portal users, etc). That feature is
very handy when used in combination with the module 'share'.
""",
'website': 'http://www.openerp.com',
'data': [
'portal_data.xml',
'portal_view.xml',
'wizard/portal_wizard_view.xml',
'wizard/share_wizard_view.xml',
'security/ir.model.access.csv',
],
'demo': ['portal_demo.xml'],
'css': ['static/src/css/portal.css'],
'auto_install': True,
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ShinDarth/azerothcore-wotlk | src/server/game/Movement/MovementGenerators/HomeMovementGenerator.h | 1582 | /*
* This file is part of the AzerothCore Project. See AUTHORS file for Copyright information
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU Affero General Public License as published by the
* Free Software Foundation; either version 3 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef ACORE_HOMEMOVEMENTGENERATOR_H
#define ACORE_HOMEMOVEMENTGENERATOR_H
#include "MovementGenerator.h"
class Creature;
template < class T >
class HomeMovementGenerator;
template <>
class HomeMovementGenerator<Creature> : public MovementGeneratorMedium< Creature, HomeMovementGenerator<Creature> >
{
public:
HomeMovementGenerator() : arrived(false), i_recalculateTravel(false) {}
~HomeMovementGenerator() {}
void DoInitialize(Creature*);
void DoFinalize(Creature*);
void DoReset(Creature*);
bool DoUpdate(Creature*, const uint32);
MovementGeneratorType GetMovementGeneratorType() { return HOME_MOTION_TYPE; }
void unitSpeedChanged() { i_recalculateTravel = true; }
private:
void _setTargetLocation(Creature*);
bool arrived : 1;
bool i_recalculateTravel : 1;
};
#endif
| agpl-3.0 |
beni55/edx-platform | cms/djangoapps/contentstore/views/tests/test_assets.py | 15204 | """
Unit tests for the asset upload endpoint.
"""
from datetime import datetime
from io import BytesIO
from pytz import UTC
import json
from django.conf import settings
from contentstore.tests.utils import CourseTestCase
from contentstore.views import assets
from contentstore.utils import reverse_course_url
from xmodule.assetstore.assetmgr import AssetMetadataFoundTemporary
from xmodule.assetstore import AssetMetadata
from xmodule.contentstore.content import StaticContent
from xmodule.contentstore.django import contentstore
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.xml_importer import import_course_from_xml
from django.test.utils import override_settings
from opaque_keys.edx.locations import SlashSeparatedCourseKey, AssetLocation
import mock
from ddt import ddt
from ddt import data
TEST_DATA_DIR = settings.COMMON_TEST_DATA_ROOT
MAX_FILE_SIZE = settings.MAX_ASSET_UPLOAD_FILE_SIZE_IN_MB * 1000 ** 2
class AssetsTestCase(CourseTestCase):
"""
Parent class for all asset tests.
"""
def setUp(self):
super(AssetsTestCase, self).setUp()
self.url = reverse_course_url('assets_handler', self.course.id)
def upload_asset(self, name="asset-1", extension=".txt"):
"""
Post to the asset upload url
"""
f = self.get_sample_asset(name, extension)
return self.client.post(self.url, {"name": name, "file": f})
def get_sample_asset(self, name, extension=".txt"):
"""Returns an in-memory file with the given name for testing"""
f = BytesIO(name)
f.name = name + extension
return f
class BasicAssetsTestCase(AssetsTestCase):
"""
Test getting assets via html w/o additional args
"""
def test_basic(self):
resp = self.client.get(self.url, HTTP_ACCEPT='text/html')
self.assertEquals(resp.status_code, 200)
def test_static_url_generation(self):
course_key = SlashSeparatedCourseKey('org', 'class', 'run')
location = course_key.make_asset_key('asset', 'my_file_name.jpg')
path = StaticContent.get_static_path_from_location(location)
self.assertEquals(path, '/static/my_file_name.jpg')
def test_pdf_asset(self):
module_store = modulestore()
course_items = import_course_from_xml(
module_store,
self.user.id,
TEST_DATA_DIR,
['toy'],
static_content_store=contentstore(),
verbose=True
)
course = course_items[0]
url = reverse_course_url('assets_handler', course.id)
# Test valid contentType for pdf asset (textbook.pdf)
resp = self.client.get(url, HTTP_ACCEPT='application/json')
self.assertContains(resp, "/c4x/edX/toy/asset/textbook.pdf")
asset_location = AssetLocation.from_deprecated_string('/c4x/edX/toy/asset/textbook.pdf')
content = contentstore().find(asset_location)
# Check after import textbook.pdf has valid contentType ('application/pdf')
# Note: Actual contentType for textbook.pdf in asset.json is 'text/pdf'
self.assertEqual(content.content_type, 'application/pdf')
class PaginationTestCase(AssetsTestCase):
"""
Tests the pagination of assets returned from the REST API.
"""
def test_json_responses(self):
"""
Test the ajax asset interfaces
"""
self.upload_asset("asset-1")
self.upload_asset("asset-2")
self.upload_asset("asset-3")
self.upload_asset("asset-4", ".odt")
# Verify valid page requests
self.assert_correct_asset_response(self.url, 0, 4, 4)
self.assert_correct_asset_response(self.url + "?page_size=2", 0, 2, 4)
self.assert_correct_asset_response(
self.url + "?page_size=2&page=1", 2, 2, 4)
self.assert_correct_sort_response(self.url, 'date_added', 'asc')
self.assert_correct_sort_response(self.url, 'date_added', 'desc')
self.assert_correct_sort_response(self.url, 'display_name', 'asc')
self.assert_correct_sort_response(self.url, 'display_name', 'desc')
self.assert_correct_filter_response(self.url, 'asset_type', '')
self.assert_correct_filter_response(self.url, 'asset_type', 'OTHER')
self.assert_correct_filter_response(
self.url, 'asset_type', 'Documents')
# Verify querying outside the range of valid pages
self.assert_correct_asset_response(
self.url + "?page_size=2&page=-1", 0, 2, 4)
self.assert_correct_asset_response(
self.url + "?page_size=2&page=2", 2, 2, 4)
self.assert_correct_asset_response(
self.url + "?page_size=3&page=1", 3, 1, 4)
@mock.patch('xmodule.contentstore.mongo.MongoContentStore.get_all_content_for_course')
def test_mocked_filtered_response(self, mock_get_all_content_for_course):
"""
Test the ajax asset interfaces
"""
asset_key = self.course.id.make_asset_key(
AssetMetadata.GENERAL_ASSET_TYPE, 'test.jpg')
upload_date = datetime(2015, 1, 12, 10, 30, tzinfo=UTC)
thumbnail_location = [
'c4x', 'edX', 'toy', 'thumbnail', 'test_thumb.jpg', None]
mock_get_all_content_for_course.return_value = [
[
{
"asset_key": asset_key,
"displayname": "test.jpg",
"contentType": "image/jpg",
"url": "/c4x/A/CS102/asset/test.jpg",
"uploadDate": upload_date,
"id": "/c4x/A/CS102/asset/test.jpg",
"portable_url": "/static/test.jpg",
"thumbnail": None,
"thumbnail_location": thumbnail_location,
"locked": None
}
],
1
]
# Verify valid page requests
self.assert_correct_filter_response(self.url, 'asset_type', 'OTHER')
def assert_correct_asset_response(self, url, expected_start, expected_length, expected_total):
"""
Get from the url and ensure it contains the expected number of responses
"""
resp = self.client.get(url, HTTP_ACCEPT='application/json')
json_response = json.loads(resp.content)
assets_response = json_response['assets']
self.assertEquals(json_response['start'], expected_start)
self.assertEquals(len(assets_response), expected_length)
self.assertEquals(json_response['totalCount'], expected_total)
def assert_correct_sort_response(self, url, sort, direction):
"""
Get from the url w/ a sort option and ensure items honor that sort
"""
resp = self.client.get(
url + '?sort=' + sort + '&direction=' + direction, HTTP_ACCEPT='application/json')
json_response = json.loads(resp.content)
assets_response = json_response['assets']
name1 = assets_response[0][sort]
name2 = assets_response[1][sort]
name3 = assets_response[2][sort]
if direction == 'asc':
self.assertLessEqual(name1, name2)
self.assertLessEqual(name2, name3)
else:
self.assertGreaterEqual(name1, name2)
self.assertGreaterEqual(name2, name3)
def assert_correct_filter_response(self, url, filter_type, filter_value):
"""
Get from the url w/ a filter option and ensure items honor that filter
"""
requested_file_types = settings.FILES_AND_UPLOAD_TYPE_FILTERS.get(
filter_value, None)
resp = self.client.get(
url + '?' + filter_type + '=' + filter_value, HTTP_ACCEPT='application/json')
json_response = json.loads(resp.content)
assets_response = json_response['assets']
if filter_value != '':
content_types = [asset['content_type'].lower()
for asset in assets_response]
if filter_value == 'OTHER':
all_file_type_extensions = []
for file_type in settings.FILES_AND_UPLOAD_TYPE_FILTERS.values():
all_file_type_extensions.extend(file_type)
for content_type in content_types:
self.assertNotIn(content_type, all_file_type_extensions)
else:
for content_type in content_types:
self.assertIn(content_type, requested_file_types)
@ddt
class UploadTestCase(AssetsTestCase):
"""
Unit tests for uploading a file
"""
def setUp(self):
super(UploadTestCase, self).setUp()
self.url = reverse_course_url('assets_handler', self.course.id)
def test_happy_path(self):
resp = self.upload_asset()
self.assertEquals(resp.status_code, 200)
def test_no_file(self):
resp = self.client.post(self.url, {"name": "file.txt"}, "application/json")
self.assertEquals(resp.status_code, 400)
@data(
(int(MAX_FILE_SIZE / 2.0), "small.file.test", 200),
(MAX_FILE_SIZE, "justequals.file.test", 200),
(MAX_FILE_SIZE + 90, "large.file.test", 413),
)
@mock.patch('contentstore.views.assets.get_file_size')
def test_file_size(self, case, get_file_size):
max_file_size, name, status_code = case
get_file_size.return_value = max_file_size
f = self.get_sample_asset(name=name)
resp = self.client.post(self.url, {
"name": name,
"file": f
})
self.assertEquals(resp.status_code, status_code)
class DownloadTestCase(AssetsTestCase):
"""
Unit tests for downloading a file.
"""
def setUp(self):
super(DownloadTestCase, self).setUp()
self.url = reverse_course_url('assets_handler', self.course.id)
# First, upload something.
self.asset_name = 'download_test'
resp = self.upload_asset(self.asset_name)
self.assertEquals(resp.status_code, 200)
self.uploaded_url = json.loads(resp.content)['asset']['url']
def test_download(self):
# Now, download it.
resp = self.client.get(self.uploaded_url, HTTP_ACCEPT='text/html')
self.assertEquals(resp.status_code, 200)
self.assertEquals(resp.content, self.asset_name)
def test_download_not_found_throw(self):
url = self.uploaded_url.replace(self.asset_name, 'not_the_asset_name')
resp = self.client.get(url, HTTP_ACCEPT='text/html')
self.assertEquals(resp.status_code, 404)
def test_metadata_found_in_modulestore(self):
# Insert asset metadata into the modulestore (with no accompanying asset).
asset_key = self.course.id.make_asset_key(AssetMetadata.GENERAL_ASSET_TYPE, 'pic1.jpg')
asset_md = AssetMetadata(asset_key, {
'internal_name': 'EKMND332DDBK',
'basename': 'pix/archive',
'locked': False,
'curr_version': '14',
'prev_version': '13'
})
modulestore().save_asset_metadata(asset_md, 15)
# Get the asset metadata and have it be found in the modulestore.
# Currently, no asset metadata should be found in the modulestore. The code is not yet storing it there.
# If asset metadata *is* found there, an exception is raised. This test ensures the exception is indeed raised.
# THIS IS TEMPORARY. Soon, asset metadata *will* be stored in the modulestore.
with self.assertRaises((AssetMetadataFoundTemporary, NameError)):
self.client.get(unicode(asset_key), HTTP_ACCEPT='text/html')
class AssetToJsonTestCase(AssetsTestCase):
"""
Unit test for transforming asset information into something
we can send out to the client via JSON.
"""
@override_settings(LMS_BASE="lms_base_url")
def test_basic(self):
upload_date = datetime(2013, 6, 1, 10, 30, tzinfo=UTC)
content_type = 'image/jpg'
course_key = SlashSeparatedCourseKey('org', 'class', 'run')
location = course_key.make_asset_key('asset', 'my_file_name.jpg')
thumbnail_location = course_key.make_asset_key('thumbnail', 'my_file_name_thumb.jpg')
# pylint: disable=protected-access
output = assets._get_asset_json("my_file", content_type, upload_date, location, thumbnail_location, True)
self.assertEquals(output["display_name"], "my_file")
self.assertEquals(output["date_added"], "Jun 01, 2013 at 10:30 UTC")
self.assertEquals(output["url"], "/c4x/org/class/asset/my_file_name.jpg")
self.assertEquals(output["external_url"], "lms_base_url/c4x/org/class/asset/my_file_name.jpg")
self.assertEquals(output["portable_url"], "/static/my_file_name.jpg")
self.assertEquals(output["thumbnail"], "/c4x/org/class/thumbnail/my_file_name_thumb.jpg")
self.assertEquals(output["id"], unicode(location))
self.assertEquals(output['locked'], True)
output = assets._get_asset_json("name", content_type, upload_date, location, None, False)
self.assertIsNone(output["thumbnail"])
class LockAssetTestCase(AssetsTestCase):
"""
Unit test for locking and unlocking an asset.
"""
def test_locking(self):
"""
Tests a simple locking and unlocking of an asset in the toy course.
"""
def verify_asset_locked_state(locked):
""" Helper method to verify lock state in the contentstore """
asset_location = StaticContent.get_location_from_path('/c4x/edX/toy/asset/sample_static.txt')
content = contentstore().find(asset_location)
self.assertEqual(content.locked, locked)
def post_asset_update(lock, course):
""" Helper method for posting asset update. """
content_type = 'application/txt'
upload_date = datetime(2013, 6, 1, 10, 30, tzinfo=UTC)
asset_location = course.id.make_asset_key('asset', 'sample_static.txt')
url = reverse_course_url('assets_handler', course.id, kwargs={'asset_key_string': unicode(asset_location)})
resp = self.client.post(
url,
# pylint: disable=protected-access
json.dumps(assets._get_asset_json(
"sample_static.txt", content_type, upload_date, asset_location, None, lock)),
"application/json"
)
self.assertEqual(resp.status_code, 201)
return json.loads(resp.content)
# Load the toy course.
module_store = modulestore()
course_items = import_course_from_xml(
module_store,
self.user.id,
TEST_DATA_DIR,
['toy'],
static_content_store=contentstore(),
verbose=True
)
course = course_items[0]
verify_asset_locked_state(False)
# Lock the asset
resp_asset = post_asset_update(True, course)
self.assertTrue(resp_asset['locked'])
verify_asset_locked_state(True)
# Unlock the asset
resp_asset = post_asset_update(False, course)
self.assertFalse(resp_asset['locked'])
verify_asset_locked_state(False)
| agpl-3.0 |
plamut/superdesk | client/app/scripts/superdesk/services/storage.js | 1250 | define(['angular'], function(angular) {
'use strict';
return angular.module('superdesk.services.storage', [])
/**
* LocalStorage wrapper
*
* it stores data as json to keep its type
*/
.service('storage', function() {
/**
* Get item from storage
*
* @param {string} key
* @returns {mixed}
*/
this.getItem = function(key) {
return angular.fromJson(localStorage.getItem(key));
};
/**
* Set storage item
*
* @param {string} key
* @param {mixed} data
*/
this.setItem = function(key, data) {
localStorage.setItem(key, angular.toJson(data));
};
/**
* Remove item from storage
*
* @param {string} key
*/
this.removeItem = function(key) {
localStorage.removeItem(key);
};
/**
* Remove all items from storage
*/
this.clear = function() {
localStorage.clear();
};
});
});
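// Minimal usage sketch (hypothetical key and payload; 'storage' is obtained
// through Angular dependency injection):
//
// storage.setItem('user.prefs', {theme: 'dark'}); // serialized to JSON
// var prefs = storage.getItem('user.prefs'); // -> {theme: 'dark'}, type preserved
// storage.removeItem('user.prefs');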
| agpl-3.0 |
liuqr/edx-xiaodun | lms/templates/registration/password_reset_confirm.html | 5545 | {% load i18n %}
{% load compressed %}
{% load staticfiles %}
<!DOCTYPE html>
<html lang="{{LANGUAGE_CODE}}">
<head>
<title>
{% blocktrans with platform_name=platform_name %}
Reset Your {{ platform_name }} Password
{% endblocktrans %}
</title>
{% compressed_css 'style-vendor' %}
{% compressed_css 'style-app' %}
{% compressed_css 'style-app-extend1' %}
{% compressed_css 'style-app-extend2' %}
{% block main_vendor_js %}
{% compressed_js 'main_vendor' %}
{% endblock %}
<!--[if lt IE 9]>
<script src="{% static 'js/html5shiv.js' %}"></script>
<![endif]-->
<script type="text/javascript">
$(function() {
// adding js class for styling with accessibility in mind
$('body').addClass('js');
// new window/tab opening
$('a[rel="external"], a[class="new-vp"]')
.click( function() {
window.open( $(this).attr('href') );
return false;
});
// form field label styling on focus
$("form :input").focus(function() {
$("label[for='" + this.id + "']").parent().addClass("is-focused");
}).blur(function() {
$("label").parent().removeClass("is-focused");
});
});
</script>
</head>
<body class="view-passwordreset">
<header class="global">
<nav>
<h1 class="logo">
<a href="{{MKTG_URL_ROOT}}"><img src="{% static 'images/header-logo.png' %}"></a>
</h1>
</nav>
</header>
<section class="content-wrapper">
<section class="passwordreset container">
<section class="introduction">
<header>
<h1>
{% blocktrans with platform_name=platform_name %}
Reset Your {{ platform_name }} Password
{% endblocktrans %}
</h1>
</header>
</section>
<section role="main" class="content">
{% if validlink %}
<header>
<h2 class="sr">{% trans "Password Reset Form" %}</h2>
</header>
<form role="form" id="passwordreset-form" method="post" data-remote="true" action="">{% csrf_token %}
<!-- status messages -->
<div role="alert" class="status message">
<h3 class="message-title">
{% blocktrans with platform_name=platform_name %}
We're sorry, {{ platform_name }} enrollment is not available in your region
{% endblocktrans %}
</h3>
</div>
<div role="alert" class="status message submission-error">
<h3 class="message-title">{% trans "The following errors occurred while processing your registration: " %}</h3>
<ul class="message-copy">
<li>{% trans "You must complete all fields." %}</li>
<li>{% trans "The two password fields didn't match." %}</li>
</ul>
</div>
<div role="alert" class="status message system-error">
<h3 class="message-title">{% trans "We're sorry, our systems seem to be having trouble processing your password reset" %}</h3>
<p class="message-copy">
{% blocktrans with start_link='<a href="{{MKTG_URL_CONTACT}}">' end_link='</a>' %}
Someone has been made aware of this issue. Please try again shortly. Please {{ start_link }}contact us{{ end_link }} about any concerns you have.
{% endblocktrans %}
</p>
</div>
<p class="instructions">
{% trans 'Please enter your new password twice so we can verify you typed it in correctly. <br /> Required fields are noted by <strong class="indicator">bold text and an asterisk (*)</strong>.' %}
</p>
<fieldset class="group group-form group-form-requiredinformation">
<legend class="sr">{% trans "Required Information" %}</legend>
<ol class="list-input">
<li class="field required password" id="field-new_password1">
<label for="new_password1">{% trans "Your New Password" %}</label>
<input id="new_password1" type="password" name="new_password1" placeholder="*****" />
</li>
<li class="field required password" id="field-new_password2">
<label for="new_password2">{% trans "Your New Password Again" %}</label>
<input id="new_password2" type="password" name="new_password2" placeholder="*****" />
</li>
</ol>
</fieldset>
<div class="form-actions">
<button name="submit" type="submit" id="submit" class="action action-primary action-update">{% trans "Change My Password" %}</button>
</div>
</form>
{% else %}
<header>
<h2 class="sr">{% trans "Your Password Reset Was Unsuccessful" %}</h2>
</header>
<p>
{% blocktrans with start_link='<a href="/login">' end_link='</a>' %}
The password reset link was invalid, possibly because the link has already been used. Please return to the {{ start_link }}login page{{ end_link }} and start the password reset process again.
{% endblocktrans %}
</p>
{% endif %}
</section>
<aside role="complementary">
<header>
<h3 class="sr">{% trans "Password Reset Help" %}</h3>
</header>
<div class="cta cta-help">
<h3>{% trans "Need Help?" %}</h3>
<p>
{% blocktrans with start_link='<a href="{{MKTG_URL_FAQ}}">' end_link='</a>' %}
View our {{ start_link }}help section for contact information and answers to commonly asked questions{{ end_link }}
{% endblocktrans %}
</p>
</div>
</aside>
</section>
</section>
| agpl-3.0 |
gayathri6/stepstream_salute | lib_old/oembedhelper.php | 12206 | <?php
/*
* StatusNet - the distributed open-source microblogging tool
* Copyright (C) 2008-2010, StatusNet, Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
if (!defined('STATUSNET')) {
exit(1);
}
/**
* Utility class to wrap basic oEmbed lookups.
*
* Blacklisted hosts will use an alternate lookup method:
* - Twitpic
*
* Whitelisted hosts will use known oEmbed API endpoints:
* - Flickr, YFrog
*
* Sites that provide discovery links will use them directly; a bug
* in use of discovery links with query strings is worked around.
*
* Others will fall back to oohembed (unless disabled).
* The API endpoint can be configured or disabled through config
* as 'oohembed'/'endpoint'.
*/
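// Configuration sketch (values hypothetical; the keys mirror the
// common_config('oembed', ...) lookups used below):
//
// $config['oembed']['order'] = array('built-in', 'well-known', 'discovery', 'service');
// $config['oembed']['endpoint'] = 'https://noembed.com/embed';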
class oEmbedHelper
{
protected static $apiMap = array(
'flickr.com' => 'http://www.flickr.com/services/oembed/',
'yfrog.com' => 'http://www.yfrog.com/api/oembed',
'youtube.com' => 'http://www.youtube.com/oembed',
'viddler.com' => 'http://lab.viddler.com/services/oembed/',
'qik.com' => 'http://qik.com/api/oembed.json',
'revision3.com' => 'http://revision3.com/api/oembed/',
'hulu.com' => 'http://www.hulu.com/api/oembed.json',
'vimeo.com' => 'http://www.vimeo.com/api/oembed.json',
'my.opera.com' => 'http://my.opera.com/service/oembed',
);
protected static $functionMap = array(
'twitpic.com' => 'oEmbedHelper::twitPic',
);
/**
* Perform or fake an oEmbed lookup for the given resource.
*
* Some known hosts are whitelisted with API endpoints where we
* know they exist but autodiscovery data isn't available.
* If autodiscovery links are missing and we don't recognize the
* host, we'll pass it to noembed.com's public service which
* will either proxy or fake info on a lot of sites.
*
* A few hosts are blacklisted due to known problems with oohembed,
* in which case we'll look up the info another way and return
* equivalent data.
*
* Throws exceptions on failure.
*
* @param string $url
* @param array $params
* @return object
*/
public static function getObject($url, $params=array())
{
$host = parse_url($url, PHP_URL_HOST);
if (substr($host, 0, 4) == 'www.') {
$host = substr($host, 4);
}
common_log(LOG_INFO, 'Checking for oEmbed data for ' . $url);
// You can fiddle with the order of discovery -- either skipping
// some types or re-ordering them.
$order = common_config('oembed', 'order');
foreach ($order as $method) {
switch ($method) {
case 'built-in':
common_log(LOG_INFO, 'Considering built-in oEmbed methods...');
// Blacklist: systems with no oEmbed API of their own, which are
// either missing from or broken on noembed.com's proxy.
// we know how to look data up in another way...
if (array_key_exists($host, self::$functionMap)) {
common_log(LOG_INFO, 'We have a built-in method for ' . $host);
$func = self::$functionMap[$host];
return call_user_func($func, $url, $params);
}
break;
case 'well-known':
common_log(LOG_INFO, 'Considering well-known oEmbed endpoints...');
// Whitelist: known API endpoints for sites that don't provide discovery...
if (array_key_exists($host, self::$apiMap)) {
$api = self::$apiMap[$host];
common_log(LOG_INFO, 'Using well-known endpoint "' . $api . '" for "' . $host . '"');
break 2;
}
break;
case 'discovery':
try {
common_log(LOG_INFO, 'Trying to discover an oEmbed endpoint using link headers.');
$api = self::discover($url);
common_log(LOG_INFO, 'Found API endpoint ' . $api . ' for URL ' . $url);
break 2;
} catch (Exception $e) {
common_log(LOG_INFO, 'Could not find an oEmbed endpoint using link headers.');
// Just ignore it!
}
break;
case 'service':
$api = common_config('oembed', 'endpoint');
common_log(LOG_INFO, 'Using service API endpoint ' . $api);
break 2;
}
}
if (empty($api)) {
// TRANS: Server exception thrown in oEmbed action if no API endpoint is available.
throw new ServerException(_('No oEmbed API endpoint available.'));
}
return self::getObjectFrom($api, $url, $params);
}
/**
* Perform basic discovery.
* @return string
*/
static function discover($url)
{
// @fixme ideally skip this for non-HTML stuff!
$body = self::http($url);
return self::discoverFromHTML($url, $body);
}
/**
* Partially ripped from OStatus' FeedDiscovery class.
*
* @param string $url source URL, used to resolve relative links
* @param string $body HTML body text
* @return mixed string with URL or false if no target found
*/
static function discoverFromHTML($url, $body)
{
// DOMDocument::loadHTML may throw warnings on unrecognized elements,
// and notices on unrecognized namespaces.
$old = error_reporting(error_reporting() & ~(E_WARNING | E_NOTICE));
$dom = new DOMDocument();
$ok = $dom->loadHTML($body);
error_reporting($old);
if (!$ok) {
throw new oEmbedHelper_BadHtmlException();
}
// Ok... now on to the links!
$feeds = array(
'application/json+oembed' => false,
);
$nodes = $dom->getElementsByTagName('link');
for ($i = 0; $i < $nodes->length; $i++) {
$node = $nodes->item($i);
if ($node->hasAttributes()) {
$rel = $node->attributes->getNamedItem('rel');
$type = $node->attributes->getNamedItem('type');
$href = $node->attributes->getNamedItem('href');
if ($rel && $type && $href) {
$rel = array_filter(explode(" ", $rel->value));
$type = trim($type->value);
$href = trim($href->value);
if (in_array('alternate', $rel) && array_key_exists($type, $feeds) && empty($feeds[$type])) {
// Save the first feed found of each type...
$feeds[$type] = $href;
}
}
}
}
// Return the highest-priority feed found
foreach ($feeds as $type => $feedUrl) {
if ($feedUrl) {
return $feedUrl;
}
}
throw new oEmbedHelper_DiscoveryException();
}
/**
* Actually do an oEmbed lookup to a particular API endpoint.
*
* @param string $api oEmbed API endpoint URL
* @param string $url target URL to look up info about
* @param array $params
* @return object
*/
static function getObjectFrom($api, $url, $params=array())
{
$params['url'] = $url;
$params['format'] = 'json';
$data = self::json($api, $params);
return self::normalize($data);
}
/**
* Normalize oEmbed format.
*
* @param object $orig
* @return object
*/
static function normalize($orig)
{
$data = clone($orig);
if (empty($data->type)) {
throw new Exception('Invalid oEmbed data: no type field.');
}
if ($data->type == 'image') {
// YFrog does this.
$data->type = 'photo';
}
if (isset($data->thumbnail_url)) {
if (!isset($data->thumbnail_width)) {
// !?!?!
$data->thumbnail_width = common_config('attachments', 'thumb_width');
$data->thumbnail_height = common_config('attachments', 'thumb_height');
}
}
return $data;
}
/**
* Using a local function for twitpic lookups, as oohembed's adapter
* doesn't return a valid result:
* http://code.google.com/p/oohembed/issues/detail?id=19
*
* This code fetches metadata from Twitpic's own API, and attempts
* to guess proper thumbnail size from the original's size.
*
* @todo respect maxwidth and maxheight params
*
* @param string $url
* @param array $params
* @return object
*/
static function twitPic($url, $params=array())
{
$matches = array();
if (preg_match('!twitpic\.com/(\w+)!', $url, $matches)) {
$id = $matches[1];
} else {
throw new Exception("Invalid twitpic URL");
}
// Grab metadata from twitpic's API...
// http://dev.twitpic.com/docs/2/media_show
$data = self::json('http://api.twitpic.com/2/media/show.json',
array('id' => $id));
$oembed = (object)array('type' => 'photo',
'url' => 'http://twitpic.com/show/full/' . $data->short_id,
'width' => $data->width,
'height' => $data->height);
if (!empty($data->message)) {
$oembed->title = $data->message;
}
// Thumbnail is cropped and scaled to 150x150 box:
// http://dev.twitpic.com/docs/thumbnails/
$thumbSize = 150;
$oembed->thumbnail_url = 'http://twitpic.com/show/thumb/' . $data->short_id;
$oembed->thumbnail_width = $thumbSize;
$oembed->thumbnail_height = $thumbSize;
return $oembed;
}
/**
* Fetch some URL and return JSON data.
*
* @param string $url
* @param array $params query-string params
* @return object
*/
static protected function json($url, $params=array())
{
$data = self::http($url, $params);
return json_decode($data);
}
/**
* Hit some web API and return data on success.
* @param string $url
* @param array $params
* @return string
*/
static protected function http($url, $params=array())
{
$client = HTTPClient::start();
if ($params) {
$query = http_build_query($params, null, '&');
if (strpos($url, '?') === false) {
$url .= '?' . $query;
} else {
$url .= '&' . $query;
}
}
$response = $client->get($url);
if ($response->isOk()) {
return $response->getBody();
} else {
throw new Exception('Bad HTTP response code: ' . $response->getStatus());
}
}
}
class oEmbedHelper_Exception extends Exception
{
public function __construct($message = "", $code = 0, $previous = null)
{
parent::__construct($message, $code);
}
}
class oEmbedHelper_BadHtmlException extends oEmbedHelper_Exception
{
function __construct($previous=null)
{
return parent::__construct('Bad HTML in discovery data.', 0, $previous);
}
}
class oEmbedHelper_DiscoveryException extends oEmbedHelper_Exception
{
function __construct($previous=null)
{
return parent::__construct('No oEmbed discovery data.', 0, $previous);
}
}
| agpl-3.0 |
eduNEXT/edx-platform | lms/templates/instructor/instructor_dashboard_2/data_download_2/certificates.html | 1344 | <%page args="section_data" expression_filter="h"/>
<%namespace name='static' file='/static_content.html'/>
<%!
from django.utils.translation import ugettext as _
from openedx.core.djangolib.markup import HTML, Text
%>
<section id="certificate" class="idash-section tab-data" aria-labelledby="header-cert">
<h6 class="mb-15 font-size-100" id="header-cert">
<strong>${_("Note")}: </strong>
Please select a certificate report type and then click the Download Report button.
</h6>
<select class="report-type selector">
<option value="viewCertificates" data-csv="false"
data-datatable="true"
data-endpoint="${ section_data['get_issued_certificates_url'] }">View certificates
</option>
<option value="downloadCertificates"
data-csv="true"
data-directdownload="true"
data-endpoint="${ section_data['get_issued_certificates_url'] }">Download csv of
certificates
</option>
</select>
<input type="button"
value="Download Report"
class="mb-20 download-report">
<div>
<p>${_("Click to list certificates that are issued for this course:")}</p>
</div>
</section>
| agpl-3.0 |
adrianbrink/tendereum | vendor/github.com/cosmos/tendereum/vendor/github.com/btcsuite/btcd/netsync/manager.go | 46884 | // Copyright (c) 2013-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package netsync
import (
"container/list"
"net"
"sync"
"sync/atomic"
"time"
"github.com/btcsuite/btcd/blockchain"
"github.com/btcsuite/btcd/chaincfg"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/database"
"github.com/btcsuite/btcd/mempool"
peerpkg "github.com/btcsuite/btcd/peer"
"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcutil"
)
const (
// minInFlightBlocks is the minimum number of blocks that should be
// in the request queue for headers-first mode before requesting
// more.
minInFlightBlocks = 10
// maxRejectedTxns is the maximum number of rejected transactions
// hashes to store in memory.
maxRejectedTxns = 1000
// maxRequestedBlocks is the maximum number of requested block
// hashes to store in memory.
maxRequestedBlocks = wire.MaxInvPerMsg
// maxRequestedTxns is the maximum number of requested transactions
// hashes to store in memory.
maxRequestedTxns = wire.MaxInvPerMsg
)
// zeroHash is the zero value hash (all zeros). It is defined as a convenience.
var zeroHash chainhash.Hash
// newPeerMsg signifies a newly connected peer to the block handler.
type newPeerMsg struct {
peer *peerpkg.Peer
}
// blockMsg packages a bitcoin block message and the peer it came from together
// so the block handler has access to that information.
type blockMsg struct {
block *btcutil.Block
peer *peerpkg.Peer
reply chan struct{}
}
// invMsg packages a bitcoin inv message and the peer it came from together
// so the block handler has access to that information.
type invMsg struct {
inv *wire.MsgInv
peer *peerpkg.Peer
}
// headersMsg packages a bitcoin headers message and the peer it came from
// together so the block handler has access to that information.
type headersMsg struct {
headers *wire.MsgHeaders
peer *peerpkg.Peer
}
// donePeerMsg signifies a newly disconnected peer to the block handler.
type donePeerMsg struct {
peer *peerpkg.Peer
}
// txMsg packages a bitcoin tx message and the peer it came from together
// so the block handler has access to that information.
type txMsg struct {
tx *btcutil.Tx
peer *peerpkg.Peer
reply chan struct{}
}
// getSyncPeerMsg is a message type to be sent across the message channel for
// retrieving the current sync peer.
type getSyncPeerMsg struct {
reply chan int32
}
// processBlockResponse is a response sent to the reply channel of a
// processBlockMsg.
type processBlockResponse struct {
isOrphan bool
err error
}
// processBlockMsg is a message type to be sent across the message channel
// for requesting that a block be processed. Note this call differs from blockMsg
// above in that blockMsg is intended for blocks that came from peers and have
// extra handling whereas this message essentially is just a concurrency-safe
// way to call ProcessBlock on the internal block chain instance.
type processBlockMsg struct {
block *btcutil.Block
flags blockchain.BehaviorFlags
reply chan processBlockResponse
}
// isCurrentMsg is a message type to be sent across the message channel for
// requesting whether or not the sync manager believes it is synced with the
// currently connected peers.
type isCurrentMsg struct {
reply chan bool
}
// pauseMsg is a message type to be sent across the message channel for
// pausing the sync manager. This effectively provides the caller with
// exclusive access over the manager until a receive is performed on the
// unpause channel.
type pauseMsg struct {
unpause <-chan struct{}
}
// headerNode is used as a node in a list of headers that are linked together
// between checkpoints.
type headerNode struct {
height int32
hash *chainhash.Hash
}
// peerSyncState stores additional information that the SyncManager tracks
// about a peer.
type peerSyncState struct {
syncCandidate bool
requestQueue []*wire.InvVect
requestedTxns map[chainhash.Hash]struct{}
requestedBlocks map[chainhash.Hash]struct{}
}
// SyncManager is used to communicate block-related messages with peers. The
// SyncManager is started by executing Start() in a goroutine. Once started,
// it selects peers to sync from and starts the initial block download. Once the
// chain is in sync, the SyncManager handles incoming block and header
// notifications and relays announcements of new blocks to peers.
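// Lifecycle sketch (hedged example; New and the Config fields are assumed
// from this package's API, not shown in this excerpt):
//
//	sm, err := netsync.New(&netsync.Config{ /* PeerNotifier, Chain, TxMemPool, ... */ })
//	if err != nil { /* handle error */ }
//	sm.Start()
//	defer sm.Stop()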
type SyncManager struct {
peerNotifier PeerNotifier
started int32
shutdown int32
chain *blockchain.BlockChain
txMemPool *mempool.TxPool
chainParams *chaincfg.Params
progressLogger *blockProgressLogger
msgChan chan interface{}
wg sync.WaitGroup
quit chan struct{}
// These fields should only be accessed from the blockHandler thread
rejectedTxns map[chainhash.Hash]struct{}
requestedTxns map[chainhash.Hash]struct{}
requestedBlocks map[chainhash.Hash]struct{}
syncPeer *peerpkg.Peer
peerStates map[*peerpkg.Peer]*peerSyncState
// The following fields are used for headers-first mode.
headersFirstMode bool
headerList *list.List
startHeader *list.Element
nextCheckpoint *chaincfg.Checkpoint
}
// resetHeaderState sets the headers-first mode state to values appropriate for
// syncing from a new peer.
func (sm *SyncManager) resetHeaderState(newestHash *chainhash.Hash, newestHeight int32) {
sm.headersFirstMode = false
sm.headerList.Init()
sm.startHeader = nil
// When there is a next checkpoint, add an entry for the latest known
// block into the header pool. This allows the next downloaded header
// to prove it links to the chain properly.
if sm.nextCheckpoint != nil {
node := headerNode{height: newestHeight, hash: newestHash}
sm.headerList.PushBack(&node)
}
}
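// Worked example (hypothetical heights): if the best known block is at height
// 1000 and a checkpoint lies ahead, the list above is seeded as
//
//	[{height: 1000, hash: bestHash}]
//
// so the first header received must name bestHash as its PrevBlock before it
// is accepted (see handleHeadersMsg below).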
// findNextHeaderCheckpoint returns the next checkpoint after the passed height.
// It returns nil when there is not one, either because the height is already
// later than the final checkpoint or for some other reason such as disabled
// checkpoints.
func (sm *SyncManager) findNextHeaderCheckpoint(height int32) *chaincfg.Checkpoint {
checkpoints := sm.chain.Checkpoints()
if len(checkpoints) == 0 {
return nil
}
// There is no next checkpoint if the height is already after the final
// checkpoint.
finalCheckpoint := &checkpoints[len(checkpoints)-1]
if height >= finalCheckpoint.Height {
return nil
}
// Find the next checkpoint.
nextCheckpoint := finalCheckpoint
for i := len(checkpoints) - 2; i >= 0; i-- {
if height >= checkpoints[i].Height {
break
}
nextCheckpoint = &checkpoints[i]
}
return nextCheckpoint
}
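// Worked example (illustrative heights): with checkpoints at heights 11111,
// 33333, and 99999, a passed height of 20000 starts the scan at the final
// checkpoint (99999), moves down to 33333, and stops at 11111 because
// 20000 >= 11111 -- returning the checkpoint at height 33333, the first one
// strictly above the passed height.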
// startSync will choose the best peer among the available candidate peers to
// download/sync the blockchain from. When syncing is already running, it
// simply returns. It also examines the candidates for any which are no longer
// candidates and removes them as needed.
func (sm *SyncManager) startSync() {
// Return now if we're already syncing.
if sm.syncPeer != nil {
return
}
// Once the segwit soft-fork package has activated, we only
// want to sync from peers which are witness enabled to ensure
// that we fully validate all blockchain data.
segwitActive, err := sm.chain.IsDeploymentActive(chaincfg.DeploymentSegwit)
if err != nil {
log.Errorf("Unable to query for segwit soft-fork state: %v", err)
return
}
best := sm.chain.BestSnapshot()
var bestPeer *peerpkg.Peer
for peer, state := range sm.peerStates {
if !state.syncCandidate {
continue
}
if segwitActive && !peer.IsWitnessEnabled() {
log.Debugf("peer %v not witness enabled, skipping", peer)
continue
}
// Remove sync candidate peers that are no longer candidates due
// to passing their latest known block. NOTE: The < is
// intentional as opposed to <=. While technically the peer
// doesn't have a later block when it's equal, it will likely
// have one soon so it is a reasonable choice. It also allows
// the case where both are at 0 such as during regression test.
if peer.LastBlock() < best.Height {
state.syncCandidate = false
continue
}
// TODO(davec): Use a better algorithm to choose the best peer.
// For now, just pick the first available candidate.
bestPeer = peer
}
// Start syncing from the best peer if one was selected.
if bestPeer != nil {
// Clear the requestedBlocks if the sync peer changes, otherwise
// we may ignore blocks we need that the last sync peer failed
// to send.
sm.requestedBlocks = make(map[chainhash.Hash]struct{})
locator, err := sm.chain.LatestBlockLocator()
if err != nil {
log.Errorf("Failed to get block locator for the "+
"latest block: %v", err)
return
}
log.Infof("Syncing to block height %d from peer %v",
bestPeer.LastBlock(), bestPeer.Addr())
// When the current height is less than a known checkpoint we
// can use block headers to learn about which blocks comprise
// the chain up to the checkpoint and perform less validation
// for them. This is possible since each header contains the
// hash of the previous header and a merkle root. Therefore if
// we validate all of the received headers link together
// properly and the checkpoint hashes match, we can be sure the
// hashes for the blocks in between are accurate. Further, once
// the full blocks are downloaded, the merkle root is computed
// and compared against the value in the header which proves the
// full block hasn't been tampered with.
//
// Once we have passed the final checkpoint, or checkpoints are
// disabled, use standard inv messages to learn about the blocks
// and fully validate them. Finally, regression test mode does
// not support the headers-first approach so do normal block
// downloads when in regression test mode.
if sm.nextCheckpoint != nil &&
best.Height < sm.nextCheckpoint.Height &&
sm.chainParams != &chaincfg.RegressionNetParams {
bestPeer.PushGetHeadersMsg(locator, sm.nextCheckpoint.Hash)
sm.headersFirstMode = true
log.Infof("Downloading headers for blocks %d to "+
"%d from peer %s", best.Height+1,
sm.nextCheckpoint.Height, bestPeer.Addr())
} else {
bestPeer.PushGetBlocksMsg(locator, &zeroHash)
}
sm.syncPeer = bestPeer
} else {
log.Warnf("No sync peer candidates available")
}
}
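// useHeadersFirst is a hypothetical helper (not part of the original API)
// that distills the decision startSync makes above: headers-first sync is
// only used when a checkpoint still lies ahead of the best height and the
// chain is not regression test.
func useHeadersFirst(next *chaincfg.Checkpoint, bestHeight int32,
params *chaincfg.Params) bool {
return next != nil && bestHeight < next.Height &&
params != &chaincfg.RegressionNetParams
}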
// isSyncCandidate returns whether or not the peer is a candidate to consider
// syncing from.
func (sm *SyncManager) isSyncCandidate(peer *peerpkg.Peer) bool {
// Typically a peer is not a candidate for sync if it's not a full node;
// however, regression test is special in that the regression tool is
// not a full node and still needs to be considered a sync candidate.
if sm.chainParams == &chaincfg.RegressionNetParams {
// The peer is not a candidate if it's not coming from localhost
// or the hostname can't be determined for some reason.
host, _, err := net.SplitHostPort(peer.Addr())
if err != nil {
return false
}
if host != "127.0.0.1" && host != "localhost" {
return false
}
} else {
// The peer is not a candidate for sync if it's not a full
// node. Additionally, if the segwit soft-fork package has
// activated, then the peer must also be upgraded.
segwitActive, err := sm.chain.IsDeploymentActive(chaincfg.DeploymentSegwit)
if err != nil {
log.Errorf("Unable to query for segwit "+
"soft-fork state: %v", err)
}
nodeServices := peer.Services()
if nodeServices&wire.SFNodeNetwork != wire.SFNodeNetwork ||
(segwitActive && !peer.IsWitnessEnabled()) {
return false
}
}
// Candidate if all checks passed.
return true
}
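// hasServices is a minimal sketch of the bitmask test used above; a service
// flag only counts as advertised when every one of its bits is set.
func hasServices(advertised, required wire.ServiceFlag) bool {
return advertised&required == required
}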
// handleNewPeerMsg deals with new peers that have signalled they may
// be considered as a sync peer (they have already successfully negotiated). It
// also starts syncing if needed. It is invoked from the syncHandler goroutine.
func (sm *SyncManager) handleNewPeerMsg(peer *peerpkg.Peer) {
// Ignore if in the process of shutting down.
if atomic.LoadInt32(&sm.shutdown) != 0 {
return
}
log.Infof("New valid peer %s (%s)", peer, peer.UserAgent())
// Initialize the peer state
isSyncCandidate := sm.isSyncCandidate(peer)
sm.peerStates[peer] = &peerSyncState{
syncCandidate: isSyncCandidate,
requestedTxns: make(map[chainhash.Hash]struct{}),
requestedBlocks: make(map[chainhash.Hash]struct{}),
}
// Start syncing by choosing the best candidate if needed.
if isSyncCandidate && sm.syncPeer == nil {
sm.startSync()
}
}
// handleDonePeerMsg deals with peers that have signalled they are done. It
// removes the peer as a candidate for syncing and in the case where it was
// the current sync peer, attempts to select a new best peer to sync from. It
// is invoked from the syncHandler goroutine.
func (sm *SyncManager) handleDonePeerMsg(peer *peerpkg.Peer) {
state, exists := sm.peerStates[peer]
if !exists {
log.Warnf("Received done peer message for unknown peer %s", peer)
return
}
// Remove the peer from the list of candidate peers.
delete(sm.peerStates, peer)
log.Infof("Lost peer %s", peer)
// Remove requested transactions from the global map so that they will
// be fetched from elsewhere next time we get an inv.
for txHash := range state.requestedTxns {
delete(sm.requestedTxns, txHash)
}
// Remove requested blocks from the global map so that they will be
// fetched from elsewhere next time we get an inv.
// TODO: we could possibly here check which peers have these blocks
// and request them now to speed things up a little.
for blockHash := range state.requestedBlocks {
delete(sm.requestedBlocks, blockHash)
}
// Attempt to find a new peer to sync from if the quitting peer is the
// sync peer. Also, reset the headers-first state if in headers-first
// mode so the header state can be rebuilt when a new sync peer is found.
if sm.syncPeer == peer {
sm.syncPeer = nil
if sm.headersFirstMode {
best := sm.chain.BestSnapshot()
sm.resetHeaderState(&best.Hash, best.Height)
}
sm.startSync()
}
}
// handleTxMsg handles transaction messages from all peers.
func (sm *SyncManager) handleTxMsg(tmsg *txMsg) {
peer := tmsg.peer
state, exists := sm.peerStates[peer]
if !exists {
log.Warnf("Received tx message from unknown peer %s", peer)
return
}
// NOTE: BitcoinJ, and possibly other wallets, don't follow the spec of
// sending an inventory message and allowing the remote peer to decide
// whether or not they want to request the transaction via a getdata
// message. Unfortunately, the reference implementation permits
// unrequested data, so it has allowed wallets that don't follow the
// spec to proliferate. While this is not ideal, there is no check here
// to disconnect peers for sending unsolicited transactions to provide
// interoperability.
txHash := tmsg.tx.Hash()
// Ignore transactions that we have already rejected. Do not
// send a reject message here because if the transaction was already
// rejected, the transaction was unsolicited.
if _, exists = sm.rejectedTxns[*txHash]; exists {
log.Debugf("Ignoring unsolicited previously rejected "+
"transaction %v from %s", txHash, peer)
return
}
// Process the transaction to include validation, insertion in the
// memory pool, orphan handling, etc.
acceptedTxs, err := sm.txMemPool.ProcessTransaction(tmsg.tx,
true, true, mempool.Tag(peer.ID()))
// Remove transaction from request maps. Either the mempool/chain
// already knows about it and as such we shouldn't have any more
// instances of trying to fetch it, or we failed to insert and thus
// we'll retry next time we get an inv.
delete(state.requestedTxns, *txHash)
delete(sm.requestedTxns, *txHash)
if err != nil {
// Do not request this transaction again until a new block
// has been processed.
sm.rejectedTxns[*txHash] = struct{}{}
sm.limitMap(sm.rejectedTxns, maxRejectedTxns)
// When the error is a rule error, it means the transaction was
// simply rejected as opposed to something actually going wrong,
// so log it as such. Otherwise, something really did go wrong,
// so log it as an actual error.
if _, ok := err.(mempool.RuleError); ok {
log.Debugf("Rejected transaction %v from %s: %v",
txHash, peer, err)
} else {
log.Errorf("Failed to process transaction %v: %v",
txHash, err)
}
// Convert the error into an appropriate reject message and
// send it.
code, reason := mempool.ErrToRejectErr(err)
peer.PushRejectMsg(wire.CmdTx, code, reason, txHash, false)
return
}
sm.peerNotifier.AnnounceNewTransactions(acceptedTxs)
}
// current returns true if we believe we are synced with our peers, false if we
// still have blocks to check.
func (sm *SyncManager) current() bool {
if !sm.chain.IsCurrent() {
return false
}
// If the chain thinks we are current and we have no syncPeer it
// is probably right.
if sm.syncPeer == nil {
return true
}
// No matter what chain thinks, if we are below the block we are syncing
// to we are not current.
if sm.chain.BestSnapshot().Height < sm.syncPeer.LastBlock() {
return false
}
return true
}
// handleBlockMsg handles block messages from all peers.
func (sm *SyncManager) handleBlockMsg(bmsg *blockMsg) {
peer := bmsg.peer
state, exists := sm.peerStates[peer]
if !exists {
log.Warnf("Received block message from unknown peer %s", peer)
return
}
// If we didn't ask for this block then the peer is misbehaving.
blockHash := bmsg.block.Hash()
if _, exists = state.requestedBlocks[*blockHash]; !exists {
// The regression test intentionally sends some blocks twice
// to test duplicate block insertion fails. Don't disconnect
// the peer or ignore the block when we're in regression test
// mode in this case so the chain code is actually fed the
// duplicate blocks.
if sm.chainParams != &chaincfg.RegressionNetParams {
log.Warnf("Got unrequested block %v from %s -- "+
"disconnecting", blockHash, peer.Addr())
peer.Disconnect()
return
}
}
// When in headers-first mode, if the block matches the hash of the
// first header in the list of headers that are being fetched, it's
// eligible for less validation since the headers have already been
// verified to link together and are valid up to the next checkpoint.
// Also, remove the list entry for all blocks except the checkpoint
// since it is needed to verify the next round of headers links
// properly.
isCheckpointBlock := false
behaviorFlags := blockchain.BFNone
if sm.headersFirstMode {
firstNodeEl := sm.headerList.Front()
if firstNodeEl != nil {
firstNode := firstNodeEl.Value.(*headerNode)
if blockHash.IsEqual(firstNode.hash) {
behaviorFlags |= blockchain.BFFastAdd
if firstNode.hash.IsEqual(sm.nextCheckpoint.Hash) {
isCheckpointBlock = true
} else {
sm.headerList.Remove(firstNodeEl)
}
}
}
}
// Remove block from request maps. Either chain will know about it and
// so we shouldn't have any more instances of trying to fetch it, or we
// will fail the insert and thus we'll retry next time we get an inv.
delete(state.requestedBlocks, *blockHash)
delete(sm.requestedBlocks, *blockHash)
// Process the block to include validation, best chain selection, orphan
// handling, etc.
_, isOrphan, err := sm.chain.ProcessBlock(bmsg.block, behaviorFlags)
if err != nil {
// When the error is a rule error, it means the block was simply
// rejected as opposed to something actually going wrong, so log
// it as such. Otherwise, something really did go wrong, so log
// it as an actual error.
if _, ok := err.(blockchain.RuleError); ok {
log.Infof("Rejected block %v from %s: %v", blockHash,
peer, err)
} else {
log.Errorf("Failed to process block %v: %v",
blockHash, err)
}
if dbErr, ok := err.(database.Error); ok && dbErr.ErrorCode ==
database.ErrCorruption {
panic(dbErr)
}
// Convert the error into an appropriate reject message and
// send it.
code, reason := mempool.ErrToRejectErr(err)
peer.PushRejectMsg(wire.CmdBlock, code, reason, blockHash, false)
return
}
// Meta-data about the new block this peer is reporting. We use this
// below to update this peer's latest block height and the heights of
// other peers based on their last announced block hash. This allows us
// to dynamically update the block heights of peers, avoiding stale
// heights when looking for a new sync peer. Upon acceptance of a block
// or recognition of an orphan, we also use this information to update
// the block heights of other peers whose invs may have been ignored
// if we are actively syncing while the chain is not yet current or
// who may have lost the block announcement race.
var heightUpdate int32
var blkHashUpdate *chainhash.Hash
// Request the parents for the orphan block from the peer that sent it.
if isOrphan {
// We've just received an orphan block from a peer. In order
// to update the height of the peer, we try to extract the
// block height from the scriptSig of the coinbase transaction.
// Extraction is only attempted if the block's version is
// high enough (ver 2+).
header := &bmsg.block.MsgBlock().Header
if blockchain.ShouldHaveSerializedBlockHeight(header) {
coinbaseTx := bmsg.block.Transactions()[0]
cbHeight, err := blockchain.ExtractCoinbaseHeight(coinbaseTx)
if err != nil {
log.Warnf("Unable to extract height from "+
"coinbase tx: %v", err)
} else {
log.Debugf("Extracted height of %v from "+
"orphan block", cbHeight)
heightUpdate = cbHeight
blkHashUpdate = blockHash
}
}
orphanRoot := sm.chain.GetOrphanRoot(blockHash)
locator, err := sm.chain.LatestBlockLocator()
if err != nil {
log.Warnf("Failed to get block locator for the "+
"latest block: %v", err)
} else {
peer.PushGetBlocksMsg(locator, orphanRoot)
}
} else {
// When the block is not an orphan, log information about it and
// update the chain state.
sm.progressLogger.LogBlockHeight(bmsg.block)
// Update this peer's latest block height, for future
// potential sync node candidacy.
best := sm.chain.BestSnapshot()
heightUpdate = best.Height
blkHashUpdate = &best.Hash
// Clear the rejected transactions.
sm.rejectedTxns = make(map[chainhash.Hash]struct{})
}
// Update the block height for this peer. But only send a message to
// the server for updating peer heights if this is an orphan or our
// chain is "current". This avoids sending a spammy amount of messages
// if we're syncing the chain from scratch.
if blkHashUpdate != nil && heightUpdate != 0 {
peer.UpdateLastBlockHeight(heightUpdate)
if isOrphan || sm.current() {
go sm.peerNotifier.UpdatePeerHeights(blkHashUpdate, heightUpdate,
peer)
}
}
// Nothing more to do if we aren't in headers-first mode.
if !sm.headersFirstMode {
return
}
// This is headers-first mode, so if the block is not a checkpoint
// request more blocks using the header list when the request queue is
// getting short.
if !isCheckpointBlock {
if sm.startHeader != nil &&
len(state.requestedBlocks) < minInFlightBlocks {
sm.fetchHeaderBlocks()
}
return
}
// This is headers-first mode and the block is a checkpoint. When
// there is a next checkpoint, get the next round of headers by asking
// for headers starting from the block after this one up to the next
// checkpoint.
prevHeight := sm.nextCheckpoint.Height
prevHash := sm.nextCheckpoint.Hash
sm.nextCheckpoint = sm.findNextHeaderCheckpoint(prevHeight)
if sm.nextCheckpoint != nil {
locator := blockchain.BlockLocator([]*chainhash.Hash{prevHash})
err := peer.PushGetHeadersMsg(locator, sm.nextCheckpoint.Hash)
if err != nil {
log.Warnf("Failed to send getheaders message to "+
"peer %s: %v", peer.Addr(), err)
return
}
log.Infof("Downloading headers for blocks %d to %d from "+
"peer %s", prevHeight+1, sm.nextCheckpoint.Height,
sm.syncPeer.Addr())
return
}
// This is headers-first mode, the block is a checkpoint, and there are
// no more checkpoints, so switch to normal mode by requesting blocks
// from the block after this one up to the end of the chain (zero hash).
sm.headersFirstMode = false
sm.headerList.Init()
log.Infof("Reached the final checkpoint -- switching to normal mode")
locator := blockchain.BlockLocator([]*chainhash.Hash{blockHash})
err = peer.PushGetBlocksMsg(locator, &zeroHash)
if err != nil {
log.Warnf("Failed to send getblocks message to peer %s: %v",
peer.Addr(), err)
return
}
}
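// checkpointLocator is a hypothetical helper naming the pattern used above
// and in handleHeadersMsg: once a specific hash is known, the follow-up
// getheaders/getblocks request is anchored at that single hash rather than
// a full locator built from the chain.
func checkpointLocator(hash *chainhash.Hash) blockchain.BlockLocator {
return blockchain.BlockLocator([]*chainhash.Hash{hash})
}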
// fetchHeaderBlocks creates and sends a request to the syncPeer for the next
// list of blocks to be downloaded based on the current list of headers.
func (sm *SyncManager) fetchHeaderBlocks() {
// Nothing to do if there is no start header.
if sm.startHeader == nil {
log.Warnf("fetchHeaderBlocks called with no start header")
return
}
// Build up a getdata request for the list of blocks the headers
// describe. The size hint will be limited to wire.MaxInvPerMsg by
// the function, so no need to double check it here.
gdmsg := wire.NewMsgGetDataSizeHint(uint(sm.headerList.Len()))
numRequested := 0
for e := sm.startHeader; e != nil; e = e.Next() {
node, ok := e.Value.(*headerNode)
if !ok {
log.Warn("Header list node type is not a headerNode")
continue
}
iv := wire.NewInvVect(wire.InvTypeBlock, node.hash)
haveInv, err := sm.haveInventory(iv)
if err != nil {
log.Warnf("Unexpected failure when checking for "+
"existing inventory during header block "+
"fetch: %v", err)
}
if !haveInv {
syncPeerState := sm.peerStates[sm.syncPeer]
sm.requestedBlocks[*node.hash] = struct{}{}
syncPeerState.requestedBlocks[*node.hash] = struct{}{}
// If we're fetching from a witness enabled peer
// post-fork, then ensure that we receive all the
// witness data in the blocks.
if sm.syncPeer.IsWitnessEnabled() {
iv.Type = wire.InvTypeWitnessBlock
}
gdmsg.AddInvVect(iv)
numRequested++
}
sm.startHeader = e.Next()
if numRequested >= wire.MaxInvPerMsg {
break
}
}
if len(gdmsg.InvList) > 0 {
sm.syncPeer.QueueMessage(gdmsg, nil)
}
}
// handleHeadersMsg handles block header messages from all peers. Headers are
// requested when performing a headers-first sync.
func (sm *SyncManager) handleHeadersMsg(hmsg *headersMsg) {
peer := hmsg.peer
_, exists := sm.peerStates[peer]
if !exists {
log.Warnf("Received headers message from unknown peer %s", peer)
return
}
// The remote peer is misbehaving if we didn't request headers.
msg := hmsg.headers
numHeaders := len(msg.Headers)
if !sm.headersFirstMode {
log.Warnf("Got %d unrequested headers from %s -- "+
"disconnecting", numHeaders, peer.Addr())
peer.Disconnect()
return
}
// Nothing to do for an empty headers message.
if numHeaders == 0 {
return
}
// Process all of the received headers ensuring each one connects to the
// previous and that checkpoints match.
receivedCheckpoint := false
var finalHash *chainhash.Hash
for _, blockHeader := range msg.Headers {
blockHash := blockHeader.BlockHash()
finalHash = &blockHash
// Ensure there is a previous header to compare against.
prevNodeEl := sm.headerList.Back()
if prevNodeEl == nil {
log.Warnf("Header list does not contain a previous" +
"element as expected -- disconnecting peer")
peer.Disconnect()
return
}
// Ensure the header properly connects to the previous one and
// add it to the list of headers.
node := headerNode{hash: &blockHash}
prevNode := prevNodeEl.Value.(*headerNode)
if prevNode.hash.IsEqual(&blockHeader.PrevBlock) {
node.height = prevNode.height + 1
e := sm.headerList.PushBack(&node)
if sm.startHeader == nil {
sm.startHeader = e
}
} else {
log.Warnf("Received block header that does not "+
"properly connect to the chain from peer %s "+
"-- disconnecting", peer.Addr())
peer.Disconnect()
return
}
// Verify the header at the next checkpoint height matches.
if node.height == sm.nextCheckpoint.Height {
if node.hash.IsEqual(sm.nextCheckpoint.Hash) {
receivedCheckpoint = true
log.Infof("Verified downloaded block "+
"header against checkpoint at height "+
"%d/hash %s", node.height, node.hash)
} else {
log.Warnf("Block header at height %d/hash "+
"%s from peer %s does NOT match "+
"expected checkpoint hash of %s -- "+
"disconnecting", node.height,
node.hash, peer.Addr(),
sm.nextCheckpoint.Hash)
peer.Disconnect()
return
}
break
}
}
// When this header is a checkpoint, switch to fetching the blocks for
// all of the headers since the last checkpoint.
if receivedCheckpoint {
// Since the first entry of the list is always the final block
// that is already in the database and is only used to ensure
// the next header links properly, it must be removed before
// fetching the blocks.
sm.headerList.Remove(sm.headerList.Front())
log.Infof("Received %v block headers: Fetching blocks",
sm.headerList.Len())
sm.progressLogger.SetLastLogTime(time.Now())
sm.fetchHeaderBlocks()
return
}
// This header is not a checkpoint, so request the next batch of
// headers starting from the latest known header and ending with the
// next checkpoint.
locator := blockchain.BlockLocator([]*chainhash.Hash{finalHash})
err := peer.PushGetHeadersMsg(locator, sm.nextCheckpoint.Hash)
if err != nil {
log.Warnf("Failed to send getheaders message to "+
"peer %s: %v", peer.Addr(), err)
return
}
}
// haveInventory returns whether or not the inventory represented by the passed
// inventory vector is known. This includes checking all of the various places
// inventory can be when it is in different states such as blocks that are part
// of the main chain, on a side chain, in the orphan pool, and transactions that
// are in the memory pool (either the main pool or orphan pool).
func (sm *SyncManager) haveInventory(invVect *wire.InvVect) (bool, error) {
switch invVect.Type {
case wire.InvTypeWitnessBlock:
fallthrough
case wire.InvTypeBlock:
// Ask chain if the block is known to it in any form (main
// chain, side chain, or orphan).
return sm.chain.HaveBlock(&invVect.Hash)
case wire.InvTypeWitnessTx:
fallthrough
case wire.InvTypeTx:
// Ask the transaction memory pool if the transaction is known
// to it in any form (main pool or orphan).
if sm.txMemPool.HaveTransaction(&invVect.Hash) {
return true, nil
}
// Check if the transaction exists from the point of view of the
// end of the main chain.
entry, err := sm.chain.FetchUtxoEntry(&invVect.Hash)
if err != nil {
return false, err
}
return entry != nil && !entry.IsFullySpent(), nil
}
// The requested inventory is an unsupported type, so just claim
// it is known to avoid requesting it.
return true, nil
}
// handleInvMsg handles inv messages from all peers.
// We examine the inventory advertised by the remote peer and act accordingly.
func (sm *SyncManager) handleInvMsg(imsg *invMsg) {
peer := imsg.peer
state, exists := sm.peerStates[peer]
if !exists {
log.Warnf("Received inv message from unknown peer %s", peer)
return
}
// Attempt to find the final block in the inventory list. There may
// not be one.
lastBlock := -1
invVects := imsg.inv.InvList
for i := len(invVects) - 1; i >= 0; i-- {
if invVects[i].Type == wire.InvTypeBlock {
lastBlock = i
break
}
}
// If this inv contains a block announcement, and this isn't coming from
// our current sync peer or we're current, then update the last
// announced block for this peer. We'll use this information later to
// update the heights of peers based on blocks we've accepted that they
// previously announced.
if lastBlock != -1 && (peer != sm.syncPeer || sm.current()) {
peer.UpdateLastAnnouncedBlock(&invVects[lastBlock].Hash)
}
// Ignore invs from peers that aren't the sync peer if we are not current.
// Helps prevent fetching a mass of orphans.
if peer != sm.syncPeer && !sm.current() {
return
}
// If our chain is current and a peer announces a block we already
// know of, then update their current block height.
if lastBlock != -1 && sm.current() {
blkHeight, err := sm.chain.BlockHeightByHash(&invVects[lastBlock].Hash)
if err == nil {
peer.UpdateLastBlockHeight(blkHeight)
}
}
// Request the advertised inventory if we don't already have it. Also,
// request parent blocks of orphans if we receive one we already have.
// Finally, attempt to detect potential stalls due to long side chains
// we already have and request more blocks to prevent them.
for i, iv := range invVects {
// Ignore unsupported inventory types.
switch iv.Type {
case wire.InvTypeBlock:
case wire.InvTypeTx:
case wire.InvTypeWitnessBlock:
case wire.InvTypeWitnessTx:
default:
continue
}
// Add the inventory to the cache of known inventory
// for the peer.
peer.AddKnownInventory(iv)
// Ignore inventory when we're in headers-first mode.
if sm.headersFirstMode {
continue
}
// Request the inventory if we don't already have it.
haveInv, err := sm.haveInventory(iv)
if err != nil {
log.Warnf("Unexpected failure when checking for "+
"existing inventory during inv message "+
"processing: %v", err)
continue
}
if !haveInv {
if iv.Type == wire.InvTypeTx {
// Skip the transaction if it has already been
// rejected.
if _, exists := sm.rejectedTxns[iv.Hash]; exists {
continue
}
}
// Ignore block invs from non-witness enabled
// peers, as after segwit activation we only want to
// download from peers that can provide us full witness
// data for blocks.
if !peer.IsWitnessEnabled() && iv.Type == wire.InvTypeBlock {
continue
}
// Add it to the request queue.
state.requestQueue = append(state.requestQueue, iv)
continue
}
if iv.Type == wire.InvTypeBlock {
// The block is an orphan block that we already have.
// When the existing orphan was processed, it requested
// the missing parent blocks. When this scenario
// happens, it means there were more blocks missing
// than are allowed into a single inventory message. As
// a result, once this peer requested the final
// advertised block, the remote peer noticed and is now
// resending the orphan block as an available block
// to signal there are more missing blocks that need to
// be requested.
if sm.chain.IsKnownOrphan(&iv.Hash) {
// Request blocks starting at the latest known
// up to the root of the orphan that just came
// in.
orphanRoot := sm.chain.GetOrphanRoot(&iv.Hash)
locator, err := sm.chain.LatestBlockLocator()
if err != nil {
log.Errorf("PEER: Failed to get block "+
"locator for the latest block: "+
"%v", err)
continue
}
peer.PushGetBlocksMsg(locator, orphanRoot)
continue
}
// We already have the final block advertised by this
// inventory message, so force a request for more. This
// should only happen if we're on a really long side
// chain.
if i == lastBlock {
// Request blocks after this one up to the
// final one the remote peer knows about (zero
// stop hash).
locator := sm.chain.BlockLocatorFromHash(&iv.Hash)
peer.PushGetBlocksMsg(locator, &zeroHash)
}
}
}
// Request as much as possible at once. Anything that won't fit into
// the request will be requested on the next inv message.
numRequested := 0
gdmsg := wire.NewMsgGetData()
requestQueue := state.requestQueue
for len(requestQueue) != 0 {
iv := requestQueue[0]
requestQueue[0] = nil
requestQueue = requestQueue[1:]
switch iv.Type {
case wire.InvTypeWitnessBlock:
fallthrough
case wire.InvTypeBlock:
// Request the block if there is not already a pending
// request.
if _, exists := sm.requestedBlocks[iv.Hash]; !exists {
sm.requestedBlocks[iv.Hash] = struct{}{}
sm.limitMap(sm.requestedBlocks, maxRequestedBlocks)
state.requestedBlocks[iv.Hash] = struct{}{}
if peer.IsWitnessEnabled() {
iv.Type = wire.InvTypeWitnessBlock
}
gdmsg.AddInvVect(iv)
numRequested++
}
case wire.InvTypeWitnessTx:
fallthrough
case wire.InvTypeTx:
// Request the transaction if there is not already a
// pending request.
if _, exists := sm.requestedTxns[iv.Hash]; !exists {
sm.requestedTxns[iv.Hash] = struct{}{}
sm.limitMap(sm.requestedTxns, maxRequestedTxns)
state.requestedTxns[iv.Hash] = struct{}{}
// If the peer is capable, request the txn
// including all witness data.
if peer.IsWitnessEnabled() {
iv.Type = wire.InvTypeWitnessTx
}
gdmsg.AddInvVect(iv)
numRequested++
}
}
if numRequested >= wire.MaxInvPerMsg {
break
}
}
state.requestQueue = requestQueue
if len(gdmsg.InvList) > 0 {
peer.QueueMessage(gdmsg, nil)
}
}
// limitMap is a helper function for maps that require a maximum limit by
// evicting a random entry if adding a new value would cause it to
// overflow the maximum allowed.
func (sm *SyncManager) limitMap(m map[chainhash.Hash]struct{}, limit int) {
if len(m)+1 > limit {
// Remove a random entry from the map. For most compilers, Go's
// range statement iterates starting at a random item although
// that is not 100% guaranteed by the spec. The iteration order
// is not important here because an adversary would have to be
// able to pull off preimage attacks on the hashing function in
// order to target eviction of specific entries anyways.
for txHash := range m {
delete(m, txHash)
return
}
}
}
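// Callers pair limitMap with insertion, e.g. the pattern from handleTxMsg
// above:
//
//	sm.rejectedTxns[*txHash] = struct{}{}
//	sm.limitMap(sm.rejectedTxns, maxRejectedTxns)
//
// which keeps the rejected-transaction cache bounded without any ordering
// bookkeeping.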
// blockHandler is the main handler for the sync manager. It must be run as a
// goroutine. It processes block and inv messages in a separate goroutine
// from the peer handlers so the block (MsgBlock) messages are handled by a
// single thread without needing to lock memory data structures. This is
// important because the sync manager controls which blocks are needed and how
// the fetching should proceed.
func (sm *SyncManager) blockHandler() {
out:
for {
select {
case m := <-sm.msgChan:
switch msg := m.(type) {
case *newPeerMsg:
sm.handleNewPeerMsg(msg.peer)
case *txMsg:
sm.handleTxMsg(msg)
msg.reply <- struct{}{}
case *blockMsg:
sm.handleBlockMsg(msg)
msg.reply <- struct{}{}
case *invMsg:
sm.handleInvMsg(msg)
case *headersMsg:
sm.handleHeadersMsg(msg)
case *donePeerMsg:
sm.handleDonePeerMsg(msg.peer)
case getSyncPeerMsg:
var peerID int32
if sm.syncPeer != nil {
peerID = sm.syncPeer.ID()
}
msg.reply <- peerID
case processBlockMsg:
_, isOrphan, err := sm.chain.ProcessBlock(
msg.block, msg.flags)
if err != nil {
msg.reply <- processBlockResponse{
isOrphan: false,
err: err,
}
// Skip the success reply below; the caller only
// receives a single response per request.
continue
}
msg.reply <- processBlockResponse{
isOrphan: isOrphan,
err: nil,
}
case isCurrentMsg:
msg.reply <- sm.current()
case pauseMsg:
// Wait until the sender unpauses the manager.
<-msg.unpause
default:
log.Warnf("Invalid message type in block "+
"handler: %T", msg)
}
case <-sm.quit:
break out
}
}
sm.wg.Done()
log.Trace("Block handler done")
}
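// The exported accessors below (SyncPeerID, ProcessBlock, IsCurrent, Pause)
// all talk to the loop above with the same request/reply shape, e.g.:
//
//	reply := make(chan bool)
//	sm.msgChan <- isCurrentMsg{reply: reply}
//	current := <-reply
//
// which serializes all access to the handler state without explicit locks.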
// handleBlockchainNotification handles notifications from blockchain. It does
// things such as request orphan block parents and relay accepted blocks to
// connected peers.
func (sm *SyncManager) handleBlockchainNotification(notification *blockchain.Notification) {
switch notification.Type {
// A block has been accepted into the block chain. Relay it to other
// peers.
case blockchain.NTBlockAccepted:
// Don't relay if we are not current. Other peers that are
// current should already know about it.
if !sm.current() {
return
}
block, ok := notification.Data.(*btcutil.Block)
if !ok {
log.Warnf("Chain accepted notification is not a block.")
break
}
// Generate the inventory vector and relay it.
iv := wire.NewInvVect(wire.InvTypeBlock, block.Hash())
sm.peerNotifier.RelayInventory(iv, block.MsgBlock().Header)
// A block has been connected to the main block chain.
case blockchain.NTBlockConnected:
block, ok := notification.Data.(*btcutil.Block)
if !ok {
log.Warnf("Chain connected notification is not a block.")
break
}
// Remove all of the transactions (except the coinbase) in the
// connected block from the transaction pool. Secondly, remove any
// transactions which are now double spends as a result of these
// new transactions. Finally, remove any transaction that is
// no longer an orphan. Transactions which depend on a confirmed
// transaction are NOT removed recursively because they are still
// valid.
for _, tx := range block.Transactions()[1:] {
sm.txMemPool.RemoveTransaction(tx, false)
sm.txMemPool.RemoveDoubleSpends(tx)
sm.txMemPool.RemoveOrphan(tx)
sm.peerNotifier.TransactionConfirmed(tx)
acceptedTxs := sm.txMemPool.ProcessOrphans(tx)
sm.peerNotifier.AnnounceNewTransactions(acceptedTxs)
}
// A block has been disconnected from the main block chain.
case blockchain.NTBlockDisconnected:
block, ok := notification.Data.(*btcutil.Block)
if !ok {
log.Warnf("Chain disconnected notification is not a block.")
break
}
// Reinsert all of the transactions (except the coinbase) into
// the transaction pool.
for _, tx := range block.Transactions()[1:] {
_, _, err := sm.txMemPool.MaybeAcceptTransaction(tx,
false, false)
if err != nil {
// Remove the transaction and all transactions
// that depend on it if it wasn't accepted into
// the transaction pool.
sm.txMemPool.RemoveTransaction(tx, true)
}
}
}
}
// NewPeer informs the sync manager of a newly active peer.
func (sm *SyncManager) NewPeer(peer *peerpkg.Peer) {
// Ignore if we are shutting down.
if atomic.LoadInt32(&sm.shutdown) != 0 {
return
}
sm.msgChan <- &newPeerMsg{peer: peer}
}
// QueueTx adds the passed transaction message and peer to the block handling
// queue. Responds to the done channel argument after the tx message is
// processed.
func (sm *SyncManager) QueueTx(tx *btcutil.Tx, peer *peerpkg.Peer, done chan struct{}) {
// Don't accept more transactions if we're shutting down.
if atomic.LoadInt32(&sm.shutdown) != 0 {
done <- struct{}{}
return
}
sm.msgChan <- &txMsg{tx: tx, peer: peer, reply: done}
}
// QueueBlock adds the passed block message and peer to the block handling
// queue. Responds to the done channel argument after the block message is
// processed.
func (sm *SyncManager) QueueBlock(block *btcutil.Block, peer *peerpkg.Peer, done chan struct{}) {
// Don't accept more blocks if we're shutting down.
if atomic.LoadInt32(&sm.shutdown) != 0 {
done <- struct{}{}
return
}
sm.msgChan <- &blockMsg{block: block, peer: peer, reply: done}
}
// QueueInv adds the passed inv message and peer to the block handling queue.
func (sm *SyncManager) QueueInv(inv *wire.MsgInv, peer *peerpkg.Peer) {
// No channel handling here because peers do not need to block on inv
// messages.
if atomic.LoadInt32(&sm.shutdown) != 0 {
return
}
sm.msgChan <- &invMsg{inv: inv, peer: peer}
}
// QueueHeaders adds the passed headers message and peer to the block handling
// queue.
func (sm *SyncManager) QueueHeaders(headers *wire.MsgHeaders, peer *peerpkg.Peer) {
// No channel handling here because peers do not need to block on
// headers messages.
if atomic.LoadInt32(&sm.shutdown) != 0 {
return
}
sm.msgChan <- &headersMsg{headers: headers, peer: peer}
}
// DonePeer informs the sync manager that a peer has disconnected.
func (sm *SyncManager) DonePeer(peer *peerpkg.Peer) {
// Ignore if we are shutting down.
if atomic.LoadInt32(&sm.shutdown) != 0 {
return
}
sm.msgChan <- &donePeerMsg{peer: peer}
}
// Start begins the core block handler which processes block and inv messages.
func (sm *SyncManager) Start() {
// Already started?
if atomic.AddInt32(&sm.started, 1) != 1 {
return
}
log.Trace("Starting sync manager")
sm.wg.Add(1)
go sm.blockHandler()
}
// Stop gracefully shuts down the sync manager by stopping all asynchronous
// handlers and waiting for them to finish.
func (sm *SyncManager) Stop() error {
if atomic.AddInt32(&sm.shutdown, 1) != 1 {
log.Warnf("Sync manager is already in the process of " +
"shutting down")
return nil
}
log.Infof("Sync manager shutting down")
close(sm.quit)
sm.wg.Wait()
return nil
}
// SyncPeerID returns the ID of the current sync peer, or 0 if there is none.
func (sm *SyncManager) SyncPeerID() int32 {
reply := make(chan int32)
sm.msgChan <- getSyncPeerMsg{reply: reply}
return <-reply
}
// ProcessBlock makes use of ProcessBlock on an internal instance of a block
// chain.
func (sm *SyncManager) ProcessBlock(block *btcutil.Block, flags blockchain.BehaviorFlags) (bool, error) {
reply := make(chan processBlockResponse, 1)
sm.msgChan <- processBlockMsg{block: block, flags: flags, reply: reply}
response := <-reply
return response.isOrphan, response.err
}
// IsCurrent returns whether or not the sync manager believes it is synced with
// the connected peers.
func (sm *SyncManager) IsCurrent() bool {
reply := make(chan bool)
sm.msgChan <- isCurrentMsg{reply: reply}
return <-reply
}
// Pause pauses the sync manager until the returned channel is closed.
//
// Note that while paused, all peer and block processing is halted. The
// message sender should avoid pausing the sync manager for long durations.
func (sm *SyncManager) Pause() chan<- struct{} {
c := make(chan struct{})
sm.msgChan <- pauseMsg{c}
return c
}
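// A minimal (hypothetical) use of Pause:
//
//	unpause := sm.Pause()
//	// ... perform work the manager must not interleave with ...
//	close(unpause)
//
// Closing the channel satisfies the pending receive in blockHandler and
// resumes normal message processing.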
// New constructs a new SyncManager. Use Start to begin processing asynchronous
// block, tx, and inv updates.
func New(config *Config) (*SyncManager, error) {
sm := SyncManager{
peerNotifier: config.PeerNotifier,
chain: config.Chain,
txMemPool: config.TxMemPool,
chainParams: config.ChainParams,
rejectedTxns: make(map[chainhash.Hash]struct{}),
requestedTxns: make(map[chainhash.Hash]struct{}),
requestedBlocks: make(map[chainhash.Hash]struct{}),
peerStates: make(map[*peerpkg.Peer]*peerSyncState),
progressLogger: newBlockProgressLogger("Processed", log),
msgChan: make(chan interface{}, config.MaxPeers*3),
headerList: list.New(),
quit: make(chan struct{}),
}
best := sm.chain.BestSnapshot()
if !config.DisableCheckpoints {
// Initialize the next checkpoint based on the current height.
sm.nextCheckpoint = sm.findNextHeaderCheckpoint(best.Height)
if sm.nextCheckpoint != nil {
sm.resetHeaderState(&best.Hash, best.Height)
}
} else {
log.Info("Checkpoints are disabled")
}
sm.chain.Subscribe(sm.handleBlockchainNotification)
return &sm, nil
}
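// Hypothetical wiring into a server, with illustrative values; see Config
// for the authoritative field set:
//
//	syncMgr, err := New(&Config{
//		PeerNotifier: server,
//		Chain:        chain,
//		TxMemPool:    txPool,
//		ChainParams:  &chaincfg.MainNetParams,
//		MaxPeers:     125,
//	})
//	if err != nil {
//		return err
//	}
//	syncMgr.Start()
//	defer syncMgr.Stop()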
| agpl-3.0 |
yscdaxian/goweb | limesurvey/admin/scripts/kcfinder/core/autoload.php | 3384 | <?php
/** This file is part of KCFinder project
*
* @desc Autoload classes magic function
* @package KCFinder
* @version 2.21
* @author Pavel Tzonkov <[email protected]>
* @copyright 2010 KCFinder Project
* @license http://www.opensource.org/licenses/gpl-2.0.php GPLv2
* @license http://www.opensource.org/licenses/lgpl-2.1.php LGPLv2
* @link http://kcfinder.sunhater.com
*/
require_once(dirname(__FILE__).'/../../../../config-defaults.php');
require_once(dirname(__FILE__).'/../../../../common.php');
require_once(dirname(__FILE__).'/../../../admin_functions.php');
$usquery = "SELECT stg_value FROM ".db_table_name("settings_global")." where stg_name='SessionName'";
$usresult = db_execute_assoc($usquery,'',true);
if ($usresult)
{
$usrow = $usresult->FetchRow();
@session_name($usrow['stg_value']);
}
else
{
session_name("LimeSurveyAdmin");
}
session_set_cookie_params(0,$relativeurl.'/');
if (session_id() == "") @session_start();
$_SESSION['KCFINDER'] = array();
$sAllowedExtensions = implode(' ',array_map('trim',explode(',',$allowedresourcesuploads)));
$_SESSION['KCFINDER']['types']=array('files'=>$sAllowedExtensions,
'flash'=>$sAllowedExtensions,
'images'=>$sAllowedExtensions);
if ($demoModeOnly === false &&
isset($_SESSION['loginID']) &&
isset($_SESSION['FileManagerContext']))
{
// disable upload at survey creation time
// because we don't know the sid yet
if (preg_match('/^(create|edit):(question|group|answer)/',$_SESSION['FileManagerContext']) != 0 ||
preg_match('/^edit:survey/',$_SESSION['FileManagerContext']) !=0 ||
preg_match('/^edit:assessments/',$_SESSION['FileManagerContext']) !=0 ||
preg_match('/^edit:emailsettings/',$_SESSION['FileManagerContext']) != 0)
{
$contextarray=explode(':',$_SESSION['FileManagerContext'],3);
$surveyid=$contextarray[2];
if(bHasSurveyPermission($surveyid,'surveycontent','update'))
{
$_SESSION['KCFINDER']['disabled'] = false ;
$_SESSION['KCFINDER']['uploadURL'] = "{$relativeurl}/upload/surveys/{$surveyid}/" ;
$_SESSION['KCFINDER']['uploadDir'] = $uploaddir.'/surveys/'.$surveyid;
}
}
elseif (preg_match('/^edit:label/',$_SESSION['FileManagerContext']) != 0)
{
$contextarray=explode(':',$_SESSION['FileManagerContext'],3);
$labelid=$contextarray[2];
// check if the user has label management right and labelid defined
if ($_SESSION['USER_RIGHT_MANAGE_LABEL']==1 && isset($labelid) && $labelid != '')
{
$_SESSION['KCFINDER']['disabled'] = false ;
$_SESSION['KCFINDER']['uploadURL'] = "{$relativeurl}/upload/labels/{$labelid}/" ;
$_SESSION['KCFINDER']['uploadDir'] = "{$uploaddir}/labels/{$labelid}" ;
}
}
}
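// Illustration (hypothetical values): with $relativeurl = '/limesurvey',
// $uploaddir = '/var/www/limesurvey/upload' and a FileManagerContext of
// 'edit:survey:123', the survey branch above yields:
// $_SESSION['KCFINDER']['uploadURL'] = '/limesurvey/upload/surveys/123/'
// $_SESSION['KCFINDER']['uploadDir'] = '/var/www/limesurvey/upload/surveys/123'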
function __autoload($class) {
if ($class == "uploader")
require "core/uploader.php";
elseif ($class == "browser")
require "core/browser.php";
elseif (file_exists("core/types/$class.php"))
require "core/types/$class.php";
elseif (file_exists("lib/class_$class.php"))
require "lib/class_$class.php";
elseif (file_exists("lib/helper_$class.php"))
require "lib/helper_$class.php";
}
?> | agpl-3.0 |
Tejal011089/Medsyn2_app | accounts/doctype/sales_invoice/pos.js | 17987 | // Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
// License: GNU General Public License v3. See license.txt
erpnext.POS = Class.extend({
init: function(wrapper, frm) {
this.wrapper = wrapper;
this.frm = frm;
this.wrapper.html('<div class="container">\
<div class="row" style="margin: -9px 0px 10px -30px; border-bottom: 1px solid #c7c7c7;">\
<div class="party-area col-sm-3 col-xs-6"></div>\
<div class="barcode-area col-sm-3 col-xs-6"></div>\
<div class="search-area col-sm-3 col-xs-6"></div>\
<div class="item-group-area col-sm-3 col-xs-6"></div>\
</div>\
<div class="row">\
<div class="col-sm-6">\
<div class="pos-bill">\
<div class="item-cart">\
<table class="table table-condensed table-hover" id="cart" style="table-layout: fixed;">\
<thead>\
<tr>\
<th style="width: 40%">Item</th>\
<th style="width: 9%"></th>\
<th style="width: 17%; text-align: right;">Qty</th>\
<th style="width: 9%"></th>\
<th style="width: 25%; text-align: right;">Rate</th>\
</tr>\
</thead>\
<tbody>\
</tbody>\
</table>\
</div>\
<br>\
<div class="totals-area" style="margin-left: 40%;">\
<table class="table table-condensed">\
<tr>\
<td><b>Net Total</b></td>\
<td style="text-align: right;" class="net-total"></td>\
</tr>\
</table>\
<div class="tax-table" style="display: none;">\
<table class="table table-condensed">\
<thead>\
<tr>\
<th style="width: 60%">Taxes</th>\
<th style="width: 40%; text-align: right;"></th>\
</tr>\
</thead>\
<tbody>\
</tbody>\
</table>\
</div>\
<div class="grand-total-area">\
<table class="table table-condensed">\
<tr>\
<td style="vertical-align: middle;"><b>Grand Total</b></td>\
<td style="text-align: right; font-size: 200%; \
font-size: bold;" class="grand-total"></td>\
</tr>\
</table>\
</div>\
</div>\
</div>\
<br><br>\
<div class="row">\
<div class="col-sm-9">\
<button class="btn btn-success btn-lg make-payment">\
<i class="icon-money"></i> Make Payment</button>\
</div>\
<div class="col-sm-3">\
<button class="btn btn-default btn-lg remove-items" style="display: none;">\
<i class="icon-trash"></i> Del</button>\
</div>\
</div>\
<br><br>\
</div>\
<div class="col-sm-6">\
<div class="item-list-area">\
<div class="col-sm-12">\
<div class="row item-list"></div></div>\
</div>\
</div>\
</div></div>');
this.check_transaction_type();
this.make();
var me = this;
$(this.frm.wrapper).on("refresh-fields", function() {
me.refresh();
});
this.call_function("remove-items", function() {me.remove_selected_items();});
this.call_function("make-payment", function() {me.make_payment();});
},
check_transaction_type: function() {
var me = this;
// Check whether the transaction is "Sales" or "Purchase"
if (wn.meta.has_field(cur_frm.doc.doctype, "customer")) {
this.set_transaction_defaults("Customer", "export");
}
else if (wn.meta.has_field(cur_frm.doc.doctype, "supplier")) {
this.set_transaction_defaults("Supplier", "import");
}
},
set_transaction_defaults: function(party, export_or_import) {
var me = this;
this.party = party;
this.price_list = (party == "Customer" ?
this.frm.doc.selling_price_list : this.frm.doc.buying_price_list);
this.sales_or_purchase = (party == "Customer" ? "Sales" : "Purchase");
this.net_total = "net_total_" + export_or_import;
this.grand_total = "grand_total_" + export_or_import;
this.amount = export_or_import + "_amount";
this.rate = export_or_import + "_rate";
},
call_function: function(class_name, fn, event_name) {
this.wrapper.find("." + class_name).on(event_name || "click", fn);
},
make: function() {
this.make_party();
this.make_item_group();
this.make_search();
this.make_barcode();
this.make_item_list();
},
make_party: function() {
var me = this;
this.party_field = wn.ui.form.make_control({
df: {
"fieldtype": "Link",
"options": this.party,
"label": this.party,
"fieldname": "pos_party",
"placeholder": this.party
},
parent: this.wrapper.find(".party-area"),
only_input: true,
});
this.party_field.make_input();
this.party_field.$input.on("change", function() {
if(!me.party_field.autocomplete_open)
wn.model.set_value(me.frm.doctype, me.frm.docname,
me.party.toLowerCase(), this.value);
});
},
make_item_group: function() {
var me = this;
this.item_group = wn.ui.form.make_control({
df: {
"fieldtype": "Link",
"options": "Item Group",
"label": "Item Group",
"fieldname": "pos_item_group",
"placeholder": "Item Group"
},
parent: this.wrapper.find(".item-group-area"),
only_input: true,
});
this.item_group.make_input();
this.item_group.$input.on("change", function() {
if(!me.item_group.autocomplete_open)
me.make_item_list();
});
},
make_search: function() {
var me = this;
this.search = wn.ui.form.make_control({
df: {
"fieldtype": "Data",
"label": "Item",
"fieldname": "pos_item",
"placeholder": "Search Item"
},
parent: this.wrapper.find(".search-area"),
only_input: true,
});
this.search.make_input();
this.search.$input.on("keypress", function() {
if(!me.search.autocomplete_open)
if(me.item_timeout)
clearTimeout(me.item_timeout);
me.item_timeout = setTimeout(function() { me.make_item_list(); }, 1000);
});
},
make_barcode: function() {
var me = this;
this.barcode = wn.ui.form.make_control({
df: {
"fieldtype": "Data",
"label": "Barcode",
"fieldname": "pos_barcode",
"placeholder": "Barcode / Serial No"
},
parent: this.wrapper.find(".barcode-area"),
only_input: true,
});
this.barcode.make_input();
this.barcode.$input.on("keypress", function() {
if(me.barcode_timeout)
clearTimeout(me.barcode_timeout);
me.barcode_timeout = setTimeout(function() { me.add_item_thru_barcode(); }, 1000);
});
},
make_item_list: function() {
var me = this;
me.item_timeout = null;
wn.call({
method: 'accounts.doctype.sales_invoice.pos.get_items',
args: {
sales_or_purchase: this.sales_or_purchase,
price_list: this.price_list,
item_group: this.item_group.$input.val(),
item: this.search.$input.val()
},
callback: function(r) {
var $wrap = me.wrapper.find(".item-list");
me.wrapper.find(".item-list").empty();
if (r.message) {
$.each(r.message, function(index, obj) {
if (obj.image)
image = '<img src="' + obj.image + '" class="img-responsive" \
style="border:1px solid #eee; max-height: 140px;">';
else
image = '<div class="missing-image"><i class="icon-camera"></i></div>';
$(repl('<div class="col-xs-3 pos-item" data-item_code="%(item_code)s">\
<div style="height: 140px; overflow: hidden;">%(item_image)s</div>\
<div class="small">%(item_code)s</div>\
<div class="small">%(item_name)s</div>\
<div class="small">%(item_price)s</div>\
</div>',
{
item_code: obj.name,
item_price: format_currency(obj.ref_rate, obj.currency),
item_name: obj.name===obj.item_name ? "" : obj.item_name,
item_image: image
})).appendTo($wrap);
});
}
// if form is local then allow this function
$(me.wrapper).find("div.pos-item").on("click", function() {
if(me.frm.doc.docstatus==0) {
if(!me.frm.doc[me.party.toLowerCase()] && ((me.frm.doctype == "Quotation" &&
me.frm.doc.quotation_to == "Customer")
|| me.frm.doctype != "Quotation")) {
msgprint("Please select " + me.party + " first.");
return;
}
else
me.add_to_cart($(this).attr("data-item_code"));
}
});
}
});
},
add_to_cart: function(item_code, serial_no) {
var me = this;
var caught = false;
// get no_of_items
var no_of_items = me.wrapper.find("#cart tbody tr").length;
// check whether the item is already added
if (no_of_items != 0) {
$.each(wn.model.get_children(this.frm.doctype + " Item", this.frm.doc.name,
this.frm.cscript.fname, this.frm.doctype), function(i, d) {
if (d.item_code == item_code) {
caught = true;
if (serial_no) {
d.serial_no += '\n' + serial_no;
me.frm.script_manager.trigger("serial_no", d.doctype, d.name);
}
else {
d.qty += 1;
me.frm.script_manager.trigger("qty", d.doctype, d.name);
}
}
});
}
// if item not found then add new item
if (!caught) {
this.add_new_item_to_grid(item_code, serial_no);
}
this.refresh();
this.refresh_search_box();
},
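// Usage sketch (hypothetical item code): a catalog click takes the qty
// path, while a scanned serial number appends to the item's serial list,
// mirroring the two calls in add_item_thru_barcode below:
//
// me.add_to_cart("ITEM-0001");
// me.add_to_cart("ITEM-0001", "SRN-42");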
add_new_item_to_grid: function(item_code, serial_no) {
var me = this;
var child = wn.model.add_child(me.frm.doc, this.frm.doctype + " Item",
this.frm.cscript.fname);
child.item_code = item_code;
if (serial_no)
child.serial_no = serial_no;
this.frm.script_manager.trigger("item_code", child.doctype, child.name);
},
refresh_search_box: function() {
var me = this;
// Clear Item Box and remake item list
if (this.search.$input.val()) {
this.search.set_input("");
this.make_item_list();
}
},
update_qty: function(item_code, qty) {
var me = this;
$.each(wn.model.get_children(this.frm.doctype + " Item", this.frm.doc.name,
this.frm.cscript.fname, this.frm.doctype), function(i, d) {
if (d.item_code == item_code) {
if (qty == 0) {
wn.model.clear_doc(d.doctype, d.name);
me.refresh_grid();
} else {
d.qty = qty;
me.frm.script_manager.trigger("qty", d.doctype, d.name);
}
}
});
me.refresh();
},
refresh: function() {
var me = this;
this.party_field.set_input(this.frm.doc[this.party.toLowerCase()]);
this.barcode.set_input("");
this.show_items_in_item_cart();
this.show_taxes();
this.set_totals();
// if form is local then only run all these functions
if (this.frm.doc.docstatus===0) {
this.call_when_local();
}
this.disable_text_box_and_button();
this.hide_payment_button();
// If quotation to is not Customer then remove party
if (this.frm.doctype == "Quotation") {
this.party_field.$wrapper.remove();
if (this.frm.doc.quotation_to == "Customer")
this.make_party();
}
},
show_items_in_item_cart: function() {
var me = this;
var $items = this.wrapper.find("#cart tbody").empty();
$.each(wn.model.get_children(this.frm.doctype + " Item", this.frm.doc.name,
this.frm.cscript.fname, this.frm.doctype), function(i, d) {
$(repl('<tr id="%(item_code)s" data-selected="false">\
<td>%(item_code)s%(item_name)s</td>\
<td style="vertical-align:middle;" align="right">\
<div class="decrease-qty" style="cursor:pointer;">\
<i class="icon-minus-sign icon-large text-danger"></i>\
</div>\
</td>\
<td style="vertical-align:middle;"><input type="text" value="%(qty)s" \
class="form-control qty" style="text-align: right;"></td>\
<td style="vertical-align:middle;cursor:pointer;">\
<div class="increase-qty" style="cursor:pointer;">\
<i class="icon-plus-sign icon-large text-success"></i>\
</div>\
</td>\
<td style="text-align: right;"><b>%(amount)s</b><br>%(rate)s</td>\
</tr>',
{
item_code: d.item_code,
item_name: d.item_name===d.item_code ? "" : ("<br>" + d.item_name),
qty: d.qty,
rate: format_currency(d[me.rate], me.frm.doc.currency),
amount: format_currency(d[me.amount], me.frm.doc.currency)
}
)).appendTo($items);
});
this.wrapper.find(".increase-qty, .decrease-qty").on("click", function() {
var item_code = $(this).closest("tr").attr("id");
me.selected_item_qty_operation(item_code, $(this).attr("class"));
});
},
show_taxes: function() {
var me = this;
var taxes = wn.model.get_children(this.sales_or_purchase + " Taxes and Charges",
this.frm.doc.name, this.frm.cscript.other_fname, this.frm.doctype);
$(this.wrapper).find(".tax-table")
.toggle((taxes && taxes.length) ? true : false)
.find("tbody").empty();
$.each(taxes, function(i, d) {
if (d.tax_amount) {
$(repl('<tr>\
<td>%(description)s %(rate)s</td>\
<td style="text-align: right;">%(tax_amount)s</td>\
<tr>', {
description: d.description,
rate: ((d.charge_type == "Actual") ? '' : ("(" + d.rate + "%)")),
tax_amount: format_currency(flt(d.tax_amount)/flt(me.frm.doc.conversion_rate),
me.frm.doc.currency)
})).appendTo(".tax-table tbody");
}
});
},
set_totals: function() {
var me = this;
this.wrapper.find(".net-total").text(format_currency(this.frm.doc[this.net_total],
me.frm.doc.currency));
this.wrapper.find(".grand-total").text(format_currency(this.frm.doc[this.grand_total],
me.frm.doc.currency));
},
call_when_local: function() {
var me = this;
// append quantity to the respective item after change from input box
$(this.wrapper).find("input.qty").on("change", function() {
var item_code = $(this).closest("tr")[0].id;
me.update_qty(item_code, $(this).val());
});
// on td click toggle the highlighting of row
$(this.wrapper).find("#cart tbody tr td").on("click", function() {
var row = $(this).closest("tr");
if (row.attr("data-selected") == "false") {
row.attr("class", "warning");
row.attr("data-selected", "true");
}
else {
row.prop("class", null);
row.attr("data-selected", "false");
}
me.refresh_delete_btn();
});
me.refresh_delete_btn();
this.barcode.$input.focus();
},
disable_text_box_and_button: function() {
var me = this;
// if form is submitted & cancelled then disable all input box & buttons
if (this.frm.doc.docstatus>=1) {
$(this.wrapper).find('input, button').each(function () {
$(this).prop('disabled', true);
});
$(this.wrapper).find(".remove-items").hide();
$(this.wrapper).find(".make-payment").hide();
}
else {
$(this.wrapper).find('input, button').each(function () {
$(this).prop('disabled', false);
});
$(this.wrapper).find(".make-payment").show();
}
},
hide_payment_button: function() {
var me = this;
// Show Make Payment button only in Sales Invoice
if (this.frm.doctype != "Sales Invoice")
$(this.wrapper).find(".make-payment").hide();
},
refresh_delete_btn: function() {
$(this.wrapper).find(".remove-items").toggle($(".item-cart .warning").length ? true : false);
},
add_item_thru_barcode: function() {
var me = this;
me.barcode_timeout = null;
wn.call({
method: 'accounts.doctype.sales_invoice.pos.get_item_code',
args: {barcode_serial_no: this.barcode.$input.val()},
callback: function(r) {
if (r.message) {
if (r.message[1] == "serial_no")
me.add_to_cart(r.message[0][0].item_code, r.message[0][0].name);
else
me.add_to_cart(r.message[0][0].name);
}
else
msgprint(wn._("Invalid Barcode"));
me.refresh();
}
});
},
remove_selected_items: function() {
var me = this;
var selected_items = [];
var no_of_items = $(this.wrapper).find("#cart tbody tr").length;
for(var x=0; x<=no_of_items - 1; x++) {
var row = $(this.wrapper).find("#cart tbody tr:eq(" + x + ")");
if(row.attr("data-selected") == "true") {
selected_items.push(row.attr("id"));
}
}
var child = wn.model.get_children(this.frm.doctype + " Item", this.frm.doc.name,
this.frm.cscript.fname, this.frm.doctype);
$.each(child, function(i, d) {
			for (var j in selected_items) {
				if (d.item_code == selected_items[j]) {
wn.model.clear_doc(d.doctype, d.name);
}
}
});
this.refresh_grid();
},
refresh_grid: function() {
this.frm.fields_dict[this.frm.cscript.fname].grid.refresh();
this.frm.script_manager.trigger("calculate_taxes_and_totals");
this.refresh();
},
selected_item_qty_operation: function(item_code, operation) {
var me = this;
var child = wn.model.get_children(this.frm.doctype + " Item", this.frm.doc.name,
this.frm.cscript.fname, this.frm.doctype);
$.each(child, function(i, d) {
if (d.item_code == item_code) {
if (operation == "increase-qty")
d.qty += 1;
else if (operation == "decrease-qty")
				if (d.qty > 1) d.qty -= 1;
me.refresh();
}
});
},
make_payment: function() {
var me = this;
var no_of_items = $(this.wrapper).find("#cart tbody tr").length;
var mode_of_payment = [];
if (no_of_items == 0)
msgprint(wn._("Payment cannot be made for empty cart"));
else {
wn.call({
method: 'accounts.doctype.sales_invoice.pos.get_mode_of_payment',
callback: function(r) {
				for (var x = 0; x < r.message.length; x++) {
mode_of_payment.push(r.message[x].name);
}
// show payment wizard
var dialog = new wn.ui.Dialog({
width: 400,
title: 'Payment',
fields: [
{fieldtype:'Data', fieldname:'total_amount', label:'Total Amount', read_only:1},
{fieldtype:'Select', fieldname:'mode_of_payment', label:'Mode of Payment',
options:mode_of_payment.join('\n'), reqd: 1},
{fieldtype:'Button', fieldname:'pay', label:'Pay'}
]
});
dialog.set_values({
"total_amount": $(".grand-total").text()
});
dialog.show();
dialog.get_input("total_amount").prop("disabled", true);
dialog.fields_dict.pay.input.onclick = function() {
me.frm.set_value("mode_of_payment", dialog.get_values().mode_of_payment);
me.frm.set_value("paid_amount", dialog.get_values().total_amount);
me.frm.cscript.mode_of_payment(me.frm.doc);
me.frm.save();
dialog.hide();
me.refresh();
};
}
});
}
},
}); | agpl-3.0 |
lazytech-org/RIOT | cpu/efm32/families/efm32pg1b/include/vendor/efm32pg1b_msc.h | 51392 | /***************************************************************************//**
* @file
* @brief EFM32PG1B_MSC register and bit field definitions
* @version 5.7.0
*******************************************************************************
* # License
* <b>Copyright 2018 Silicon Laboratories Inc. www.silabs.com</b>
*******************************************************************************
*
* SPDX-License-Identifier: Zlib
*
* The licensor of this software is Silicon Laboratories Inc.
*
* This software is provided 'as-is', without any express or implied
* warranty. In no event will the authors be held liable for any damages
* arising from the use of this software.
*
* Permission is granted to anyone to use this software for any purpose,
* including commercial applications, and to alter it and redistribute it
* freely, subject to the following restrictions:
*
* 1. The origin of this software must not be misrepresented; you must not
* claim that you wrote the original software. If you use this software
* in a product, an acknowledgment in the product documentation would be
* appreciated but is not required.
* 2. Altered source versions must be plainly marked as such, and must not be
* misrepresented as being the original software.
* 3. This notice may not be removed or altered from any source distribution.
*
******************************************************************************/
#ifdef __cplusplus
extern "C" {
#endif
#if defined(__ICCARM__)
#pragma system_include /* Treat file as system include file. */
#elif defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
#pragma clang system_header /* Treat file as system include file. */
#endif
/***************************************************************************//**
* @addtogroup Parts
* @{
******************************************************************************/
/***************************************************************************//**
* @defgroup EFM32PG1B_MSC MSC
* @{
* @brief EFM32PG1B_MSC Register Declaration
******************************************************************************/
/** MSC Register Declaration */
typedef struct {
__IOM uint32_t CTRL; /**< Memory System Control Register */
__IOM uint32_t READCTRL; /**< Read Control Register */
__IOM uint32_t WRITECTRL; /**< Write Control Register */
__IOM uint32_t WRITECMD; /**< Write Command Register */
__IOM uint32_t ADDRB; /**< Page Erase/Write Address Buffer */
uint32_t RESERVED0[1U]; /**< Reserved for future use **/
__IOM uint32_t WDATA; /**< Write Data Register */
__IM uint32_t STATUS; /**< Status Register */
uint32_t RESERVED1[4U]; /**< Reserved for future use **/
__IM uint32_t IF; /**< Interrupt Flag Register */
__IOM uint32_t IFS; /**< Interrupt Flag Set Register */
__IOM uint32_t IFC; /**< Interrupt Flag Clear Register */
__IOM uint32_t IEN; /**< Interrupt Enable Register */
__IOM uint32_t LOCK; /**< Configuration Lock Register */
__IOM uint32_t CACHECMD; /**< Flash Cache Command Register */
__IM uint32_t CACHEHITS; /**< Cache Hits Performance Counter */
__IM uint32_t CACHEMISSES; /**< Cache Misses Performance Counter */
uint32_t RESERVED2[1U]; /**< Reserved for future use **/
__IOM uint32_t MASSLOCK; /**< Mass Erase Lock Register */
uint32_t RESERVED3[1U]; /**< Reserved for future use **/
__IOM uint32_t STARTUP; /**< Startup Control */
uint32_t RESERVED4[5U]; /**< Reserved for future use **/
__IOM uint32_t CMD; /**< Command Register */
} MSC_TypeDef; /** @} */
/***************************************************************************//**
* @addtogroup EFM32PG1B_MSC
* @{
* @defgroup EFM32PG1B_MSC_BitFields MSC Bit Fields
* @{
******************************************************************************/
/* Bit fields for MSC CTRL */
#define _MSC_CTRL_RESETVALUE 0x00000001UL /**< Default value for MSC_CTRL */
#define _MSC_CTRL_MASK 0x0000000FUL /**< Mask for MSC_CTRL */
#define MSC_CTRL_ADDRFAULTEN (0x1UL << 0) /**< Invalid Address Bus Fault Response Enable */
#define _MSC_CTRL_ADDRFAULTEN_SHIFT 0 /**< Shift value for MSC_ADDRFAULTEN */
#define _MSC_CTRL_ADDRFAULTEN_MASK 0x1UL /**< Bit mask for MSC_ADDRFAULTEN */
#define _MSC_CTRL_ADDRFAULTEN_DEFAULT 0x00000001UL /**< Mode DEFAULT for MSC_CTRL */
#define MSC_CTRL_ADDRFAULTEN_DEFAULT (_MSC_CTRL_ADDRFAULTEN_DEFAULT << 0) /**< Shifted mode DEFAULT for MSC_CTRL */
#define MSC_CTRL_CLKDISFAULTEN (0x1UL << 1) /**< Clock-disabled Bus Fault Response Enable */
#define _MSC_CTRL_CLKDISFAULTEN_SHIFT 1 /**< Shift value for MSC_CLKDISFAULTEN */
#define _MSC_CTRL_CLKDISFAULTEN_MASK 0x2UL /**< Bit mask for MSC_CLKDISFAULTEN */
#define _MSC_CTRL_CLKDISFAULTEN_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_CTRL */
#define MSC_CTRL_CLKDISFAULTEN_DEFAULT (_MSC_CTRL_CLKDISFAULTEN_DEFAULT << 1) /**< Shifted mode DEFAULT for MSC_CTRL */
#define MSC_CTRL_PWRUPONDEMAND (0x1UL << 2) /**< Power Up on Demand During Wake Up */
#define _MSC_CTRL_PWRUPONDEMAND_SHIFT 2 /**< Shift value for MSC_PWRUPONDEMAND */
#define _MSC_CTRL_PWRUPONDEMAND_MASK 0x4UL /**< Bit mask for MSC_PWRUPONDEMAND */
#define _MSC_CTRL_PWRUPONDEMAND_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_CTRL */
#define MSC_CTRL_PWRUPONDEMAND_DEFAULT (_MSC_CTRL_PWRUPONDEMAND_DEFAULT << 2) /**< Shifted mode DEFAULT for MSC_CTRL */
#define MSC_CTRL_IFCREADCLEAR (0x1UL << 3) /**< IFC Read Clears IF */
#define _MSC_CTRL_IFCREADCLEAR_SHIFT 3 /**< Shift value for MSC_IFCREADCLEAR */
#define _MSC_CTRL_IFCREADCLEAR_MASK 0x8UL /**< Bit mask for MSC_IFCREADCLEAR */
#define _MSC_CTRL_IFCREADCLEAR_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_CTRL */
#define MSC_CTRL_IFCREADCLEAR_DEFAULT (_MSC_CTRL_IFCREADCLEAR_DEFAULT << 3) /**< Shifted mode DEFAULT for MSC_CTRL */
/* Bit fields for MSC READCTRL */
#define _MSC_READCTRL_RESETVALUE 0x01000100UL /**< Default value for MSC_READCTRL */
#define _MSC_READCTRL_MASK 0x13000338UL /**< Mask for MSC_READCTRL */
#define MSC_READCTRL_IFCDIS (0x1UL << 3) /**< Internal Flash Cache Disable */
#define _MSC_READCTRL_IFCDIS_SHIFT 3 /**< Shift value for MSC_IFCDIS */
#define _MSC_READCTRL_IFCDIS_MASK 0x8UL /**< Bit mask for MSC_IFCDIS */
#define _MSC_READCTRL_IFCDIS_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_READCTRL */
#define MSC_READCTRL_IFCDIS_DEFAULT (_MSC_READCTRL_IFCDIS_DEFAULT << 3) /**< Shifted mode DEFAULT for MSC_READCTRL */
#define MSC_READCTRL_AIDIS (0x1UL << 4) /**< Automatic Invalidate Disable */
#define _MSC_READCTRL_AIDIS_SHIFT 4 /**< Shift value for MSC_AIDIS */
#define _MSC_READCTRL_AIDIS_MASK 0x10UL /**< Bit mask for MSC_AIDIS */
#define _MSC_READCTRL_AIDIS_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_READCTRL */
#define MSC_READCTRL_AIDIS_DEFAULT (_MSC_READCTRL_AIDIS_DEFAULT << 4) /**< Shifted mode DEFAULT for MSC_READCTRL */
#define MSC_READCTRL_ICCDIS (0x1UL << 5) /**< Interrupt Context Cache Disable */
#define _MSC_READCTRL_ICCDIS_SHIFT 5 /**< Shift value for MSC_ICCDIS */
#define _MSC_READCTRL_ICCDIS_MASK 0x20UL /**< Bit mask for MSC_ICCDIS */
#define _MSC_READCTRL_ICCDIS_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_READCTRL */
#define MSC_READCTRL_ICCDIS_DEFAULT (_MSC_READCTRL_ICCDIS_DEFAULT << 5) /**< Shifted mode DEFAULT for MSC_READCTRL */
#define MSC_READCTRL_PREFETCH (0x1UL << 8) /**< Prefetch Mode */
#define _MSC_READCTRL_PREFETCH_SHIFT 8 /**< Shift value for MSC_PREFETCH */
#define _MSC_READCTRL_PREFETCH_MASK 0x100UL /**< Bit mask for MSC_PREFETCH */
#define _MSC_READCTRL_PREFETCH_DEFAULT 0x00000001UL /**< Mode DEFAULT for MSC_READCTRL */
#define MSC_READCTRL_PREFETCH_DEFAULT (_MSC_READCTRL_PREFETCH_DEFAULT << 8) /**< Shifted mode DEFAULT for MSC_READCTRL */
#define MSC_READCTRL_USEHPROT (0x1UL << 9) /**< AHB_HPROT Mode */
#define _MSC_READCTRL_USEHPROT_SHIFT 9 /**< Shift value for MSC_USEHPROT */
#define _MSC_READCTRL_USEHPROT_MASK 0x200UL /**< Bit mask for MSC_USEHPROT */
#define _MSC_READCTRL_USEHPROT_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_READCTRL */
#define MSC_READCTRL_USEHPROT_DEFAULT (_MSC_READCTRL_USEHPROT_DEFAULT << 9) /**< Shifted mode DEFAULT for MSC_READCTRL */
#define _MSC_READCTRL_MODE_SHIFT 24 /**< Shift value for MSC_MODE */
#define _MSC_READCTRL_MODE_MASK 0x3000000UL /**< Bit mask for MSC_MODE */
#define _MSC_READCTRL_MODE_WS0 0x00000000UL /**< Mode WS0 for MSC_READCTRL */
#define _MSC_READCTRL_MODE_DEFAULT 0x00000001UL /**< Mode DEFAULT for MSC_READCTRL */
#define _MSC_READCTRL_MODE_WS1 0x00000001UL /**< Mode WS1 for MSC_READCTRL */
#define MSC_READCTRL_MODE_WS0 (_MSC_READCTRL_MODE_WS0 << 24) /**< Shifted mode WS0 for MSC_READCTRL */
#define MSC_READCTRL_MODE_DEFAULT (_MSC_READCTRL_MODE_DEFAULT << 24) /**< Shifted mode DEFAULT for MSC_READCTRL */
#define MSC_READCTRL_MODE_WS1 (_MSC_READCTRL_MODE_WS1 << 24) /**< Shifted mode WS1 for MSC_READCTRL */
#define MSC_READCTRL_SCBTP                        (0x1UL << 28)                           /**< Suppress Conditional Branch Target Prefetch */
#define _MSC_READCTRL_SCBTP_SHIFT 28 /**< Shift value for MSC_SCBTP */
#define _MSC_READCTRL_SCBTP_MASK 0x10000000UL /**< Bit mask for MSC_SCBTP */
#define _MSC_READCTRL_SCBTP_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_READCTRL */
#define MSC_READCTRL_SCBTP_DEFAULT (_MSC_READCTRL_SCBTP_DEFAULT << 28) /**< Shifted mode DEFAULT for MSC_READCTRL */
/* Bit fields for MSC WRITECTRL */
#define _MSC_WRITECTRL_RESETVALUE 0x00000000UL /**< Default value for MSC_WRITECTRL */
#define _MSC_WRITECTRL_MASK 0x00000003UL /**< Mask for MSC_WRITECTRL */
#define MSC_WRITECTRL_WREN (0x1UL << 0) /**< Enable Write/Erase Controller */
#define _MSC_WRITECTRL_WREN_SHIFT 0 /**< Shift value for MSC_WREN */
#define _MSC_WRITECTRL_WREN_MASK 0x1UL /**< Bit mask for MSC_WREN */
#define _MSC_WRITECTRL_WREN_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_WRITECTRL */
#define MSC_WRITECTRL_WREN_DEFAULT (_MSC_WRITECTRL_WREN_DEFAULT << 0) /**< Shifted mode DEFAULT for MSC_WRITECTRL */
#define MSC_WRITECTRL_IRQERASEABORT (0x1UL << 1) /**< Abort Page Erase on Interrupt */
#define _MSC_WRITECTRL_IRQERASEABORT_SHIFT 1 /**< Shift value for MSC_IRQERASEABORT */
#define _MSC_WRITECTRL_IRQERASEABORT_MASK 0x2UL /**< Bit mask for MSC_IRQERASEABORT */
#define _MSC_WRITECTRL_IRQERASEABORT_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_WRITECTRL */
#define MSC_WRITECTRL_IRQERASEABORT_DEFAULT (_MSC_WRITECTRL_IRQERASEABORT_DEFAULT << 1) /**< Shifted mode DEFAULT for MSC_WRITECTRL */
/* Bit fields for MSC WRITECMD */
#define _MSC_WRITECMD_RESETVALUE 0x00000000UL /**< Default value for MSC_WRITECMD */
#define _MSC_WRITECMD_MASK 0x0000113FUL /**< Mask for MSC_WRITECMD */
#define MSC_WRITECMD_LADDRIM (0x1UL << 0) /**< Load MSC_ADDRB Into ADDR */
#define _MSC_WRITECMD_LADDRIM_SHIFT 0 /**< Shift value for MSC_LADDRIM */
#define _MSC_WRITECMD_LADDRIM_MASK 0x1UL /**< Bit mask for MSC_LADDRIM */
#define _MSC_WRITECMD_LADDRIM_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_WRITECMD */
#define MSC_WRITECMD_LADDRIM_DEFAULT (_MSC_WRITECMD_LADDRIM_DEFAULT << 0) /**< Shifted mode DEFAULT for MSC_WRITECMD */
#define MSC_WRITECMD_ERASEPAGE (0x1UL << 1) /**< Erase Page */
#define _MSC_WRITECMD_ERASEPAGE_SHIFT 1 /**< Shift value for MSC_ERASEPAGE */
#define _MSC_WRITECMD_ERASEPAGE_MASK 0x2UL /**< Bit mask for MSC_ERASEPAGE */
#define _MSC_WRITECMD_ERASEPAGE_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_WRITECMD */
#define MSC_WRITECMD_ERASEPAGE_DEFAULT (_MSC_WRITECMD_ERASEPAGE_DEFAULT << 1) /**< Shifted mode DEFAULT for MSC_WRITECMD */
#define MSC_WRITECMD_WRITEEND (0x1UL << 2) /**< End Write Mode */
#define _MSC_WRITECMD_WRITEEND_SHIFT 2 /**< Shift value for MSC_WRITEEND */
#define _MSC_WRITECMD_WRITEEND_MASK 0x4UL /**< Bit mask for MSC_WRITEEND */
#define _MSC_WRITECMD_WRITEEND_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_WRITECMD */
#define MSC_WRITECMD_WRITEEND_DEFAULT (_MSC_WRITECMD_WRITEEND_DEFAULT << 2) /**< Shifted mode DEFAULT for MSC_WRITECMD */
#define MSC_WRITECMD_WRITEONCE (0x1UL << 3) /**< Word Write-Once Trigger */
#define _MSC_WRITECMD_WRITEONCE_SHIFT 3 /**< Shift value for MSC_WRITEONCE */
#define _MSC_WRITECMD_WRITEONCE_MASK 0x8UL /**< Bit mask for MSC_WRITEONCE */
#define _MSC_WRITECMD_WRITEONCE_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_WRITECMD */
#define MSC_WRITECMD_WRITEONCE_DEFAULT (_MSC_WRITECMD_WRITEONCE_DEFAULT << 3) /**< Shifted mode DEFAULT for MSC_WRITECMD */
#define MSC_WRITECMD_WRITETRIG (0x1UL << 4) /**< Word Write Sequence Trigger */
#define _MSC_WRITECMD_WRITETRIG_SHIFT 4 /**< Shift value for MSC_WRITETRIG */
#define _MSC_WRITECMD_WRITETRIG_MASK 0x10UL /**< Bit mask for MSC_WRITETRIG */
#define _MSC_WRITECMD_WRITETRIG_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_WRITECMD */
#define MSC_WRITECMD_WRITETRIG_DEFAULT (_MSC_WRITECMD_WRITETRIG_DEFAULT << 4) /**< Shifted mode DEFAULT for MSC_WRITECMD */
#define MSC_WRITECMD_ERASEABORT (0x1UL << 5) /**< Abort Erase Sequence */
#define _MSC_WRITECMD_ERASEABORT_SHIFT 5 /**< Shift value for MSC_ERASEABORT */
#define _MSC_WRITECMD_ERASEABORT_MASK 0x20UL /**< Bit mask for MSC_ERASEABORT */
#define _MSC_WRITECMD_ERASEABORT_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_WRITECMD */
#define MSC_WRITECMD_ERASEABORT_DEFAULT (_MSC_WRITECMD_ERASEABORT_DEFAULT << 5) /**< Shifted mode DEFAULT for MSC_WRITECMD */
#define MSC_WRITECMD_ERASEMAIN0 (0x1UL << 8) /**< Mass Erase Region 0 */
#define _MSC_WRITECMD_ERASEMAIN0_SHIFT 8 /**< Shift value for MSC_ERASEMAIN0 */
#define _MSC_WRITECMD_ERASEMAIN0_MASK 0x100UL /**< Bit mask for MSC_ERASEMAIN0 */
#define _MSC_WRITECMD_ERASEMAIN0_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_WRITECMD */
#define MSC_WRITECMD_ERASEMAIN0_DEFAULT (_MSC_WRITECMD_ERASEMAIN0_DEFAULT << 8) /**< Shifted mode DEFAULT for MSC_WRITECMD */
#define MSC_WRITECMD_CLEARWDATA (0x1UL << 12) /**< Clear WDATA State */
#define _MSC_WRITECMD_CLEARWDATA_SHIFT 12 /**< Shift value for MSC_CLEARWDATA */
#define _MSC_WRITECMD_CLEARWDATA_MASK 0x1000UL /**< Bit mask for MSC_CLEARWDATA */
#define _MSC_WRITECMD_CLEARWDATA_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_WRITECMD */
#define MSC_WRITECMD_CLEARWDATA_DEFAULT (_MSC_WRITECMD_CLEARWDATA_DEFAULT << 12) /**< Shifted mode DEFAULT for MSC_WRITECMD */
/* Bit fields for MSC ADDRB */
#define _MSC_ADDRB_RESETVALUE 0x00000000UL /**< Default value for MSC_ADDRB */
#define _MSC_ADDRB_MASK 0xFFFFFFFFUL /**< Mask for MSC_ADDRB */
#define _MSC_ADDRB_ADDRB_SHIFT 0 /**< Shift value for MSC_ADDRB */
#define _MSC_ADDRB_ADDRB_MASK 0xFFFFFFFFUL /**< Bit mask for MSC_ADDRB */
#define _MSC_ADDRB_ADDRB_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_ADDRB */
#define MSC_ADDRB_ADDRB_DEFAULT (_MSC_ADDRB_ADDRB_DEFAULT << 0) /**< Shifted mode DEFAULT for MSC_ADDRB */
/* Bit fields for MSC WDATA */
#define _MSC_WDATA_RESETVALUE 0x00000000UL /**< Default value for MSC_WDATA */
#define _MSC_WDATA_MASK 0xFFFFFFFFUL /**< Mask for MSC_WDATA */
#define _MSC_WDATA_WDATA_SHIFT 0 /**< Shift value for MSC_WDATA */
#define _MSC_WDATA_WDATA_MASK 0xFFFFFFFFUL /**< Bit mask for MSC_WDATA */
#define _MSC_WDATA_WDATA_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_WDATA */
#define MSC_WDATA_WDATA_DEFAULT (_MSC_WDATA_WDATA_DEFAULT << 0) /**< Shifted mode DEFAULT for MSC_WDATA */
/* Bit fields for MSC STATUS */
#define _MSC_STATUS_RESETVALUE 0x00000008UL /**< Default value for MSC_STATUS */
#define _MSC_STATUS_MASK 0x0000007FUL /**< Mask for MSC_STATUS */
#define MSC_STATUS_BUSY (0x1UL << 0) /**< Erase/Write Busy */
#define _MSC_STATUS_BUSY_SHIFT 0 /**< Shift value for MSC_BUSY */
#define _MSC_STATUS_BUSY_MASK 0x1UL /**< Bit mask for MSC_BUSY */
#define _MSC_STATUS_BUSY_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_STATUS */
#define MSC_STATUS_BUSY_DEFAULT (_MSC_STATUS_BUSY_DEFAULT << 0) /**< Shifted mode DEFAULT for MSC_STATUS */
#define MSC_STATUS_LOCKED (0x1UL << 1) /**< Access Locked */
#define _MSC_STATUS_LOCKED_SHIFT 1 /**< Shift value for MSC_LOCKED */
#define _MSC_STATUS_LOCKED_MASK 0x2UL /**< Bit mask for MSC_LOCKED */
#define _MSC_STATUS_LOCKED_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_STATUS */
#define MSC_STATUS_LOCKED_DEFAULT (_MSC_STATUS_LOCKED_DEFAULT << 1) /**< Shifted mode DEFAULT for MSC_STATUS */
#define MSC_STATUS_INVADDR (0x1UL << 2) /**< Invalid Write Address or Erase Page */
#define _MSC_STATUS_INVADDR_SHIFT 2 /**< Shift value for MSC_INVADDR */
#define _MSC_STATUS_INVADDR_MASK 0x4UL /**< Bit mask for MSC_INVADDR */
#define _MSC_STATUS_INVADDR_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_STATUS */
#define MSC_STATUS_INVADDR_DEFAULT (_MSC_STATUS_INVADDR_DEFAULT << 2) /**< Shifted mode DEFAULT for MSC_STATUS */
#define MSC_STATUS_WDATAREADY (0x1UL << 3) /**< WDATA Write Ready */
#define _MSC_STATUS_WDATAREADY_SHIFT 3 /**< Shift value for MSC_WDATAREADY */
#define _MSC_STATUS_WDATAREADY_MASK 0x8UL /**< Bit mask for MSC_WDATAREADY */
#define _MSC_STATUS_WDATAREADY_DEFAULT 0x00000001UL /**< Mode DEFAULT for MSC_STATUS */
#define MSC_STATUS_WDATAREADY_DEFAULT (_MSC_STATUS_WDATAREADY_DEFAULT << 3) /**< Shifted mode DEFAULT for MSC_STATUS */
#define MSC_STATUS_WORDTIMEOUT (0x1UL << 4) /**< Flash Write Word Timeout */
#define _MSC_STATUS_WORDTIMEOUT_SHIFT 4 /**< Shift value for MSC_WORDTIMEOUT */
#define _MSC_STATUS_WORDTIMEOUT_MASK 0x10UL /**< Bit mask for MSC_WORDTIMEOUT */
#define _MSC_STATUS_WORDTIMEOUT_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_STATUS */
#define MSC_STATUS_WORDTIMEOUT_DEFAULT (_MSC_STATUS_WORDTIMEOUT_DEFAULT << 4) /**< Shifted mode DEFAULT for MSC_STATUS */
#define MSC_STATUS_ERASEABORTED (0x1UL << 5) /**< The Current Flash Erase Operation Aborted */
#define _MSC_STATUS_ERASEABORTED_SHIFT 5 /**< Shift value for MSC_ERASEABORTED */
#define _MSC_STATUS_ERASEABORTED_MASK 0x20UL /**< Bit mask for MSC_ERASEABORTED */
#define _MSC_STATUS_ERASEABORTED_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_STATUS */
#define MSC_STATUS_ERASEABORTED_DEFAULT (_MSC_STATUS_ERASEABORTED_DEFAULT << 5) /**< Shifted mode DEFAULT for MSC_STATUS */
#define MSC_STATUS_PCRUNNING (0x1UL << 6) /**< Performance Counters Running */
#define _MSC_STATUS_PCRUNNING_SHIFT 6 /**< Shift value for MSC_PCRUNNING */
#define _MSC_STATUS_PCRUNNING_MASK 0x40UL /**< Bit mask for MSC_PCRUNNING */
#define _MSC_STATUS_PCRUNNING_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_STATUS */
#define MSC_STATUS_PCRUNNING_DEFAULT (_MSC_STATUS_PCRUNNING_DEFAULT << 6) /**< Shifted mode DEFAULT for MSC_STATUS */
/* Bit fields for MSC IF */
#define _MSC_IF_RESETVALUE 0x00000000UL /**< Default value for MSC_IF */
#define _MSC_IF_MASK 0x0000003FUL /**< Mask for MSC_IF */
#define MSC_IF_ERASE (0x1UL << 0) /**< Erase Done Interrupt Read Flag */
#define _MSC_IF_ERASE_SHIFT 0 /**< Shift value for MSC_ERASE */
#define _MSC_IF_ERASE_MASK 0x1UL /**< Bit mask for MSC_ERASE */
#define _MSC_IF_ERASE_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_IF */
#define MSC_IF_ERASE_DEFAULT (_MSC_IF_ERASE_DEFAULT << 0) /**< Shifted mode DEFAULT for MSC_IF */
#define MSC_IF_WRITE (0x1UL << 1) /**< Write Done Interrupt Read Flag */
#define _MSC_IF_WRITE_SHIFT 1 /**< Shift value for MSC_WRITE */
#define _MSC_IF_WRITE_MASK 0x2UL /**< Bit mask for MSC_WRITE */
#define _MSC_IF_WRITE_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_IF */
#define MSC_IF_WRITE_DEFAULT (_MSC_IF_WRITE_DEFAULT << 1) /**< Shifted mode DEFAULT for MSC_IF */
#define MSC_IF_CHOF (0x1UL << 2) /**< Cache Hits Overflow Interrupt Flag */
#define _MSC_IF_CHOF_SHIFT 2 /**< Shift value for MSC_CHOF */
#define _MSC_IF_CHOF_MASK 0x4UL /**< Bit mask for MSC_CHOF */
#define _MSC_IF_CHOF_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_IF */
#define MSC_IF_CHOF_DEFAULT (_MSC_IF_CHOF_DEFAULT << 2) /**< Shifted mode DEFAULT for MSC_IF */
#define MSC_IF_CMOF (0x1UL << 3) /**< Cache Misses Overflow Interrupt Flag */
#define _MSC_IF_CMOF_SHIFT 3 /**< Shift value for MSC_CMOF */
#define _MSC_IF_CMOF_MASK 0x8UL /**< Bit mask for MSC_CMOF */
#define _MSC_IF_CMOF_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_IF */
#define MSC_IF_CMOF_DEFAULT (_MSC_IF_CMOF_DEFAULT << 3) /**< Shifted mode DEFAULT for MSC_IF */
#define MSC_IF_PWRUPF (0x1UL << 4) /**< Flash Power Up Sequence Complete Flag */
#define _MSC_IF_PWRUPF_SHIFT 4 /**< Shift value for MSC_PWRUPF */
#define _MSC_IF_PWRUPF_MASK 0x10UL /**< Bit mask for MSC_PWRUPF */
#define _MSC_IF_PWRUPF_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_IF */
#define MSC_IF_PWRUPF_DEFAULT (_MSC_IF_PWRUPF_DEFAULT << 4) /**< Shifted mode DEFAULT for MSC_IF */
#define MSC_IF_ICACHERR (0x1UL << 5) /**< ICache RAM Parity Error Flag */
#define _MSC_IF_ICACHERR_SHIFT 5 /**< Shift value for MSC_ICACHERR */
#define _MSC_IF_ICACHERR_MASK 0x20UL /**< Bit mask for MSC_ICACHERR */
#define _MSC_IF_ICACHERR_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_IF */
#define MSC_IF_ICACHERR_DEFAULT (_MSC_IF_ICACHERR_DEFAULT << 5) /**< Shifted mode DEFAULT for MSC_IF */
/* Bit fields for MSC IFS */
#define _MSC_IFS_RESETVALUE 0x00000000UL /**< Default value for MSC_IFS */
#define _MSC_IFS_MASK 0x0000003FUL /**< Mask for MSC_IFS */
#define MSC_IFS_ERASE (0x1UL << 0) /**< Set ERASE Interrupt Flag */
#define _MSC_IFS_ERASE_SHIFT 0 /**< Shift value for MSC_ERASE */
#define _MSC_IFS_ERASE_MASK 0x1UL /**< Bit mask for MSC_ERASE */
#define _MSC_IFS_ERASE_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_IFS */
#define MSC_IFS_ERASE_DEFAULT (_MSC_IFS_ERASE_DEFAULT << 0) /**< Shifted mode DEFAULT for MSC_IFS */
#define MSC_IFS_WRITE (0x1UL << 1) /**< Set WRITE Interrupt Flag */
#define _MSC_IFS_WRITE_SHIFT 1 /**< Shift value for MSC_WRITE */
#define _MSC_IFS_WRITE_MASK 0x2UL /**< Bit mask for MSC_WRITE */
#define _MSC_IFS_WRITE_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_IFS */
#define MSC_IFS_WRITE_DEFAULT (_MSC_IFS_WRITE_DEFAULT << 1) /**< Shifted mode DEFAULT for MSC_IFS */
#define MSC_IFS_CHOF (0x1UL << 2) /**< Set CHOF Interrupt Flag */
#define _MSC_IFS_CHOF_SHIFT 2 /**< Shift value for MSC_CHOF */
#define _MSC_IFS_CHOF_MASK 0x4UL /**< Bit mask for MSC_CHOF */
#define _MSC_IFS_CHOF_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_IFS */
#define MSC_IFS_CHOF_DEFAULT (_MSC_IFS_CHOF_DEFAULT << 2) /**< Shifted mode DEFAULT for MSC_IFS */
#define MSC_IFS_CMOF (0x1UL << 3) /**< Set CMOF Interrupt Flag */
#define _MSC_IFS_CMOF_SHIFT 3 /**< Shift value for MSC_CMOF */
#define _MSC_IFS_CMOF_MASK 0x8UL /**< Bit mask for MSC_CMOF */
#define _MSC_IFS_CMOF_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_IFS */
#define MSC_IFS_CMOF_DEFAULT (_MSC_IFS_CMOF_DEFAULT << 3) /**< Shifted mode DEFAULT for MSC_IFS */
#define MSC_IFS_PWRUPF (0x1UL << 4) /**< Set PWRUPF Interrupt Flag */
#define _MSC_IFS_PWRUPF_SHIFT 4 /**< Shift value for MSC_PWRUPF */
#define _MSC_IFS_PWRUPF_MASK 0x10UL /**< Bit mask for MSC_PWRUPF */
#define _MSC_IFS_PWRUPF_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_IFS */
#define MSC_IFS_PWRUPF_DEFAULT (_MSC_IFS_PWRUPF_DEFAULT << 4) /**< Shifted mode DEFAULT for MSC_IFS */
#define MSC_IFS_ICACHERR (0x1UL << 5) /**< Set ICACHERR Interrupt Flag */
#define _MSC_IFS_ICACHERR_SHIFT 5 /**< Shift value for MSC_ICACHERR */
#define _MSC_IFS_ICACHERR_MASK 0x20UL /**< Bit mask for MSC_ICACHERR */
#define _MSC_IFS_ICACHERR_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_IFS */
#define MSC_IFS_ICACHERR_DEFAULT (_MSC_IFS_ICACHERR_DEFAULT << 5) /**< Shifted mode DEFAULT for MSC_IFS */
/* Bit fields for MSC IFC */
#define _MSC_IFC_RESETVALUE 0x00000000UL /**< Default value for MSC_IFC */
#define _MSC_IFC_MASK 0x0000003FUL /**< Mask for MSC_IFC */
#define MSC_IFC_ERASE (0x1UL << 0) /**< Clear ERASE Interrupt Flag */
#define _MSC_IFC_ERASE_SHIFT 0 /**< Shift value for MSC_ERASE */
#define _MSC_IFC_ERASE_MASK 0x1UL /**< Bit mask for MSC_ERASE */
#define _MSC_IFC_ERASE_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_IFC */
#define MSC_IFC_ERASE_DEFAULT (_MSC_IFC_ERASE_DEFAULT << 0) /**< Shifted mode DEFAULT for MSC_IFC */
#define MSC_IFC_WRITE (0x1UL << 1) /**< Clear WRITE Interrupt Flag */
#define _MSC_IFC_WRITE_SHIFT 1 /**< Shift value for MSC_WRITE */
#define _MSC_IFC_WRITE_MASK 0x2UL /**< Bit mask for MSC_WRITE */
#define _MSC_IFC_WRITE_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_IFC */
#define MSC_IFC_WRITE_DEFAULT (_MSC_IFC_WRITE_DEFAULT << 1) /**< Shifted mode DEFAULT for MSC_IFC */
#define MSC_IFC_CHOF (0x1UL << 2) /**< Clear CHOF Interrupt Flag */
#define _MSC_IFC_CHOF_SHIFT 2 /**< Shift value for MSC_CHOF */
#define _MSC_IFC_CHOF_MASK 0x4UL /**< Bit mask for MSC_CHOF */
#define _MSC_IFC_CHOF_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_IFC */
#define MSC_IFC_CHOF_DEFAULT (_MSC_IFC_CHOF_DEFAULT << 2) /**< Shifted mode DEFAULT for MSC_IFC */
#define MSC_IFC_CMOF (0x1UL << 3) /**< Clear CMOF Interrupt Flag */
#define _MSC_IFC_CMOF_SHIFT 3 /**< Shift value for MSC_CMOF */
#define _MSC_IFC_CMOF_MASK 0x8UL /**< Bit mask for MSC_CMOF */
#define _MSC_IFC_CMOF_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_IFC */
#define MSC_IFC_CMOF_DEFAULT (_MSC_IFC_CMOF_DEFAULT << 3) /**< Shifted mode DEFAULT for MSC_IFC */
#define MSC_IFC_PWRUPF (0x1UL << 4) /**< Clear PWRUPF Interrupt Flag */
#define _MSC_IFC_PWRUPF_SHIFT 4 /**< Shift value for MSC_PWRUPF */
#define _MSC_IFC_PWRUPF_MASK 0x10UL /**< Bit mask for MSC_PWRUPF */
#define _MSC_IFC_PWRUPF_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_IFC */
#define MSC_IFC_PWRUPF_DEFAULT (_MSC_IFC_PWRUPF_DEFAULT << 4) /**< Shifted mode DEFAULT for MSC_IFC */
#define MSC_IFC_ICACHERR (0x1UL << 5) /**< Clear ICACHERR Interrupt Flag */
#define _MSC_IFC_ICACHERR_SHIFT 5 /**< Shift value for MSC_ICACHERR */
#define _MSC_IFC_ICACHERR_MASK 0x20UL /**< Bit mask for MSC_ICACHERR */
#define _MSC_IFC_ICACHERR_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_IFC */
#define MSC_IFC_ICACHERR_DEFAULT (_MSC_IFC_ICACHERR_DEFAULT << 5) /**< Shifted mode DEFAULT for MSC_IFC */
/* Bit fields for MSC IEN */
#define _MSC_IEN_RESETVALUE 0x00000000UL /**< Default value for MSC_IEN */
#define _MSC_IEN_MASK 0x0000003FUL /**< Mask for MSC_IEN */
#define MSC_IEN_ERASE (0x1UL << 0) /**< ERASE Interrupt Enable */
#define _MSC_IEN_ERASE_SHIFT 0 /**< Shift value for MSC_ERASE */
#define _MSC_IEN_ERASE_MASK 0x1UL /**< Bit mask for MSC_ERASE */
#define _MSC_IEN_ERASE_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_IEN */
#define MSC_IEN_ERASE_DEFAULT (_MSC_IEN_ERASE_DEFAULT << 0) /**< Shifted mode DEFAULT for MSC_IEN */
#define MSC_IEN_WRITE (0x1UL << 1) /**< WRITE Interrupt Enable */
#define _MSC_IEN_WRITE_SHIFT 1 /**< Shift value for MSC_WRITE */
#define _MSC_IEN_WRITE_MASK 0x2UL /**< Bit mask for MSC_WRITE */
#define _MSC_IEN_WRITE_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_IEN */
#define MSC_IEN_WRITE_DEFAULT (_MSC_IEN_WRITE_DEFAULT << 1) /**< Shifted mode DEFAULT for MSC_IEN */
#define MSC_IEN_CHOF (0x1UL << 2) /**< CHOF Interrupt Enable */
#define _MSC_IEN_CHOF_SHIFT 2 /**< Shift value for MSC_CHOF */
#define _MSC_IEN_CHOF_MASK 0x4UL /**< Bit mask for MSC_CHOF */
#define _MSC_IEN_CHOF_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_IEN */
#define MSC_IEN_CHOF_DEFAULT (_MSC_IEN_CHOF_DEFAULT << 2) /**< Shifted mode DEFAULT for MSC_IEN */
#define MSC_IEN_CMOF (0x1UL << 3) /**< CMOF Interrupt Enable */
#define _MSC_IEN_CMOF_SHIFT 3 /**< Shift value for MSC_CMOF */
#define _MSC_IEN_CMOF_MASK 0x8UL /**< Bit mask for MSC_CMOF */
#define _MSC_IEN_CMOF_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_IEN */
#define MSC_IEN_CMOF_DEFAULT (_MSC_IEN_CMOF_DEFAULT << 3) /**< Shifted mode DEFAULT for MSC_IEN */
#define MSC_IEN_PWRUPF (0x1UL << 4) /**< PWRUPF Interrupt Enable */
#define _MSC_IEN_PWRUPF_SHIFT 4 /**< Shift value for MSC_PWRUPF */
#define _MSC_IEN_PWRUPF_MASK 0x10UL /**< Bit mask for MSC_PWRUPF */
#define _MSC_IEN_PWRUPF_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_IEN */
#define MSC_IEN_PWRUPF_DEFAULT (_MSC_IEN_PWRUPF_DEFAULT << 4) /**< Shifted mode DEFAULT for MSC_IEN */
#define MSC_IEN_ICACHERR (0x1UL << 5) /**< ICACHERR Interrupt Enable */
#define _MSC_IEN_ICACHERR_SHIFT 5 /**< Shift value for MSC_ICACHERR */
#define _MSC_IEN_ICACHERR_MASK 0x20UL /**< Bit mask for MSC_ICACHERR */
#define _MSC_IEN_ICACHERR_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_IEN */
#define MSC_IEN_ICACHERR_DEFAULT (_MSC_IEN_ICACHERR_DEFAULT << 5) /**< Shifted mode DEFAULT for MSC_IEN */
/* Bit fields for MSC LOCK */
#define _MSC_LOCK_RESETVALUE 0x00000000UL /**< Default value for MSC_LOCK */
#define _MSC_LOCK_MASK 0x0000FFFFUL /**< Mask for MSC_LOCK */
#define _MSC_LOCK_LOCKKEY_SHIFT 0 /**< Shift value for MSC_LOCKKEY */
#define _MSC_LOCK_LOCKKEY_MASK 0xFFFFUL /**< Bit mask for MSC_LOCKKEY */
#define _MSC_LOCK_LOCKKEY_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_LOCK */
#define _MSC_LOCK_LOCKKEY_LOCK 0x00000000UL /**< Mode LOCK for MSC_LOCK */
#define _MSC_LOCK_LOCKKEY_UNLOCKED 0x00000000UL /**< Mode UNLOCKED for MSC_LOCK */
#define _MSC_LOCK_LOCKKEY_LOCKED 0x00000001UL /**< Mode LOCKED for MSC_LOCK */
#define _MSC_LOCK_LOCKKEY_UNLOCK 0x00001B71UL /**< Mode UNLOCK for MSC_LOCK */
#define MSC_LOCK_LOCKKEY_DEFAULT (_MSC_LOCK_LOCKKEY_DEFAULT << 0) /**< Shifted mode DEFAULT for MSC_LOCK */
#define MSC_LOCK_LOCKKEY_LOCK (_MSC_LOCK_LOCKKEY_LOCK << 0) /**< Shifted mode LOCK for MSC_LOCK */
#define MSC_LOCK_LOCKKEY_UNLOCKED (_MSC_LOCK_LOCKKEY_UNLOCKED << 0) /**< Shifted mode UNLOCKED for MSC_LOCK */
#define MSC_LOCK_LOCKKEY_LOCKED (_MSC_LOCK_LOCKKEY_LOCKED << 0) /**< Shifted mode LOCKED for MSC_LOCK */
#define MSC_LOCK_LOCKKEY_UNLOCK (_MSC_LOCK_LOCKKEY_UNLOCK << 0) /**< Shifted mode UNLOCK for MSC_LOCK */
/* Bit fields for MSC CACHECMD */
#define _MSC_CACHECMD_RESETVALUE 0x00000000UL /**< Default value for MSC_CACHECMD */
#define _MSC_CACHECMD_MASK 0x00000007UL /**< Mask for MSC_CACHECMD */
#define MSC_CACHECMD_INVCACHE (0x1UL << 0) /**< Invalidate Instruction Cache */
#define _MSC_CACHECMD_INVCACHE_SHIFT 0 /**< Shift value for MSC_INVCACHE */
#define _MSC_CACHECMD_INVCACHE_MASK 0x1UL /**< Bit mask for MSC_INVCACHE */
#define _MSC_CACHECMD_INVCACHE_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_CACHECMD */
#define MSC_CACHECMD_INVCACHE_DEFAULT (_MSC_CACHECMD_INVCACHE_DEFAULT << 0) /**< Shifted mode DEFAULT for MSC_CACHECMD */
#define MSC_CACHECMD_STARTPC (0x1UL << 1) /**< Start Performance Counters */
#define _MSC_CACHECMD_STARTPC_SHIFT 1 /**< Shift value for MSC_STARTPC */
#define _MSC_CACHECMD_STARTPC_MASK 0x2UL /**< Bit mask for MSC_STARTPC */
#define _MSC_CACHECMD_STARTPC_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_CACHECMD */
#define MSC_CACHECMD_STARTPC_DEFAULT (_MSC_CACHECMD_STARTPC_DEFAULT << 1) /**< Shifted mode DEFAULT for MSC_CACHECMD */
#define MSC_CACHECMD_STOPPC (0x1UL << 2) /**< Stop Performance Counters */
#define _MSC_CACHECMD_STOPPC_SHIFT 2 /**< Shift value for MSC_STOPPC */
#define _MSC_CACHECMD_STOPPC_MASK 0x4UL /**< Bit mask for MSC_STOPPC */
#define _MSC_CACHECMD_STOPPC_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_CACHECMD */
#define MSC_CACHECMD_STOPPC_DEFAULT (_MSC_CACHECMD_STOPPC_DEFAULT << 2) /**< Shifted mode DEFAULT for MSC_CACHECMD */
/* Bit fields for MSC CACHEHITS */
#define _MSC_CACHEHITS_RESETVALUE 0x00000000UL /**< Default value for MSC_CACHEHITS */
#define _MSC_CACHEHITS_MASK 0x000FFFFFUL /**< Mask for MSC_CACHEHITS */
#define _MSC_CACHEHITS_CACHEHITS_SHIFT 0 /**< Shift value for MSC_CACHEHITS */
#define _MSC_CACHEHITS_CACHEHITS_MASK 0xFFFFFUL /**< Bit mask for MSC_CACHEHITS */
#define _MSC_CACHEHITS_CACHEHITS_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_CACHEHITS */
#define MSC_CACHEHITS_CACHEHITS_DEFAULT (_MSC_CACHEHITS_CACHEHITS_DEFAULT << 0) /**< Shifted mode DEFAULT for MSC_CACHEHITS */
/* Bit fields for MSC CACHEMISSES */
#define _MSC_CACHEMISSES_RESETVALUE 0x00000000UL /**< Default value for MSC_CACHEMISSES */
#define _MSC_CACHEMISSES_MASK 0x000FFFFFUL /**< Mask for MSC_CACHEMISSES */
#define _MSC_CACHEMISSES_CACHEMISSES_SHIFT 0 /**< Shift value for MSC_CACHEMISSES */
#define _MSC_CACHEMISSES_CACHEMISSES_MASK 0xFFFFFUL /**< Bit mask for MSC_CACHEMISSES */
#define _MSC_CACHEMISSES_CACHEMISSES_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_CACHEMISSES */
#define MSC_CACHEMISSES_CACHEMISSES_DEFAULT (_MSC_CACHEMISSES_CACHEMISSES_DEFAULT << 0) /**< Shifted mode DEFAULT for MSC_CACHEMISSES */
/* Bit fields for MSC MASSLOCK */
#define _MSC_MASSLOCK_RESETVALUE 0x00000001UL /**< Default value for MSC_MASSLOCK */
#define _MSC_MASSLOCK_MASK 0x0000FFFFUL /**< Mask for MSC_MASSLOCK */
#define _MSC_MASSLOCK_LOCKKEY_SHIFT 0 /**< Shift value for MSC_LOCKKEY */
#define _MSC_MASSLOCK_LOCKKEY_MASK 0xFFFFUL /**< Bit mask for MSC_LOCKKEY */
#define _MSC_MASSLOCK_LOCKKEY_LOCK 0x00000000UL /**< Mode LOCK for MSC_MASSLOCK */
#define _MSC_MASSLOCK_LOCKKEY_UNLOCKED 0x00000000UL /**< Mode UNLOCKED for MSC_MASSLOCK */
#define _MSC_MASSLOCK_LOCKKEY_DEFAULT 0x00000001UL /**< Mode DEFAULT for MSC_MASSLOCK */
#define _MSC_MASSLOCK_LOCKKEY_LOCKED 0x00000001UL /**< Mode LOCKED for MSC_MASSLOCK */
#define _MSC_MASSLOCK_LOCKKEY_UNLOCK 0x0000631AUL /**< Mode UNLOCK for MSC_MASSLOCK */
#define MSC_MASSLOCK_LOCKKEY_LOCK (_MSC_MASSLOCK_LOCKKEY_LOCK << 0) /**< Shifted mode LOCK for MSC_MASSLOCK */
#define MSC_MASSLOCK_LOCKKEY_UNLOCKED (_MSC_MASSLOCK_LOCKKEY_UNLOCKED << 0) /**< Shifted mode UNLOCKED for MSC_MASSLOCK */
#define MSC_MASSLOCK_LOCKKEY_DEFAULT (_MSC_MASSLOCK_LOCKKEY_DEFAULT << 0) /**< Shifted mode DEFAULT for MSC_MASSLOCK */
#define MSC_MASSLOCK_LOCKKEY_LOCKED (_MSC_MASSLOCK_LOCKKEY_LOCKED << 0) /**< Shifted mode LOCKED for MSC_MASSLOCK */
#define MSC_MASSLOCK_LOCKKEY_UNLOCK (_MSC_MASSLOCK_LOCKKEY_UNLOCK << 0) /**< Shifted mode UNLOCK for MSC_MASSLOCK */
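/* Illustrative usage (not part of this header): write the UNLOCK key before issuing
 * mass-erase commands and restore the lock afterwards, e.g.
 *   MSC->MASSLOCK = MSC_MASSLOCK_LOCKKEY_UNLOCK;
 *   ...
 *   MSC->MASSLOCK = MSC_MASSLOCK_LOCKKEY_LOCK;
 */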
/* Bit fields for MSC STARTUP */
#define _MSC_STARTUP_RESETVALUE 0x1300104DUL /**< Default value for MSC_STARTUP */
#define _MSC_STARTUP_MASK 0x773FF3FFUL /**< Mask for MSC_STARTUP */
#define _MSC_STARTUP_STDLY0_SHIFT 0 /**< Shift value for MSC_STDLY0 */
#define _MSC_STARTUP_STDLY0_MASK 0x3FFUL /**< Bit mask for MSC_STDLY0 */
#define _MSC_STARTUP_STDLY0_DEFAULT 0x0000004DUL /**< Mode DEFAULT for MSC_STARTUP */
#define MSC_STARTUP_STDLY0_DEFAULT (_MSC_STARTUP_STDLY0_DEFAULT << 0) /**< Shifted mode DEFAULT for MSC_STARTUP */
#define _MSC_STARTUP_STDLY1_SHIFT 12 /**< Shift value for MSC_STDLY1 */
#define _MSC_STARTUP_STDLY1_MASK 0x3FF000UL /**< Bit mask for MSC_STDLY1 */
#define _MSC_STARTUP_STDLY1_DEFAULT 0x00000001UL /**< Mode DEFAULT for MSC_STARTUP */
#define MSC_STARTUP_STDLY1_DEFAULT (_MSC_STARTUP_STDLY1_DEFAULT << 12) /**< Shifted mode DEFAULT for MSC_STARTUP */
#define MSC_STARTUP_ASTWAIT (0x1UL << 24) /**< Active Startup Wait */
#define _MSC_STARTUP_ASTWAIT_SHIFT 24 /**< Shift value for MSC_ASTWAIT */
#define _MSC_STARTUP_ASTWAIT_MASK 0x1000000UL /**< Bit mask for MSC_ASTWAIT */
#define _MSC_STARTUP_ASTWAIT_DEFAULT 0x00000001UL /**< Mode DEFAULT for MSC_STARTUP */
#define MSC_STARTUP_ASTWAIT_DEFAULT (_MSC_STARTUP_ASTWAIT_DEFAULT << 24) /**< Shifted mode DEFAULT for MSC_STARTUP */
#define MSC_STARTUP_STWSEN (0x1UL << 25) /**< Startup Waitstates Enable */
#define _MSC_STARTUP_STWSEN_SHIFT 25 /**< Shift value for MSC_STWSEN */
#define _MSC_STARTUP_STWSEN_MASK 0x2000000UL /**< Bit mask for MSC_STWSEN */
#define _MSC_STARTUP_STWSEN_DEFAULT 0x00000001UL /**< Mode DEFAULT for MSC_STARTUP */
#define MSC_STARTUP_STWSEN_DEFAULT (_MSC_STARTUP_STWSEN_DEFAULT << 25) /**< Shifted mode DEFAULT for MSC_STARTUP */
#define MSC_STARTUP_STWSAEN (0x1UL << 26) /**< Startup Waitstates Always Enable */
#define _MSC_STARTUP_STWSAEN_SHIFT 26 /**< Shift value for MSC_STWSAEN */
#define _MSC_STARTUP_STWSAEN_MASK 0x4000000UL /**< Bit mask for MSC_STWSAEN */
#define _MSC_STARTUP_STWSAEN_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_STARTUP */
#define MSC_STARTUP_STWSAEN_DEFAULT (_MSC_STARTUP_STWSAEN_DEFAULT << 26) /**< Shifted mode DEFAULT for MSC_STARTUP */
#define _MSC_STARTUP_STWS_SHIFT 28 /**< Shift value for MSC_STWS */
#define _MSC_STARTUP_STWS_MASK 0x70000000UL /**< Bit mask for MSC_STWS */
#define _MSC_STARTUP_STWS_DEFAULT 0x00000001UL /**< Mode DEFAULT for MSC_STARTUP */
#define MSC_STARTUP_STWS_DEFAULT (_MSC_STARTUP_STWS_DEFAULT << 28) /**< Shifted mode DEFAULT for MSC_STARTUP */
/* Bit fields for MSC CMD */
#define _MSC_CMD_RESETVALUE 0x00000000UL /**< Default value for MSC_CMD */
#define _MSC_CMD_MASK 0x00000001UL /**< Mask for MSC_CMD */
#define MSC_CMD_PWRUP (0x1UL << 0) /**< Flash Power Up Command */
#define _MSC_CMD_PWRUP_SHIFT 0 /**< Shift value for MSC_PWRUP */
#define _MSC_CMD_PWRUP_MASK 0x1UL /**< Bit mask for MSC_PWRUP */
#define _MSC_CMD_PWRUP_DEFAULT 0x00000000UL /**< Mode DEFAULT for MSC_CMD */
#define MSC_CMD_PWRUP_DEFAULT (_MSC_CMD_PWRUP_DEFAULT << 0) /**< Shifted mode DEFAULT for MSC_CMD */
/** @} */
/** @} End of group EFM32PG1B_MSC */
/** @} End of group Parts */
#ifdef __cplusplus
}
#endif
| lgpl-2.1 |
lazytech-org/RIOT | tests/pkg_cn-cbor/main.c | 5954 | /*
* Copyright (C) Lorenz Hüther, Mathias Detmers
*
* This file is subject to the terms and conditions of the GNU Lesser
* General Public License v2.1. See the file LICENSE in the top level
* directory for more details.
*/
/**
* @ingroup tests
* @{
*
* @file
* @brief Unit tests for pkg cn-cbor.
*
* @author Lorenz Hüther <[email protected]>
 * @author Mathias Detmers <[email protected]>
*/
#define EBUF_SIZE 32
#define NUM_BLOCKS 7
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "assert.h"
#include "cn-cbor/cn-cbor.h"
#include "embUnit.h"
#include "fmt.h"
#include "memarray.h"
typedef struct {
char *hex;
cn_cbor_error err;
} cbor_failure;
static size_t test, offs;
static unsigned char ebuf[EBUF_SIZE];
static cn_cbor_errback errb;
/* Block allocator */
static cn_cbor block_storage_data[NUM_BLOCKS];
static memarray_t storage;
/* calloc/free functions */
static void *cbor_calloc(size_t count, size_t size, void *memblock);
static void cbor_free(void *ptr, void *memblock);
/* CN_CBOR block allocator context struct*/
static cn_cbor_context ct =
{
.calloc_func = cbor_calloc,
.free_func = cbor_free,
.context = &storage,
};
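/* Every cn_cbor_decode()/cn_cbor_free() call below receives &ct, so all parser
 * nodes are drawn from the fixed memarray pool above instead of the heap;
 * NUM_BLOCKS therefore bounds how many CBOR nodes a single test vector may need. */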
static void *cbor_calloc(size_t count, size_t size, void *memblock)
{
(void)count;
assert(count == 1); /* Count is always 1 with cn-cbor */
void *block = memarray_alloc(memblock);
if (block) {
memset(block, 0, size);
}
return block;
}
static void cbor_free(void *ptr, void *memblock)
{
memarray_free(memblock, ptr);
}
static void setup_cn_cbor(void)
{
test = 0;
offs = 0;
memset(ebuf, '\0', EBUF_SIZE);
memarray_init(&storage, block_storage_data, sizeof(cn_cbor), NUM_BLOCKS);
}
static void test_parse(void)
{
char *tests[] = {
"00", // 0
"01", // 1
"17", // 23
"1818", // 24
"190100", // 256
"1a00010000", // 65536
#ifndef CBOR_NO_LL
"1b0000000100000000", // 4294967296
#endif /* CBOR_NO_LL */
"20", // -1
"37", // -24
"3818", // -25
"390100", // -257
"3a00010000", // -65537
#ifndef CBOR_NO_LL
"3b0000000100000000", // -4294967297
#endif /* CBOR_LL */
"4161", // h"a"
"6161", // "a"
"80", // []
"8100", // [0]
"820102", // [1,2]
"818100", // [[0]]
"a1616100", // {"a":0}
"d8184100", // tag
"f4", // false
"f5", // true
"f6", // null
"f7", // undefined
"f8ff", // simple(255)
#ifndef CBOR_NO_FLOAT
"f93c00", // 1.0
"f9bc00", // -1.0
"f903ff", // 6.097555160522461e-05
"f90400", // 6.103515625e-05
"f907ff", // 0.00012201070785522461
"f90800", // 0.0001220703125
"fa47800000", // 65536.0
"fb3ff199999999999a", // 1.1
"f97e00", // NaN
#endif /* CBOR_NO_FLOAT */
"5f42010243030405ff", // (_ h'0102', h'030405')
"7f61616161ff", // (_ "a", "a")
"9fff", // [_ ]
"9f9f9fffffff", // [_ [_ [_ ]]]
"9f009f00ff00ff", // [_ 0, [_ 0], 0]
"bf61610161629f0203ffff", // {_ "a": 1, "b": [_ 2, 3]}
};
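    /* Round-trip each vector: decode the hex bytes, re-encode into ebuf, then compare byte-for-byte. */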
for (test = 0; test < sizeof(tests) / sizeof(char*); test++) {
unsigned char buf[64] = {0};
TEST_ASSERT((strlen(tests[test])/2) <= sizeof(buf));
size_t len = fmt_hex_bytes(buf, tests[test]);
TEST_ASSERT(len);
errb.err = CN_CBOR_NO_ERROR;
cn_cbor *cbor = cn_cbor_decode(buf, len, &ct, &errb);
TEST_ASSERT_EQUAL_INT(errb.err, CN_CBOR_NO_ERROR);
TEST_ASSERT_NOT_NULL(cbor);
cn_cbor_encoder_write(ebuf, 0, sizeof(ebuf), cbor);
for (offs = 0; offs < len; offs++) {
TEST_ASSERT_EQUAL_INT(buf[offs], ebuf[offs]);
}
cn_cbor_free(cbor, &ct);
}
}
static void test_errors(void)
{
cbor_failure tests[] = {
{"81", CN_CBOR_ERR_OUT_OF_DATA},
{"0000", CN_CBOR_ERR_NOT_ALL_DATA_CONSUMED},
{"bf00ff", CN_CBOR_ERR_ODD_SIZE_INDEF_MAP},
{"ff", CN_CBOR_ERR_BREAK_OUTSIDE_INDEF},
{"1f", CN_CBOR_ERR_MT_UNDEF_FOR_INDEF},
{"1c", CN_CBOR_ERR_RESERVED_AI},
{"7f4100", CN_CBOR_ERR_WRONG_NESTING_IN_INDEF_STRING},
};
cn_cbor inv = {CN_CBOR_INVALID, 0, {0}, 0, NULL, NULL, NULL, NULL};
TEST_ASSERT_EQUAL_INT(-1, cn_cbor_encoder_write(ebuf, 0, sizeof(ebuf),
&inv));
for (offs = 0; offs < sizeof(tests) / sizeof(cbor_failure); offs++) {
unsigned char buf[32] = {0};
TEST_ASSERT((strlen(tests[offs].hex)/2) <= sizeof(buf));
size_t len = fmt_hex_bytes(buf, tests[offs].hex);
TEST_ASSERT(len);
cn_cbor *cbor = cn_cbor_decode(buf, len, &ct, &errb);
TEST_ASSERT_NULL(cbor);
TEST_ASSERT_EQUAL_INT(errb.err, tests[offs].err);
cn_cbor_free(cbor, &ct);
}
}
TestRef test_cn_cbor(void)
{
EMB_UNIT_TESTFIXTURES(fixtures) {
new_TestFixture(test_parse),
new_TestFixture(test_errors)
};
EMB_UNIT_TESTCALLER(tests_cn_cbor, setup_cn_cbor, NULL, fixtures);
return (TestRef) & tests_cn_cbor;
}
int main(void)
{
TESTS_START();
TESTS_RUN(test_cn_cbor());
TESTS_END();
}
| lgpl-2.1 |
shabanovd/exist | extensions/svn/src/org/exist/versioning/svn/xquery/SVNCleanup.java | 3112 | /*
* eXist Open Source Native XML Database
* Copyright (C) 2010 The eXist Project
* http://exist-db.org
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
* $Id$
*/
package org.exist.versioning.svn.xquery;
import org.exist.dom.QName;
import org.exist.util.io.Resource;
import org.exist.versioning.svn.internal.wc.DefaultSVNOptions;
import org.exist.versioning.svn.wc.SVNClientManager;
import org.exist.versioning.svn.wc.SVNWCUtil;
import org.exist.xquery.Cardinality;
import org.exist.xquery.FunctionSignature;
import org.exist.xquery.XPathException;
import org.exist.xquery.XQueryContext;
import org.exist.xquery.value.FunctionReturnSequenceType;
import org.exist.xquery.value.Sequence;
import org.exist.xquery.value.SequenceType;
import org.exist.xquery.value.Type;
import org.tmatesoft.svn.core.SVNException;
/**
* Recursively cleans up the working copy, removing locks and resuming unfinished operations.
*
* @author <a href="mailto:[email protected]">Amir Akhmedov</a>
* @author <a href="mailto:[email protected]">Dmitriy Shabanov</a>
*/
public class SVNCleanup extends AbstractSVNFunction {
public final static FunctionSignature signature =
new FunctionSignature(
new QName("clean-up", SVNModule.NAMESPACE_URI, SVNModule.PREFIX),
"Recursively cleans up the working copy, removing locks and resuming unfinished operations.",
new SequenceType[] {
DB_PATH
},
new FunctionReturnSequenceType(Type.EMPTY, Cardinality.ZERO, ""));
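	/*
	 * Illustrative XQuery call (the actual prefix comes from SVNModule.PREFIX):
	 *   prefix:clean-up("/db/my-working-copy")
	 * returns the empty sequence on success.
	 */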
	/**
	 * @param context the XQuery context this function is evaluated in
	 */
public SVNCleanup(XQueryContext context) {
super(context, signature);
}
/**
* Process the function. All arguments are passed in the array args. The number of
* arguments, their type and cardinality have already been checked to match
* the function signature.
*
* @param args
* @param contextSequence
*/
public Sequence eval(Sequence[] args, Sequence contextSequence) throws XPathException {
String uri = args[0].getStringValue();
DefaultSVNOptions options = SVNWCUtil.createDefaultOptions(true);
SVNClientManager manager = SVNClientManager.newInstance(options, "", "");
try {
manager.getWCClient().doCleanup(new Resource(uri));
} catch (SVNException e) {
throw new XPathException(this, e.getMessage(), e);
}
return Sequence.EMPTY_SEQUENCE;
}
}
| lgpl-2.1 |
bob-the-hamster/commandergenius | project/jni/ffmpeg/libavcodec/x86/rv34dsp_init.c | 1800 | /*
* RV30/40 MMX/SSE2 optimizations
* Copyright (C) 2012 Christophe Gisquet <[email protected]>
*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/rv34dsp.h"
void ff_rv34_idct_dc_mmxext(DCTELEM *block);
void ff_rv34_idct_dc_noround_mmxext(DCTELEM *block);
void ff_rv34_idct_dc_add_mmx(uint8_t *dst, ptrdiff_t stride, int dc);
void ff_rv34_idct_dc_add_sse4(uint8_t *dst, ptrdiff_t stride, int dc);
void ff_rv34_idct_add_mmxext(uint8_t *dst, ptrdiff_t stride, DCTELEM *block);
av_cold void ff_rv34dsp_init_x86(RV34DSPContext* c, DSPContext *dsp)
{
int mm_flags = av_get_cpu_flags();
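    /* Checks run from generic to specific; the SSE4 branch below deliberately
     * overrides the MMX rv34_idct_dc_add pointer when available. */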
if (EXTERNAL_MMX(mm_flags))
c->rv34_idct_dc_add = ff_rv34_idct_dc_add_mmx;
if (EXTERNAL_MMXEXT(mm_flags)) {
c->rv34_inv_transform_dc = ff_rv34_idct_dc_noround_mmxext;
c->rv34_idct_add = ff_rv34_idct_add_mmxext;
}
if (EXTERNAL_SSE4(mm_flags))
c->rv34_idct_dc_add = ff_rv34_idct_dc_add_sse4;
}
| lgpl-2.1 |
shakirbsm/dealii | tests/manifold/spherical_manifold_04.cc | 2888 | // ---------------------------------------------------------------------
//
// Copyright (C) 2016 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE at
// the top level of the deal.II distribution.
//
// ---------------------------------------------------------------------
// test Volume of a Ball
#include "../tests.h"
#include <deal.II/base/logstream.h>
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_tools.h>
#include <deal.II/fe/mapping_q.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/numerics/vector_tools.h>
#include <deal.II/numerics/data_out.h>
#include <deal.II/grid/grid_out.h>
#include <deal.II/grid/grid_in.h>
#include <deal.II/grid/grid_tools.h>
#include <deal.II/grid/manifold_lib.h>
#include <deal.II/matrix_free/matrix_free.h>
#include <deal.II/matrix_free/fe_evaluation.h>
#include <iostream>
#include <fstream>
#include <sstream>
using namespace dealii;
void test (const double R)
{
const unsigned int dim = 3;
const unsigned int global_mesh_refinement_steps = 4;
const unsigned int fe_degree = 2;
const unsigned int n_q_points_1d = 3;
// derived
Point<dim> center;
for (unsigned int d=0; d < dim; d++)
center[d] = d;
Triangulation<dim> triangulation;
DoFHandler<dim> dof_handler(triangulation);
FE_Q<dim> fe(fe_degree);
QGauss<dim> quadrature_formula(n_q_points_1d);
GridGenerator::hyper_ball (triangulation,
center,
R);
triangulation.set_all_manifold_ids_on_boundary(0);
static SphericalManifold<dim> surface_description(center);
triangulation.set_manifold (0, surface_description);
triangulation.refine_global(global_mesh_refinement_steps);
dof_handler.distribute_dofs (fe);
MappingQ<dim> mapping(fe_degree);
FEValues<dim> fe_values (mapping, fe, quadrature_formula,
update_JxW_values);
DoFHandler<dim>::active_cell_iterator
cell = dof_handler.begin_active (),
endc = dof_handler.end ();
const unsigned int n_q_points = quadrature_formula.size();
double volume = 0.;
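  // Quadrature approximation of the volume: V ~= sum over cells and points q of JxW(q),
  // i.e. the Jacobian determinant at x_q times the quadrature weight w_q.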
for (; cell!=endc; ++cell)
{
fe_values.reinit (cell);
for (unsigned int q=0; q<n_q_points; ++q)
volume += fe_values.JxW (q);
}
deallog << "Volume: " << volume << std::endl
<< "Exact volume: " << 4.0*numbers::PI *std::pow(R,3.0)/3. << std::endl;
dof_handler.clear ();
}
int main (int argc, char *argv[])
{
initlog();
test(15);
return 0;
}
| lgpl-2.1 |
nhminus/jaudiotagger-androidpatch | srctest/org/jaudiotagger/issues/Issue383Test.java | 2217 | package org.jaudiotagger.issues;
import org.jaudiotagger.AbstractTestCase;
import org.jaudiotagger.audio.AudioFile;
import org.jaudiotagger.audio.AudioFileIO;
import org.jaudiotagger.tag.FieldKey;
import java.io.File;
/**
 * Tests that track length is reported correctly (issue 383)
*/
public class Issue383Test extends AbstractTestCase
{
/**
     * This song is incorrectly shown as 6:08 when it should be 3:34, but other apps (Media Monkey, iTunes)
     * also report the incorrect length; the audio probably does continue until 6:08, just as very quiet sound.
*
* @throws Exception
*/
public void testIssueIncorrectTrackLength() throws Exception
{
Exception caught = null;
try
{
File orig = new File("testdata", "test106.mp3");
if (!orig.isFile())
{
System.err.println("Unable to test file - not available");
return;
}
File testFile = AbstractTestCase.copyAudioToTmp("test106.mp3");
AudioFile af = AudioFileIO.read(testFile);
            assertEquals(368, af.getAudioHeader().getTrackLength());
}
catch(Exception e)
{
caught=e;
}
assertNull(caught);
}
/**
     * This song is incorrectly shown as 01:12:52, but the correct length is 2:24. Other applications,
     * such as Media Monkey, show the correct value.
*
* @throws Exception
*/
public void testIssue() throws Exception
{
Exception caught = null;
try
{
File orig = new File("testdata", "test107.mp3");
if (!orig.isFile())
{
System.err.println("Unable to test file - not available");
return;
}
File testFile = AbstractTestCase.copyAudioToTmp("test107.mp3");
AudioFile af = AudioFileIO.read(testFile);
            assertEquals("01", af.getTag().getFirst(FieldKey.TRACK));
            assertEquals(4372, af.getAudioHeader().getTrackLength());
}
catch(Exception e)
{
caught=e;
}
assertNull(caught);
}
} | lgpl-2.1 |
paulklinkenberg/Lucee4 | lucee-java/lucee-core/src/lucee/runtime/functions/query/QueryDeleteRow.java | 1691 | /**
*
* Copyright (c) 2014, the Railo Company Ltd. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library. If not, see <http://www.gnu.org/licenses/>.
*
**/
package lucee.runtime.functions.query;
import lucee.runtime.PageContext;
import lucee.runtime.exp.PageException;
import lucee.runtime.functions.BIF;
import lucee.runtime.op.Caster;
import lucee.runtime.type.Query;
public final class QueryDeleteRow extends BIF {
private static final long serialVersionUID = 7610413135885802876L;
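	/*
	 * Backs the CFML built-in QueryDeleteRow(query [, rowNumber]); when the row
	 * argument is omitted, or arrives as the -9999 named-argument sentinel,
	 * the last row of the query is removed.
	 */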
public static boolean call(PageContext pc, Query query) throws PageException {
return call(pc,query,query.getRowCount());
}
public static boolean call(PageContext pc, Query query, double row) throws PageException {
if(row==-9999) row=query.getRowCount();// used for named arguments
query.removeRow((int)row);
return true;
}
@Override
public Object invoke(PageContext pc, Object[] args) throws PageException {
if(args.length==1)return call(pc,Caster.toQuery(args[0]));
return call(pc,Caster.toQuery(args[0]),Caster.toDoubleValue(args[1]));
}
} | lgpl-2.1 |
ivassile/wildfly-core | server/src/main/java/org/jboss/as/server/deployment/DeploymentFullReplaceHandler.java | 12747 | /*
* JBoss, Home of Professional Open Source
* Copyright 2011 Red Hat Inc. and/or its affiliates and other contributors
* as indicated by the @authors tag. All rights reserved.
* See the copyright.txt in the distribution for a
* full listing of individual contributors.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License, v. 2.1.
* This program is distributed in the hope that it will be useful, but WITHOUT A
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
* PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
* You should have received a copy of the GNU Lesser General Public License,
* v.2.1 along with this distribution; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
*/
package org.jboss.as.server.deployment;
import static org.jboss.as.controller.descriptions.ModelDescriptionConstants.CONTENT;
import static org.jboss.as.controller.descriptions.ModelDescriptionConstants.DEPLOYMENT;
import static org.jboss.as.controller.descriptions.ModelDescriptionConstants.FULL_REPLACE_DEPLOYMENT;
import static org.jboss.as.server.controller.resources.DeploymentAttributes.CONTENT_ARCHIVE;
import static org.jboss.as.server.controller.resources.DeploymentAttributes.CONTENT_HASH;
import static org.jboss.as.server.controller.resources.DeploymentAttributes.CONTENT_PATH;
import static org.jboss.as.server.controller.resources.DeploymentAttributes.CONTENT_RELATIVE_TO;
import static org.jboss.as.server.controller.resources.DeploymentAttributes.ENABLED;
import static org.jboss.as.server.controller.resources.DeploymentAttributes.OWNER;
import static org.jboss.as.server.controller.resources.DeploymentAttributes.PERSISTENT;
import static org.jboss.as.server.controller.resources.DeploymentAttributes.RUNTIME_NAME;
import static org.jboss.as.server.deployment.DeploymentHandlerUtils.addFlushHandler;
import static org.jboss.as.server.deployment.DeploymentHandlerUtils.asString;
import static org.jboss.as.server.deployment.DeploymentHandlerUtils.createFailureException;
import static org.jboss.as.server.deployment.DeploymentHandlerUtils.getInputStream;
import static org.jboss.as.server.deployment.DeploymentHandlerUtils.hasValidContentAdditionParameterDefined;
import java.io.IOException;
import java.io.InputStream;
import java.util.Arrays;
import org.jboss.as.controller.AttributeDefinition;
import org.jboss.as.controller.OperationContext;
import org.jboss.as.controller.OperationContext.ResultAction;
import org.jboss.as.controller.OperationFailedException;
import org.jboss.as.controller.OperationStepHandler;
import org.jboss.as.controller.PathAddress;
import org.jboss.as.controller.PathElement;
import org.jboss.as.controller.registry.Resource;
import org.jboss.as.protocol.StreamUtils;
import org.jboss.as.repository.ContentReference;
import org.jboss.as.repository.ContentRepository;
import org.jboss.as.server.controller.resources.DeploymentAttributes;
import org.jboss.as.server.logging.ServerLogger;
import org.jboss.dmr.ModelNode;
/**
* Handles replacement in the runtime of one deployment by another.
*
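 * <p>
 * Illustrative operation shape (inferred from the attribute definitions
 * referenced below; the hash value is a placeholder):
 * <pre>
 * /:full-replace-deployment(name=my-app.war, content=[{hash=...}], enabled=true)
 * </pre>
 *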
* @author Brian Stansberry (c) 2011 Red Hat Inc.
*/
public class DeploymentFullReplaceHandler implements OperationStepHandler {
public static final String OPERATION_NAME = FULL_REPLACE_DEPLOYMENT;
protected final ContentRepository contentRepository;
private final DeploymentTransformation deploymentTransformation;
protected DeploymentFullReplaceHandler(final ContentRepository contentRepository) {
assert contentRepository != null : "Null contentRepository";
this.contentRepository = contentRepository;
this.deploymentTransformation = new DeploymentTransformation();
}
public static DeploymentFullReplaceHandler create(final ContentRepository contentRepository) {
return new DeploymentFullReplaceHandler(contentRepository);
}
    @Override
    public void execute(OperationContext context, ModelNode operation) throws OperationFailedException {
// Validate op. Store any corrected values back to the op before manipulating further
ModelNode correctedOperation = operation.clone();
for (AttributeDefinition def : DeploymentAttributes.FULL_REPLACE_DEPLOYMENT_ATTRIBUTES.values()) {
def.validateAndSet(operation, correctedOperation);
}
// Pull data from the op
final String name = DeploymentAttributes.NAME.resolveModelAttribute(context, correctedOperation).asString();
final PathElement deploymentPath = PathElement.pathElement(DEPLOYMENT, name);
final String runtimeName = correctedOperation.hasDefined(RUNTIME_NAME.getName()) ? correctedOperation.get(RUNTIME_NAME.getName()).asString() : name;
// clone the content param, so we can modify it to our own content
ModelNode content = correctedOperation.require(CONTENT).clone();
// Throw a specific exception if the replaced deployment doesn't already exist
// BES 2013/10/30 -- this is pointless; the readResourceForUpdate call will throw
// an exception with an equally informative message if the deployment doesn't exist
// final Resource root = context.readResource(PathAddress.EMPTY_ADDRESS);
// boolean exists = root.hasChild(deploymentPath);
// if (!exists) {
// throw ServerLogger.ROOT_LOGGER.noSuchDeployment(name);
// }
        // verify that the resource exists before removing it
context.readResourceForUpdate(PathAddress.pathAddress(deploymentPath));
// WFCORE-495 remove and call context.addResource() as below to add new resource with updated PERSISTENT value
final ModelNode deploymentModel = context.removeResource(PathAddress.pathAddress(deploymentPath)).getModel();
final ModelNode originalDeployment = deploymentModel.clone();
// Keep track of runtime name of deployment we are replacing for use in Stage.RUNTIME
final String replacedRuntimeName = RUNTIME_NAME.resolveModelAttribute(context, deploymentModel).asString();
final PathAddress address = PathAddress.pathAddress(deploymentPath);
// Keep track of hash we are replacing so we can drop it from the content repo if all is well
ModelNode replacedContent = deploymentModel.get(CONTENT).get(0);
final byte[] replacedHash = replacedContent.hasDefined(CONTENT_HASH.getName())
? CONTENT_HASH.resolveModelAttribute(context, replacedContent).asBytes() : null;
// Set up the new content attribute
final byte[] newHash;
// TODO: JBAS-9020: for the moment overlays are not supported, so there is a single content item
final DeploymentHandlerUtil.ContentItem contentItem;
ModelNode contentItemNode = content.require(0);
if (contentItemNode.hasDefined(CONTENT_HASH.getName())) {
newHash = CONTENT_HASH.resolveModelAttribute(context, contentItemNode).asBytes();
ContentReference reference = ModelContentReference.fromModelAddress(address, newHash);
contentItem = addFromHash(reference);
} else if (hasValidContentAdditionParameterDefined(contentItemNode)) {
contentItem = addFromContentAdditionParameter(context, contentItemNode, name);
newHash = contentItem.getHash();
// Replace the content data
contentItemNode = new ModelNode();
contentItemNode.get(CONTENT_HASH.getName()).set(newHash);
content.clear();
content.add(contentItemNode);
} else {
contentItem = addUnmanaged(context, contentItemNode);
newHash = null;
}
// deploymentModel.get(NAME).set(name); // already there
deploymentModel.get(RUNTIME_NAME.getName()).set(runtimeName);
deploymentModel.get(CONTENT).set(content);
// The 'persistent' and 'owner' parameters are hidden internal API, so handle them specifically
// Persistent is hidden from CLI users so let's set this to true here if it is not defined
if (!operation.hasDefined(PERSISTENT.getName())) {
operation.get(PERSISTENT.getName()).set(true);
}
PERSISTENT.validateAndSet(operation, deploymentModel);
OWNER.validateAndSet(operation, deploymentModel);
// ENABLED stays as is if not present in operation
boolean wasDeployed = ENABLED.resolveModelAttribute(context, deploymentModel).asBoolean();
if (operation.hasDefined(ENABLED.getName())) {
ENABLED.validateAndSet(operation, deploymentModel);
}
// Do the runtime part if the deployment is enabled
if (ENABLED.resolveModelAttribute(context, deploymentModel).asBoolean()) {
DeploymentUtils.enableAttribute(deploymentModel);
} else if (wasDeployed) {
DeploymentUtils.disableAttribute(deploymentModel);
}
boolean persistent = PERSISTENT.resolveModelAttribute(context, operation).asBoolean();
final Resource resource = Resource.Factory.create(!persistent);
resource.writeModel(deploymentModel);
context.addResource(PathAddress.pathAddress(deploymentPath), resource);
if (ENABLED.resolveModelAttribute(context, deploymentModel).asBoolean()) {
DeploymentHandlerUtil.replace(context, originalDeployment, runtimeName, name, replacedRuntimeName, contentItem);
} else if (wasDeployed) {
DeploymentHandlerUtil.undeploy(context, operation, name, runtimeName);
}
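        // Content-repository bookkeeping is deferred until the operation
        // outcome is known: on commit the replaced content is dropped from
        // the repository, on rollback the newly added content is dropped
        // instead (see the ResultHandler registered below).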
addFlushHandler(context, contentRepository, new OperationContext.ResultHandler() {
@Override
public void handleResult(ResultAction resultAction, OperationContext context, ModelNode operation) {
if (resultAction == ResultAction.KEEP) {
if (replacedHash != null && (newHash == null || !Arrays.equals(replacedHash, newHash))) {
// The old content is no longer used; clean from repos
contentRepository.removeContent(ModelContentReference.fromModelAddress(address, replacedHash));
}
if (newHash != null) {
contentRepository.addContentReference(ModelContentReference.fromModelAddress(address, newHash));
}
} else if (newHash != null && (replacedHash == null || !Arrays.equals(replacedHash, newHash))) {
// Due to rollback, the new content isn't used; clean from repos
contentRepository.removeContent(ModelContentReference.fromModelAddress(address, newHash));
}
}
});
}
DeploymentHandlerUtil.ContentItem addFromHash(ContentReference reference) throws OperationFailedException {
if (!contentRepository.syncContent(reference)) {
throw ServerLogger.ROOT_LOGGER.noSuchDeploymentContent(reference.getHexHash());
}
return new DeploymentHandlerUtil.ContentItem(reference.getHash());
}
DeploymentHandlerUtil.ContentItem addFromContentAdditionParameter(OperationContext context, ModelNode contentItemNode, String name) throws OperationFailedException {
byte[] hash;
InputStream in = getInputStream(context, contentItemNode);
InputStream transformed = null;
try {
try {
transformed = deploymentTransformation.doTransformation(context, contentItemNode, name, in);
hash = contentRepository.addContent(transformed);
} catch (IOException e) {
throw createFailureException(e.toString());
}
} finally {
StreamUtils.safeClose(in);
StreamUtils.safeClose(transformed);
}
contentItemNode.clear(); // AS7-1029
contentItemNode.get(CONTENT_HASH.getName()).set(hash);
// TODO: remove the content addition stuff?
return new DeploymentHandlerUtil.ContentItem(hash);
}
DeploymentHandlerUtil.ContentItem addUnmanaged(OperationContext context, ModelNode contentItemNode) throws OperationFailedException {
final String path = CONTENT_PATH.resolveModelAttribute(context, contentItemNode).asString();
final String relativeTo = asString(contentItemNode, CONTENT_RELATIVE_TO.getName());
final boolean archive = CONTENT_ARCHIVE.resolveModelAttribute(context, contentItemNode).asBoolean();
return new DeploymentHandlerUtil.ContentItem(path, relativeTo, archive);
}
}
| lgpl-2.1 |
palominolabs/jmagick | src/magick/jmagick.c | 21475 | #include <jni.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <time.h>
#include <sys/types.h>
#include <magick/api.h>
#include "jmagick.h"
/*
* Convenience function to help throw an MagickException.
*/
void throwMagickException(JNIEnv *env, const char *mesg)
{
jclass magickExceptionClass;
magickExceptionClass = (*env)->FindClass(env, "magick/MagickException");
if (magickExceptionClass == 0) {
fprintf(stderr, "Cannot find MagickException class\n");
return;
}
(*env)->ThrowNew(env, magickExceptionClass, mesg);
}
/*
 * Convenience function to help throw a MagickApiException.
*
* Input:
* mesg JMagick message
* exception points to a ImageMagick ExceptionInfo structure
*/
void throwMagickApiException(JNIEnv *env,
const char *mesg,
const ExceptionInfo *exception)
{
jclass magickApiExceptionClass;
jmethodID consMethodID = 0;
jobject newObj;
jstring jreason, jdescription;
int result;
#ifdef DIAGNOSTIC
fprintf(stderr, "throwMagickApiException reason: %s - desc: %s \n", exception->reason, exception->description);
#endif
/* Find the class ID */
magickApiExceptionClass =
(*env)->FindClass(env, "magick/MagickApiException");
if (magickApiExceptionClass == 0) {
fprintf(stderr, "Cannot find MagickApiException class\n");
return;
}
/* Find the constructor ID */
consMethodID =
(*env)->GetMethodID(env, magickApiExceptionClass,
"<init>",
"(ILjava/lang/String;Ljava/lang/String;)V");
if (consMethodID == 0) {
return;
}
/* Obtain the string objects */
jreason = (*env)->NewStringUTF(env, exception->reason != NULL ? exception->reason : "");
if (jreason == NULL) {
#ifdef DIAGNOSTIC
fprintf(stderr,
"throwMagickApiException: "
"Unable to create reason string\n");
#endif
return;
}
jdescription = (*env)->NewStringUTF(env, exception->description != NULL ? exception->description : "");
if (jdescription == NULL) {
#ifdef DIAGNOSTIC
fprintf(stderr,
"throwMagickApiException: "
"Unable to create description string\n");
#endif
return;
}
/* Create the MagickApiException object */
newObj = (*env)->NewObject(env, magickApiExceptionClass, consMethodID,
exception->severity,
jreason, jdescription);
if (newObj == NULL) {
#ifdef DIAGNOSTIC
fprintf(stderr,
"throwMagickApiException: "
"Unable to create MagickApiException object\n");
#endif
return;
}
/* Throw the exception. */
result = (*env)->Throw(env, newObj);
#ifdef DIAGNOSTIC
if (result != 0) {
fprintf(stderr,
"throwMagickApiException: "
"Fail to throw MagickApiException");
}
#endif
}
/*
 * Convenience function to retrieve a handle from an object.
*
* Input:
* env Java VM environment
* obj Java object for which handle is to be retrieved
* handleName name of the handle in the object
* fieldId if non-null, contains the field ID. 0 to request retrieval.
*
* Output:
* fieldId if non-null, will contain field ID of the handle on output.
*/
void *getHandle(JNIEnv *env,
jobject obj,
const char *handleName,
jfieldID *fieldId)
{
jclass objClass;
jfieldID handleFid;
/* Retrieve the field ID of the handle */
if (fieldId == NULL) {
objClass = (*env)->GetObjectClass(env, obj);
if (objClass == 0) {
return NULL;
}
handleFid = (*env)->GetFieldID(env, objClass, handleName, "J");
}
else if (*fieldId == 0) {
objClass = (*env)->GetObjectClass(env, obj);
if (objClass == 0) {
return NULL;
}
handleFid = *fieldId =
(*env)->GetFieldID(env, objClass, handleName, "J");
}
    else {
        handleFid = *fieldId;
    }
    if (handleFid == 0) {
        return NULL;
    }
    return (void*) (*env)->GetLongField(env, obj, handleFid);
}
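/*
 * Illustrative usage (not from the original source):
 *
 *     Image *image = (Image*) getHandle(env, obj, "magickImageHandle", NULL);
 *     if (image == NULL)
 *         throwMagickException(env, "Unable to retrieve image handle");
 */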
/*
* Convenience function to set a handle in an object.
*
* Input:
* env Java VM environment
* obj Java object for which handle is to be retrieved
* handleName name of the handle in the object
* fieldId if non-null, contains the field ID. 0 to request retrieval.
*
* Output:
* fieldId if non-null, will contain field ID of the handle on output.
*
* Return:
* non-zero if successful
* zero if failure
*/
int setHandle(JNIEnv *env,
jobject obj,
const char *handleName,
void *handle,
jfieldID *fieldId)
{
jclass objClass;
jfieldID handleFid;
/* Retrieve the field ID of the handle */
if (fieldId == NULL) {
objClass = (*env)->GetObjectClass(env, obj);
if (objClass == 0) {
return 0;
}
handleFid = (*env)->GetFieldID(env, objClass, handleName, "J");
}
    else if (*fieldId == 0) {
objClass = (*env)->GetObjectClass(env, obj);
if (objClass == 0) {
return 0;
}
handleFid = *fieldId =
(*env)->GetFieldID(env, objClass, handleName, "J");
}
else {
handleFid = *fieldId;
}
if (handleFid == 0) {
return 0;
}
(*env)->SetLongField(env, obj, handleFid, (jlong) handle);
return 1;
}
/*
* Retrieve the int value of the specified field.
*
* Input:
* env Java VM environment.
* obj Java object for which the value is to be retrieved.
* fieldName name of the field to be retrieved.
* fieldID if non-null, points to field ID. 0 to request retrieval.
*
* Output:
* fieldID if non-null, will contain the field ID.
* value to contain the retrieved value. Must not be null.
*
* Return:
* non-zero if successful
* zero if failed
*/
int getIntFieldValue(JNIEnv *env,
jobject obj,
const char *fieldName,
jfieldID *fieldID,
jint *value)
{
jclass objClass = 0;
jfieldID objFieldID = 0;
if (fieldID == NULL) {
objClass = (*env)->GetObjectClass(env, obj);
if (objClass == 0) {
return 0;
}
objFieldID = (*env)->GetFieldID(env, objClass, fieldName, "I");
}
else if (*fieldID == 0) {
objClass = (*env)->GetObjectClass(env, obj);
if (objClass == 0) {
return 0;
}
objFieldID = *fieldID =
(*env)->GetFieldID(env, objClass, fieldName, "I");
}
else {
objFieldID = *fieldID;
}
if (objFieldID == 0) {
return 0;
}
*value = (*env)->GetIntField(env, obj, objFieldID);
return 1;
}
/*
* Store the int value of the specified field.
*
* Input:
* env Java VM environment.
* obj Java object for which the value is to be retrieved.
* fieldName name of the field to be retrieved.
* fieldID if non-null, points to field ID. 0 to request retrieval.
* value to contain the value to be stored.
*
* Output:
* fieldID if non-null, will contain the field ID.
*
* Return:
* non-zero if successful
* zero if failed
*/
int setIntFieldValue(JNIEnv *env,
jobject obj,
const char *fieldName,
jfieldID *fieldID,
jint value)
{
jclass objClass = 0;
jfieldID objFieldID = 0;
if (fieldID == NULL) {
objClass = (*env)->GetObjectClass(env, obj);
if (objClass == 0) {
return 0;
}
objFieldID = (*env)->GetFieldID(env, objClass, fieldName, "I");
}
else if (*fieldID == 0) {
objClass = (*env)->GetObjectClass(env, obj);
if (objClass == 0) {
return 0;
}
objFieldID = *fieldID =
(*env)->GetFieldID(env, objClass, fieldName, "I");
}
else {
objFieldID = *fieldID;
}
if (objFieldID == 0) {
return 0;
}
(*env)->SetIntField(env, obj, objFieldID, value);
return 1;
}
/*
* Retrieve the byte value of the specified field.
*
* Input:
* env Java VM environment.
* obj Java object for which the value is to be retrieved.
* fieldName name of the field to be retrieved.
* fieldID if non-null, points to field ID. 0 to request retrieval.
*
* Output:
* fieldID if non-null, will contain the field ID.
* value to contain the retrieved value. Must not be null.
*
* Return:
* non-zero if successful
* zero if failed
*/
int getByteFieldValue(JNIEnv *env,
jobject obj,
const char *fieldName,
jfieldID *fieldID,
jbyte *value)
{
jclass objClass = 0;
jfieldID objFieldID = 0;
if (fieldID == NULL) {
objClass = (*env)->GetObjectClass(env, obj);
if (objClass == 0) {
return 0;
}
objFieldID = (*env)->GetFieldID(env, objClass, fieldName, "B");
}
else if (*fieldID == 0) {
objClass = (*env)->GetObjectClass(env, obj);
if (objClass == 0) {
return 0;
}
objFieldID = *fieldID =
(*env)->GetFieldID(env, objClass, fieldName, "B");
}
else {
objFieldID = *fieldID;
}
if (objFieldID == 0) {
return 0;
}
*value = (*env)->GetByteField(env, obj, objFieldID);
return 1;
}
/*
* Retrieve the short value of the specified field.
*
* Input:
* env Java VM environment.
* obj Java object for which the value is to be retrieved.
* fieldName name of the field to be retrieved.
* fieldID if non-null, points to field ID. 0 to request retrieval.
*
* Output:
* fieldID if non-null, will contain the field ID.
* value to contain the retrieved value. Must not be null.
*
* Return:
* non-zero if successful
* zero if failed
*/
int getShortFieldValue(JNIEnv *env,
jobject obj,
const char *fieldName,
jfieldID *fieldID,
jshort *value)
{
jclass objClass = 0;
jfieldID objFieldID = 0;
if (fieldID == NULL) {
objClass = (*env)->GetObjectClass(env, obj);
if (objClass == 0) {
return 0;
}
objFieldID = (*env)->GetFieldID(env, objClass, fieldName, "S");
}
else if (*fieldID == 0) {
objClass = (*env)->GetObjectClass(env, obj);
if (objClass == 0) {
return 0;
}
objFieldID = *fieldID =
(*env)->GetFieldID(env, objClass, fieldName, "S");
}
else {
objFieldID = *fieldID;
}
if (objFieldID == 0) {
return 0;
}
*value = (*env)->GetShortField(env, obj, objFieldID);
return 1;
}
/*
* Retrieve the string value of the specified field.
*
* Input:
* env Java VM environment.
* obj Java object for which the value is to be retrieved.
* fieldName name of the field to be retrieved.
* fieldID if non-null, points to field ID. 0 to request retrieval.
*
* Output:
* fieldID if non-null, will contain the field ID.
*
* Return:
* The string value requested. The caller is responsible for
* deallocating this string.
*/
char* getStringFieldValue(JNIEnv *env,
jobject obj,
const char *fieldName,
jfieldID *fieldID)
{
jclass objClass = 0;
jfieldID objFieldID = 0;
jobject stringObj = 0;
char *stringVal = NULL;
char *stringCpy = NULL;
if (fieldID == NULL) {
objClass = (*env)->GetObjectClass(env, obj);
if (objClass == 0) {
return NULL;
}
objFieldID =
(*env)->GetFieldID(env, objClass, fieldName, "Ljava/lang/String;");
}
else if (*fieldID == 0) {
objClass = (*env)->GetObjectClass(env, obj);
if (objClass == 0) {
return NULL;
}
objFieldID = *fieldID =
(*env)->GetFieldID(env, objClass, fieldName, "Ljava/lang/String;");
}
else {
objFieldID = *fieldID;
}
if (objFieldID == 0) {
return NULL;
}
stringObj = (*env)->GetObjectField(env, obj, objFieldID);
if (stringObj == NULL) {
return NULL;
}
stringVal = (char *) (*env)->GetStringUTFChars(env, stringObj, 0);
stringCpy = (char *) AcquireString(stringVal);
(*env)->ReleaseStringUTFChars(env, stringObj, stringVal);
return stringCpy;
}
/*
* Retrieve the byte array from the specified field.
*
* Input:
* env Java VM environment.
* obj Java object for which the value is to be retrieved.
* fieldName name of the field to be retrieved.
* fieldID if non-null, points to field ID. 0 to request retrieval.
*
* Output:
* fieldID if non-null, will contain the field ID.
* size the size of the array is returned here. Must not be NULL.
*
* Return:
* The byte array requested. The caller is responsible for
* deallocating this byte array.
*/
unsigned char* getByteArrayFieldValue(JNIEnv *env,
jobject obj,
const char *fieldName,
jfieldID *fieldID,
int *size)
{
jclass objClass = 0;
jfieldID objFieldID = 0;
jobject byteArrayObj = 0;
unsigned char *byteArray = NULL;
    unsigned char *byteArrayCpy = NULL;
if (fieldID == NULL) {
objClass = (*env)->GetObjectClass(env, obj);
if (objClass == 0) {
return NULL;
}
objFieldID =
(*env)->GetFieldID(env, objClass, fieldName, "[B");
}
else if (*fieldID == 0) {
objClass = (*env)->GetObjectClass(env, obj);
if (objClass == 0) {
return NULL;
}
objFieldID = *fieldID =
(*env)->GetFieldID(env, objClass, fieldName, "[B");
}
else {
objFieldID = *fieldID;
}
if (objFieldID == 0) {
return NULL;
}
/* Get the array object */
byteArrayObj = (*env)->GetObjectField(env, obj, objFieldID);
if (byteArrayObj == NULL) {
return NULL;
}
/* Determine the size of the array */
*size = (*env)->GetArrayLength(env, byteArrayObj);
if (*size == 0) {
return NULL;
}
/* Get and copy the array elements */
    byteArray = (unsigned char *)
        (*env)->GetByteArrayElements(env, byteArrayObj, 0);
    if (byteArray == NULL) {
        return NULL;
    }
    byteArrayCpy = (unsigned char *) AcquireMemory(*size);
    if (byteArrayCpy == NULL) {
        (*env)->ReleaseByteArrayElements(env, byteArrayObj, byteArray, JNI_ABORT);
        return NULL;
    }
memcpy(byteArrayCpy, byteArray, *size);
(*env)->ReleaseByteArrayElements(env, byteArrayObj, byteArray, JNI_ABORT);
return byteArrayCpy;
}
/*
* From a java.awt.Rectangle object, construct a ImageMagick
* RectangleInfo, as passed in from the parameter.
*
* Input:
* env Java VM environment
* jRect an instance of java.awt.Rectangle
*
* Output:
 *     iRect      to be initialised by values in jRect
*
* Return:
* non-zero if successful
* zero if failed
*/
int getRectangle(JNIEnv *env, jobject jRect, RectangleInfo *iRect)
{
jint width, height, x, y;
int retVal =
getIntFieldValue(env, jRect, "width", NULL, (jint *) &width) &&
getIntFieldValue(env, jRect, "height", NULL, (jint *) &height) &&
getIntFieldValue(env, jRect, "x", NULL, (jint *) &x) &&
getIntFieldValue(env, jRect, "y", NULL, (jint *) &y);
if (retVal) {
iRect->width = width;
iRect->height = height;
iRect->x = x;
iRect->y = y;
}
return retVal;
}
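/*
 * Illustrative usage (not from the original source):
 *
 *     RectangleInfo iRect;
 *     if (!getRectangle(env, jRect, &iRect)) {
 *         throwMagickException(env, "Unable to obtain rectangle values");
 *         return NULL;
 *     }
 */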
/*
* From a magick.PixelPacket object, construct a ImageMagick
* PixelPacket, as passed in from the parameter.
*
* Input:
* env Java VM environment
* jPixelPacket an instance of magick.PixelPacket
*
* Output:
 *     iPixelPacket to be initialised by values in jPixelPacket
*
* Return:
* non-zero if successful
* zero if failed
*/
int getPixelPacket(JNIEnv *env,
jobject jPixelPacket,
PixelPacket *iPixelPacket)
{
jint red, green, blue, opacity;
int successful =
getIntFieldValue(env, jPixelPacket, "red", NULL,
&red) &&
getIntFieldValue(env, jPixelPacket, "green", NULL,
&green) &&
getIntFieldValue(env, jPixelPacket, "blue", NULL,
&blue) &&
getIntFieldValue(env, jPixelPacket, "opacity", NULL,
&opacity);
if (!successful) {
return successful;
}
iPixelPacket->red = (Quantum) red;
iPixelPacket->green = (Quantum) green;
iPixelPacket->blue = (Quantum) blue;
iPixelPacket->opacity = (Quantum) opacity;
return successful;
}
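/*
 * Illustrative usage (not from the original source):
 *
 *     PixelPacket fill;
 *     if (!getPixelPacket(env, jPixelPacket, &fill)) {
 *         throwMagickException(env, "Unable to obtain PixelPacket values");
 *         return;
 *     }
 */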
/*
* Construct a new Java magick.MagickImage object and set the
* handle.
*
* Input:
* env Java VM environment
* image ImageMagick image handle
*
* Return:
* A new instance of magick.MagickImage object.
*
*/
jobject newImageObject(JNIEnv *env, Image* image)
{
jclass magickImageClass = 0;
jmethodID consMethodID = 0;
jobject newObj;
magickImageClass = (*env)->FindClass(env, "magick/MagickImage");
if (magickImageClass == 0) {
return NULL;
}
consMethodID = (*env)->GetMethodID(env, magickImageClass,
"<init>", "()V");
if (consMethodID == 0) {
return NULL;
}
newObj = (*env)->NewObject(env, magickImageClass, consMethodID);
if (newObj == NULL) {
return NULL;
}
if (!setHandle(env, newObj, "magickImageHandle", (void*) image, NULL)) {
#ifdef DIAGNOSTIC
fprintf(stderr, "newImageObject: Unable to set handle\n");
#endif
return NULL;
}
return newObj;
}
/*
 * Set an attribute in a generic handle to a string.
*
* Input:
* env Java VM environment
* attribVar points to a C string so as to set the value
* jstr Java string for which to set the attrib
*
* Output:
* attribVar points to a new C string with content from jstr
*/
void setHandleAttribute(JNIEnv *env, char **attribVar, jstring jstr)
{
const char *cstr = NULL;
if (*attribVar != NULL) {
// RelinquishMagickMemory((void**)attribVar);
}
cstr = (*env)->GetStringUTFChars(env, jstr, 0);
*attribVar = (char *) AcquireString(cstr);
(*env)->ReleaseStringUTFChars(env, jstr, cstr);
}
/*
* Given the C ProfileInfo structure and the Java ProfileInfo object,
* acquire the contents of the Java ProfileInfo object and store it in
* the C ProfileInfo structure.
*
* Input:
* env JNI environment
* profileObj Java ProfileInfo object for which field values are to be
* obtain to store into the C ProfileInfo structure
* Output:
 *     profileInfo C ProfileInfo structure to store field values
*/
void setProfileInfo(JNIEnv *env, ProfileInfo *profileInfo, jobject profileObj)
{
char *name;
unsigned char *info;
int infoSize = 0;
if (profileObj == NULL) {
throwMagickException(env, "ProfileInfo cannot be null");
return;
}
name = getStringFieldValue(env, profileObj, "name", NULL);
info = getByteArrayFieldValue(env, profileObj, "info", NULL, &infoSize);
if (profileInfo->name != NULL) {
// RelinquishMagickMemory((void**) &profileInfo->name);
}
profileInfo->name = name;
if (profileInfo->info != NULL) {
// RelinquishMagickMemory((void**) &profileInfo->info);
}
profileInfo->info = info;
profileInfo->length = infoSize;
}
/*
* Given the C ProfileInfo structure, construct a Java ProfileInfo
* object with values obtained from the C ProfileInfo structure.
* Input:
* env JNI environment
* profileInfo C ProfileInfo structure
* Return:
* Java ProfileInfo object
*/
jobject getProfileInfo(JNIEnv *env, ProfileInfo *profileInfo)
{
jclass profileInfoClass;
jmethodID consMethodID;
jobject profileObject;
jstring name;
jbyteArray byteArray;
unsigned char *byteElements;
/* Get the ProfileInfo class ID */
profileInfoClass = (*env)->FindClass(env, "magick/ProfileInfo");
if (profileInfoClass == 0) {
throwMagickException(env, "Unable to locate class "
"magick.ProfileInfo");
return NULL;
}
/* Get the constructor method ID */
consMethodID = (*env)->GetMethodID(env, profileInfoClass,
"<init>", "(Ljava/lang/String;[B)V");
if (consMethodID == 0) {
throwMagickException(env, "Unable to locate constructor "
"ProfileInfo(String, byte[])");
return NULL;
}
/* Construct the name */
if (profileInfo->name != NULL) {
name = (*env)->NewStringUTF(env, profileInfo->name);
if (name == NULL) {
throwMagickException(env, "Unable to allocate Java String "
"for profile name");
return NULL;
}
}
else {
name = NULL;
}
/* Construct the byte array */
if (profileInfo->length > 0) {
byteArray = (*env)->NewByteArray(env, profileInfo->length);
if (byteArray == NULL) {
throwMagickException(env, "Unable to allocate byte array "
"for profile info");
return NULL;
}
	byteElements = (unsigned char *)
	    (*env)->GetByteArrayElements(env, byteArray, NULL);
if (byteElements == NULL) {
throwMagickException(env, "Unable to obtain byte array elements "
"for profile info");
return NULL;
}
memcpy(byteElements,
profileInfo->info,
profileInfo->length);
(*env)->ReleaseByteArrayElements(env, byteArray, byteElements, 0);
}
else {
byteArray = NULL;
}
/* Construct the ProfileInfo object */
profileObject = (*env)->NewObject(env, profileInfoClass, consMethodID,
name, byteArray);
if (profileObject == NULL) {
throwMagickException(env, "Unable to construct ProfileInfo object");
return NULL;
}
return profileObject;
}
| lgpl-2.1 |
ostash/qt-creator-i18n-uk | src/plugins/projectexplorer/currentprojectfind.cpp | 3806 | /**************************************************************************
**
** This file is part of Qt Creator
**
** Copyright (c) 2012 Nokia Corporation and/or its subsidiary(-ies).
**
** Contact: Nokia Corporation ([email protected])
**
**
** GNU Lesser General Public License Usage
**
** This file may be used under the terms of the GNU Lesser General Public
** License version 2.1 as published by the Free Software Foundation and
** appearing in the file LICENSE.LGPL included in the packaging of this file.
** Please review the following information to ensure the GNU Lesser General
** Public License version 2.1 requirements will be met:
** http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
**
** In addition, as a special exception, Nokia gives you certain additional
** rights. These rights are described in the Nokia Qt LGPL Exception
** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
**
** Other Usage
**
** Alternatively, this file may be used in accordance with the terms and
** conditions contained in a signed written agreement between you and Nokia.
**
** If you have questions regarding the use of this file, please contact
** Nokia at [email protected].
**
**************************************************************************/
#include "currentprojectfind.h"
#include "projectexplorer.h"
#include "project.h"
#include "session.h"
#include <coreplugin/idocument.h>
#include <utils/qtcassert.h>
#include <QDebug>
#include <QSettings>
#include <QLabel>
#include <QHBoxLayout>
using namespace Find;
using namespace ProjectExplorer;
using namespace ProjectExplorer::Internal;
using namespace TextEditor;
CurrentProjectFind::CurrentProjectFind(ProjectExplorerPlugin *plugin)
: AllProjectsFind(plugin),
m_plugin(plugin)
{
connect(m_plugin, SIGNAL(currentProjectChanged(ProjectExplorer::Project*)),
this, SLOT(handleProjectChanged()));
}
QString CurrentProjectFind::id() const
{
return QLatin1String("Current Project");
}
QString CurrentProjectFind::displayName() const
{
return tr("Current Project");
}
bool CurrentProjectFind::isEnabled() const
{
return ProjectExplorerPlugin::currentProject() != 0 && BaseFileFind::isEnabled();
}
QVariant CurrentProjectFind::additionalParameters() const
{
Project *project = ProjectExplorerPlugin::currentProject();
if (project && project->document())
return qVariantFromValue(project->document()->fileName());
return QVariant();
}
Utils::FileIterator *CurrentProjectFind::files(const QStringList &nameFilters,
const QVariant &additionalParameters) const
{
QTC_ASSERT(additionalParameters.isValid(), return new Utils::FileIterator());
QList<Project *> allProjects = m_plugin->session()->projects();
QString projectFile = additionalParameters.toString();
foreach (Project *project, allProjects) {
if (project->document() && projectFile == project->document()->fileName())
return filesForProjects(nameFilters, QList<Project *>() << project);
}
return new Utils::FileIterator();
}
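// The matching project is re-resolved here from its document file name rather
// than from a stored Project pointer, presumably so that saved search
// parameters remain valid even if the project has been reloaded meanwhile.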
QString CurrentProjectFind::label() const
{
QTC_ASSERT(ProjectExplorerPlugin::currentProject(), return QString());
return tr("Project '%1':").arg(ProjectExplorerPlugin::currentProject()->displayName());
}
void CurrentProjectFind::handleProjectChanged()
{
emit enabledChanged(isEnabled());
}
void CurrentProjectFind::writeSettings(QSettings *settings)
{
settings->beginGroup(QLatin1String("CurrentProjectFind"));
writeCommonSettings(settings);
settings->endGroup();
}
void CurrentProjectFind::readSettings(QSettings *settings)
{
settings->beginGroup(QLatin1String("CurrentProjectFind"));
readCommonSettings(settings, QString(QLatin1Char('*')));
settings->endGroup();
}
| lgpl-2.1 |
mhuwiler/rootauto | interpreter/llvm/src/include/llvm/Transforms/Utils/Local.h | 18769 | //===-- Local.h - Functions to perform local transformations ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This family of functions perform various local transformations to the
// program.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_TRANSFORMS_UTILS_LOCAL_H
#define LLVM_TRANSFORMS_UTILS_LOCAL_H
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Operator.h"
#include "llvm/ADT/SmallPtrSet.h"
namespace llvm {
class User;
class BasicBlock;
class Function;
class BranchInst;
class Instruction;
class CallInst;
class DbgDeclareInst;
class DbgValueInst;
class StoreInst;
class LoadInst;
class Value;
class PHINode;
class AllocaInst;
class AssumptionCache;
class ConstantExpr;
class DataLayout;
class TargetLibraryInfo;
class TargetTransformInfo;
class DIBuilder;
class DominatorTree;
class LazyValueInfo;
template<typename T> class SmallVectorImpl;
//===----------------------------------------------------------------------===//
// Local constant propagation.
//
/// If a terminator instruction is predicated on a constant value, convert it
/// into an unconditional branch to the constant destination.
/// This is a nontrivial operation because the successors of this basic block
/// must have their PHI nodes updated.
/// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch
/// conditions and indirectbr addresses this might make dead if
/// DeleteDeadConditions is true.
bool ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions = false,
const TargetLibraryInfo *TLI = nullptr);
//===----------------------------------------------------------------------===//
// Local dead code elimination.
//
/// Return true if the result produced by the instruction is not used, and the
/// instruction has no side effects.
bool isInstructionTriviallyDead(Instruction *I,
const TargetLibraryInfo *TLI = nullptr);
/// Return true if the result produced by the instruction would have no side
/// effects if it was not used. This is equivalent to checking whether
/// isInstructionTriviallyDead would be true if the use count was 0.
bool wouldInstructionBeTriviallyDead(Instruction *I,
const TargetLibraryInfo *TLI = nullptr);
/// If the specified value is a trivially dead instruction, delete it.
/// If that makes any of its operands trivially dead, delete them too,
/// recursively. Return true if any instructions were deleted.
bool RecursivelyDeleteTriviallyDeadInstructions(Value *V,
const TargetLibraryInfo *TLI = nullptr);
/// If the specified value is an effectively dead PHI node, due to being a
/// def-use chain of single-use nodes that either forms a cycle or is terminated
/// by a trivially dead instruction, delete it. If that makes any of its
/// operands trivially dead, delete them too, recursively. Return true if a
/// change was made.
bool RecursivelyDeleteDeadPHINode(PHINode *PN,
const TargetLibraryInfo *TLI = nullptr);
/// Scan the specified basic block and try to simplify any instructions in it
/// and recursively delete dead instructions.
///
/// This returns true if it changed the code, note that it can delete
/// instructions in other blocks as well in this block.
bool SimplifyInstructionsInBlock(BasicBlock *BB,
const TargetLibraryInfo *TLI = nullptr);
//===----------------------------------------------------------------------===//
// Control Flow Graph Restructuring.
//
/// Like BasicBlock::removePredecessor, this method is called when we're about
/// to delete Pred as a predecessor of BB. If BB contains any PHI nodes, this
/// drops the entries in the PHI nodes for Pred.
///
/// Unlike the removePredecessor method, this attempts to simplify uses of PHI
/// nodes that collapse into identity values. For example, if we have:
/// x = phi(1, 0, 0, 0)
/// y = and x, z
///
/// .. and delete the predecessor corresponding to the '1', this will attempt to
/// recursively fold the 'and' to 0.
void RemovePredecessorAndSimplify(BasicBlock *BB, BasicBlock *Pred);
/// BB is a block with one predecessor and its predecessor is known to have one
/// successor (BB!). Eliminate the edge between them, moving the instructions in
/// the predecessor into BB. This deletes the predecessor block.
void MergeBasicBlockIntoOnlyPred(BasicBlock *BB, DominatorTree *DT = nullptr);
/// BB is known to contain an unconditional branch, and contains no instructions
/// other than PHI nodes, potential debug intrinsics and the branch. If
/// possible, eliminate BB by rewriting all the predecessors to branch to the
/// successor block and return true. If we can't transform, return false.
bool TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB);
/// Check for and eliminate duplicate PHI nodes in this block. This doesn't try
/// to be clever about PHI nodes which differ only in the order of the incoming
/// values, but instcombine orders them so it usually won't matter.
bool EliminateDuplicatePHINodes(BasicBlock *BB);
/// This function is used to do simplification of a CFG. For
/// example, it adjusts branches to branches to eliminate the extra hop, it
/// eliminates unreachable basic blocks, and does other "peephole" optimization
/// of the CFG. It returns true if a modification was made, possibly deleting
/// the basic block that was pointed to. LoopHeaders is an optional input
/// parameter, providing the set of loop header that SimplifyCFG should not
/// eliminate.
bool SimplifyCFG(BasicBlock *BB, const TargetTransformInfo &TTI,
unsigned BonusInstThreshold, AssumptionCache *AC = nullptr,
SmallPtrSetImpl<BasicBlock *> *LoopHeaders = nullptr,
bool LateSimplifyCFG = false);
/// This function is used to flatten a CFG. For example, it uses parallel-and
/// and parallel-or mode to collapse if-conditions and merge if-regions with
/// identical statements.
bool FlattenCFG(BasicBlock *BB, AliasAnalysis *AA = nullptr);
/// If this basic block is ONLY a setcc and a branch, and if a predecessor
/// branches to us and one of our successors, fold the setcc into the
/// predecessor and use logical operations to pick the right destination.
bool FoldBranchToCommonDest(BranchInst *BI, unsigned BonusInstThreshold = 1);
/// This function takes a virtual register computed by an Instruction and
/// replaces it with a slot in the stack frame, allocated via alloca.
/// This allows the CFG to be changed around without fear of invalidating the
/// SSA information for the value. It returns the pointer to the alloca inserted
/// to create a stack slot for X.
AllocaInst *DemoteRegToStack(Instruction &X,
bool VolatileLoads = false,
Instruction *AllocaPoint = nullptr);
/// This function takes a virtual register computed by a phi node and replaces
/// it with a slot in the stack frame, allocated via alloca. The phi node is
/// deleted and it returns the pointer to the alloca inserted.
AllocaInst *DemotePHIToStack(PHINode *P, Instruction *AllocaPoint = nullptr);
/// Try to ensure that the alignment of \p V is at least \p PrefAlign bytes. If
/// the owning object can be modified and has an alignment less than \p
/// PrefAlign, it will be increased and \p PrefAlign returned. If the alignment
/// cannot be increased, the known alignment of the value is returned.
///
/// It is not always possible to modify the alignment of the underlying object,
/// so if alignment is important, a more reliable approach is to simply align
/// all global variables and allocation instructions to their preferred
/// alignment from the beginning.
unsigned getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign,
const DataLayout &DL,
const Instruction *CxtI = nullptr,
AssumptionCache *AC = nullptr,
const DominatorTree *DT = nullptr);
/// Try to infer an alignment for the specified pointer.
static inline unsigned getKnownAlignment(Value *V, const DataLayout &DL,
const Instruction *CxtI = nullptr,
AssumptionCache *AC = nullptr,
const DominatorTree *DT = nullptr) {
return getOrEnforceKnownAlignment(V, 0, DL, CxtI, AC, DT);
}
/// Given a getelementptr instruction/constantexpr, emit the code necessary to
/// compute the offset from the base pointer (without adding in the base
/// pointer). Return the result as a signed integer of intptr size.
/// When NoAssumptions is true, no assumptions about index computation not
/// overflowing is made.
template <typename IRBuilderTy>
Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &DL, User *GEP,
bool NoAssumptions = false) {
GEPOperator *GEPOp = cast<GEPOperator>(GEP);
Type *IntPtrTy = DL.getIntPtrType(GEP->getType());
Value *Result = Constant::getNullValue(IntPtrTy);
// If the GEP is inbounds, we know that none of the addressing operations will
// overflow in an unsigned sense.
bool isInBounds = GEPOp->isInBounds() && !NoAssumptions;
// Build a mask for high order bits.
unsigned IntPtrWidth = IntPtrTy->getScalarType()->getIntegerBitWidth();
uint64_t PtrSizeMask = ~0ULL >> (64 - IntPtrWidth);
gep_type_iterator GTI = gep_type_begin(GEP);
for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end(); i != e;
++i, ++GTI) {
Value *Op = *i;
uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType()) & PtrSizeMask;
if (Constant *OpC = dyn_cast<Constant>(Op)) {
if (OpC->isZeroValue())
continue;
// Handle a struct index, which adds its field offset to the pointer.
if (StructType *STy = GTI.getStructTypeOrNull()) {
if (OpC->getType()->isVectorTy())
OpC = OpC->getSplatValue();
uint64_t OpValue = cast<ConstantInt>(OpC)->getZExtValue();
Size = DL.getStructLayout(STy)->getElementOffset(OpValue);
if (Size)
Result = Builder->CreateAdd(Result, ConstantInt::get(IntPtrTy, Size),
GEP->getName()+".offs");
continue;
}
Constant *Scale = ConstantInt::get(IntPtrTy, Size);
Constant *OC = ConstantExpr::getIntegerCast(OpC, IntPtrTy, true /*SExt*/);
Scale = ConstantExpr::getMul(OC, Scale, isInBounds/*NUW*/);
// Emit an add instruction.
Result = Builder->CreateAdd(Result, Scale, GEP->getName()+".offs");
continue;
}
// Convert to correct type.
if (Op->getType() != IntPtrTy)
Op = Builder->CreateIntCast(Op, IntPtrTy, true, Op->getName()+".c");
if (Size != 1) {
// We'll let instcombine(mul) convert this to a shl if possible.
Op = Builder->CreateMul(Op, ConstantInt::get(IntPtrTy, Size),
GEP->getName()+".idx", isInBounds /*NUW*/);
}
// Emit an add instruction.
Result = Builder->CreateAdd(Op, Result, GEP->getName()+".offs");
}
return Result;
}
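// Worked example (illustrative, not from the original source): for
//   %p = getelementptr inbounds %struct.S, %struct.S* %base, i64 %i, i32 1
// where %struct.S occupies 16 bytes and field 1 sits at byte offset 8, the
// loop above conceptually emits "offs = %i * 16 + 8" in intptr-sized
// arithmetic, with NUW set on the multiply because the GEP is inbounds.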
///===---------------------------------------------------------------------===//
/// Dbg Intrinsic utilities
///
/// Inserts a llvm.dbg.value intrinsic before a store to an alloca'd value
/// that has an associated llvm.dbg.declare intrinsic.
void ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI,
StoreInst *SI, DIBuilder &Builder);
/// Inserts a llvm.dbg.value intrinsic before a load of an alloca'd value
/// that has an associated llvm.dbg.declare intrinsic.
void ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI,
LoadInst *LI, DIBuilder &Builder);
/// Inserts a llvm.dbg.value intrinsic after a phi of an alloca'd value
/// that has an associated llvm.dbg.decl intrinsic.
void ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI,
PHINode *LI, DIBuilder &Builder);
/// Lowers llvm.dbg.declare intrinsics into appropriate set of
/// llvm.dbg.value intrinsics.
bool LowerDbgDeclare(Function &F);
/// Finds the llvm.dbg.declare intrinsic corresponding to an alloca, if any.
DbgDeclareInst *FindAllocaDbgDeclare(Value *V);
/// Finds the llvm.dbg.value intrinsics describing a value.
void findDbgValues(SmallVectorImpl<DbgValueInst *> &DbgValues, Value *V);
/// Replaces llvm.dbg.declare instruction when the address it describes
/// is replaced with a new value. If Deref is true, an additional DW_OP_deref is
/// prepended to the expression. If Offset is non-zero, a constant displacement
/// is added to the expression (after the optional Deref). Offset can be
/// negative.
bool replaceDbgDeclare(Value *Address, Value *NewAddress,
Instruction *InsertBefore, DIBuilder &Builder,
bool Deref, int Offset);
/// Replaces llvm.dbg.declare instruction when the alloca it describes
/// is replaced with a new value. If Deref is true, an additional DW_OP_deref is
/// prepended to the expression. If Offset is non-zero, a constant displacement
/// is added to the expression (after the optional Deref). Offset can be
/// negative. New llvm.dbg.declare is inserted immediately before AI.
bool replaceDbgDeclareForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
DIBuilder &Builder, bool Deref, int Offset = 0);
/// Replaces multiple llvm.dbg.value instructions when the alloca it describes
/// is replaced with a new value. If Offset is non-zero, a constant displacement
/// is added to the expression (after the mandatory Deref). Offset can be
/// negative. New llvm.dbg.value instructions are inserted at the locations of
/// the instructions they replace.
void replaceDbgValueForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
DIBuilder &Builder, int Offset = 0);
/// Assuming the instruction \p I is going to be deleted, attempt to salvage any
/// dbg.value intrinsics referring to \p I by rewriting its effect into a
/// DIExpression.
void salvageDebugInfo(Instruction &I);
/// Remove all instructions from a basic block other than its terminator
/// and any present EH pad instructions.
unsigned removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB);
/// Insert an unreachable instruction before the specified
/// instruction, making it and the rest of the code in the block dead.
unsigned changeToUnreachable(Instruction *I, bool UseLLVMTrap,
bool PreserveLCSSA = false);
/// Convert the CallInst to InvokeInst with the specified unwind edge basic
/// block. This also splits the basic block where CI is located, because
/// InvokeInst is a terminator instruction. Returns the newly split basic
/// block.
BasicBlock *changeToInvokeAndSplitBasicBlock(CallInst *CI,
BasicBlock *UnwindEdge);
/// Replace 'BB's terminator with one that does not have an unwind successor
/// block. Rewrites `invoke` to `call`, etc. Updates any PHIs in unwind
/// successor.
///
/// \param BB Block whose terminator will be replaced. Its terminator must
/// have an unwind successor.
void removeUnwindEdge(BasicBlock *BB);
/// Remove all blocks that cannot be reached from the function's entry.
///
/// Returns true if any basic block was removed.
bool removeUnreachableBlocks(Function &F, LazyValueInfo *LVI = nullptr);
/// Combine the metadata of two instructions so that K can replace J
///
/// Metadata not listed as known via KnownIDs is removed
void combineMetadata(Instruction *K, const Instruction *J, ArrayRef<unsigned> KnownIDs);
/// Combine the metadata of two instructions so that K can replace J. This
/// specifically handles the case of CSE-like transformations.
///
/// Unknown metadata is removed.
void combineMetadataForCSE(Instruction *K, const Instruction *J);
/// Replace each use of 'From' with 'To' if that use is dominated by
/// the given edge. Returns the number of replacements made.
unsigned replaceDominatedUsesWith(Value *From, Value *To, DominatorTree &DT,
const BasicBlockEdge &Edge);
/// Replace each use of 'From' with 'To' if that use is dominated by
/// the end of the given BasicBlock. Returns the number of replacements made.
unsigned replaceDominatedUsesWith(Value *From, Value *To, DominatorTree &DT,
const BasicBlock *BB);
/// Return true if the CallSite CS calls a gc leaf function.
///
/// A leaf function is a function that does not safepoint the thread during its
/// execution. During a call or invoke to such a function, the callers stack
/// does not have to be made parseable.
///
/// Most passes can and should ignore this information, and it is only used
/// during lowering by the GC infrastructure.
bool callsGCLeafFunction(ImmutableCallSite CS);
//===----------------------------------------------------------------------===//
// Intrinsic pattern matching
//
/// Try and match a bswap or bitreverse idiom.
///
/// If an idiom is matched, an intrinsic call is inserted before \c I. Any added
/// instructions are returned in \c InsertedInsts. They will all have been added
/// to a basic block.
///
/// A bitreverse idiom normally requires around 2*BW nodes to be searched (where
/// BW is the bitwidth of the integer type). A bswap idiom requires anywhere up
/// to BW / 4 nodes to be searched, so is significantly faster.
///
/// This function returns true on a successful match or false otherwise.
bool recognizeBSwapOrBitReverseIdiom(
Instruction *I, bool MatchBSwaps, bool MatchBitReversals,
SmallVectorImpl<Instruction *> &InsertedInsts);
//===----------------------------------------------------------------------===//
// Sanitizer utilities
//
/// Given a CallInst, check if it calls a string function known to CodeGen,
/// and mark it with NoBuiltin if so. To be used by sanitizers that intend
/// to intercept string functions and want to avoid converting them to target
/// specific instructions.
void maybeMarkSanitizerLibraryCallNoBuiltin(CallInst *CI,
const TargetLibraryInfo *TLI);
} // End llvm namespace
#endif
| lgpl-2.1 |
tomck/intermine | intermine/objectstore/main/src/org/intermine/sql/writebatch/BatchWriterPostgresCopyImpl.java | 11352 | package org.intermine.sql.writebatch;
/*
* Copyright (C) 2002-2015 FlyMine
*
* This code may be freely distributed and modified under the
* terms of the GNU Lesser General Public Licence. This should
* be distributed with the code. See the LICENSE file for more
* information or http://www.gnu.org/copyleft/lesser.html.
*
*/
import java.io.DataOutputStream;
import java.io.IOException;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.apache.log4j.Logger;
import org.intermine.model.StringConstructor;
import org.postgresql.PGConnection;
import org.postgresql.copy.CopyManager;
/**
* An implementation of the BatchWriter interface that uses PostgreSQL-specific COPY commands.
*
* @author Matthew Wakeling
*/
public class BatchWriterPostgresCopyImpl extends BatchWriterPreparedStatementImpl
{
private static final Logger LOG = Logger.getLogger(BatchWriterPostgresCopyImpl.class);
protected static final BigInteger TEN = new BigInteger("10");
protected static final BigInteger HUNDRED = new BigInteger("100");
protected static final BigInteger THOUSAND = new BigInteger("1000");
protected static final BigInteger TEN_THOUSAND = new BigInteger("10000");
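    // Powers of ten used to peel base-10000 digits off a BigDecimal's
    // unscaled value when encoding PostgreSQL's binary NUMERIC wire format
    // (see the BigDecimal branch of writeObject below).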
/**
* {@inheritDoc}
*/
@SuppressWarnings("unchecked")
@Override
protected int doInserts(String name, TableBatch table, List<FlushJob> batches)
throws SQLException {
String[] colNames = table.getColNames();
if ((colNames != null) && (!table.getIdsToInsert().isEmpty())) {
try {
CopyManager copyManager = null;
if (con.isWrapperFor(PGConnection.class)) {
copyManager = con.unwrap(PGConnection.class).getCopyAPI();
}
if (copyManager == null) {
LOG.warn("Database with Connection " + con.getClass().getName()
+ " is incompatible with the PostgreSQL COPY command - falling"
+ " back to prepared statements");
super.doInserts(name, table, batches);
} else {
PostgresByteArrayOutputStream baos = new PostgresByteArrayOutputStream();
PostgresDataOutputStream dos = new PostgresDataOutputStream(baos);
dos.writeBytes("PGCOPY\n");
dos.writeByte(255);
dos.writeBytes("\r\n");
dos.writeByte(0); // Signature done
dos.writeInt(0); // Flags - we aren't supplying OIDS
dos.writeInt(0); // Length of header extension
for (Map.Entry<Object, Object> insertEntry : table.getIdsToInsert()
.entrySet()) {
Object inserts = insertEntry.getValue();
if (inserts instanceof Object[]) {
Object[] values = (Object[]) inserts;
dos.writeShort(colNames.length);
for (int i = 0; i < colNames.length; i++) {
writeObject(dos, values[i]);
}
} else {
for (Object[] values : ((List<Object[]>) inserts)) {
dos.writeShort(colNames.length);
for (int i = 0; i < colNames.length; i++) {
writeObject(dos, values[i]);
}
}
}
}
StringBuffer sqlBuffer = new StringBuffer("COPY ").append(name).append(" (");
for (int i = 0; i < colNames.length; i++) {
if (i > 0) {
sqlBuffer.append(", ");
}
sqlBuffer.append(colNames[i]);
}
sqlBuffer.append(") FROM STDIN BINARY");
String sql = sqlBuffer.toString();
dos.writeShort(-1);
dos.flush();
batches.add(new FlushJobPostgresCopyImpl(copyManager, sql,
baos.getBuffer(), baos.size()));
}
} catch (IOException e) {
throw new SQLException(e.toString());
}
return table.getIdsToInsert().size();
}
return 0;
}
// TODO: Add support for UUID.
private static void writeObject(PostgresDataOutputStream dos, Object o) throws IOException {
if (o == null) {
dos.writeInt(-1);
} else if (o instanceof Integer) {
dos.writeInt(4);
dos.writeInt(((Integer) o).intValue());
} else if (o instanceof Short) {
dos.writeInt(2);
dos.writeShort(((Short) o).intValue());
} else if (o instanceof Boolean) {
dos.writeInt(1);
dos.writeByte(((Boolean) o).booleanValue() ? 1 : 0);
} else if (o instanceof Float) {
dos.writeInt(4);
dos.writeFloat(((Float) o).floatValue());
} else if (o instanceof Double) {
dos.writeInt(8);
dos.writeDouble(((Double) o).doubleValue());
} else if (o instanceof Long) {
dos.writeInt(8);
dos.writeLong(((Long) o).longValue());
} else if (o instanceof String) {
dos.writeLargeUTF((String) o);
} else if (o instanceof StringConstructor) {
dos.writeLargeUTF((StringConstructor) o);
} else if (o instanceof BigDecimal) {
BigInteger unscaledValue = ((BigDecimal) o).unscaledValue();
int signum = ((BigDecimal) o).signum();
if (signum == -1) {
unscaledValue = unscaledValue.negate();
}
int scale = ((BigDecimal) o).scale();
int nBaseScale = (scale + 3) / 4;
int nBaseScaleRemainder = scale % 4;
List<Integer> digits = new ArrayList<Integer>();
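            // Peel off base-10000 digits, least significant first, as the
            // binary NUMERIC format expects. Illustrative (not from the
            // original source): 123.45 has unscaled value 12345 and scale 2,
            // giving digits [4500, 123], written high-to-low with weight 0
            // and dscale 2.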
if (nBaseScaleRemainder == 1) {
BigInteger[] res = unscaledValue.divideAndRemainder(TEN);
int digit = res[1].intValue() * 1000;
digits.add(new Integer(digit));
unscaledValue = res[0];
} else if (nBaseScaleRemainder == 2) {
BigInteger[] res = unscaledValue.divideAndRemainder(HUNDRED);
int digit = res[1].intValue() * 100;
digits.add(new Integer(digit));
unscaledValue = res[0];
} else if (nBaseScaleRemainder == 3) {
BigInteger[] res = unscaledValue.divideAndRemainder(THOUSAND);
int digit = res[1].intValue() * 10;
digits.add(new Integer(digit));
unscaledValue = res[0];
}
while (!unscaledValue.equals(BigInteger.ZERO)) {
BigInteger[] res = unscaledValue.divideAndRemainder(TEN_THOUSAND);
digits.add(new Integer(res[1].intValue()));
unscaledValue = res[0];
}
dos.writeInt(8 + (2 * digits.size()));
dos.writeShort(digits.size());
dos.writeShort(digits.size() - nBaseScale - 1);
dos.writeShort(signum == 1 ? 0x0000 : 0x4000);
dos.writeShort(scale);
//StringBuffer log = new StringBuffer("Writing BigDecimal ")
// .append(o.toString())
// .append(" as (digitCount = ")
// .append(Integer.toString(digits.size()))
// .append(", weight = ")
// .append(Integer.toString(digits.size() - nBaseScale - 1))
// .append(", sign = ")
// .append(Integer.toString(signum == 1 ? 0x0000 : 0x4000))
// .append(", dscale = ")
// .append(Integer.toString(scale))
// .append(")");
for (int i = digits.size() - 1; i >= 0; i--) {
int digit = digits.get(i).intValue();
dos.writeShort(digit);
// log.append(" " + digit);
}
//LOG.error(log.toString());
} else {
throw new IllegalArgumentException("Cannot store values of type " + o.getClass());
}
}
/**
* {@inheritDoc}
*/
@Override
protected int doIndirectionInserts(String name,
IndirectionTableBatch table, List<FlushJob> batches) throws SQLException {
if (!table.getRowsToInsert().isEmpty()) {
try {
CopyManager copyManager = null;
if (con.isWrapperFor(PGConnection.class)) {
copyManager = con.unwrap(PGConnection.class).getCopyAPI();
}
if (copyManager == null) {
LOG.warn("Database is incompatible with the PostgreSQL COPY command - falling"
+ " back to prepared statements");
super.doIndirectionInserts(name, table, batches);
} else {
PostgresByteArrayOutputStream baos = new PostgresByteArrayOutputStream();
DataOutputStream dos = new DataOutputStream(baos);
dos.writeBytes("PGCOPY\n");
dos.writeByte(255);
dos.writeBytes("\r\n");
dos.writeByte(0); // Signature done
dos.writeInt(0); // Flags - we aren't supplying OIDS
dos.writeInt(0); // Length of header extension
for (Row row : table.getRowsToInsert()) {
dos.writeShort(2);
dos.writeInt(4);
dos.writeInt(row.getLeft());
dos.writeInt(4);
dos.writeInt(row.getRight());
}
String sql = "COPY " + name + " (" + table.getLeftColName() + ", "
+ table.getRightColName() + ") FROM STDIN BINARY";
dos.writeShort(-1);
dos.flush();
batches.add(new FlushJobPostgresCopyImpl(copyManager, sql,
baos.getBuffer(), baos.size()));
}
} catch (IOException e) {
throw new SQLException(e.toString());
}
}
return table.getRowsToInsert().size();
}
/**
* {@inheritDoc}
*/
@Override
protected int getTableSize(String name, Connection conn) throws SQLException {
Statement s = conn.createStatement();
ResultSet r = s.executeQuery("SELECT reltuples FROM pg_class WHERE relname = '"
+ name.toLowerCase() + "'");
if (r.next()) {
int returnValue = (int) r.getFloat(1);
if (r.next()) {
throw new SQLException("Too many results for table " + name.toLowerCase());
}
return returnValue;
} else {
throw new SQLException("No results");
}
}
}
| lgpl-2.1 |
tectronics/phantomuserland | phantom/libphantom/init.c | 3747 | /**
*
* Phantom OS
*
* Copyright (C) 2005-2011 Dmitry Zavalishin, [email protected]
*
* Static constructors runner. General init/stop functions runner.
*
*
**/
#define DEBUG_MSG_PREFIX "init"
#include <debug_ext.h>
#define debug_level_flow 0
#define debug_level_error 10
#define debug_level_info 10
#include <sys/types.h>
#include <phantom_libc.h>
#include <kernel/init.h>
// See kernel/boot.h
unsigned int arch_flags = 0;
/* These magic symbols are provided by the linker. */
extern void (*_preinit_array_start []) (void) __attribute__((weak));
extern void (*_preinit_array_end []) (void) __attribute__((weak));
extern void (*_init_array_start []) (void) __attribute__((weak));
extern void (*_init_array_end []) (void) __attribute__((weak));
extern void (*_fini_array_start []) (void) __attribute__((weak));
extern void (*_fini_array_end []) (void) __attribute__((weak));
//extern void _init (void);
//extern void _fini (void);
/* Iterate over all the init routines. */
void __phantom_run_constructors (void)
{
size_t count;
size_t i;
/*
count = __preinit_array_end - __preinit_array_start;
for (i = 0; i < count; i++)
__preinit_array_start[i] ();
*/
// _init ();
#if 1
count = _init_array_end - _init_array_start;
/*
printf("%d c'tors (%p - %p) @ (%p - %p)\n",
count,
_init_array_start, _init_array_end,
&_init_array_start, &_init_array_end
);
*/
for (i = 0; i < count; i++)
{
//printf("c'tor %p\n", _init_array_start[i]);
_init_array_start[i] ();
}
#endif
}
/* Run all the cleanup routines. */
#if 0
void
__libc_fini_array (void)
{
    size_t count;
    size_t i;
    count = __fini_array_end - __fini_array_start;
    for (i = count; i > 0; i--)
        __fini_array_start[i-1] ();
    // _fini ();
}
#endif
// -----------------------------------------------------------------------
// General init code
// -----------------------------------------------------------------------
static struct init_record *init_list_root = 0;
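// New records are pushed onto the head of the list,
// so they run in reverse registration order.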
void register_init_record( struct init_record *ir )
{
ir->next = init_list_root;
init_list_root = ir;
}
static void run_next_init( int level, struct init_record *ir )
{
if( ir == 0 )
return;
SHOW_FLOW( 6, "init %d ir %p (%p,%p,%p)", level, ir, ir->init_1, ir->init_2, ir->init_3 );
switch( level )
{
case INIT_LEVEL_PREPARE:
if(ir->init_1) ir->init_1(); break;
case INIT_LEVEL_INIT:
if(ir->init_2) ir->init_2(); break;
case INIT_LEVEL_LATE:
if(ir->init_3) ir->init_3(); break;
default:
SHOW_ERROR( 0, "wrong level %d", level );
}
//if( ir->next )
run_next_init( level, ir->next );
}
void run_init_functions( int level )
{
run_next_init( level, init_list_root );
}
static struct init_record *stop_list_root = 0;
void register_stop_record( struct init_record *ir )
{
ir->next = stop_list_root;
stop_list_root = ir;
}
static void run_next_stop( int level, struct init_record *ir )
{
if( ir == 0 )
return;
switch( level )
{
case INIT_LEVEL_PREPARE:
if(ir->init_1) ir->init_1(); break;
case INIT_LEVEL_INIT:
if(ir->init_2) ir->init_2(); break;
case INIT_LEVEL_LATE:
if(ir->init_3) ir->init_3(); break;
default:
SHOW_ERROR( 0, "wrong level %d", level );
}
//if( ir->next )
        run_next_stop( level, ir->next );
}
volatile int phantom_stop_level = 0;
void run_stop_functions( int level )
{
phantom_stop_level = level;
run_next_stop( level, stop_list_root );
}
| lgpl-3.0 |
vigourouxjulien/thelia | setup/update.php | 9119 | <?php
/*************************************************************************************/
/* */
/* Thelia */
/* */
/* Copyright (c) OpenStudio */
/* email : [email protected] */
/* web : http://www.thelia.net */
/* */
/* This program is free software; you can redistribute it and/or modify */
/* it under the terms of the GNU General Public License as published by */
/* the Free Software Foundation; either version 3 of the License */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU General Public License for more details. */
/* */
/* You should have received a copy of the GNU General Public License */
/* along with this program. If not, see <http://www.gnu.org/licenses/>. */
/* */
/*************************************************************************************/
$bootstrapToggle = false;
$bootstraped = false;
// Autoload bootstrap
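// "-b <file>" lets the caller supply a custom bootstrap/autoload file.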
foreach ($argv as $arg) {
if ($arg === '-b') {
$bootstrapToggle = true;
continue;
}
if ($bootstrapToggle) {
require __DIR__ . DIRECTORY_SEPARATOR . $arg;
$bootstraped = true;
}
}
if (!$bootstraped) {
if (isset($bootstrapFile)) {
require $bootstrapFile;
} elseif (is_file($file = __DIR__ . '/../core/vendor/autoload.php')) {
require $file;
} elseif (is_file($file = __DIR__ . '/../../bootstrap.php')) {
// Here we are on a thelia/thelia-project
require $file;
} else {
cliOutput('No autoload file found. Please use the -b argument to include yours', 'error');
exit(1);
}
}
if (php_sapi_name() != 'cli') {
cliOutput('this script can only be launched with cli sapi', 'error');
exit(1);
}
use Symfony\Component\Filesystem\Filesystem;
use Symfony\Component\Finder\Finder;
use Thelia\Install\Exception\UpdateException;
/***************************************************
* Load Update class
***************************************************/
try {
$update = new \Thelia\Install\Update(false);
} catch (UpdateException $ex) {
cliOutput($ex->getMessage(), 'error');
exit(2);
}
/***************************************************
* Check if update is needed
***************************************************/
if ($update->isLatestVersion()) {
cliOutput("You already have the latest version of Thelia : " . $update->getCurrentVersion(), 'success');
exit(3);
}
$current = $update->getCurrentVersion();
$files = $update->getLatestVersion();
$web = $update->getWebVersion();
while (1) {
if ($web !== null && $files != $web) {
cliOutput(sprintf(
"Thelia server is reporting the current stable release version is %s ",
$web
), 'warning');
}
cliOutput(sprintf(
"You are going to update Thelia from version %s to version %s.",
$current,
$files
), 'info');
if ($web !== null && $files < $web) {
cliOutput(sprintf(
"Your files belongs to version %s, which is not the latest stable release.",
$web
), 'warning');
cliOutput(sprintf(
"It is recommended to upgrade your files first then run this script again." . PHP_EOL
. "The latest version is available at http://thelia.net/#download ."
), 'warning');
cliOutput("Continue update process anyway ? (Y/n)");
} else {
cliOutput("Continue update process ? (Y/n)");
}
$rep = readStdin(true);
if ($rep == 'y') {
break;
} elseif ($rep == 'n') {
cliOutput("Update aborted", 'warning');
exit(0);
}
}
$backup = false;
while (1) {
cliOutput(sprintf("Would you like to backup the current database before proceeding ? (Y/n)"));
$rep = readStdin(true);
if ($rep == 'y') {
$backup = true;
break;
} elseif ($rep == 'n') {
$backup = false;
break;
}
}
/***************************************************
* Update
***************************************************/
$updateError = null;
try {
// backup db
if (true === $backup) {
try {
$update->backupDb();
cliOutput(sprintf('Your database has been backed up. The sql file : %s', $update->getBackupFile()), 'info');
} catch (\Exception $e) {
cliOutput('Sorry, your database can\'t be backed up. Reason : ' . $e->getMessage(), 'error');
exit(4);
}
}
// update
$update->process($backup);
} catch (UpdateException $ex) {
$updateError = $ex;
}
foreach ($update->getMessages() as $message) {
cliOutput($message[0], $message[1]);
}
if (null === $updateError) {
    cliOutput(sprintf('Thelia has been successfully updated to version %s', $update->getCurrentVersion()), 'success');
if ($update->hasPostInstructions()) {
cliOutput('===================================');
cliOutput($update->getPostInstructions());
cliOutput('===================================');
}
} else {
    cliOutput(sprintf('Sorry, an unexpected error has occurred : %s', $updateError->getMessage()), 'error');
print $updateError->getTraceAsString() . PHP_EOL;
print "Trace: " . PHP_EOL;
foreach ($update->getLogs() as $log) {
cliOutput(sprintf('[%s] %s' . PHP_EOL, $log[0], $log[1]), 'error');
}
if (true === $backup) {
while (1) {
cliOutput("Would you like to restore the backup database ? (Y/n)");
$rep = readStdin(true);
if ($rep == 'y') {
cliOutput("Database restore started. Wait, it could take a while...");
if (false === $update->restoreDb()) {
cliOutput(sprintf(
                    'Sorry, your database can\'t be restored. Try to do it manually : %s',
$update->getBackupFile()
), 'error');
exit(5);
} else {
cliOutput("Database successfully restore.");
exit(5);
}
break;
} elseif ($rep == 'n') {
exit(0);
}
}
}
}
/***************************************************
* Try to delete cache
***************************************************/
$finder = new Finder();
$fs = new Filesystem();
$hasDeleteError = false;
$finder->files()->in(THELIA_CACHE_DIR);
cliOutput(sprintf("Try to delete cache in : %s", THELIA_CACHE_DIR), 'info');
foreach ($finder as $file) {
try {
$fs->remove($file);
} catch (\Symfony\Component\Filesystem\Exception\IOException $ex) {
$hasDeleteError = true;
}
}
if (true === $hasDeleteError) {
cliOutput("The cache has not been cleared properly. Try to run the command manually : " .
"(sudo) php Thelia cache:clear (--env=prod).");
}
cliOutput("Update process finished.", 'info');
exit(0);
/***************************************************
* Utils
***************************************************/
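/**
 * Read one line from STDIN; when $normalize is true the input is also
 * trimmed and lowercased.
 */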
function readStdin($normalize = false)
{
$fr = fopen("php://stdin", "r");
$input = fgets($fr, 128);
$input = rtrim($input);
fclose($fr);
if ($normalize) {
$input = strtolower(trim($input));
}
return $input;
}
function joinPaths()
{
$args = func_get_args();
$paths = [];
foreach ($args as $arg) {
$paths[] = trim($arg, '/\\');
}
$path = join(DIRECTORY_SEPARATOR, $paths);
if (substr($args[0], 0, 1) === '/') {
$path = DIRECTORY_SEPARATOR . $path;
}
return $path;
}
function cliOutput($message, $type = null)
{
switch ($type) {
case 'success':
$color = "\033[0;32m";
break;
case 'info':
$color = "\033[0;34m";
break;
case 'error':
$color = "\033[0;31m";
break;
case 'warning':
$color = "\033[1;33m";
break;
default:
$color = "\033[0m";
}
echo PHP_EOL . $color . $message . "\033[0m" . PHP_EOL;
}
| lgpl-3.0 |
nawawi/wkhtmltopdf | webkit/Source/WebKit/mac/WebView/WebFullScreenController.h | 2443 | /*
* Copyright (C) 2010, 2011 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
#if ENABLE(FULLSCREEN_API)
#import <wtf/OwnPtr.h>
#import <wtf/RefPtr.h>
#import <wtf/RetainPtr.h>
@class WebWindowFadeAnimation;
@class WebWindowScaleAnimation;
@class WebView;
namespace WebCore {
class DisplaySleepDisabler;
class Element;
class RenderBox;
class EventListener;
}
@interface WebFullScreenController : NSWindowController {
@private
RefPtr<WebCore::Element> _element;
WebView *_webView;
RetainPtr<NSView> _webViewPlaceholder;
RetainPtr<WebWindowScaleAnimation> _scaleAnimation;
RetainPtr<WebWindowFadeAnimation> _fadeAnimation;
RetainPtr<NSWindow> _backgroundWindow;
NSRect _initialFrame;
NSRect _finalFrame;
BOOL _isEnteringFullScreen;
BOOL _isExitingFullScreen;
BOOL _isFullScreen;
BOOL _isPlaying;
}
- (WebView*)webView;
- (void)setWebView:(WebView*)webView;
- (BOOL)isFullScreen;
- (void)setElement:(PassRefPtr<WebCore::Element>)element;
- (WebCore::Element*)element;
- (void)enterFullScreen:(NSScreen *)screen;
- (void)exitFullScreen;
- (void)close;
@end
#endif // ENABLE(FULLSCREEN_API)
| lgpl-3.0 |
Tybion/community-edition | projects/repository/source/java/org/alfresco/filesys/repo/rules/operations/OpenFileOperation.java | 2566 | /*
* Copyright (C) 2005-2010 Alfresco Software Limited.
*
* This file is part of Alfresco
*
* Alfresco is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Alfresco is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
*/
package org.alfresco.filesys.repo.rules.operations;
import org.alfresco.filesys.repo.OpenFileMode;
import org.alfresco.filesys.repo.rules.Operation;
import org.alfresco.service.cmr.repository.NodeRef;
/**
* Open File Operation.
* <p>
* Open a file with the given name.
*/
public class OpenFileOperation implements Operation
{
private String name;
private OpenFileMode mode;
private boolean truncate = false;
private String path;
private NodeRef rootNode;
/**
*
* @param name the name of the file to open
     * @param mode the {@link OpenFileMode} in which to open the file
* @param truncate boolean
* @param rootNode root node
* @param path the full path/name to open
*/
public OpenFileOperation(String name, OpenFileMode mode, boolean truncate, NodeRef rootNode, String path)
{
this.name = name;
this.rootNode = rootNode;
this.truncate = truncate;
this.path = path;
this.mode = mode;
}
public String getName()
{
return name;
}
public String getPath()
{
return path;
}
public NodeRef getRootNodeRef()
{
return rootNode;
}
public OpenFileMode getMode()
{
return mode;
}
public boolean isTruncate()
{
return truncate;
}
public String toString()
{
return "OpenFileOperation: " + name;
}
public int hashCode()
{
return name.hashCode();
}
public boolean equals(Object o)
{
if(o instanceof OpenFileOperation)
{
OpenFileOperation c = (OpenFileOperation)o;
if(name.equals(c.getName()))
{
return true;
}
}
return false;
}
}
| lgpl-3.0 |
hyller/CodeLibrary | Test_Driven_Development_for_Embedded_C/CppUTest/include/CppUTestExt/MockActualFunctionCall.h | 4257 | /***
* Excerpted from "Test-Driven Development for Embedded C",
* published by The Pragmatic Bookshelf.
* Copyrights apply to this code. It may not be used to create training material,
* courses, books, articles, and the like. Contact us if you are in doubt.
* We make no guarantees that this code is fit for any purpose.
* Visit http://www.pragmaticprogrammer.com/titles/jgade for more book information.
***/
/*
* Copyright (c) 2007, Michael Feathers, James Grenning and Bas Vodde
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the <organization> nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE EARLIER MENTIONED AUTHORS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <copyright holder> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef D_MockActualFunctionCall_h
#define D_MockActualFunctionCall_h
#include "CppUTestExt/MockFunctionCall.h"
#include "CppUTestExt/MockExpectedFunctionsList.h"
class MockFailureReporter;
class MockFailure;
class MockNamedValue;
class MockActualFunctionCall : public MockFunctionCall
{
public:
MockActualFunctionCall(MockFailureReporter* reporter, const MockExpectedFunctionsList& expectations);
virtual ~MockActualFunctionCall();
virtual MockFunctionCall& withName(const SimpleString& name);
virtual MockFunctionCall& withParameter(const SimpleString& name, int value);
virtual MockFunctionCall& withParameter(const SimpleString& name, double value);
virtual MockFunctionCall& withParameter(const SimpleString& name, const char* value);
virtual MockFunctionCall& withParameter(const SimpleString& name, void* value);
virtual MockFunctionCall& withParameterOfType(const SimpleString& type, const SimpleString& name, void* value);
virtual MockFunctionCall& andReturnValue(int value);
virtual MockFunctionCall& andReturnValue(double value);
virtual MockFunctionCall& andReturnValue(const char* value);
virtual MockFunctionCall& andReturnValue(void* value);
virtual bool hasReturnValue();
virtual MockNamedValue returnValue();
virtual MockFunctionCall& onObject(void* objectPtr);
virtual bool isFulfilled() const;
virtual bool hasFailed() const;
virtual void checkExpectations();
virtual void setMockFailureReporter(MockFailureReporter* reporter);
protected:
virtual Utest* getTest() const;
virtual void callHasSucceeded();
virtual void finnalizeCallWhenFulfilled();
virtual void failTest(const MockFailure& failure);
virtual void checkActualParameter(const MockNamedValue& actualParameter);
enum ActualCallState {
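    // Simple state machine for one actual call: in progress until it either
    // fails a check or fulfills an expectation.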
CALL_IN_PROGESS,
CALL_FAILED,
CALL_SUCCEED
};
virtual const char* stringFromState(ActualCallState state);
virtual void setState(ActualCallState state);
virtual void checkStateConsistency(ActualCallState oldState, ActualCallState newState);
private:
MockFailureReporter* reporter_;
ActualCallState state_;
MockExpectedFunctionCall* _fulfilledExpectation;
MockExpectedFunctionsList unfulfilledExpectations_;
const MockExpectedFunctionsList& allExpectations_;
};
#endif
| unlicense |
nemanja88/azure-powershell | src/ResourceManager/Compute/Commands.Compute/Generated/VirtualMachineScaleSetVM/VirtualMachineScaleSetVMGetMethod.cs | 9368 | //
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Warning: This code was generated by a tool.
//
// Changes to this file may cause incorrect behavior and will be lost if the
// code is regenerated.
using Microsoft.Azure;
using Microsoft.Azure.Commands.Compute.Automation.Models;
using Microsoft.Azure.Management.Compute;
using Microsoft.Azure.Management.Compute.Models;
using System;
using System.Collections;
using System.Collections.Generic;
using System.Linq;
using System.Management.Automation;
using System.Reflection;
namespace Microsoft.Azure.Commands.Compute.Automation
{
public partial class InvokeAzureComputeMethodCmdlet : ComputeAutomationBaseCmdlet
{
protected object CreateVirtualMachineScaleSetVMGetDynamicParameters()
{
dynamicParameters = new RuntimeDefinedParameterDictionary();
var pResourceGroupName = new RuntimeDefinedParameter();
pResourceGroupName.Name = "ResourceGroupName";
pResourceGroupName.ParameterType = typeof(string);
pResourceGroupName.Attributes.Add(new ParameterAttribute
{
ParameterSetName = "InvokeByDynamicParameters",
Position = 1,
Mandatory = true
});
pResourceGroupName.Attributes.Add(new AllowNullAttribute());
dynamicParameters.Add("ResourceGroupName", pResourceGroupName);
var pVMScaleSetName = new RuntimeDefinedParameter();
pVMScaleSetName.Name = "VMScaleSetName";
pVMScaleSetName.ParameterType = typeof(string);
pVMScaleSetName.Attributes.Add(new ParameterAttribute
{
ParameterSetName = "InvokeByDynamicParameters",
Position = 2,
Mandatory = true
});
pVMScaleSetName.Attributes.Add(new AllowNullAttribute());
dynamicParameters.Add("VMScaleSetName", pVMScaleSetName);
var pInstanceId = new RuntimeDefinedParameter();
pInstanceId.Name = "InstanceId";
pInstanceId.ParameterType = typeof(string);
pInstanceId.Attributes.Add(new ParameterAttribute
{
ParameterSetName = "InvokeByDynamicParameters",
Position = 3,
Mandatory = true
});
pInstanceId.Attributes.Add(new AllowNullAttribute());
dynamicParameters.Add("InstanceId", pInstanceId);
var pArgumentList = new RuntimeDefinedParameter();
pArgumentList.Name = "ArgumentList";
pArgumentList.ParameterType = typeof(object[]);
pArgumentList.Attributes.Add(new ParameterAttribute
{
ParameterSetName = "InvokeByStaticParameters",
Position = 4,
Mandatory = true
});
pArgumentList.Attributes.Add(new AllowNullAttribute());
dynamicParameters.Add("ArgumentList", pArgumentList);
return dynamicParameters;
}
protected void ExecuteVirtualMachineScaleSetVMGetMethod(object[] invokeMethodInputParameters)
{
string resourceGroupName = (string)ParseParameter(invokeMethodInputParameters[0]);
string vmScaleSetName = (string)ParseParameter(invokeMethodInputParameters[1]);
string instanceId = (string)ParseParameter(invokeMethodInputParameters[2]);
if (!string.IsNullOrEmpty(resourceGroupName) && !string.IsNullOrEmpty(vmScaleSetName) && !string.IsNullOrEmpty(instanceId))
{
var result = VirtualMachineScaleSetVMsClient.Get(resourceGroupName, vmScaleSetName, instanceId);
WriteObject(result);
}
else if (!string.IsNullOrEmpty(resourceGroupName) && !string.IsNullOrEmpty(vmScaleSetName))
{
var result = VirtualMachineScaleSetVMsClient.List(resourceGroupName, vmScaleSetName);
WriteObject(result);
}
}
}
public partial class NewAzureComputeArgumentListCmdlet : ComputeAutomationBaseCmdlet
{
protected PSArgument[] CreateVirtualMachineScaleSetVMGetParameters()
{
string resourceGroupName = string.Empty;
string vmScaleSetName = string.Empty;
string instanceId = string.Empty;
return ConvertFromObjectsToArguments(
new string[] { "ResourceGroupName", "VMScaleSetName", "InstanceId" },
new object[] { resourceGroupName, vmScaleSetName, instanceId });
}
}
[Cmdlet("Get", "AzureRmVmssVM", DefaultParameterSetName = "InvokeByDynamicParameters")]
public partial class GetAzureRmVmssVM : InvokeAzureComputeMethodCmdlet
{
public override string MethodName { get; set; }
protected override void ProcessRecord()
{
if (this.ParameterSetName == "InvokeByDynamicParameters")
{
this.MethodName = "VirtualMachineScaleSetVMGet";
}
else
{
this.MethodName = "VirtualMachineScaleSetVMGetInstanceView";
}
base.ProcessRecord();
}
public override object GetDynamicParameters()
{
dynamicParameters = new RuntimeDefinedParameterDictionary();
var pResourceGroupName = new RuntimeDefinedParameter();
pResourceGroupName.Name = "ResourceGroupName";
pResourceGroupName.ParameterType = typeof(string);
pResourceGroupName.Attributes.Add(new ParameterAttribute
{
ParameterSetName = "InvokeByDynamicParameters",
Position = 1,
Mandatory = false,
ValueFromPipeline = false
});
pResourceGroupName.Attributes.Add(new ParameterAttribute
{
ParameterSetName = "InvokeByDynamicParametersForFriendMethod",
Position = 1,
Mandatory = false,
ValueFromPipeline = false
});
pResourceGroupName.Attributes.Add(new AllowNullAttribute());
dynamicParameters.Add("ResourceGroupName", pResourceGroupName);
var pVMScaleSetName = new RuntimeDefinedParameter();
pVMScaleSetName.Name = "VMScaleSetName";
pVMScaleSetName.ParameterType = typeof(string);
pVMScaleSetName.Attributes.Add(new ParameterAttribute
{
ParameterSetName = "InvokeByDynamicParameters",
Position = 2,
Mandatory = false,
ValueFromPipeline = false
});
pVMScaleSetName.Attributes.Add(new ParameterAttribute
{
ParameterSetName = "InvokeByDynamicParametersForFriendMethod",
Position = 2,
Mandatory = false,
ValueFromPipeline = false
});
pVMScaleSetName.Attributes.Add(new AllowNullAttribute());
dynamicParameters.Add("VMScaleSetName", pVMScaleSetName);
var pInstanceId = new RuntimeDefinedParameter();
pInstanceId.Name = "InstanceId";
pInstanceId.ParameterType = typeof(string);
pInstanceId.Attributes.Add(new ParameterAttribute
{
ParameterSetName = "InvokeByDynamicParameters",
Position = 3,
Mandatory = false,
ValueFromPipeline = false
});
pInstanceId.Attributes.Add(new ParameterAttribute
{
ParameterSetName = "InvokeByDynamicParametersForFriendMethod",
Position = 3,
Mandatory = false,
ValueFromPipeline = false
});
pInstanceId.Attributes.Add(new AllowNullAttribute());
dynamicParameters.Add("InstanceId", pInstanceId);
var pInstanceView = new RuntimeDefinedParameter();
pInstanceView.Name = "InstanceView";
pInstanceView.ParameterType = typeof(SwitchParameter);
pInstanceView.Attributes.Add(new ParameterAttribute
{
ParameterSetName = "InvokeByDynamicParametersForFriendMethod",
Position = 4,
Mandatory = true
});
pInstanceView.Attributes.Add(new ParameterAttribute
{
ParameterSetName = "InvokeByStaticParametersForFriendMethod",
Position = 5,
Mandatory = true
});
pInstanceView.Attributes.Add(new AllowNullAttribute());
dynamicParameters.Add("InstanceView", pInstanceView);
return dynamicParameters;
}
}
}
| apache-2.0 |
OpenXIP/xip-libraries | src/extern/dcmtk-3.5.4/dcmjpeg/libijg8/jcomapi.c | 3112 | /*
* jcomapi.c
*
* Copyright (C) 1994-1997, Thomas G. Lane.
* This file is part of the Independent JPEG Group's software.
* For conditions of distribution and use, see the accompanying README file.
*
* This file contains application interface routines that are used for both
* compression and decompression.
*/
#define JPEG_INTERNALS
#include "jinclude8.h"
#include "jpeglib8.h"
/*
* Abort processing of a JPEG compression or decompression operation,
* but don't destroy the object itself.
*
* For this, we merely clean up all the nonpermanent memory pools.
* Note that temp files (virtual arrays) are not allowed to belong to
* the permanent pool, so we will be able to close all temp files here.
* Closing a data source or destination, if necessary, is the application's
* responsibility.
*/
GLOBAL(void)
jpeg_abort (j_common_ptr cinfo)
{
int pool;
/* Do nothing if called on a not-initialized or destroyed JPEG object. */
if (cinfo->mem == NULL)
return;
/* Releasing pools in reverse order might help avoid fragmentation
* with some (brain-damaged) malloc libraries.
*/
for (pool = JPOOL_NUMPOOLS-1; pool > JPOOL_PERMANENT; pool--) {
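    /* pool > JPOOL_PERMANENT: the permanent pool is deliberately left alone. */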
(*cinfo->mem->free_pool) (cinfo, pool);
}
/* Reset overall state for possible reuse of object */
if (cinfo->is_decompressor) {
cinfo->global_state = DSTATE_START;
/* Try to keep application from accessing now-deleted marker list.
* A bit kludgy to do it here, but this is the most central place.
*/
((j_decompress_ptr) cinfo)->marker_list = NULL;
} else {
cinfo->global_state = CSTATE_START;
}
}
/*
* Destruction of a JPEG object.
*
* Everything gets deallocated except the master jpeg_compress_struct itself
* and the error manager struct. Both of these are supplied by the application
* and must be freed, if necessary, by the application. (Often they are on
* the stack and so don't need to be freed anyway.)
* Closing a data source or destination, if necessary, is the application's
* responsibility.
*/
GLOBAL(void)
jpeg_destroy (j_common_ptr cinfo)
{
/* We need only tell the memory manager to release everything. */
/* NB: mem pointer is NULL if memory mgr failed to initialize. */
if (cinfo->mem != NULL)
(*cinfo->mem->self_destruct) (cinfo);
cinfo->mem = NULL; /* be safe if jpeg_destroy is called twice */
cinfo->global_state = 0; /* mark it destroyed */
}
/*
* Convenience routines for allocating quantization and Huffman tables.
* (Would jutils.c be a more reasonable place to put these?)
*/
GLOBAL(JQUANT_TBL *)
jpeg_alloc_quant_table (j_common_ptr cinfo)
{
JQUANT_TBL *tbl;
tbl = (JQUANT_TBL *)
(*cinfo->mem->alloc_small) (cinfo, JPOOL_PERMANENT, SIZEOF(JQUANT_TBL));
tbl->sent_table = FALSE; /* make sure this is false in any new table */
return tbl;
}
GLOBAL(JHUFF_TBL *)
jpeg_alloc_huff_table (j_common_ptr cinfo)
{
JHUFF_TBL *tbl;
tbl = (JHUFF_TBL *)
(*cinfo->mem->alloc_small) (cinfo, JPOOL_PERMANENT, SIZEOF(JHUFF_TBL));
tbl->sent_table = FALSE; /* make sure this is false in any new table */
return tbl;
}
| apache-2.0 |
andreasnef/fcrepo | fcrepo-server/src/main/java/org/fcrepo/server/utilities/status/ServerState.java | 1711 | /* The contents of this file are subject to the license and copyright terms
* detailed in the license directory at the root of the source tree (also
* available online at http://fedora-commons.org/license/).
*/
package org.fcrepo.server.utilities.status;
public class ServerState {
public static final ServerState NEW_SERVER = new ServerState("New Server");
public static final ServerState NOT_STARTING =
new ServerState("Not Starting");
public static final ServerState STARTING = new ServerState("Starting");
public static final ServerState STARTED = new ServerState("Started");
public static final ServerState STARTUP_FAILED =
new ServerState("Startup Failed");
public static final ServerState STOPPING = new ServerState("Stopping");
public static final ServerState STOPPED = new ServerState("Stopped");
public static final ServerState STOPPED_WITH_ERR =
new ServerState("Stopped with error");
public static final ServerState[] STATES =
new ServerState[] {NEW_SERVER, NOT_STARTING, STARTING, STARTED,
STARTUP_FAILED, STOPPING, STOPPED, STOPPED_WITH_ERR};
private final String _name;
private ServerState(String name) {
_name = name;
}
public String getName() {
return _name;
}
@Override
public String toString() {
return _name;
}
public static ServerState fromString(String name) throws Exception {
for (ServerState element : STATES) {
if (element.getName().equals(name)) {
return element;
}
}
throw new Exception("Unrecognized Server State: " + name);
}
}
| apache-2.0 |
eloekset/fluentmigrator | test/FluentMigrator.Tests/Integration/Processors/Oracle/OracleNative/OracleColumnTests.cs | 1137 | #region License
//
// Copyright (c) 2018, Fluent Migrator Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#endregion
using FluentMigrator.Runner;
using Microsoft.Extensions.DependencyInjection;
using NUnit.Framework;
namespace FluentMigrator.Tests.Integration.Processors.Oracle.OracleNative
{
[TestFixture]
[Category("Oracle")]
public class OracleColumnTests : OracleColumnTestsBase
{
/// <inheritdoc />
protected override IServiceCollection AddOracleServices(IServiceCollection services)
{
return services.ConfigureRunner(r => r.AddOracle());
}
}
}
| apache-2.0 |
ppittle/AlienSwarmDirectorMod | trunk/src/game/shared/swarm/asw_weapon_hornet_barrage.cpp | 5658 | #include "cbase.h"
#include "asw_weapon_hornet_barrage.h"
#ifdef CLIENT_DLL
#include "c_asw_player.h"
#include "c_asw_marine.h"
#include "c_asw_alien.h"
#include "asw_input.h"
#include "prediction.h"
#else
#include "asw_marine.h"
#include "asw_player.h"
#include "asw_alien.h"
#include "particle_parse.h"
#include "te_effect_dispatch.h"
#include "asw_rocket.h"
#include "asw_gamerules.h"
#endif
#include "asw_marine_skills.h"
// memdbgon must be the last include file in a .cpp file!!!
#include "tier0/memdbgon.h"
IMPLEMENT_NETWORKCLASS_ALIASED( ASW_Weapon_Hornet_Barrage, DT_ASW_Weapon_Hornet_Barrage )
BEGIN_NETWORK_TABLE( CASW_Weapon_Hornet_Barrage, DT_ASW_Weapon_Hornet_Barrage )
#ifdef CLIENT_DLL
RecvPropFloat( RECVINFO( m_flNextLaunchTime ) ),
RecvPropFloat( RECVINFO( m_flFireInterval ) ),
RecvPropInt( RECVINFO( m_iRocketsToFire ) ),
#else
SendPropFloat( SENDINFO( m_flNextLaunchTime ) ),
SendPropFloat( SENDINFO( m_flFireInterval ) ),
SendPropInt( SENDINFO( m_iRocketsToFire ), 8 ),
#endif
END_NETWORK_TABLE()
#ifdef CLIENT_DLL
BEGIN_PREDICTION_DATA( CASW_Weapon_Hornet_Barrage )
DEFINE_PRED_FIELD_TOL( m_flNextLaunchTime, FIELD_FLOAT, FTYPEDESC_INSENDTABLE, TD_MSECTOLERANCE ),
DEFINE_PRED_FIELD( m_iRocketsToFire, FIELD_INTEGER, FTYPEDESC_INSENDTABLE ),
END_PREDICTION_DATA()
#endif
LINK_ENTITY_TO_CLASS( asw_weapon_hornet_barrage, CASW_Weapon_Hornet_Barrage );
PRECACHE_WEAPON_REGISTER( asw_weapon_hornet_barrage );
#ifndef CLIENT_DLL
//---------------------------------------------------------
// Save/Restore
//---------------------------------------------------------
BEGIN_DATADESC( CASW_Weapon_Hornet_Barrage )
END_DATADESC()
#endif /* not client */
CASW_Weapon_Hornet_Barrage::CASW_Weapon_Hornet_Barrage()
{
}
void CASW_Weapon_Hornet_Barrage::Precache()
{
BaseClass::Precache();
PrecacheScriptSound( "ASW_Hornet_Barrage.Fire" );
}
bool CASW_Weapon_Hornet_Barrage::OffhandActivate()
{
if (!GetMarine() || GetMarine()->GetFlags() & FL_FROZEN) // don't allow this if the marine is frozen
return false;
PrimaryAttack();
return true;
}
void CASW_Weapon_Hornet_Barrage::PrimaryAttack()
{
CASW_Marine *pMarine = GetMarine();
if ( !pMarine )
return;
CASW_Player *pPlayer = GetCommander();
if ( !pPlayer )
return;
if ( m_iRocketsToFire.Get() > 0 )
return;
#ifndef CLIENT_DLL
bool bThisActive = (pMarine && pMarine->GetActiveWeapon() == this);
#endif
	// the weapon is removed once all of its ammo is spent
if ( UsesClipsForAmmo1() && !m_iClip1 )
{
//Reload();
#ifndef CLIENT_DLL
if (pMarine)
{
pMarine->Weapon_Detach(this);
if (bThisActive)
pMarine->SwitchToNextBestWeapon(NULL);
}
Kill();
#endif
return;
}
SetRocketsToFire();
m_flFireInterval = GetRocketFireInterval();
m_flNextLaunchTime = gpGlobals->curtime;
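	// The rockets themselves are launched over time from ItemPostFrame().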
const char *pszSound = "ASW_Hornet_Barrage.Fire";
CPASAttenuationFilter filter( this, pszSound );
if ( IsPredicted() && CBaseEntity::GetPredictionPlayer() )
{
filter.UsePredictionRules();
}
EmitSound( filter, entindex(), pszSound );
// decrement ammo
m_iClip1 -= 1;
m_flNextPrimaryAttack = gpGlobals->curtime + GetFireRate();
}
void CASW_Weapon_Hornet_Barrage::ItemPostFrame( void )
{
BaseClass::ItemPostFrame();
if ( GetRocketsToFire() > 0 && GetNextLaunchTime() <= gpGlobals->curtime )
{
FireRocket();
#ifndef CLIENT_DLL
if ( GetRocketsToFire() <= 0 )
{
DestroyIfEmpty( true );
}
#endif
}
}
void CASW_Weapon_Hornet_Barrage::SetRocketsToFire()
{
CASW_Marine *pMarine = GetMarine();
if ( !pMarine )
return;
m_iRocketsToFire = MarineSkills()->GetSkillBasedValueByMarine(pMarine, ASW_MARINE_SKILL_GRENADES, ASW_MARINE_SUBSKILL_GRENADE_HORNET_COUNT );
}
float CASW_Weapon_Hornet_Barrage::GetRocketFireInterval()
{
CASW_Marine *pMarine = GetMarine();
if ( !pMarine )
return 0.5f;
return MarineSkills()->GetSkillBasedValueByMarine(pMarine, ASW_MARINE_SKILL_GRENADES, ASW_MARINE_SUBSKILL_GRENADE_HORNET_INTERVAL );
}
void CASW_Weapon_Hornet_Barrage::FireRocket()
{
CASW_Player *pPlayer = GetCommander();
CASW_Marine *pMarine = GetMarine();
if ( !pPlayer || !pMarine || pMarine->GetHealth() <= 0 )
{
m_iRocketsToFire = 0;
return;
}
WeaponSound(SINGLE);
// tell the marine to tell its weapon to draw the muzzle flash
pMarine->DoMuzzleFlash();
pMarine->DoAnimationEvent( PLAYERANIMEVENT_FIRE_GUN_PRIMARY );
Vector vecSrc = GetRocketFiringPosition();
m_iRocketsToFire = m_iRocketsToFire.Get() - 1;
m_flNextLaunchTime = gpGlobals->curtime + m_flFireInterval.Get();
#ifndef CLIENT_DLL
float fGrenadeDamage = MarineSkills()->GetSkillBasedValueByMarine(pMarine, ASW_MARINE_SKILL_GRENADES, ASW_MARINE_SUBSKILL_GRENADE_HORNET_DMG );
CASW_Rocket::Create( fGrenadeDamage, vecSrc, GetRocketAngle(), pMarine, this );
if ( ASWGameRules() )
{
ASWGameRules()->m_fLastFireTime = gpGlobals->curtime;
}
pMarine->OnWeaponFired( this, 1 );
#endif
}
const QAngle& CASW_Weapon_Hornet_Barrage::GetRocketAngle()
{
static QAngle angRocket = vec3_angle;
CASW_Player *pPlayer = GetCommander();
CASW_Marine *pMarine = GetMarine();
if ( !pPlayer || !pMarine || pMarine->GetHealth() <= 0 )
{
return angRocket;
}
Vector vecDir = pPlayer->GetAutoaimVectorForMarine(pMarine, GetAutoAimAmount(), GetVerticalAdjustOnlyAutoAimAmount()); // 45 degrees = 0.707106781187
VectorAngles( vecDir, angRocket );
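	// Fan the rockets out up to 35 degrees either side of the aim direction.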
angRocket[ YAW ] += random->RandomFloat( -35, 35 );
return angRocket;
}
const Vector& CASW_Weapon_Hornet_Barrage::GetRocketFiringPosition()
{
CASW_Marine *pMarine = GetMarine();
if ( !pMarine )
return vec3_origin;
static Vector vecSrc;
vecSrc = pMarine->Weapon_ShootPosition();
return vecSrc;
} | apache-2.0 |
pnavarro/neutron | neutron/tests/unit/test_wsgi.py | 26842 | # Copyright 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import socket
import ssl
import urllib2
import mock
from oslo_config import cfg
import testtools
import webob
import webob.exc
from neutron.common import exceptions as exception
from neutron.tests import base
from neutron import wsgi
CONF = cfg.CONF
TEST_VAR_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
'..', 'var'))
def open_no_proxy(*args, **kwargs):
# NOTE(jamespage):
    # Deal with more secure certificate chain verification
# introduced in python 2.7.9 under PEP-0476
# https://github.com/python/peps/blob/master/pep-0476.txt
if hasattr(ssl, "_create_unverified_context"):
opener = urllib2.build_opener(
urllib2.ProxyHandler({}),
urllib2.HTTPSHandler(context=ssl._create_unverified_context())
)
else:
opener = urllib2.build_opener(urllib2.ProxyHandler({}))
return opener.open(*args, **kwargs)
class TestWorkerService(base.BaseTestCase):
"""WorkerService tests."""
@mock.patch('neutron.db.api')
def test_start_withoutdb_call(self, apimock):
_service = mock.Mock()
_service.pool = mock.Mock()
_service.pool.spawn = mock.Mock()
_service.pool.spawn.return_value = None
_app = mock.Mock()
cfg.CONF.set_override("connection", "", "database")
workerservice = wsgi.WorkerService(_service, _app)
workerservice.start()
self.assertFalse(apimock.get_engine.called)
class TestWSGIServer(base.BaseTestCase):
"""WSGI server tests."""
def test_start_random_port(self):
server = wsgi.Server("test_random_port")
server.start(None, 0, host="127.0.0.1")
self.assertNotEqual(0, server.port)
server.stop()
server.wait()
@mock.patch('neutron.openstack.common.service.ProcessLauncher')
def test_start_multiple_workers(self, ProcessLauncher):
launcher = ProcessLauncher.return_value
server = wsgi.Server("test_multiple_processes")
server.start(None, 0, host="127.0.0.1", workers=2)
launcher.launch_service.assert_called_once_with(mock.ANY, workers=2)
server.stop()
launcher.stop.assert_called_once_with()
server.wait()
launcher.wait.assert_called_once_with()
def test_start_random_port_with_ipv6(self):
server = wsgi.Server("test_random_port")
server.start(None, 0, host="::1")
self.assertEqual("::1", server.host)
self.assertNotEqual(0, server.port)
server.stop()
server.wait()
def test_ipv6_listen_called_with_scope(self):
server = wsgi.Server("test_app")
with mock.patch.object(wsgi.eventlet, 'listen') as mock_listen:
with mock.patch.object(socket, 'getaddrinfo') as mock_get_addr:
mock_get_addr.return_value = [
(socket.AF_INET6,
socket.SOCK_STREAM,
socket.IPPROTO_TCP,
'',
('fe80::204:acff:fe96:da87%eth0', 1234, 0, 2))
]
with mock.patch.object(server, 'pool') as mock_pool:
server.start(None,
1234,
host="fe80::204:acff:fe96:da87%eth0")
mock_get_addr.assert_called_once_with(
"fe80::204:acff:fe96:da87%eth0",
1234,
socket.AF_UNSPEC,
socket.SOCK_STREAM
)
mock_listen.assert_called_once_with(
('fe80::204:acff:fe96:da87%eth0', 1234, 0, 2),
family=socket.AF_INET6,
backlog=cfg.CONF.backlog
)
mock_pool.spawn.assert_has_calls([
mock.call(
server._run,
None,
mock_listen.return_value)
])
def test_app(self):
greetings = 'Hello, World!!!'
def hello_world(env, start_response):
if env['PATH_INFO'] != '/':
start_response('404 Not Found',
[('Content-Type', 'text/plain')])
return ['Not Found\r\n']
start_response('200 OK', [('Content-Type', 'text/plain')])
return [greetings]
server = wsgi.Server("test_app")
server.start(hello_world, 0, host="127.0.0.1")
response = open_no_proxy('http://127.0.0.1:%d/' % server.port)
self.assertEqual(greetings, response.read())
server.stop()
@mock.patch.object(wsgi, 'eventlet')
@mock.patch.object(wsgi, 'loggers')
def test__run(self, logging_mock, eventlet_mock):
server = wsgi.Server('test')
server._run("app", "socket")
eventlet_mock.wsgi.server.assert_called_once_with(
'socket',
'app',
max_size=server.num_threads,
log=mock.ANY,
keepalive=CONF.wsgi_keep_alive,
socket_timeout=server.client_socket_timeout
)
self.assertTrue(len(logging_mock.mock_calls))
class SerializerTest(base.BaseTestCase):
def test_serialize_unknown_content_type(self):
"""Verify that exception InvalidContentType is raised."""
input_dict = {'servers': {'test': 'pass'}}
content_type = 'application/unknown'
serializer = wsgi.Serializer()
self.assertRaises(
exception.InvalidContentType, serializer.serialize,
input_dict, content_type)
def test_get_deserialize_handler_unknown_content_type(self):
"""Verify that exception InvalidContentType is raised."""
content_type = 'application/unknown'
serializer = wsgi.Serializer()
self.assertRaises(
exception.InvalidContentType,
serializer.get_deserialize_handler, content_type)
def test_serialize_content_type_json(self):
"""Test serialize with content type json."""
input_data = {'servers': ['test=pass']}
content_type = 'application/json'
serializer = wsgi.Serializer()
result = serializer.serialize(input_data, content_type)
self.assertEqual('{"servers": ["test=pass"]}', result)
def test_deserialize_raise_bad_request(self):
"""Test serialize verifies that exception is raises."""
content_type = 'application/unknown'
data_string = 'test'
serializer = wsgi.Serializer()
self.assertRaises(
webob.exc.HTTPBadRequest,
serializer.deserialize, data_string, content_type)
def test_deserialize_json_content_type(self):
"""Test Serializer.deserialize with content type json."""
content_type = 'application/json'
data_string = '{"servers": ["test=pass"]}'
serializer = wsgi.Serializer()
result = serializer.deserialize(data_string, content_type)
self.assertEqual({'body': {u'servers': [u'test=pass']}}, result)
class RequestDeserializerTest(testtools.TestCase):
def setUp(self):
super(RequestDeserializerTest, self).setUp()
class JSONDeserializer(object):
def deserialize(self, data, action='default'):
return 'pew_json'
self.body_deserializers = {'application/json': JSONDeserializer()}
self.deserializer = wsgi.RequestDeserializer(self.body_deserializers)
def test_get_deserializer(self):
"""Test RequestDeserializer.get_body_deserializer."""
expected_json_serializer = self.deserializer.get_body_deserializer(
'application/json')
self.assertEqual(
expected_json_serializer,
self.body_deserializers['application/json'])
def test_get_expected_content_type(self):
"""Test RequestDeserializer.get_expected_content_type."""
request = wsgi.Request.blank('/')
request.headers['Accept'] = 'application/json'
self.assertEqual('application/json',
self.deserializer.get_expected_content_type(request))
def test_get_action_args(self):
"""Test RequestDeserializer.get_action_args."""
env = {
'wsgiorg.routing_args': [None, {
'controller': None,
'format': None,
'action': 'update',
'id': 12}]}
expected = {'action': 'update', 'id': 12}
self.assertEqual(expected,
self.deserializer.get_action_args(env))
def test_deserialize(self):
"""Test RequestDeserializer.deserialize."""
with mock.patch.object(
self.deserializer, 'get_action_args') as mock_method:
mock_method.return_value = {'action': 'create'}
request = wsgi.Request.blank('/')
request.headers['Accept'] = 'application/json'
deserialized = self.deserializer.deserialize(request)
expected = ('create', {}, 'application/json')
self.assertEqual(expected, deserialized)
def test_get_body_deserializer_unknown_content_type(self):
"""Verify that exception InvalidContentType is raised."""
content_type = 'application/unknown'
deserializer = wsgi.RequestDeserializer()
self.assertRaises(
exception.InvalidContentType,
deserializer.get_body_deserializer, content_type)
class ResponseSerializerTest(testtools.TestCase):
def setUp(self):
super(ResponseSerializerTest, self).setUp()
class JSONSerializer(object):
def serialize(self, data, action='default'):
return 'pew_json'
class HeadersSerializer(object):
def serialize(self, response, data, action):
response.status_int = 404
self.body_serializers = {'application/json': JSONSerializer()}
self.serializer = wsgi.ResponseSerializer(
self.body_serializers, HeadersSerializer())
def test_serialize_unknown_content_type(self):
"""Verify that exception InvalidContentType is raised."""
self.assertRaises(
exception.InvalidContentType,
self.serializer.serialize,
{}, 'application/unknown')
def test_get_body_serializer(self):
"""Verify that exception InvalidContentType is raised."""
self.assertRaises(
exception.InvalidContentType,
self.serializer.get_body_serializer, 'application/unknown')
def test_get_serializer(self):
"""Test ResponseSerializer.get_body_serializer."""
content_type = 'application/json'
self.assertEqual(self.body_serializers[content_type],
self.serializer.get_body_serializer(content_type))
def test_serialize_json_response(self):
response = self.serializer.serialize({}, 'application/json')
self.assertEqual('application/json', response.headers['Content-Type'])
self.assertEqual('pew_json', response.body)
self.assertEqual(404, response.status_int)
def test_serialize_response_None(self):
response = self.serializer.serialize(
None, 'application/json')
self.assertEqual('application/json', response.headers['Content-Type'])
self.assertEqual('', response.body)
self.assertEqual(404, response.status_int)
class RequestTest(base.BaseTestCase):
def test_content_type_missing(self):
request = wsgi.Request.blank('/tests/123', method='POST')
request.body = "<body />"
self.assertIsNone(request.get_content_type())
def test_content_type_unsupported(self):
request = wsgi.Request.blank('/tests/123', method='POST')
request.headers["Content-Type"] = "text/html"
request.body = "fake<br />"
self.assertIsNone(request.get_content_type())
def test_content_type_with_charset(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Content-Type"] = "application/json; charset=UTF-8"
result = request.get_content_type()
self.assertEqual("application/json", result)
def test_content_type_with_given_content_types(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Content-Type"] = "application/new-type;"
self.assertIsNone(request.get_content_type())
def test_content_type_from_accept(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = "application/json"
result = request.best_match_content_type()
self.assertEqual("application/json", result)
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = ("application/json; q=0.3")
result = request.best_match_content_type()
self.assertEqual("application/json", result)
def test_content_type_from_query_extension(self):
request = wsgi.Request.blank('/tests/123.json')
result = request.best_match_content_type()
self.assertEqual("application/json", result)
request = wsgi.Request.blank('/tests/123.invalid')
result = request.best_match_content_type()
self.assertEqual("application/json", result)
def test_content_type_accept_and_query_extension(self):
request = wsgi.Request.blank('/tests/123.json')
request.headers["Accept"] = "application/json"
result = request.best_match_content_type()
self.assertEqual("application/json", result)
def test_content_type_accept_default(self):
request = wsgi.Request.blank('/tests/123.unsupported')
request.headers["Accept"] = "application/unsupported1"
result = request.best_match_content_type()
self.assertEqual("application/json", result)
def test_content_type_accept_with_given_content_types(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = "application/new_type"
result = request.best_match_content_type()
self.assertEqual("application/json", result)
class ActionDispatcherTest(base.BaseTestCase):
def test_dispatch(self):
"""Test ActionDispatcher.dispatch."""
serializer = wsgi.ActionDispatcher()
serializer.create = lambda x: x
self.assertEqual('pants',
serializer.dispatch('pants', action='create'))
def test_dispatch_action_None(self):
"""Test ActionDispatcher.dispatch with none action."""
serializer = wsgi.ActionDispatcher()
serializer.create = lambda x: x + ' pants'
serializer.default = lambda x: x + ' trousers'
self.assertEqual('Two trousers',
serializer.dispatch('Two', action=None))
def test_dispatch_default(self):
serializer = wsgi.ActionDispatcher()
serializer.create = lambda x: x + ' pants'
serializer.default = lambda x: x + ' trousers'
self.assertEqual('Two trousers',
serializer.dispatch('Two', action='update'))
class ResponseHeadersSerializerTest(base.BaseTestCase):
def test_default(self):
serializer = wsgi.ResponseHeaderSerializer()
response = webob.Response()
serializer.serialize(response, {'v': '123'}, 'fake')
self.assertEqual(200, response.status_int)
def test_custom(self):
class Serializer(wsgi.ResponseHeaderSerializer):
def update(self, response, data):
response.status_int = 404
response.headers['X-Custom-Header'] = data['v']
serializer = Serializer()
response = webob.Response()
serializer.serialize(response, {'v': '123'}, 'update')
self.assertEqual(404, response.status_int)
self.assertEqual('123', response.headers['X-Custom-Header'])
class DictSerializerTest(base.BaseTestCase):
def test_dispatch_default(self):
serializer = wsgi.DictSerializer()
self.assertEqual('',
serializer.serialize({}, 'NonExistentAction'))
class JSONDictSerializerTest(base.BaseTestCase):
def test_json(self):
input_dict = dict(servers=dict(a=(2, 3)))
expected_json = '{"servers":{"a":[2,3]}}'
serializer = wsgi.JSONDictSerializer()
result = serializer.serialize(input_dict)
result = result.replace('\n', '').replace(' ', '')
self.assertEqual(expected_json, result)
def test_json_with_utf8(self):
input_dict = dict(servers=dict(a=(2, '\xe7\xbd\x91\xe7\xbb\x9c')))
expected_json = '{"servers":{"a":[2,"\\u7f51\\u7edc"]}}'
serializer = wsgi.JSONDictSerializer()
result = serializer.serialize(input_dict)
result = result.replace('\n', '').replace(' ', '')
self.assertEqual(expected_json, result)
def test_json_with_unicode(self):
input_dict = dict(servers=dict(a=(2, u'\u7f51\u7edc')))
expected_json = '{"servers":{"a":[2,"\\u7f51\\u7edc"]}}'
serializer = wsgi.JSONDictSerializer()
result = serializer.serialize(input_dict)
result = result.replace('\n', '').replace(' ', '')
self.assertEqual(expected_json, result)
class TextDeserializerTest(base.BaseTestCase):
def test_dispatch_default(self):
deserializer = wsgi.TextDeserializer()
self.assertEqual({},
deserializer.deserialize({}, 'update'))
class JSONDeserializerTest(base.BaseTestCase):
def test_json(self):
data = """{"a": {
"a1": "1",
"a2": "2",
"bs": ["1", "2", "3", {"c": {"c1": "1"}}],
"d": {"e": "1"},
"f": "1"}}"""
as_dict = {
'body': {
'a': {
'a1': '1',
'a2': '2',
'bs': ['1', '2', '3', {'c': {'c1': '1'}}],
'd': {'e': '1'},
'f': '1'}}}
deserializer = wsgi.JSONDeserializer()
self.assertEqual(as_dict,
deserializer.deserialize(data))
def test_default_raise_Malformed_Exception(self):
"""Test JsonDeserializer.default.
Test verifies JsonDeserializer.default raises exception
MalformedRequestBody correctly.
"""
data_string = ""
deserializer = wsgi.JSONDeserializer()
self.assertRaises(
exception.MalformedRequestBody, deserializer.default, data_string)
def test_json_with_utf8(self):
data = '{"a": "\xe7\xbd\x91\xe7\xbb\x9c"}'
as_dict = {'body': {'a': u'\u7f51\u7edc'}}
deserializer = wsgi.JSONDeserializer()
self.assertEqual(as_dict,
deserializer.deserialize(data))
def test_json_with_unicode(self):
data = '{"a": "\u7f51\u7edc"}'
as_dict = {'body': {'a': u'\u7f51\u7edc'}}
deserializer = wsgi.JSONDeserializer()
self.assertEqual(as_dict,
deserializer.deserialize(data))
class RequestHeadersDeserializerTest(base.BaseTestCase):
def test_default(self):
deserializer = wsgi.RequestHeadersDeserializer()
req = wsgi.Request.blank('/')
self.assertEqual({},
deserializer.deserialize(req, 'nonExistent'))
def test_custom(self):
class Deserializer(wsgi.RequestHeadersDeserializer):
def update(self, request):
return {'a': request.headers['X-Custom-Header']}
deserializer = Deserializer()
req = wsgi.Request.blank('/')
req.headers['X-Custom-Header'] = 'b'
self.assertEqual({'a': 'b'},
deserializer.deserialize(req, 'update'))
class ResourceTest(base.BaseTestCase):
@staticmethod
def my_fault_body_function():
return 'off'
class Controller(object):
def index(self, request, index=None):
return index
def test_dispatch(self):
resource = wsgi.Resource(self.Controller(),
self.my_fault_body_function)
actual = resource.dispatch(
resource.controller, 'index', action_args={'index': 'off'})
expected = 'off'
self.assertEqual(expected, actual)
def test_dispatch_unknown_controller_action(self):
resource = wsgi.Resource(self.Controller(),
self.my_fault_body_function)
self.assertRaises(
AttributeError, resource.dispatch,
resource.controller, 'create', {})
def test_malformed_request_body_throws_bad_request(self):
resource = wsgi.Resource(None, self.my_fault_body_function)
request = wsgi.Request.blank(
"/", body="{mal:formed", method='POST',
headers={'Content-Type': "application/json"})
response = resource(request)
self.assertEqual(400, response.status_int)
def test_wrong_content_type_throws_unsupported_media_type_error(self):
resource = wsgi.Resource(None, self.my_fault_body_function)
request = wsgi.Request.blank(
"/", body="{some:json}", method='POST',
headers={'Content-Type': "xxx"})
response = resource(request)
self.assertEqual(400, response.status_int)
def test_wrong_content_type_server_error(self):
resource = wsgi.Resource(None, self.my_fault_body_function)
request = wsgi.Request.blank(
"/", method='POST', headers={'Content-Type': "unknow"})
response = resource(request)
self.assertEqual(500, response.status_int)
def test_call_resource_class_bad_request(self):
class FakeRequest(object):
def __init__(self):
self.url = 'http://where.no'
self.environ = 'environ'
self.body = 'body'
def method(self):
pass
def best_match_content_type(self):
return 'best_match_content_type'
resource = wsgi.Resource(self.Controller(),
self.my_fault_body_function)
request = FakeRequest()
result = resource(request)
self.assertEqual(400, result.status_int)
def test_type_error(self):
resource = wsgi.Resource(self.Controller(),
self.my_fault_body_function)
request = wsgi.Request.blank(
"/", method='POST', headers={'Content-Type': "json"})
response = resource.dispatch(
request, action='index', action_args='test')
self.assertEqual(400, response.status_int)
def test_call_resource_class_internal_error(self):
class FakeRequest(object):
def __init__(self):
self.url = 'http://where.no'
self.environ = 'environ'
self.body = '{"Content-Type": "json"}'
def method(self):
pass
def best_match_content_type(self):
return 'application/json'
resource = wsgi.Resource(self.Controller(),
self.my_fault_body_function)
request = FakeRequest()
result = resource(request)
self.assertEqual(500, result.status_int)
class MiddlewareTest(base.BaseTestCase):
def test_process_response(self):
def application(environ, start_response):
response = 'Success'
return response
response = application('test', 'fake')
result = wsgi.Middleware(application).process_response(response)
self.assertEqual('Success', result)
class FaultTest(base.BaseTestCase):
def test_call_fault(self):
class MyException(object):
status_int = 415
explanation = 'test'
my_exceptions = MyException()
my_fault = wsgi.Fault(exception=my_exceptions)
request = wsgi.Request.blank(
"/", method='POST', headers={'Content-Type': "unknow"})
response = my_fault(request)
self.assertEqual(415, response.status_int)
class TestWSGIServerWithSSL(base.BaseTestCase):
"""WSGI server tests."""
def test_app_using_ssl(self):
CONF.set_default('use_ssl', True)
CONF.set_default("ssl_cert_file",
os.path.join(TEST_VAR_DIR, 'certificate.crt'))
CONF.set_default("ssl_key_file",
os.path.join(TEST_VAR_DIR, 'privatekey.key'))
greetings = 'Hello, World!!!'
@webob.dec.wsgify
def hello_world(req):
return greetings
server = wsgi.Server("test_app")
server.start(hello_world, 0, host="127.0.0.1")
response = open_no_proxy('https://127.0.0.1:%d/' % server.port)
self.assertEqual(greetings, response.read())
server.stop()
def test_app_using_ssl_combined_cert_and_key(self):
CONF.set_default('use_ssl', True)
CONF.set_default("ssl_cert_file",
os.path.join(TEST_VAR_DIR, 'certandkey.pem'))
greetings = 'Hello, World!!!'
@webob.dec.wsgify
def hello_world(req):
return greetings
server = wsgi.Server("test_app")
server.start(hello_world, 0, host="127.0.0.1")
response = open_no_proxy('https://127.0.0.1:%d/' % server.port)
self.assertEqual(greetings, response.read())
server.stop()
def test_app_using_ipv6_and_ssl(self):
CONF.set_default('use_ssl', True)
CONF.set_default("ssl_cert_file",
os.path.join(TEST_VAR_DIR, 'certificate.crt'))
CONF.set_default("ssl_key_file",
os.path.join(TEST_VAR_DIR, 'privatekey.key'))
greetings = 'Hello, World!!!'
@webob.dec.wsgify
def hello_world(req):
return greetings
server = wsgi.Server("test_app")
server.start(hello_world, 0, host="::1")
response = open_no_proxy('https://[::1]:%d/' % server.port)
self.assertEqual(greetings, response.read())
server.stop()
| apache-2.0 |
jasonchaffee/camel-labs | gateway/components/camel-pi4j/src/main/java/io/rhiot/component/pi4j/i2c/driver/BMP180Value.java | 1355 | /**
* Licensed to the Rhiot under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
 * The Rhiot licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.rhiot.component.pi4j.i2c.driver;
/**
 * Value object holding a single BMP180 sensor reading (temperature and pressure).
*/
public class BMP180Value {
private int pressure;
private double temperature;
public int getPressure() {
return pressure;
}
public void setPressure(int pressure) {
this.pressure = pressure;
}
public double getTemperature() {
return temperature;
}
public void setTemperature(double temperature) {
this.temperature = temperature;
}
public String toString() {
return "[temperature:" + temperature + ",pressure:" + pressure + "]";
}
}
| apache-2.0 |
tohou/diesel | diesel_tests/tests/deserialization.rs | 490 | use schema::*;
use diesel::*;
use std::borrow::Cow;
#[derive(Queryable, PartialEq, Debug)]
struct CowUser<'a> {
id: i32,
name: Cow<'a, str>,
}
#[test]
fn generated_queryable_allows_lifetimes() {
use schema::users::dsl::*;
let connection = connection_with_sean_and_tess_in_users_table();
let expected_user = CowUser {
id: 1,
name: Cow::Owned("Sean".to_string()),
};
assert_eq!(Ok(expected_user), users.select((id, name)).first(&connection));
}
| apache-2.0 |
AerialX/rust-rt-minimal | src/test/run-pass/trait-bounds-recursion.rs | 790 | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
trait I { fn i(&self) -> Self; }
trait A<T:I> : ::std::marker::MarkerTrait {
fn id(x:T) -> T { x.i() }
}
trait J<T> { fn j(&self) -> T; }
trait B<T:J<T>> : ::std::marker::MarkerTrait {
fn id(x:T) -> T { x.j() }
}
trait C : ::std::marker::MarkerTrait {
fn id<T:J<T>>(x:T) -> T { x.j() }
}
pub fn main() { }
| apache-2.0 |
snabbco/snabb | lib/luajit/testsuite/test/misc/catch_wrap.lua | 916 |
local cp = require("cpptest")
cp.wrapon()
do
local a, b = pcall(cp.catch, function() return "x" end)
assert(a == true and b == "x")
end
do
local a, b = pcall(function() cp.throw("foo") end)
assert(a == false and b == "foo")
end
local unwind
do
local a, b = pcall(cp.catch, function() cp.throw("foo") end)
unwind = a
assert((a == false and b == "foo") or (a == true and b == "catch ..."))
end
do
local st = cp.alloc(function() return cp.isalloc() end)
assert(st == true)
assert(cp.isalloc() == false)
end
do
local a, b = pcall(cp.alloc, function()
assert(cp.isalloc() == true)
return "foo", cp.throw
end)
assert(a == false and b == "foo")
assert(cp.isalloc() == false)
end
do
local a, b = pcall(cp.alloc, function()
assert(cp.isalloc() == true)
return "foo", error
end)
assert(a == false and b == "foo")
if unwind then assert(cp.isalloc() == false) end
end
| apache-2.0 |
BiznetGIO/horizon | openstack_dashboard/api/rest/urls.py | 1231 | # Copyright 2014, Rackspace, US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf import urls
urlpatterns = []
# to register the URLs for your API endpoints, decorate the view class with
# @register below, and then import the endpoint module in the
# rest_api/__init__.py module
def register(view):
"""Register API views to respond to a regex pattern.
``url_regex`` on a wrapped view class is used as the regex pattern.
The view should be a standard Django class-based view implementing an
as_view() method. The url_regex attribute of the view should be a standard
Django URL regex pattern.
"""
p = urls.url(view.url_regex, view.as_view())
urlpatterns.append(p)
return view
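# A minimal usage sketch (the view class below is hypothetical; real endpoint
# modules live alongside this file and are imported from rest_api/__init__.py):
#
#     @register
#     class ExampleView(generic.View):
#         url_regex = r'^example/$'
#
#         def get(self, request):
#             ...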
| apache-2.0 |
spring-projects/spring-boot | buildSrc/src/main/java/org/springframework/boot/build/test/IntegrationTestPlugin.java | 3330 | /*
* Copyright 2012-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.build.test;
import org.gradle.api.Plugin;
import org.gradle.api.Project;
import org.gradle.api.plugins.JavaPlugin;
import org.gradle.api.plugins.JavaPluginExtension;
import org.gradle.api.tasks.SourceSet;
import org.gradle.api.tasks.SourceSetContainer;
import org.gradle.api.tasks.testing.Test;
import org.gradle.language.base.plugins.LifecycleBasePlugin;
import org.gradle.plugins.ide.eclipse.EclipsePlugin;
import org.gradle.plugins.ide.eclipse.model.EclipseModel;
/**
* A {@link Plugin} to configure integration testing support in a {@link Project}.
*
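 * <p>A minimal usage sketch (applied from other build logic; illustrative only):
 * <pre>{@code
 * project.getPlugins().apply(IntegrationTestPlugin.class);
 * }</pre>
 *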
* @author Andy Wilkinson
*/
public class IntegrationTestPlugin implements Plugin<Project> {
/**
* Name of the {@code intTest} task.
*/
	public static final String INT_TEST_TASK_NAME = "intTest";
/**
* Name of the {@code intTest} source set.
*/
	public static final String INT_TEST_SOURCE_SET_NAME = "intTest";
@Override
public void apply(Project project) {
project.getPlugins().withType(JavaPlugin.class, (javaPlugin) -> configureIntegrationTesting(project));
}
private void configureIntegrationTesting(Project project) {
SourceSet intTestSourceSet = createSourceSet(project);
Test intTest = createTestTask(project, intTestSourceSet);
project.getTasks().getByName(LifecycleBasePlugin.CHECK_TASK_NAME).dependsOn(intTest);
project.getPlugins().withType(EclipsePlugin.class, (eclipsePlugin) -> {
EclipseModel eclipse = project.getExtensions().getByType(EclipseModel.class);
eclipse.classpath((classpath) -> classpath.getPlusConfigurations().add(
project.getConfigurations().getByName(intTestSourceSet.getRuntimeClasspathConfigurationName())));
});
}
private SourceSet createSourceSet(Project project) {
SourceSetContainer sourceSets = project.getExtensions().getByType(JavaPluginExtension.class).getSourceSets();
SourceSet intTestSourceSet = sourceSets.create(INT_TEST_SOURCE_SET_NAME);
SourceSet main = sourceSets.getByName(SourceSet.MAIN_SOURCE_SET_NAME);
intTestSourceSet.setCompileClasspath(intTestSourceSet.getCompileClasspath().plus(main.getOutput()));
intTestSourceSet.setRuntimeClasspath(intTestSourceSet.getRuntimeClasspath().plus(main.getOutput()));
return intTestSourceSet;
}
private Test createTestTask(Project project, SourceSet intTestSourceSet) {
Test intTest = project.getTasks().create(INT_TEST_TASK_NAME, Test.class);
intTest.setGroup(LifecycleBasePlugin.VERIFICATION_GROUP);
intTest.setDescription("Runs integration tests.");
intTest.setTestClassesDirs(intTestSourceSet.getOutput().getClassesDirs());
intTest.setClasspath(intTestSourceSet.getRuntimeClasspath());
intTest.shouldRunAfter(JavaPlugin.TEST_TASK_NAME);
return intTest;
}
}
| apache-2.0 |
n3wscott/service-catalog | docsite/_includes/templates/glossary/README.md | 4208 | # Kubernetes Glossary
To write a glossary snippet, start with a copy of the template, [`/_data/glossary/_example.yml`](/_data/glossary/_example.yml). Make sure to provide (or omit) values for the following fields:
* (Required) `id`
* This field must match the name of the glossary file itself (without the `*.yml` extension). It is *not* intended to be displayed to users, and is only used programmatically.
* (Required) `name`
* The name of the term.
* (Optional) `full-link`
* The link to any specific long-form documentation, starting with `https://` if not within the website repo, and `/docs/...` if within the repo.
* (Required) `tags`
* Must be one of the tags listed in the [tags directory in the website repository](https://github.com/kubernetes/website/tree/master/_data/canonical-tags).
* (Required) `short-description`
* Make sure to replace the instructional text in the template with your content.
* (Optional) `aka`
* These synonyms do not need to be glossary terms themselves (if they are deprecated), and can include spaces.
* (Optional) `related`
* These should be the `id`s (not the `names`) of related glossary terms.
* (Optional) `long-description`
* If you do not provide a long description, remove the field -- that is, the complete key-value pair.
The `_example.yml` template also contains basic information about how to write your snippet. For additional guidance, continue reading this readme.
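For example, a minimal snippet using only the required fields might look like this (the term, tag, and wording are purely illustrative):

```yaml
id: example-term
name: Example Term
tags:
- Fundamental
short-description: A tooltip-sized definition that can stand on its own.
```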
## Glossary snippet style guide
This style guide supplements the guidance provided in the glossary template. It's intended to help you think about what and how to write glossary definitions. For more general guidance on style, consult [the core docs style guide](https://kubernetes.io/docs/home/contribute/style-guide/).
### Minimum viable snippet:
Every snippet must include at least the short description. The long description is optional, but should be provided for terms that need additional clarification. For consistency with existing *Concept* definitions, *write your definitions as if the term is plural*.
**short-description** (Required): One line (or two short lines) that provides a minimum definition. Do not repeat the term. Prefer fragments. Model after tooltips. End with a period.
**long-description** (Optional): Longer additional text to appear after (in conjunction with) short description. Provide in cases where the short description is not sufficient for the intro paragraph to a topic. Write complete but concise sentences.
### Examples
```yaml
- name: Pod
- tags:
- Fundamental
- Workload
- API Object
- short-description: The smallest and simplest Kubernetes objects. Represent a set of running processes on your cluster.
- long-description: Pods most often run only a single container, and are managed by a Deployment.
```
```yaml
- name: Deployment
- tags:
- Fundamental
- Workload
- API Object
- short-description: Controllers that provide declarative updates for Pods and ReplicaSets.
- long-description: Deployments are responsible for creating and updating instances of an application.
```
### Thinking about definitions
* **Think of the short description as it would appear in a tooltip.** Is it sufficient to get the reader started? Is it short enough to be read inside a small UI element?
*Tip*: look at the API reference doc content (for example, https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.10/). Note, however, that this content should be used with care. The concept docs for Pod, for example, are clearer than the reference docs.
* **The long description should follow the short description to make a complete introduction to a topic.** (This is the content that appears at the top of the content, before any generated TOC.) Does it provide information that's not already clear from the short description? Does it provide information that readers should have a general sense of before they dive into the details of the topic it helps introduce?
*Tip:* the long description does not need to be long; it's intended to extend but not replace the short description. Look through current related docs for ideas. (The Deployment long description is taken from a tutorial, for example.)
| apache-2.0 |
ydai1124/gobblin-1 | gobblin-core-base/src/main/java/gobblin/writer/MultiWriterWatermarkTracker.java | 5266 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package gobblin.writer;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
import java.util.concurrent.ConcurrentHashMap;
import com.google.common.base.Optional;
import gobblin.source.extractor.CheckpointableWatermark;
/**
* A helper class that tracks committed and uncommitted watermarks.
* Useful for implementing {@link WatermarkAwareWriter}s that wrap other {@link WatermarkAwareWriter}s.
*
* Note: The current implementation is not meant to be used in a high-throughput scenario
* (e.g. in the path of a write or a callback). See {@link LastWatermarkTracker}.
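 *
 * <p>A rough usage sketch (the watermark variables are illustrative):
 * <pre>{@code
 *   MultiWriterWatermarkTracker tracker = new MultiWriterWatermarkTracker();
 *   tracker.unacknowledgedWatermark(inFlight); // record before the write is acknowledged
 *   tracker.committedWatermark(acked);         // record once the write succeeds
 *   Map<String, CheckpointableWatermark> safe = tracker.getAllCommitableWatermarks();
 * }</pre>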
*/
public class MultiWriterWatermarkTracker implements WatermarkTracker {
private final ConcurrentHashMap<String, Set<CheckpointableWatermark>> candidateCommittables = new ConcurrentHashMap<>();
private final ConcurrentHashMap<String, Set<CheckpointableWatermark>> unacknowledgedWatermarks = new ConcurrentHashMap<>();
/**
* Reset current state
*/
public synchronized void reset() {
candidateCommittables.clear();
unacknowledgedWatermarks.clear();
}
private synchronized Set<CheckpointableWatermark> getOrCreate(Map<String, Set<CheckpointableWatermark>> map, String key) {
if (map.containsKey(key)) {
return map.get(key);
} else {
Set<CheckpointableWatermark> set = new TreeSet<>();
map.put(key, set);
return set;
}
}
@Override
public void committedWatermarks(Map<String, CheckpointableWatermark> committedMap) {
committedWatermarks(committedMap.values());
}
public void committedWatermarks(Iterable<CheckpointableWatermark> committedStream) {
for (CheckpointableWatermark committed: committedStream) {
committedWatermark(committed);
}
}
@Override
public void committedWatermark(CheckpointableWatermark committed) {
getOrCreate(candidateCommittables, committed.getSource()).add(committed);
}
@Override
public void unacknowledgedWatermark(CheckpointableWatermark unacked) {
getOrCreate(unacknowledgedWatermarks, unacked.getSource()).add(unacked);
}
@Override
public void unacknowledgedWatermarks(Map<String, CheckpointableWatermark> unackedMap) {
for (CheckpointableWatermark unacked: unackedMap.values()) {
unacknowledgedWatermark(unacked);
}
}
@Override
public Map<String, CheckpointableWatermark> getAllCommitableWatermarks() {
Map<String, CheckpointableWatermark> commitables = new HashMap<>(candidateCommittables.size());
for (String source: candidateCommittables.keySet()) {
Optional<CheckpointableWatermark> commitable = getCommittableWatermark(source);
if (commitable.isPresent()) {
commitables.put(commitable.get().getSource(), commitable.get());
}
}
return commitables;
}
@Override
public Map<String, CheckpointableWatermark> getAllUnacknowledgedWatermarks() {
Map<String, CheckpointableWatermark> unackedMap = new HashMap<>(unacknowledgedWatermarks.size());
for (String source: unacknowledgedWatermarks.keySet()) {
Optional<CheckpointableWatermark> unacked = getUnacknowledgedWatermark(source);
if (unacked.isPresent()) {
unackedMap.put(unacked.get().getSource(), unacked.get());
}
}
return unackedMap;
}
public Optional<CheckpointableWatermark> getCommittableWatermark(String source) {
Set<CheckpointableWatermark> unacked = unacknowledgedWatermarks.get(source);
CheckpointableWatermark
minUnacknowledgedWatermark = (unacked == null || unacked.isEmpty())? null: unacked.iterator().next();
CheckpointableWatermark highestCommitableWatermark = null;
for (CheckpointableWatermark commitableWatermark : candidateCommittables.get(source)) {
if ((minUnacknowledgedWatermark == null) || (commitableWatermark.compareTo(minUnacknowledgedWatermark) < 0)) {
// commitableWatermark < minUnacknowledgedWatermark
highestCommitableWatermark = commitableWatermark;
}
}
if (highestCommitableWatermark == null) {
return Optional.absent();
} else {
return Optional.of(highestCommitableWatermark);
}
}
public Optional<CheckpointableWatermark> getUnacknowledgedWatermark(String source) {
Set<CheckpointableWatermark> unacked = unacknowledgedWatermarks.get(source);
if (unacked.isEmpty()) {
return Optional.absent();
} else {
return Optional.of(unacked.iterator().next());
}
}
}
| apache-2.0 |
lql5083psu/incubator-mnemonic | mnemonic-memory-services/mnemonic-java-vmem-service/src/main/java/org/apache/mnemonic/service/memory/internal/BufferBlockInfo.java | 2204 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.mnemonic.service.memory.internal;
import java.nio.ByteBuffer;
import java.util.BitSet;
import java.util.HashMap;
import java.util.Map;
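/**
 * Book-keeping for a single allocated buffer block: its base address and size,
 * the backing ByteBuffer, a bitmap of chunk occupancy, and per-chunk sizes.
 */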
public class BufferBlockInfo {
long bufferBlockBaseAddress = 0L;
int bufferBlockSize;
ByteBuffer bufferBlock = null;
BitSet bufferBlockChunksMap = null;
Map<Long, Integer> chunkSizeMap = new HashMap<>();
public ByteBuffer getBufferBlock() {
return bufferBlock;
}
public void setBufferBlock(ByteBuffer byteBufferBlock) {
this.bufferBlock = byteBufferBlock;
}
public BitSet getBufferBlockChunksMap() {
return bufferBlockChunksMap;
}
public void setBufferBlockChunksMap(BitSet chunksMap) {
this.bufferBlockChunksMap = chunksMap;
}
public long getBufferBlockBaseAddress() {
return bufferBlockBaseAddress;
}
public void setBufferBlockBaseAddress(long bufferBlockBaseAddress) {
this.bufferBlockBaseAddress = bufferBlockBaseAddress;
}
public int getBufferBlockSize() {
return bufferBlockSize;
}
public void setBufferBlockSize(int blockSize) {
this.bufferBlockSize = blockSize;
}
public Map<Long, Integer> getChunkSizeMap() {
return chunkSizeMap;
}
public void setChunkSizeMap(long chunkHandler, int chunkSize) {
chunkSizeMap.put(chunkHandler, chunkSize);
}
}
| apache-2.0 |
stoksey69/googleads-java-lib | modules/dfp_axis/src/main/java/com/google/api/ads/dfp/axis/v201505/ReconciliationImportErrorReason.java | 4014 | /**
* ReconciliationImportErrorReason.java
*
* This file was auto-generated from WSDL
* by the Apache Axis 1.4 Mar 02, 2009 (07:08:06 PST) WSDL2Java emitter.
*/
package com.google.api.ads.dfp.axis.v201505;
public class ReconciliationImportErrorReason implements java.io.Serializable {
private java.lang.String _value_;
private static java.util.HashMap _table_ = new java.util.HashMap();
// Constructor
protected ReconciliationImportErrorReason(java.lang.String value) {
_value_ = value;
_table_.put(_value_,this);
}
public static final java.lang.String _MISSING_EDITABLE_COLUMN = "MISSING_EDITABLE_COLUMN";
public static final java.lang.String _INCONSISTENT_IMPORT_COLUMNS = "INCONSISTENT_IMPORT_COLUMNS";
public static final java.lang.String _COLUMN_CONVERSION_TYPE_ERROR = "COLUMN_CONVERSION_TYPE_ERROR";
public static final java.lang.String _INCONSISTENT_COLUMNS_COUNT = "INCONSISTENT_COLUMNS_COUNT";
public static final java.lang.String _IMPORT_INTERNAL_ERROR = "IMPORT_INTERNAL_ERROR";
public static final java.lang.String _UNKNOWN = "UNKNOWN";
public static final ReconciliationImportErrorReason MISSING_EDITABLE_COLUMN = new ReconciliationImportErrorReason(_MISSING_EDITABLE_COLUMN);
public static final ReconciliationImportErrorReason INCONSISTENT_IMPORT_COLUMNS = new ReconciliationImportErrorReason(_INCONSISTENT_IMPORT_COLUMNS);
public static final ReconciliationImportErrorReason COLUMN_CONVERSION_TYPE_ERROR = new ReconciliationImportErrorReason(_COLUMN_CONVERSION_TYPE_ERROR);
public static final ReconciliationImportErrorReason INCONSISTENT_COLUMNS_COUNT = new ReconciliationImportErrorReason(_INCONSISTENT_COLUMNS_COUNT);
public static final ReconciliationImportErrorReason IMPORT_INTERNAL_ERROR = new ReconciliationImportErrorReason(_IMPORT_INTERNAL_ERROR);
public static final ReconciliationImportErrorReason UNKNOWN = new ReconciliationImportErrorReason(_UNKNOWN);
public java.lang.String getValue() { return _value_;}
public static ReconciliationImportErrorReason fromValue(java.lang.String value)
throws java.lang.IllegalArgumentException {
ReconciliationImportErrorReason enumeration = (ReconciliationImportErrorReason)
_table_.get(value);
if (enumeration==null) throw new java.lang.IllegalArgumentException();
return enumeration;
}
public static ReconciliationImportErrorReason fromString(java.lang.String value)
throws java.lang.IllegalArgumentException {
return fromValue(value);
}
public boolean equals(java.lang.Object obj) {return (obj == this);}
public int hashCode() { return toString().hashCode();}
public java.lang.String toString() { return _value_;}
public java.lang.Object readResolve() throws java.io.ObjectStreamException { return fromValue(_value_);}
public static org.apache.axis.encoding.Serializer getSerializer(
java.lang.String mechType,
java.lang.Class _javaType,
javax.xml.namespace.QName _xmlType) {
return
new org.apache.axis.encoding.ser.EnumSerializer(
_javaType, _xmlType);
}
public static org.apache.axis.encoding.Deserializer getDeserializer(
java.lang.String mechType,
java.lang.Class _javaType,
javax.xml.namespace.QName _xmlType) {
return
new org.apache.axis.encoding.ser.EnumDeserializer(
_javaType, _xmlType);
}
// Type metadata
private static org.apache.axis.description.TypeDesc typeDesc =
new org.apache.axis.description.TypeDesc(ReconciliationImportErrorReason.class);
static {
typeDesc.setXmlType(new javax.xml.namespace.QName("https://www.google.com/apis/ads/publisher/v201505", "ReconciliationImportError.Reason"));
}
/**
* Return type metadata object
*/
public static org.apache.axis.description.TypeDesc getTypeDesc() {
return typeDesc;
}
}
| apache-2.0 |
jfrazelle/kubernetes | staging/src/k8s.io/apiextensions-apiserver/test/integration/finalization_test.go | 5811 | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package integration
import (
"context"
"testing"
"time"
"github.com/stretchr/testify/require"
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
"k8s.io/apiextensions-apiserver/test/integration/fixtures"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
)
func TestFinalization(t *testing.T) {
tearDown, apiExtensionClient, dynamicClient, err := fixtures.StartDefaultServerWithClients(t)
require.NoError(t, err)
defer tearDown()
noxuDefinition := fixtures.NewNoxuCustomResourceDefinition(apiextensionsv1beta1.ClusterScoped)
noxuDefinition, err = fixtures.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient)
require.NoError(t, err)
ns := "not-the-default"
name := "foo123"
noxuResourceClient := newNamespacedCustomResourceClient(ns, dynamicClient, noxuDefinition)
instance := fixtures.NewNoxuInstance(ns, name)
instance.SetFinalizers([]string{"noxu.example.com/finalizer"})
createdNoxuInstance, err := instantiateCustomResource(t, instance, noxuResourceClient, noxuDefinition)
require.NoError(t, err)
uid := createdNoxuInstance.GetUID()
err = noxuResourceClient.Delete(name, &metav1.DeleteOptions{
Preconditions: &metav1.Preconditions{
UID: &uid,
},
})
require.NoError(t, err)
	// Deleting an object with a finalizer sets its deletion timestamp to a non-nil value but does not
	// remove the object from the API server. Here we read it back to confirm this.
gottenNoxuInstance, err := noxuResourceClient.Get(name, metav1.GetOptions{})
require.NoError(t, err)
require.NotNil(t, gottenNoxuInstance.GetDeletionTimestamp())
	// Try to delete it again to confirm the object is not removed while the finalizer is still present.
err = noxuResourceClient.Delete(name, &metav1.DeleteOptions{
Preconditions: &metav1.Preconditions{
UID: &uid,
},
})
require.NoError(t, err)
	// Remove the finalizers to allow the following delete to remove the object.
	// This step will fail if the previous delete wrongly removed the object. The
	// object will be deleted as part of the finalizer update.
for {
gottenNoxuInstance.SetFinalizers(nil)
_, err = noxuResourceClient.Update(gottenNoxuInstance, metav1.UpdateOptions{})
if err == nil {
break
}
if !errors.IsConflict(err) {
require.NoError(t, err) // Fail on unexpected error
}
gottenNoxuInstance, err = noxuResourceClient.Get(name, metav1.GetOptions{})
require.NoError(t, err)
}
// Check that the object is actually gone.
_, err = noxuResourceClient.Get(name, metav1.GetOptions{})
require.Error(t, err)
require.True(t, errors.IsNotFound(err), "%#v", err)
}
func TestFinalizationAndDeletion(t *testing.T) {
tearDown, apiExtensionClient, dynamicClient, err := fixtures.StartDefaultServerWithClients(t)
require.NoError(t, err)
defer tearDown()
// Create a CRD.
noxuDefinition := fixtures.NewNoxuCustomResourceDefinition(apiextensionsv1beta1.ClusterScoped)
noxuDefinition, err = fixtures.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient)
require.NoError(t, err)
// Create a CR with a finalizer.
ns := "not-the-default"
name := "foo123"
noxuResourceClient := newNamespacedCustomResourceClient(ns, dynamicClient, noxuDefinition)
instance := fixtures.NewNoxuInstance(ns, name)
instance.SetFinalizers([]string{"noxu.example.com/finalizer"})
createdNoxuInstance, err := instantiateCustomResource(t, instance, noxuResourceClient, noxuDefinition)
require.NoError(t, err)
// Delete a CR. Because there's a finalizer, it will not get deleted now.
uid := createdNoxuInstance.GetUID()
err = noxuResourceClient.Delete(name, &metav1.DeleteOptions{
Preconditions: &metav1.Preconditions{
UID: &uid,
},
})
require.NoError(t, err)
	// Check that the CR is scheduled for deletion.
gottenNoxuInstance, err := noxuResourceClient.Get(name, metav1.GetOptions{})
require.NoError(t, err)
require.NotNil(t, gottenNoxuInstance.GetDeletionTimestamp())
// Delete the CRD.
fixtures.DeleteCustomResourceDefinition(noxuDefinition, apiExtensionClient)
	// Check that the CR is still there after the CRD deletion.
gottenNoxuInstance, err = noxuResourceClient.Get(name, metav1.GetOptions{})
require.NoError(t, err)
// Update the CR to remove the finalizer.
for {
gottenNoxuInstance.SetFinalizers(nil)
_, err = noxuResourceClient.Update(gottenNoxuInstance, metav1.UpdateOptions{})
if err == nil {
break
}
if !errors.IsConflict(err) {
require.NoError(t, err) // Fail on unexpected error
}
gottenNoxuInstance, err = noxuResourceClient.Get(name, metav1.GetOptions{})
require.NoError(t, err)
}
	// Verify the CR is gone.
	// The Get should return a NotFound error.
_, err = noxuResourceClient.Get(name, metav1.GetOptions{})
if !errors.IsNotFound(err) {
t.Fatalf("unable to delete cr: %v", err)
}
err = wait.Poll(500*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
_, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(context.TODO(), noxuDefinition.Name, metav1.GetOptions{})
return errors.IsNotFound(err), err
})
if !errors.IsNotFound(err) {
t.Fatalf("unable to delete crd: %v", err)
}
}
| apache-2.0 |
stemlending/fabric | internal/peer/lifecycle/chaincode/mock/writer.go | 3051 | // Code generated by counterfeiter. DO NOT EDIT.
package mock
import (
"sync"
)
type Writer struct {
WriteFileStub func(string, string, []byte) error
writeFileMutex sync.RWMutex
writeFileArgsForCall []struct {
arg1 string
arg2 string
arg3 []byte
}
writeFileReturns struct {
result1 error
}
writeFileReturnsOnCall map[int]struct {
result1 error
}
invocations map[string][][]interface{}
invocationsMutex sync.RWMutex
}
func (fake *Writer) WriteFile(arg1 string, arg2 string, arg3 []byte) error {
var arg3Copy []byte
if arg3 != nil {
arg3Copy = make([]byte, len(arg3))
copy(arg3Copy, arg3)
}
fake.writeFileMutex.Lock()
ret, specificReturn := fake.writeFileReturnsOnCall[len(fake.writeFileArgsForCall)]
fake.writeFileArgsForCall = append(fake.writeFileArgsForCall, struct {
arg1 string
arg2 string
arg3 []byte
}{arg1, arg2, arg3Copy})
fake.recordInvocation("WriteFile", []interface{}{arg1, arg2, arg3Copy})
fake.writeFileMutex.Unlock()
if fake.WriteFileStub != nil {
return fake.WriteFileStub(arg1, arg2, arg3)
}
if specificReturn {
return ret.result1
}
fakeReturns := fake.writeFileReturns
return fakeReturns.result1
}
func (fake *Writer) WriteFileCallCount() int {
fake.writeFileMutex.RLock()
defer fake.writeFileMutex.RUnlock()
return len(fake.writeFileArgsForCall)
}
func (fake *Writer) WriteFileCalls(stub func(string, string, []byte) error) {
fake.writeFileMutex.Lock()
defer fake.writeFileMutex.Unlock()
fake.WriteFileStub = stub
}
func (fake *Writer) WriteFileArgsForCall(i int) (string, string, []byte) {
fake.writeFileMutex.RLock()
defer fake.writeFileMutex.RUnlock()
argsForCall := fake.writeFileArgsForCall[i]
return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3
}
func (fake *Writer) WriteFileReturns(result1 error) {
fake.writeFileMutex.Lock()
defer fake.writeFileMutex.Unlock()
fake.WriteFileStub = nil
fake.writeFileReturns = struct {
result1 error
}{result1}
}
func (fake *Writer) WriteFileReturnsOnCall(i int, result1 error) {
fake.writeFileMutex.Lock()
defer fake.writeFileMutex.Unlock()
fake.WriteFileStub = nil
if fake.writeFileReturnsOnCall == nil {
fake.writeFileReturnsOnCall = make(map[int]struct {
result1 error
})
}
fake.writeFileReturnsOnCall[i] = struct {
result1 error
}{result1}
}
func (fake *Writer) Invocations() map[string][][]interface{} {
fake.invocationsMutex.RLock()
defer fake.invocationsMutex.RUnlock()
fake.writeFileMutex.RLock()
defer fake.writeFileMutex.RUnlock()
copiedInvocations := map[string][][]interface{}{}
for key, value := range fake.invocations {
copiedInvocations[key] = value
}
return copiedInvocations
}
func (fake *Writer) recordInvocation(key string, args []interface{}) {
fake.invocationsMutex.Lock()
defer fake.invocationsMutex.Unlock()
if fake.invocations == nil {
fake.invocations = map[string][][]interface{}{}
}
if fake.invocations[key] == nil {
fake.invocations[key] = [][]interface{}{}
}
fake.invocations[key] = append(fake.invocations[key], args)
}
| apache-2.0 |
yyangpan/forestdb | src/hbtrie.h | 5885 | /* -*- Mode: C++; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */
/*
* Copyright 2010 Couchbase, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef _JSAHN_HBTRIE_H
#define _JSAHN_HBTRIE_H
#include "common.h"
#include "btree.h"
#include "list.h"
#ifdef __cplusplus
extern "C" {
#endif
#define HBTRIE_MAX_KEYLEN (FDB_MAX_KEYLEN_INTERNAL+16)
#define HBTRIE_HEADROOM (256)
typedef size_t hbtrie_func_readkey(void *handle, uint64_t offset, void *buf);
typedef int hbtrie_cmp_func(void *key1, void *key2, void* aux);
// a function pointer to a routine that returns a function pointer
typedef hbtrie_cmp_func *hbtrie_cmp_map(void *chunk, void *aux);
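/*
 * A minimal sketch of a map routine (my_cmp and my_uses_custom_order are
 * hypothetical): inspect the current key chunk and return the compare
 * function that should order it; returning NULL is assumed to fall back
 * to the default byte ordering.
 *
 *   hbtrie_cmp_func *my_map(void *chunk, void *aux) {
 *       return my_uses_custom_order(chunk) ? my_cmp : NULL;
 *   }
 */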
typedef enum {
HBTRIE_RESULT_SUCCESS,
HBTRIE_RESULT_UPDATE,
HBTRIE_RESULT_FAIL
} hbtrie_result;
#define HBTRIE_FLAG_COMPACT (0x01)
struct btree_blk_ops;
struct btree_kv_ops;
struct hbtrie {
uint8_t chunksize;
uint8_t valuelen;
uint8_t flag;
uint8_t leaf_height_limit;
uint32_t btree_nodesize;
bid_t root_bid;
void *btreeblk_handle;
void *doc_handle;
void *aux;
struct btree_blk_ops *btree_blk_ops;
struct btree_kv_ops *btree_kv_ops;
struct btree_kv_ops *btree_leaf_kv_ops;
hbtrie_func_readkey *readkey;
hbtrie_cmp_map *map;
btree_cmp_args cmp_args;
void *last_map_chunk;
};
struct hbtrie_iterator {
struct hbtrie trie;
struct list btreeit_list;
void *curkey;
size_t keylen;
uint8_t flags;
#define HBTRIE_ITERATOR_REV 0x01
#define HBTRIE_ITERATOR_FAILED 0x02
#define HBTRIE_ITERATOR_MOVED 0x04
};
#define HBTRIE_ITR_IS_REV(iterator) \
((iterator)->flags & HBTRIE_ITERATOR_REV)
#define HBTRIE_ITR_IS_FWD(iterator) \
(!((iterator)->flags & HBTRIE_ITERATOR_REV))
#define HBTRIE_ITR_SET_REV(iterator) \
((iterator)->flags |= HBTRIE_ITERATOR_REV)
#define HBTRIE_ITR_SET_FWD(iterator) \
((iterator)->flags &= ~HBTRIE_ITERATOR_REV)
#define HBTRIE_ITR_IS_FAILED(iterator) \
((iterator)->flags & HBTRIE_ITERATOR_FAILED)
#define HBTRIE_ITR_SET_FAILED(iterator) \
((iterator)->flags |= HBTRIE_ITERATOR_FAILED)
#define HBTRIE_ITR_CLR_FAILED(iterator) \
((iterator)->flags &= ~HBTRIE_ITERATOR_FAILED)
#define HBTRIE_ITR_IS_MOVED(iterator) \
((iterator)->flags & HBTRIE_ITERATOR_MOVED)
#define HBTRIE_ITR_SET_MOVED(iterator) \
((iterator)->flags |= HBTRIE_ITERATOR_MOVED)
int _hbtrie_reform_key(struct hbtrie *trie, void *rawkey, int rawkeylen, void *outkey);
void hbtrie_get_chunk(struct hbtrie *trie,
void *key,
int keylen,
int chunkno,
void *out);
void hbtrie_init(struct hbtrie *trie,
int chunksize,
int valuelen,
int btree_nodesize,
bid_t root_bid,
void *btreeblk_handle,
struct btree_blk_ops *btree_blk_ops,
void *doc_handle,
hbtrie_func_readkey *readkey);
void hbtrie_free(struct hbtrie *trie);
void hbtrie_set_flag(struct hbtrie *trie, uint8_t flag);
void hbtrie_set_leaf_height_limit(struct hbtrie *trie, uint8_t limit);
void hbtrie_set_leaf_cmp(struct hbtrie *trie, btree_cmp_func *cmp);
void hbtrie_set_map_function(struct hbtrie *trie,
hbtrie_cmp_map *map_func);
hbtrie_result hbtrie_iterator_init(struct hbtrie *trie,
struct hbtrie_iterator *it,
void *initial_key,
size_t keylen);
hbtrie_result hbtrie_iterator_free(struct hbtrie_iterator *it);
hbtrie_result hbtrie_last(struct hbtrie_iterator *it);
hbtrie_result hbtrie_prev(struct hbtrie_iterator *it,
void *key_buf,
size_t *keylen,
void *value_buf);
hbtrie_result hbtrie_next(struct hbtrie_iterator *it,
void *key_buf,
size_t *keylen,
void *value_buf);
hbtrie_result hbtrie_next_value_only(struct hbtrie_iterator *it,
void *value_buf);
hbtrie_result hbtrie_find(struct hbtrie *trie,
void *rawkey,
int rawkeylen,
void *valuebuf);
hbtrie_result hbtrie_find_offset(struct hbtrie *trie,
void *rawkey,
int rawkeylen,
void *valuebuf);
hbtrie_result hbtrie_find_partial(struct hbtrie *trie, void *rawkey,
int rawkeylen, void *valuebuf);
hbtrie_result hbtrie_remove(struct hbtrie *trie, void *rawkey, int rawkeylen);
hbtrie_result hbtrie_remove_partial(struct hbtrie *trie,
void *rawkey,
int rawkeylen);
hbtrie_result hbtrie_insert(struct hbtrie *trie,
void *rawkey,
int rawkeylen,
void *value,
void *oldvalue_out);
hbtrie_result hbtrie_insert_partial(struct hbtrie *trie,
void *rawkey, int rawkeylen,
void *value, void *oldvalue_out);
#ifdef __cplusplus
}
#endif
#endif
| apache-2.0 |
firzhan/wso2-ode | jbi-karaf-commands/src/main/java/org/apache/ode/karaf/commands/OdeCommandsBase.java | 3894 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.ode.karaf.commands;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import javax.management.*;
import org.apache.felix.karaf.shell.console.OsgiCommandSupport;
import org.apache.ode.bpel.pmapi.*;
import org.apache.ode.jbi.OdeContext;
public abstract class OdeCommandsBase extends OsgiCommandSupport {
protected static String COMPONENT_NAME = "org.apache.servicemix:Type=Component,Name=OdeBpelEngine,SubType=Management";
protected static final String LIST_ALL_PROCESSES = "listAllProcesses";
protected static final String LIST_ALL_INSTANCES = "listAllInstances";
protected static final String TERMINATE = "terminate";
protected MBeanServer getMBeanServer() {
OdeContext ode = OdeContext.getInstance();
if (ode != null) {
return ode.getContext().getMBeanServer();
}
return null;
}
/**
* Invokes an operation on the ODE MBean server
*
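	 * For example, a call mirroring {@link #getProcesses(long)} below:
	 * <pre>{@code
	 * ProcessInfoListDocument doc = invoke(LIST_ALL_PROCESSES, null, null,
	 *         ProcessInfoListDocument.class, timeoutInSeconds);
	 * }</pre>
	 *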
	 * @param <T> the expected return type
	 * @param operationName the name of the MBean operation to invoke
	 * @param params the arguments to pass to the operation
	 * @param signature the JMX signature of the operation
	 * @param T the class object for the expected return type
	 * @param timeoutInSeconds how long to wait for the invocation to complete
	 * @return the result of the invocation, or null if no MBean server is available
*/
@SuppressWarnings("unchecked")
protected <T> T invoke(final String operationName, final Object[] params,
final String[] signature, Class<?> T, long timeoutInSeconds)
throws Exception {
ExecutorService executor = Executors.newSingleThreadExecutor();
Callable<T> callable = new Callable<T>() {
public T call() throws Exception {
MBeanServer server = getMBeanServer();
if (server != null) {
return (T) server.invoke(new ObjectName(COMPONENT_NAME),
operationName, params, signature);
}
return null;
}
};
Future<T> future = executor.submit(callable);
executor.shutdown();
return future.get(timeoutInSeconds, TimeUnit.SECONDS);
}
protected List<TProcessInfo> getProcesses(long timeoutInSeconds)
throws Exception {
ProcessInfoListDocument result = invoke(LIST_ALL_PROCESSES, null, null,
ProcessInfoListDocument.class, timeoutInSeconds);
if (result != null) {
return result.getProcessInfoList().getProcessInfoList();
}
return null;
}
protected List<TInstanceInfo> getActiveInstances(long timeoutInSeconds)
throws Exception {
InstanceInfoListDocument instances = invoke(LIST_ALL_INSTANCES, null,
null, InstanceInfoListDocument.class, timeoutInSeconds);
if (instances != null) {
return instances.getInstanceInfoList().getInstanceInfoList();
}
return null;
}
protected void terminate(Long iid, long timeoutInSeconds) throws Exception {
invoke(TERMINATE, new Long[] { iid }, new String[] { Long.class
.getName() }, InstanceInfoDocument.class, timeoutInSeconds);
}
}
| apache-2.0 |
belliottsmith/cassandra | doc/modules/cassandra/examples/BASH/find_two_snapshots.sh | 103 | $ cd ./cassandra/data/data/catalogkeyspace/journal-296a2d30c22a11e9b1350d927649052c/snapshots && ls -l
| apache-2.0 |
wwzhe/dataworks-zeus | web/.externalToolBuilders/schedule/src/main/java/com/taobao/zeus/jobs/sub/MapReduceJob.java | 3072 | package com.taobao.zeus.jobs.sub;
import java.io.File;
import java.io.FileFilter;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.ToolRunner;
import com.taobao.zeus.jobs.JobContext;
import com.taobao.zeus.jobs.sub.conf.ConfUtil;
import com.taobao.zeus.jobs.sub.main.MapReduceMain;
import com.taobao.zeus.jobs.sub.tool.DownloadHdfsFileJob;
import com.taobao.zeus.store.HierarchyProperties;
import com.taobao.zeus.util.RunningJobKeys;
public class MapReduceJob extends JavaJob{
public MapReduceJob(JobContext jobContext) {
super(jobContext);
String main=getJavaClass();
String args=getMainArguments();
String classpath=getClassPaths();
jobContext.getProperties().setProperty(RunningJobKeys.RUN_JAVA_MAIN_CLASS, "com.taobao.zeus.jobs.sub.main.MapReduceMain");
classpath=getMRClassPath(classpath);
jobContext.getProperties().setProperty(RunningJobKeys.RUN_CLASSPATH, classpath+
File.pathSeparator+getSourcePathFromClass(MapReduceMain.class));
jobContext.getProperties().setProperty(RunningJobKeys.RUN_JAVA_MAIN_ARGS, main+" "+args);
jobContext.getProperties().setProperty(RunningJobKeys.JOB_RUN_TYPE, "MapReduceJob");
}
	// JAR dependencies for Hadoop 2: the Apache distribution keeps the required jars
	// under ${HADOOP_HOME}/libs/, while other distributions may use ${HADOOP_HOME}/lib.
public String getMRClassPath(String classpath){
StringBuilder sb=new StringBuilder(classpath);
String hadoophome=System.getenv("HADOOP_HOME");
if(hadoophome!=null && !"".equals(hadoophome)){
File f1=new File(hadoophome+"/libs");
if(f1.exists()){
sb.append(File.pathSeparator);
sb.append(hadoophome);
sb.append("/libs/*");
}
File f2=new File(hadoophome+"/lib");
if(f2.exists()){
sb.append(File.pathSeparator);
sb.append(hadoophome);
sb.append("/lib/*");
}
}
return sb.toString();
}
@Override
public Integer run() throws Exception {
List<Map<String, String>> resources=jobContext.getResources();
if(resources!=null && !resources.isEmpty()){
StringBuffer sb=new StringBuffer();
for(Map<String, String> map:jobContext.getResources()){
if(map.get("uri")!=null){
String uri=map.get("uri");
if(uri.startsWith("hdfs://") && uri.endsWith(".jar")){
sb.append(uri.substring("hdfs://".length())).append(",");
}
}
}
jobContext.getProperties().setProperty("core-site.tmpjars", sb.toString().substring(0, sb.toString().length()-1));
}
return super.run();
}
public static void main(String[] args) {
JobContext context=JobContext.getTempJobContext(JobContext.SYSTEM_RUN);
Map<String, String> map=new HashMap<String, String>();
map.put("hadoop.ugi.name", "uginame");
HierarchyProperties properties=new HierarchyProperties(map);
context.setProperties(properties);
new MapReduceJob(context);
}
}
| apache-2.0 |
GBGamer/rust | src/test/ui/borrowck/two-phase-sneaky.rs | 1031 | // Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-flags: -Z borrowck=mir -Z two-phase-borrows
// This is the first counter-example from Niko's blog post
// smallcultfollowing.com/babysteps/blog/2017/03/01/nested-method-calls-via-two-phase-borrowing/
// of a danger for code to crash if we just turned off the check for whether
// a mutable-borrow aliases another borrow.
fn main() {
let mut v: Vec<String> = vec![format!("Hello, ")];
v[0].push_str({
v.push(format!("foo"));
//~^ ERROR cannot borrow `v` as mutable more than once at a time [E0499]
"World!"
});
}
| apache-2.0 |
vega113/incubator-wave | wave/src/test/java/com/google/wave/api/data/converter/v22/EventDataConverterV22Test.java | 5547 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.google.wave.api.data.converter.v22;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import com.google.common.collect.Lists;
import com.google.wave.api.BlipData;
import com.google.wave.api.BlipThread;
import com.google.wave.api.impl.EventMessageBundle;
import junit.framework.TestCase;
import org.waveprotocol.wave.model.conversation.Blips;
import org.waveprotocol.wave.model.conversation.Conversation;
import org.waveprotocol.wave.model.conversation.ConversationBlip;
import org.waveprotocol.wave.model.conversation.ConversationView;
import org.waveprotocol.wave.model.conversation.WaveBasedConversationView;
import org.waveprotocol.wave.model.document.Document;
import org.waveprotocol.wave.model.document.operation.impl.DocInitializationBuilder;
import org.waveprotocol.wave.model.id.IdGenerator;
import org.waveprotocol.wave.model.id.WaveId;
import org.waveprotocol.wave.model.id.WaveletId;
import org.waveprotocol.wave.model.testing.BasicFactories;
import org.waveprotocol.wave.model.testing.FakeIdGenerator;
import org.waveprotocol.wave.model.wave.Wavelet;
import org.waveprotocol.wave.model.wave.opbased.ObservableWaveView;
import java.util.List;
import java.util.Map;
/**
* Test cases for {@link EventDataConverterV22}.
*
*/
public class EventDataConverterV22Test extends TestCase {
private static final WaveId WAVE_ID = WaveId.of("example.com", "123");
private static final WaveletId WAVELET_ID = WaveletId.of("example.com", "conv+root");
private Conversation conversation;
@Override
protected void setUp() throws Exception {
Blips.init();
conversation = makeConversation();
}
public void testToBlipData() throws Exception {
Wavelet wavelet = mock(Wavelet.class);
when(wavelet.getWaveId()).thenReturn(WAVE_ID);
when(wavelet.getId()).thenReturn(WAVELET_ID);
ConversationBlip blip = conversation.getRootThread().getFirstBlip();
String replyThreadId = blip.addReplyThread(3).getId();
EventDataConverterV22 converter = new EventDataConverterV22();
EventMessageBundle eventMessageBundle = new EventMessageBundle(null, null);
BlipData blipData = converter.toBlipData(blip, wavelet,
eventMessageBundle);
assertEquals(blip.getThread().getId(), blipData.getThreadId());
assertEquals(Lists.newArrayList(replyThreadId), blipData.getReplyThreadIds());
Map<String, BlipThread> threads = eventMessageBundle.getThreads();
assertEquals(1, threads.size());
assertEquals(1, threads.get(replyThreadId).getLocation());
}
public void testFindBlipParent() {
ConversationBlip first = conversation.getRootThread().getFirstBlip();
ConversationBlip second = conversation.getRootThread().appendBlip();
ConversationBlip reply = first.addReplyThread().appendBlip();
ConversationBlip secondReply = reply.getThread().appendBlip();
ConversationBlip inlineReply = first.addReplyThread(3).appendBlip();
EventDataConverterV22 converter = new EventDataConverterV22();
assertNull(converter.findBlipParent(first));
assertNull(converter.findBlipParent(second));
assertSame(first, converter.findBlipParent(reply));
assertSame(first, converter.findBlipParent(inlineReply));
assertSame(first, converter.findBlipParent(secondReply));
}
public void testFindBlipChildren() {
ConversationBlip first = conversation.getRootThread().getFirstBlip();
ConversationBlip second = conversation.getRootThread().appendBlip();
ConversationBlip reply = first.addReplyThread().appendBlip();
ConversationBlip secondReply = reply.getThread().appendBlip();
ConversationBlip inlineReply = first.addReplyThread(3).appendBlip();
EventDataConverterV22 converter = new EventDataConverterV22();
assertEquals(0, converter.findBlipChildren(second).size());
List<ConversationBlip> children = converter.findBlipChildren(first);
assertEquals(3, children.size());
assertEquals(inlineReply.getId(), children.get(0).getId());
assertEquals(reply.getId(), children.get(1).getId());
assertEquals(secondReply.getId(), children.get(2).getId());
}
private static Conversation makeConversation() {
IdGenerator idGenerator = FakeIdGenerator.create();
ObservableWaveView waveView = BasicFactories.fakeWaveViewBuilder().with(idGenerator).build();
ConversationView convView = WaveBasedConversationView.create(waveView, idGenerator);
Conversation conversation = convView.createRoot();
// Force empty document.
ConversationBlip blip = conversation.getRootThread().appendBlip(
new DocInitializationBuilder().build());
Document document = blip.getContent();
document.appendXml(Blips.INITIAL_BODY);
return conversation;
}
}
| apache-2.0 |
johtani/elasticsearch | src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java | 12249 | /*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.threadpool;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import org.elasticsearch.common.settings.ImmutableSettings;
import org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor;
import org.elasticsearch.test.ElasticsearchTestCase;
import org.elasticsearch.threadpool.ThreadPool.Names;
import org.junit.Test;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Executor;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
import static org.hamcrest.Matchers.*;
/**
 * Tests that thread pool settings can be updated at runtime.
 */
public class UpdateThreadPoolSettingsTests extends ElasticsearchTestCase {
private ThreadPool.Info info(ThreadPool threadPool, String name) {
for (ThreadPool.Info info : threadPool.info()) {
if (info.getName().equals(name)) {
return info;
}
}
return null;
}
@Test
public void testCachedExecutorType() throws InterruptedException {
ThreadPool threadPool = new ThreadPool(
ImmutableSettings.settingsBuilder()
.put("threadpool.search.type", "cached")
.put("name","testCachedExecutorType").build(), null);
assertThat(info(threadPool, Names.SEARCH).getType(), equalTo("cached"));
assertThat(info(threadPool, Names.SEARCH).getKeepAlive().minutes(), equalTo(5L));
assertThat(threadPool.executor(Names.SEARCH), instanceOf(EsThreadPoolExecutor.class));
// Replace with different type
threadPool.updateSettings(settingsBuilder().put("threadpool.search.type", "same").build());
assertThat(info(threadPool, Names.SEARCH).getType(), equalTo("same"));
assertThat(threadPool.executor(Names.SEARCH), instanceOf(MoreExecutors.directExecutor().getClass()));
// Replace with different type again
threadPool.updateSettings(settingsBuilder()
.put("threadpool.search.type", "scaling")
.put("threadpool.search.keep_alive", "10m")
.build());
assertThat(info(threadPool, Names.SEARCH).getType(), equalTo("scaling"));
assertThat(threadPool.executor(Names.SEARCH), instanceOf(EsThreadPoolExecutor.class));
assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getCorePoolSize(), equalTo(1));
// Make sure keep alive value changed
assertThat(info(threadPool, Names.SEARCH).getKeepAlive().minutes(), equalTo(10L));
assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(10L));
// Put old type back
threadPool.updateSettings(settingsBuilder().put("threadpool.search.type", "cached").build());
assertThat(info(threadPool, Names.SEARCH).getType(), equalTo("cached"));
// Make sure keep alive value reused
assertThat(info(threadPool, Names.SEARCH).getKeepAlive().minutes(), equalTo(10L));
assertThat(threadPool.executor(Names.SEARCH), instanceOf(EsThreadPoolExecutor.class));
// Change keep alive
Executor oldExecutor = threadPool.executor(Names.SEARCH);
threadPool.updateSettings(settingsBuilder().put("threadpool.search.keep_alive", "1m").build());
// Make sure keep alive value changed
assertThat(info(threadPool, Names.SEARCH).getKeepAlive().minutes(), equalTo(1L));
assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(1L));
// Make sure executor didn't change
assertThat(info(threadPool, Names.SEARCH).getType(), equalTo("cached"));
assertThat(threadPool.executor(Names.SEARCH), sameInstance(oldExecutor));
// Set the same keep alive
threadPool.updateSettings(settingsBuilder().put("threadpool.search.keep_alive", "1m").build());
// Make sure keep alive value didn't change
assertThat(info(threadPool, Names.SEARCH).getKeepAlive().minutes(), equalTo(1L));
assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(1L));
// Make sure executor didn't change
assertThat(info(threadPool, Names.SEARCH).getType(), equalTo("cached"));
assertThat(threadPool.executor(Names.SEARCH), sameInstance(oldExecutor));
terminate(threadPool);
}
@Test
public void testFixedExecutorType() throws InterruptedException {
ThreadPool threadPool = new ThreadPool(settingsBuilder()
.put("threadpool.search.type", "fixed")
.put("name","testCachedExecutorType").build(), null);
assertThat(threadPool.executor(Names.SEARCH), instanceOf(EsThreadPoolExecutor.class));
// Replace with different type
threadPool.updateSettings(settingsBuilder()
.put("threadpool.search.type", "scaling")
.put("threadpool.search.keep_alive", "10m")
.put("threadpool.search.min", "2")
.put("threadpool.search.size", "15")
.build());
assertThat(info(threadPool, Names.SEARCH).getType(), equalTo("scaling"));
assertThat(threadPool.executor(Names.SEARCH), instanceOf(EsThreadPoolExecutor.class));
assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getCorePoolSize(), equalTo(2));
assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getMaximumPoolSize(), equalTo(15));
assertThat(info(threadPool, Names.SEARCH).getMin(), equalTo(2));
assertThat(info(threadPool, Names.SEARCH).getMax(), equalTo(15));
// Make sure keep alive value changed
assertThat(info(threadPool, Names.SEARCH).getKeepAlive().minutes(), equalTo(10L));
assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(10L));
// Put old type back
threadPool.updateSettings(settingsBuilder()
.put("threadpool.search.type", "fixed")
.build());
assertThat(info(threadPool, Names.SEARCH).getType(), equalTo("fixed"));
// Make sure keep alive value is not used
assertThat(info(threadPool, Names.SEARCH).getKeepAlive(), nullValue());
        // Make sure pool size values were reused
assertThat(info(threadPool, Names.SEARCH).getMin(), equalTo(15));
assertThat(info(threadPool, Names.SEARCH).getMax(), equalTo(15));
assertThat(threadPool.executor(Names.SEARCH), instanceOf(EsThreadPoolExecutor.class));
assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getCorePoolSize(), equalTo(15));
assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getMaximumPoolSize(), equalTo(15));
// Change size
Executor oldExecutor = threadPool.executor(Names.SEARCH);
threadPool.updateSettings(settingsBuilder().put("threadpool.search.size", "10").build());
// Make sure size values changed
assertThat(info(threadPool, Names.SEARCH).getMax(), equalTo(10));
assertThat(info(threadPool, Names.SEARCH).getMin(), equalTo(10));
assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getMaximumPoolSize(), equalTo(10));
assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getCorePoolSize(), equalTo(10));
// Make sure executor didn't change
assertThat(info(threadPool, Names.SEARCH).getType(), equalTo("fixed"));
assertThat(threadPool.executor(Names.SEARCH), sameInstance(oldExecutor));
// Change queue capacity
threadPool.updateSettings(settingsBuilder()
.put("threadpool.search.queue", "500")
.build());
terminate(threadPool);
}
@Test
public void testScalingExecutorType() throws InterruptedException {
ThreadPool threadPool = new ThreadPool(settingsBuilder()
.put("threadpool.search.type", "scaling")
.put("threadpool.search.size", 10)
.put("name","testCachedExecutorType").build(), null);
assertThat(info(threadPool, Names.SEARCH).getMin(), equalTo(1));
assertThat(info(threadPool, Names.SEARCH).getMax(), equalTo(10));
assertThat(info(threadPool, Names.SEARCH).getKeepAlive().minutes(), equalTo(5L));
assertThat(info(threadPool, Names.SEARCH).getType(), equalTo("scaling"));
assertThat(threadPool.executor(Names.SEARCH), instanceOf(EsThreadPoolExecutor.class));
        // Change settings that don't require pool replacement
Executor oldExecutor = threadPool.executor(Names.SEARCH);
threadPool.updateSettings(settingsBuilder()
.put("threadpool.search.type", "scaling")
.put("threadpool.search.keep_alive", "10m")
.put("threadpool.search.min", "2")
.put("threadpool.search.size", "15")
.build());
assertThat(info(threadPool, Names.SEARCH).getType(), equalTo("scaling"));
assertThat(threadPool.executor(Names.SEARCH), instanceOf(EsThreadPoolExecutor.class));
assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getCorePoolSize(), equalTo(2));
assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getMaximumPoolSize(), equalTo(15));
assertThat(info(threadPool, Names.SEARCH).getMin(), equalTo(2));
assertThat(info(threadPool, Names.SEARCH).getMax(), equalTo(15));
// Make sure keep alive value changed
assertThat(info(threadPool, Names.SEARCH).getKeepAlive().minutes(), equalTo(10L));
assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(10L));
assertThat(threadPool.executor(Names.SEARCH), sameInstance(oldExecutor));
terminate(threadPool);
}
@Test(timeout = 10000)
    public void testShutdownNowDoesntBlock() throws Exception {
ThreadPool threadPool = new ThreadPool(ImmutableSettings.settingsBuilder()
.put("threadpool.search.type", "cached")
.put("name","testCachedExecutorType").build(), null);
final CountDownLatch latch = new CountDownLatch(1);
Executor oldExecutor = threadPool.executor(Names.SEARCH);
threadPool.executor(Names.SEARCH).execute(new Runnable() {
@Override
public void run() {
try {
Thread.sleep(20000);
} catch (InterruptedException ex) {
latch.countDown();
Thread.currentThread().interrupt();
}
}
});
threadPool.updateSettings(settingsBuilder().put("threadpool.search.type", "fixed").build());
assertThat(threadPool.executor(Names.SEARCH), not(sameInstance(oldExecutor)));
assertThat(((ThreadPoolExecutor) oldExecutor).isShutdown(), equalTo(true));
assertThat(((ThreadPoolExecutor) oldExecutor).isTerminating(), equalTo(true));
assertThat(((ThreadPoolExecutor) oldExecutor).isTerminated(), equalTo(false));
threadPool.shutdownNow(); // interrupt the thread
latch.await();
terminate(threadPool);
}
}
| apache-2.0 |
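A minimal sketch of the runtime-update pattern these tests exercise, written against the same legacy ThreadPool API used above; the node name and pool sizes are illustrative assumptions, not values from the test suite:

ThreadPool threadPool = new ThreadPool(
        ImmutableSettings.settingsBuilder()
                .put("threadpool.search.type", "fixed")
                .put("threadpool.search.size", "8")
                .put("name", "example-node").build(), null);
try {
    // size/keep_alive tweaks mutate the executor in place (the sameInstance assertions above);
    // changing "type" shuts the old executor down and installs a new one.
    threadPool.updateSettings(settingsBuilder().put("threadpool.search.size", "4").build());
} finally {
    threadPool.shutdownNow();
}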
electrum/presto | core/trino-main/src/main/java/io/trino/operator/SplitOperatorInfo.java | 1565 | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.trino.operator;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import io.trino.connector.CatalogName;
import static java.util.Objects.requireNonNull;
public class SplitOperatorInfo
implements OperatorInfo
{
private final CatalogName catalogName;
// NOTE: this deserializes to a map instead of the expected type
private final Object splitInfo;
@JsonCreator
public SplitOperatorInfo(
@JsonProperty("catalogName") CatalogName catalogName,
@JsonProperty("splitInfo") Object splitInfo)
{
this.catalogName = requireNonNull(catalogName, "catalogName is null");
this.splitInfo = splitInfo;
}
@Override
public boolean isFinal()
{
return true;
}
@JsonProperty
public Object getSplitInfo()
{
return splitInfo;
}
@JsonProperty
public CatalogName getCatalogName()
{
return catalogName;
}
}
| apache-2.0 |
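A hedged illustration of the NOTE in SplitOperatorInfo above: a field declared as Object gives Jackson no target type on deserialization, so whatever POJO was written comes back as a Map. The Holder class is a hypothetical stand-in, not Trino code:

import com.fasterxml.jackson.databind.ObjectMapper;
import java.util.Map;

class ObjectFieldRoundTrip {
    static class Holder { public Object splitInfo; }

    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        Holder in = new Holder();
        in.splitInfo = Map.of("path", "/tmp/split", "length", 42);
        Holder out = mapper.readValue(mapper.writeValueAsString(in), Holder.class);
        // out.splitInfo is a LinkedHashMap<String, Object> regardless of the written type,
        // which is exactly the behavior the NOTE warns about.
    }
}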
FreeRDP/FreeRDP | winpr/libwinpr/crypto/test/TestCryptoProtectMemory.c | 1594 |
#include <winpr/crt.h>
#include <winpr/print.h>
#include <winpr/crypto.h>
#include <winpr/ssl.h>
#include <winpr/wlog.h>
static const char* SECRET_PASSWORD_TEST = "MySecretPassword123!";
int TestCryptoProtectMemory(int argc, char* argv[])
{
UINT32 cbPlainText;
UINT32 cbCipherText;
const char* pPlainText;
BYTE* pCipherText;
WINPR_UNUSED(argc);
WINPR_UNUSED(argv);
pPlainText = SECRET_PASSWORD_TEST;
cbPlainText = strlen(pPlainText) + 1;
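	/* Round the buffer up to the next CRYPTPROTECTMEMORY_BLOCK_SIZE multiple; note that an
	 * already block-aligned length still gains one full extra block with this formula. */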
cbCipherText = cbPlainText +
(CRYPTPROTECTMEMORY_BLOCK_SIZE - (cbPlainText % CRYPTPROTECTMEMORY_BLOCK_SIZE));
printf("cbPlainText: %" PRIu32 " cbCipherText: %" PRIu32 "\n", cbPlainText, cbCipherText);
pCipherText = (BYTE*)malloc(cbCipherText);
if (!pCipherText)
{
printf("Unable to allocate memory\n");
return -1;
}
CopyMemory(pCipherText, pPlainText, cbPlainText);
ZeroMemory(&pCipherText[cbPlainText], (cbCipherText - cbPlainText));
winpr_InitializeSSL(WINPR_SSL_INIT_DEFAULT);
if (!CryptProtectMemory(pCipherText, cbCipherText, CRYPTPROTECTMEMORY_SAME_PROCESS))
{
printf("CryptProtectMemory failure\n");
return -1;
}
printf("PlainText: %s (cbPlainText = %" PRIu32 ", cbCipherText = %" PRIu32 ")\n", pPlainText,
cbPlainText, cbCipherText);
winpr_HexDump("crypto.test", WLOG_DEBUG, pCipherText, cbCipherText);
if (!CryptUnprotectMemory(pCipherText, cbCipherText, CRYPTPROTECTMEMORY_SAME_PROCESS))
{
printf("CryptUnprotectMemory failure\n");
return -1;
}
printf("Decrypted CipherText: %s\n", pCipherText);
SecureZeroMemory(pCipherText, cbCipherText);
free(pCipherText);
return 0;
}
| apache-2.0 |
Lilykos/grobid | grobid-core/src/main/java/org/grobid/core/utilities/GrobidPropertyKeys.java | 3430 | package org.grobid.core.utilities;
/**
* This class contains all the keys of the properties files.
*
* @author Damien Ridereau
*/
public interface GrobidPropertyKeys {
public static final String PROP_GROBID_IS_CONTEXT_SERVER = "grobid.is.context.server";
public static final String PROP_TMP_PATH = "grobid.temp.path";
// public static final String PROP_BIN_PATH = "grobid.bin.path";
public static final String PROP_NATIVE_LIB_PATH = "grobid.nativelibrary.path";
public static final String PROP_3RD_PARTY_PDF2XML = "grobid.3rdparty.pdf2xml.path";
public static final String PROP_3RD_PARTY_PDF2XML_MEMORY_LIMIT = "grobid.3rdparty.pdf2xml.memory.limit.mb";
public static final String PROP_GROBID_CRF_ENGINE = "grobid.crf.engine";
public static final String PROP_USE_LANG_ID = "grobid.use_language_id";
public static final String PROP_LANG_DETECTOR_FACTORY = "grobid.language_detector_factory";
public static final String PROP_CROSSREF_ID = "grobid.crossref_id";
public static final String PROP_CROSSREF_PW = "grobid.crossref_pw";
public static final String PROP_CROSSREF_HOST = "grobid.crossref_host";
public static final String PROP_CROSSREF_PORT = "grobid.crossref_port";
public static final String PROP_MYSQL_HOST = "grobid.mysql_host";
public static final String PROP_MYSQL_PORT = "grobid.mysql_port";
public static final String PROP_MYSQL_USERNAME = "grobid.mysql_username";
public static final String PROP_MYSQL_PW = "grobid.mysql_passwd";
public static final String PROP_MYSQL_DB_NAME = "grobid.mysql_db_name";
public static final String PROP_PROXY_HOST = "grobid.proxy_host";
public static final String PROP_PROXY_PORT = "grobid.proxy_port";
public static final String PROP_NB_THREADS = "grobid.nb_threads";
public static final String PROP_GROBID_MAX_CONNECTIONS = "org.grobid.max.connections";
public static final String PROP_GROBID_POOL_MAX_WAIT = "org.grobid.pool.max.wait";
/**
     * Determines whether resources such as the first names, last names, country codes and
     * dictionaries are read from the $GROBID_HOME path or not
     * (possible values: true|false; default is false).
*/
public static final String PROP_RESOURCE_INHOME = "grobid.resources.inHome";
/**
* The name of the env-entry located in the web.xml, via which the
     * grobid-service.properties path is set.
*/
public static final String PROP_GROBID_HOME = "org.grobid.home";
/**
* The name of the env-entry located in the web.xml, via which the
     * grobid.properties path is set.
*/
public static final String PROP_GROBID_PROPERTY = "org.grobid.property";
/**
* The name of the system property, via which the grobid home folder can be
* located.
*/
public static final String PROP_GROBID_SERVICE_PROPERTY = "org.grobid.property.service";
/**
* name of the property setting the admin password
*/
public static final String PROP_GROBID_SERVICE_ADMIN_PW = "org.grobid.service.admin.pw";
/**
* If set to true, parallel execution will be done, else a queuing of
* requests will be done.
*/
public static final String PROP_GROBID_SERVICE_IS_PARALLEL_EXEC = "org.grobid.service.is.parallel.execution";
/**
* The defined paths to create.
*/
public static final String[] PATHES_TO_CREATE = {PROP_TMP_PATH};
}
| apache-2.0 |
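A minimal consumption sketch for these keys; resolving them through System.getProperty and the fallback values are illustrative assumptions, not GROBID's actual property-loading code:

// Sketch: look up the GROBID home folder and the parallel-execution flag by key.
String grobidHome = System.getProperty(GrobidPropertyKeys.PROP_GROBID_HOME, "/opt/grobid-home");
boolean parallel = Boolean.parseBoolean(
        System.getProperty(GrobidPropertyKeys.PROP_GROBID_SERVICE_IS_PARALLEL_EXEC, "true"));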
mbiarnes/drools-wb | drools-wb-screens/drools-wb-dtable-xls-editor/drools-wb-dtable-xls-editor-backend/src/main/java/org/drools/workbench/screens/dtablexls/backend/server/conversion/builders/GuidedDecisionTableActivationGroupBuilder.java | 2570 | /*
* Copyright 2012 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.drools.workbench.screens.dtablexls.backend.server.conversion.builders;
import org.drools.decisiontable.parser.ActionType;
import org.drools.workbench.models.datamodel.rule.Attribute;
import org.drools.workbench.models.guided.dtable.shared.conversion.ConversionResult;
import org.drools.workbench.models.guided.dtable.shared.model.AttributeCol52;
import org.drools.workbench.models.guided.dtable.shared.model.DTCellValue52;
import org.drools.workbench.models.guided.dtable.shared.model.GuidedDecisionTable52;
/**
* Builder for ActivationGroup Attribute columns
*/
public class GuidedDecisionTableActivationGroupBuilder extends AbstractGuidedDecisionTableAttributeBuilder {
public GuidedDecisionTableActivationGroupBuilder( final int row,
final int column,
final ConversionResult conversionResult ) {
super( row,
column,
ActionType.Code.ACTIVATIONGROUP,
conversionResult );
}
@Override
public void populateDecisionTable( final GuidedDecisionTable52 dtable,
final int maxRowCount ) {
final AttributeCol52 column = new AttributeCol52();
column.setAttribute(Attribute.ACTIVATION_GROUP.getAttributeName());
dtable.getAttributeCols().add( column );
if ( this.values.size() < maxRowCount ) {
for ( int iRow = this.values.size(); iRow < maxRowCount; iRow++ ) {
this.values.add( new DTCellValue52( "" ) );
}
}
addColumnData( dtable,
column );
}
@Override
public void addCellValue( final int row,
final int column,
final String value ) {
final DTCellValue52 dcv = new DTCellValue52( value );
this.values.add( dcv );
}
}
| apache-2.0 |
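A hedged usage sketch of the builder above; the row/column coordinates, the bare ConversionResult, and the three-row table size are illustrative assumptions:

GuidedDecisionTableActivationGroupBuilder builder =
        new GuidedDecisionTableActivationGroupBuilder( 1, 4, new ConversionResult() );
builder.addCellValue( 2, 4, "validation-rules" ); // a value for the first data row only
GuidedDecisionTable52 dtable = new GuidedDecisionTable52();
builder.populateDecisionTable( dtable, 3 ); // the two missing rows are padded with "" cells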
pkocandr/indy | core/src/test/java/org/commonjava/indy/core/inject/ExpiringMemoryNotFoundCacheTest.java | 3446 | /**
* Copyright (C) 2011-2020 Red Hat, Inc. (https://github.com/Commonjava/indy)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.commonjava.indy.core.inject;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.junit.Assert.assertThat;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import org.commonjava.indy.conf.DefaultIndyConfiguration;
import org.commonjava.maven.galley.model.ConcreteResource;
import org.commonjava.maven.galley.model.Location;
import org.commonjava.maven.galley.model.SimpleLocation;
import org.junit.Test;
public class ExpiringMemoryNotFoundCacheTest
{
@Test
public void expireUsingConfiguredValue()
throws Exception
{
final DefaultIndyConfiguration config = new DefaultIndyConfiguration();
config.setNotFoundCacheTimeoutSeconds( 1 );
final ExpiringMemoryNotFoundCache nfc = new ExpiringMemoryNotFoundCache( config );
final ConcreteResource res = new ConcreteResource( new SimpleLocation( "test:uri" ), "/path/to/expired/object" );
nfc.addMissing( res );
assertThat( nfc.isMissing( res ), equalTo( true ) );
Thread.sleep( TimeUnit.SECONDS.toMillis( 2 ) );
assertThat( nfc.isMissing( res ), equalTo( false ) );
final Set<String> locMissing = nfc.getMissing( res.getLocation() );
assertThat( locMissing == null || locMissing.isEmpty(), equalTo( true ) );
final Map<Location, Set<String>> allMissing = nfc.getAllMissing();
assertThat( allMissing == null || allMissing.isEmpty(), equalTo( true ) );
}
@Test
public void expireUsingConfiguredValue_DirectCheckDoesntAffectAggregateChecks()
throws Exception
{
final DefaultIndyConfiguration config = new DefaultIndyConfiguration();
config.setNotFoundCacheTimeoutSeconds( 1 );
final ExpiringMemoryNotFoundCache nfc = new ExpiringMemoryNotFoundCache( config );
final ConcreteResource res = new ConcreteResource( new SimpleLocation( "test:uri" ), "/path/to/expired/object" );
nfc.addMissing( res );
assertThat( nfc.isMissing( res ), equalTo( true ) );
Thread.sleep( TimeUnit.SECONDS.toMillis( 2 ) );
Set<String> locMissing = nfc.getMissing( res.getLocation() );
System.out.println( locMissing );
assertThat( locMissing == null || locMissing.isEmpty(), equalTo( true ) );
Map<Location, Set<String>> allMissing = nfc.getAllMissing();
assertThat( allMissing == null || allMissing.isEmpty(), equalTo( true ) );
assertThat( nfc.isMissing( res ), equalTo( false ) );
locMissing = nfc.getMissing( res.getLocation() );
assertThat( locMissing == null || locMissing.isEmpty(), equalTo( true ) );
allMissing = nfc.getAllMissing();
assertThat( allMissing == null || allMissing.isEmpty(), equalTo( true ) );
}
}
| apache-2.0 |
dlnufox/ignite | modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridCacheTxRecoveryFuture.java | 15947 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.internal.processors.cache.distributed;
import java.util.Collection;
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.IgniteLogger;
import org.apache.ignite.cluster.ClusterNode;
import org.apache.ignite.internal.IgniteInternalFuture;
import org.apache.ignite.internal.cluster.ClusterTopologyCheckedException;
import org.apache.ignite.internal.processors.cache.GridCacheFuture;
import org.apache.ignite.internal.processors.cache.GridCacheSharedContext;
import org.apache.ignite.internal.processors.cache.transactions.IgniteInternalTx;
import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
import org.apache.ignite.internal.util.GridLeanMap;
import org.apache.ignite.internal.util.future.GridCompoundIdentityFuture;
import org.apache.ignite.internal.util.future.GridFutureAdapter;
import org.apache.ignite.internal.util.typedef.CI1;
import org.apache.ignite.internal.util.typedef.internal.CU;
import org.apache.ignite.internal.util.typedef.internal.S;
import org.apache.ignite.internal.util.typedef.internal.U;
import org.apache.ignite.lang.IgniteUuid;
import org.jetbrains.annotations.Nullable;
/**
* Future verifying that all remote transactions related to transaction were prepared or committed.
*/
public class GridCacheTxRecoveryFuture extends GridCompoundIdentityFuture<Boolean> implements GridCacheFuture<Boolean> {
/** */
private static final long serialVersionUID = 0L;
/** Logger reference. */
private static final AtomicReference<IgniteLogger> logRef = new AtomicReference<>();
/** Logger. */
private static IgniteLogger log;
/** Trackable flag. */
private boolean trackable = true;
/** Context. */
private final GridCacheSharedContext<?, ?> cctx;
/** Future ID. */
private final IgniteUuid futId = IgniteUuid.randomUuid();
/** Transaction. */
private final IgniteInternalTx tx;
/** All involved nodes. */
private final Map<UUID, ClusterNode> nodes;
/** ID of failed node started transaction. */
private final UUID failedNodeId;
/** Transaction nodes mapping. */
private final Map<UUID, Collection<UUID>> txNodes;
/** */
private final boolean nearTxCheck;
/**
* @param cctx Context.
* @param tx Transaction.
* @param failedNodeId ID of failed node started transaction.
* @param txNodes Transaction mapping.
*/
@SuppressWarnings("ConstantConditions")
public GridCacheTxRecoveryFuture(GridCacheSharedContext<?, ?> cctx,
IgniteInternalTx tx,
UUID failedNodeId,
Map<UUID, Collection<UUID>> txNodes)
{
super(cctx.kernalContext(), CU.boolReducer());
this.cctx = cctx;
this.tx = tx;
this.txNodes = txNodes;
this.failedNodeId = failedNodeId;
if (log == null)
log = U.logger(cctx.kernalContext(), logRef, GridCacheTxRecoveryFuture.class);
nodes = new GridLeanMap<>();
UUID locNodeId = cctx.localNodeId();
for (Map.Entry<UUID, Collection<UUID>> e : tx.transactionNodes().entrySet()) {
if (!locNodeId.equals(e.getKey()) && !failedNodeId.equals(e.getKey()) && !nodes.containsKey(e.getKey())) {
ClusterNode node = cctx.discovery().node(e.getKey());
if (node != null)
nodes.put(node.id(), node);
else if (log.isDebugEnabled())
log.debug("Transaction node left (will ignore) " + e.getKey());
}
for (UUID nodeId : e.getValue()) {
if (!locNodeId.equals(nodeId) && !failedNodeId.equals(nodeId) && !nodes.containsKey(nodeId)) {
ClusterNode node = cctx.discovery().node(nodeId);
if (node != null)
nodes.put(node.id(), node);
else if (log.isDebugEnabled())
log.debug("Transaction node left (will ignore) " + e.getKey());
}
}
}
UUID nearNodeId = tx.eventNodeId();
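        // A near-node check is only possible if the originating (near) node is not the
        // failed node and is still alive; otherwise all primaries and backups are queried.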
nearTxCheck = !failedNodeId.equals(nearNodeId) && cctx.discovery().alive(nearNodeId);
}
/**
* Initializes future.
*/
@SuppressWarnings("ConstantConditions")
public void prepare() {
if (nearTxCheck) {
UUID nearNodeId = tx.eventNodeId();
if (cctx.localNodeId().equals(nearNodeId)) {
IgniteInternalFuture<Boolean> fut = cctx.tm().txCommitted(tx.nearXidVersion());
fut.listen(new CI1<IgniteInternalFuture<Boolean>>() {
@Override public void apply(IgniteInternalFuture<Boolean> fut) {
try {
onDone(fut.get());
}
catch (IgniteCheckedException e) {
onDone(e);
}
}
});
}
else {
MiniFuture fut = new MiniFuture(tx.eventNodeId());
add(fut);
GridCacheTxRecoveryRequest req = new GridCacheTxRecoveryRequest(
tx,
0,
true,
futureId(),
fut.futureId());
try {
cctx.io().send(nearNodeId, req, tx.ioPolicy());
}
catch (ClusterTopologyCheckedException ignore) {
fut.onNodeLeft();
}
catch (IgniteCheckedException e) {
fut.onError(e);
}
markInitialized();
}
return;
}
// First check transactions on local node.
int locTxNum = nodeTransactions(cctx.localNodeId());
if (locTxNum > 1) {
IgniteInternalFuture<Boolean> fut = cctx.tm().txsPreparedOrCommitted(tx.nearXidVersion(), locTxNum);
if (fut == null || fut.isDone()) {
boolean prepared;
try {
                    prepared = fut == null || fut.get();
}
catch (IgniteCheckedException e) {
U.error(log, "Check prepared transaction future failed: " + e, e);
prepared = false;
}
if (!prepared) {
onDone(false);
markInitialized();
return;
}
}
else {
fut.listen(new CI1<IgniteInternalFuture<Boolean>>() {
@Override public void apply(IgniteInternalFuture<Boolean> fut) {
boolean prepared;
try {
prepared = fut.get();
}
catch (IgniteCheckedException e) {
U.error(log, "Check prepared transaction future failed: " + e, e);
prepared = false;
}
if (!prepared) {
onDone(false);
markInitialized();
}
else
proceedPrepare();
}
});
return;
}
}
proceedPrepare();
}
/**
* Process prepare after local check.
*/
private void proceedPrepare() {
for (Map.Entry<UUID, Collection<UUID>> entry : txNodes.entrySet()) {
UUID nodeId = entry.getKey();
            // Skip the local node; the failed node and nodes that have left are handled below.
if (!nodes.containsKey(nodeId) && nodeId.equals(cctx.localNodeId()))
continue;
/*
             * If the primary node failed, send the message to all of its backups;
             * otherwise send the message only to the primary node.
*/
if (nodeId.equals(failedNodeId)) {
for (UUID id : entry.getValue()) {
// Skip backup node if it is local node or if it is also was mapped as primary.
if (txNodes.containsKey(id) || id.equals(cctx.localNodeId()))
continue;
MiniFuture fut = new MiniFuture(id);
add(fut);
GridCacheTxRecoveryRequest req = new GridCacheTxRecoveryRequest(tx,
nodeTransactions(id),
false,
futureId(),
fut.futureId());
try {
cctx.io().send(id, req, tx.ioPolicy());
}
catch (ClusterTopologyCheckedException ignored) {
fut.onNodeLeft();
}
catch (IgniteCheckedException e) {
fut.onError(e);
break;
}
}
}
else {
MiniFuture fut = new MiniFuture(nodeId);
add(fut);
GridCacheTxRecoveryRequest req = new GridCacheTxRecoveryRequest(
tx,
nodeTransactions(nodeId),
false,
futureId(),
fut.futureId());
try {
cctx.io().send(nodeId, req, tx.ioPolicy());
}
catch (ClusterTopologyCheckedException ignored) {
fut.onNodeLeft();
}
catch (IgniteCheckedException e) {
fut.onError(e);
break;
}
}
}
markInitialized();
}
/**
* @param nodeId Node ID.
* @return Number of transactions on node.
*/
private int nodeTransactions(UUID nodeId) {
int cnt = txNodes.containsKey(nodeId) ? 1 : 0; // +1 if node is primary.
for (Collection<UUID> backups : txNodes.values()) {
for (UUID backup : backups) {
if (backup.equals(nodeId)) {
cnt++; // +1 if node is backup.
break;
}
}
}
return cnt;
}
/**
* @param nodeId Node ID.
* @param res Response.
*/
public void onResult(UUID nodeId, GridCacheTxRecoveryResponse res) {
if (!isDone()) {
for (IgniteInternalFuture<Boolean> fut : pending()) {
if (isMini(fut)) {
MiniFuture f = (MiniFuture)fut;
if (f.futureId().equals(res.miniId())) {
assert f.nodeId().equals(nodeId);
f.onResult(res);
break;
}
}
}
}
}
/** {@inheritDoc} */
@Override public IgniteUuid futureId() {
return futId;
}
/** {@inheritDoc} */
@Override public GridCacheVersion version() {
return tx.xidVersion();
}
/** {@inheritDoc} */
@Override public Collection<? extends ClusterNode> nodes() {
return nodes.values();
}
/** {@inheritDoc} */
@Override public boolean onNodeLeft(UUID nodeId) {
for (IgniteInternalFuture<?> fut : futures())
if (isMini(fut)) {
MiniFuture f = (MiniFuture)fut;
if (f.nodeId().equals(nodeId))
f.onNodeLeft();
}
return true;
}
/** {@inheritDoc} */
@Override public boolean trackable() {
return trackable;
}
/** {@inheritDoc} */
@Override public void markNotTrackable() {
trackable = false;
}
/** {@inheritDoc} */
@Override public boolean onDone(@Nullable Boolean res, @Nullable Throwable err) {
if (super.onDone(res, err)) {
cctx.mvcc().removeFuture(this);
if (err == null) {
assert res != null;
cctx.tm().finishTxOnRecovery(tx, res);
}
else {
if (err instanceof ClusterTopologyCheckedException && nearTxCheck) {
if (log.isDebugEnabled())
log.debug("Failed to check transaction on near node, " +
"ignoring [err=" + err + ", tx=" + tx + ']');
}
else {
if (log.isDebugEnabled())
log.debug("Failed to check prepared transactions, " +
"invalidating transaction [err=" + err + ", tx=" + tx + ']');
cctx.tm().salvageTx(tx);
}
}
}
return false;
}
/**
* @param f Future.
* @return {@code True} if mini-future.
*/
private boolean isMini(IgniteInternalFuture<?> f) {
return f.getClass().equals(MiniFuture.class);
}
/** {@inheritDoc} */
@Override public String toString() {
return S.toString(GridCacheTxRecoveryFuture.class, this, "super", super.toString());
}
/**
*
*/
private class MiniFuture extends GridFutureAdapter<Boolean> {
/** */
private static final long serialVersionUID = 0L;
/** Mini future ID. */
private final IgniteUuid futId = IgniteUuid.randomUuid();
/** Node ID. */
private UUID nodeId;
/**
* @param nodeId Node ID.
*/
private MiniFuture(UUID nodeId) {
this.nodeId = nodeId;
}
/**
* @return Node ID.
*/
private UUID nodeId() {
return nodeId;
}
/**
* @return Future ID.
*/
private IgniteUuid futureId() {
return futId;
}
/**
* @param e Error.
*/
private void onError(Throwable e) {
if (log.isDebugEnabled())
log.debug("Failed to get future result [fut=" + this + ", err=" + e + ']');
onDone(e);
}
/**
*/
private void onNodeLeft() {
if (log.isDebugEnabled())
log.debug("Transaction node left grid (will ignore) [fut=" + this + ']');
if (nearTxCheck) {
// Near and originating nodes left, need initiate tx check.
cctx.tm().commitIfPrepared(tx);
onDone(new ClusterTopologyCheckedException("Transaction node left grid (will ignore)."));
}
else
onDone(true);
}
/**
* @param res Result callback.
*/
private void onResult(GridCacheTxRecoveryResponse res) {
onDone(res.success());
}
/** {@inheritDoc} */
@Override public String toString() {
return S.toString(MiniFuture.class, this, "done", isDone(), "err", error());
}
}
}
| apache-2.0 |
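A short worked example of the counting performed by nodeTransactions() above, using a hypothetical two-primary mapping:

import java.util.*;

UUID a = UUID.randomUUID(), b = UUID.randomUUID(), c = UUID.randomUUID();
// a is primary with backups {b, c}; b is primary with backup {a}.
Map<UUID, Collection<UUID>> txNodes = Map.of(a, List.of(b, c), b, List.of(a));
// nodeTransactions(a) == 2: +1 as a primary key, +1 as a backup in b's list.
// nodeTransactions(c) == 1: a backup in a's list only (the inner break caps each backup list at +1).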
theotherjimmy/mbed | targets/TARGET_Silicon_Labs/TARGET_EFM32/TARGET_EFM32LG/device/efm32lg_rmu.h | 15264 | /**************************************************************************//**
* @file efm32lg_rmu.h
* @brief EFM32LG_RMU register and bit field definitions
* @version 5.0.0
******************************************************************************
* @section License
* <b>Copyright 2016 Silicon Laboratories, Inc. http://www.silabs.com</b>
******************************************************************************
*
* Permission is granted to anyone to use this software for any purpose,
* including commercial applications, and to alter it and redistribute it
* freely, subject to the following restrictions:
*
* 1. The origin of this software must not be misrepresented; you must not
* claim that you wrote the original software.@n
* 2. Altered source versions must be plainly marked as such, and must not be
* misrepresented as being the original software.@n
* 3. This notice may not be removed or altered from any source distribution.
*
* DISCLAIMER OF WARRANTY/LIMITATION OF REMEDIES: Silicon Laboratories, Inc.
* has no obligation to support this Software. Silicon Laboratories, Inc. is
* providing the Software "AS IS", with no express or implied warranties of any
* kind, including, but not limited to, any implied warranties of
* merchantability or fitness for any particular purpose or warranties against
* infringement of any proprietary rights of a third party.
*
* Silicon Laboratories, Inc. will not be liable for any consequential,
* incidental, or special damages, or any other relief, or for any claim by
* any third party, arising from your use of this Software.
*
*****************************************************************************/
/**************************************************************************//**
* @addtogroup Parts
* @{
******************************************************************************/
/**************************************************************************//**
* @defgroup EFM32LG_RMU
* @{
* @brief EFM32LG_RMU Register Declaration
*****************************************************************************/
typedef struct
{
__IOM uint32_t CTRL; /**< Control Register */
__IM uint32_t RSTCAUSE; /**< Reset Cause Register */
__OM uint32_t CMD; /**< Command Register */
} RMU_TypeDef; /** @} */
/**************************************************************************//**
* @defgroup EFM32LG_RMU_BitFields
* @{
*****************************************************************************/
/* Bit fields for RMU CTRL */
#define _RMU_CTRL_RESETVALUE 0x00000002UL /**< Default value for RMU_CTRL */
#define _RMU_CTRL_MASK 0x00000003UL /**< Mask for RMU_CTRL */
#define RMU_CTRL_LOCKUPRDIS (0x1UL << 0) /**< Lockup Reset Disable */
#define _RMU_CTRL_LOCKUPRDIS_SHIFT 0 /**< Shift value for RMU_LOCKUPRDIS */
#define _RMU_CTRL_LOCKUPRDIS_MASK 0x1UL /**< Bit mask for RMU_LOCKUPRDIS */
#define _RMU_CTRL_LOCKUPRDIS_DEFAULT 0x00000000UL /**< Mode DEFAULT for RMU_CTRL */
#define RMU_CTRL_LOCKUPRDIS_DEFAULT (_RMU_CTRL_LOCKUPRDIS_DEFAULT << 0) /**< Shifted mode DEFAULT for RMU_CTRL */
#define RMU_CTRL_BURSTEN (0x1UL << 1) /**< Backup domain reset enable */
#define _RMU_CTRL_BURSTEN_SHIFT 1 /**< Shift value for RMU_BURSTEN */
#define _RMU_CTRL_BURSTEN_MASK 0x2UL /**< Bit mask for RMU_BURSTEN */
#define _RMU_CTRL_BURSTEN_DEFAULT 0x00000001UL /**< Mode DEFAULT for RMU_CTRL */
#define RMU_CTRL_BURSTEN_DEFAULT (_RMU_CTRL_BURSTEN_DEFAULT << 1) /**< Shifted mode DEFAULT for RMU_CTRL */
/* Bit fields for RMU RSTCAUSE */
#define _RMU_RSTCAUSE_RESETVALUE 0x00000000UL /**< Default value for RMU_RSTCAUSE */
#define _RMU_RSTCAUSE_MASK 0x0000FFFFUL /**< Mask for RMU_RSTCAUSE */
#define RMU_RSTCAUSE_PORST (0x1UL << 0) /**< Power On Reset */
#define _RMU_RSTCAUSE_PORST_SHIFT 0 /**< Shift value for RMU_PORST */
#define _RMU_RSTCAUSE_PORST_MASK 0x1UL /**< Bit mask for RMU_PORST */
#define _RMU_RSTCAUSE_PORST_DEFAULT 0x00000000UL /**< Mode DEFAULT for RMU_RSTCAUSE */
#define RMU_RSTCAUSE_PORST_DEFAULT (_RMU_RSTCAUSE_PORST_DEFAULT << 0) /**< Shifted mode DEFAULT for RMU_RSTCAUSE */
#define RMU_RSTCAUSE_BODUNREGRST (0x1UL << 1) /**< Brown Out Detector Unregulated Domain Reset */
#define _RMU_RSTCAUSE_BODUNREGRST_SHIFT 1 /**< Shift value for RMU_BODUNREGRST */
#define _RMU_RSTCAUSE_BODUNREGRST_MASK 0x2UL /**< Bit mask for RMU_BODUNREGRST */
#define _RMU_RSTCAUSE_BODUNREGRST_DEFAULT 0x00000000UL /**< Mode DEFAULT for RMU_RSTCAUSE */
#define RMU_RSTCAUSE_BODUNREGRST_DEFAULT (_RMU_RSTCAUSE_BODUNREGRST_DEFAULT << 1) /**< Shifted mode DEFAULT for RMU_RSTCAUSE */
#define RMU_RSTCAUSE_BODREGRST (0x1UL << 2) /**< Brown Out Detector Regulated Domain Reset */
#define _RMU_RSTCAUSE_BODREGRST_SHIFT 2 /**< Shift value for RMU_BODREGRST */
#define _RMU_RSTCAUSE_BODREGRST_MASK 0x4UL /**< Bit mask for RMU_BODREGRST */
#define _RMU_RSTCAUSE_BODREGRST_DEFAULT 0x00000000UL /**< Mode DEFAULT for RMU_RSTCAUSE */
#define RMU_RSTCAUSE_BODREGRST_DEFAULT (_RMU_RSTCAUSE_BODREGRST_DEFAULT << 2) /**< Shifted mode DEFAULT for RMU_RSTCAUSE */
#define RMU_RSTCAUSE_EXTRST (0x1UL << 3) /**< External Pin Reset */
#define _RMU_RSTCAUSE_EXTRST_SHIFT 3 /**< Shift value for RMU_EXTRST */
#define _RMU_RSTCAUSE_EXTRST_MASK 0x8UL /**< Bit mask for RMU_EXTRST */
#define _RMU_RSTCAUSE_EXTRST_DEFAULT 0x00000000UL /**< Mode DEFAULT for RMU_RSTCAUSE */
#define RMU_RSTCAUSE_EXTRST_DEFAULT (_RMU_RSTCAUSE_EXTRST_DEFAULT << 3) /**< Shifted mode DEFAULT for RMU_RSTCAUSE */
#define RMU_RSTCAUSE_WDOGRST (0x1UL << 4) /**< Watchdog Reset */
#define _RMU_RSTCAUSE_WDOGRST_SHIFT 4 /**< Shift value for RMU_WDOGRST */
#define _RMU_RSTCAUSE_WDOGRST_MASK 0x10UL /**< Bit mask for RMU_WDOGRST */
#define _RMU_RSTCAUSE_WDOGRST_DEFAULT 0x00000000UL /**< Mode DEFAULT for RMU_RSTCAUSE */
#define RMU_RSTCAUSE_WDOGRST_DEFAULT (_RMU_RSTCAUSE_WDOGRST_DEFAULT << 4) /**< Shifted mode DEFAULT for RMU_RSTCAUSE */
#define RMU_RSTCAUSE_LOCKUPRST (0x1UL << 5) /**< LOCKUP Reset */
#define _RMU_RSTCAUSE_LOCKUPRST_SHIFT 5 /**< Shift value for RMU_LOCKUPRST */
#define _RMU_RSTCAUSE_LOCKUPRST_MASK 0x20UL /**< Bit mask for RMU_LOCKUPRST */
#define _RMU_RSTCAUSE_LOCKUPRST_DEFAULT 0x00000000UL /**< Mode DEFAULT for RMU_RSTCAUSE */
#define RMU_RSTCAUSE_LOCKUPRST_DEFAULT (_RMU_RSTCAUSE_LOCKUPRST_DEFAULT << 5) /**< Shifted mode DEFAULT for RMU_RSTCAUSE */
#define RMU_RSTCAUSE_SYSREQRST (0x1UL << 6) /**< System Request Reset */
#define _RMU_RSTCAUSE_SYSREQRST_SHIFT 6 /**< Shift value for RMU_SYSREQRST */
#define _RMU_RSTCAUSE_SYSREQRST_MASK 0x40UL /**< Bit mask for RMU_SYSREQRST */
#define _RMU_RSTCAUSE_SYSREQRST_DEFAULT 0x00000000UL /**< Mode DEFAULT for RMU_RSTCAUSE */
#define RMU_RSTCAUSE_SYSREQRST_DEFAULT (_RMU_RSTCAUSE_SYSREQRST_DEFAULT << 6) /**< Shifted mode DEFAULT for RMU_RSTCAUSE */
#define RMU_RSTCAUSE_EM4RST (0x1UL << 7) /**< EM4 Reset */
#define _RMU_RSTCAUSE_EM4RST_SHIFT 7 /**< Shift value for RMU_EM4RST */
#define _RMU_RSTCAUSE_EM4RST_MASK 0x80UL /**< Bit mask for RMU_EM4RST */
#define _RMU_RSTCAUSE_EM4RST_DEFAULT 0x00000000UL /**< Mode DEFAULT for RMU_RSTCAUSE */
#define RMU_RSTCAUSE_EM4RST_DEFAULT (_RMU_RSTCAUSE_EM4RST_DEFAULT << 7) /**< Shifted mode DEFAULT for RMU_RSTCAUSE */
#define RMU_RSTCAUSE_EM4WURST (0x1UL << 8) /**< EM4 Wake-up Reset */
#define _RMU_RSTCAUSE_EM4WURST_SHIFT 8 /**< Shift value for RMU_EM4WURST */
#define _RMU_RSTCAUSE_EM4WURST_MASK 0x100UL /**< Bit mask for RMU_EM4WURST */
#define _RMU_RSTCAUSE_EM4WURST_DEFAULT 0x00000000UL /**< Mode DEFAULT for RMU_RSTCAUSE */
#define RMU_RSTCAUSE_EM4WURST_DEFAULT (_RMU_RSTCAUSE_EM4WURST_DEFAULT << 8) /**< Shifted mode DEFAULT for RMU_RSTCAUSE */
#define RMU_RSTCAUSE_BODAVDD0 (0x1UL << 9) /**< AVDD0 Bod Reset */
#define _RMU_RSTCAUSE_BODAVDD0_SHIFT 9 /**< Shift value for RMU_BODAVDD0 */
#define _RMU_RSTCAUSE_BODAVDD0_MASK 0x200UL /**< Bit mask for RMU_BODAVDD0 */
#define _RMU_RSTCAUSE_BODAVDD0_DEFAULT 0x00000000UL /**< Mode DEFAULT for RMU_RSTCAUSE */
#define RMU_RSTCAUSE_BODAVDD0_DEFAULT (_RMU_RSTCAUSE_BODAVDD0_DEFAULT << 9) /**< Shifted mode DEFAULT for RMU_RSTCAUSE */
#define RMU_RSTCAUSE_BODAVDD1 (0x1UL << 10) /**< AVDD1 Bod Reset */
#define _RMU_RSTCAUSE_BODAVDD1_SHIFT 10 /**< Shift value for RMU_BODAVDD1 */
#define _RMU_RSTCAUSE_BODAVDD1_MASK 0x400UL /**< Bit mask for RMU_BODAVDD1 */
#define _RMU_RSTCAUSE_BODAVDD1_DEFAULT 0x00000000UL /**< Mode DEFAULT for RMU_RSTCAUSE */
#define RMU_RSTCAUSE_BODAVDD1_DEFAULT (_RMU_RSTCAUSE_BODAVDD1_DEFAULT << 10) /**< Shifted mode DEFAULT for RMU_RSTCAUSE */
#define RMU_RSTCAUSE_BUBODVDDDREG (0x1UL << 11) /**< Backup Brown Out Detector, VDD_DREG */
#define _RMU_RSTCAUSE_BUBODVDDDREG_SHIFT 11 /**< Shift value for RMU_BUBODVDDDREG */
#define _RMU_RSTCAUSE_BUBODVDDDREG_MASK 0x800UL /**< Bit mask for RMU_BUBODVDDDREG */
#define _RMU_RSTCAUSE_BUBODVDDDREG_DEFAULT 0x00000000UL /**< Mode DEFAULT for RMU_RSTCAUSE */
#define RMU_RSTCAUSE_BUBODVDDDREG_DEFAULT (_RMU_RSTCAUSE_BUBODVDDDREG_DEFAULT << 11) /**< Shifted mode DEFAULT for RMU_RSTCAUSE */
#define RMU_RSTCAUSE_BUBODBUVIN (0x1UL << 12) /**< Backup Brown Out Detector, BU_VIN */
#define _RMU_RSTCAUSE_BUBODBUVIN_SHIFT 12 /**< Shift value for RMU_BUBODBUVIN */
#define _RMU_RSTCAUSE_BUBODBUVIN_MASK 0x1000UL /**< Bit mask for RMU_BUBODBUVIN */
#define _RMU_RSTCAUSE_BUBODBUVIN_DEFAULT 0x00000000UL /**< Mode DEFAULT for RMU_RSTCAUSE */
#define RMU_RSTCAUSE_BUBODBUVIN_DEFAULT (_RMU_RSTCAUSE_BUBODBUVIN_DEFAULT << 12) /**< Shifted mode DEFAULT for RMU_RSTCAUSE */
#define RMU_RSTCAUSE_BUBODUNREG (0x1UL << 13) /**< Backup Brown Out Detector Unregulated Domain */
#define _RMU_RSTCAUSE_BUBODUNREG_SHIFT 13 /**< Shift value for RMU_BUBODUNREG */
#define _RMU_RSTCAUSE_BUBODUNREG_MASK 0x2000UL /**< Bit mask for RMU_BUBODUNREG */
#define _RMU_RSTCAUSE_BUBODUNREG_DEFAULT 0x00000000UL /**< Mode DEFAULT for RMU_RSTCAUSE */
#define RMU_RSTCAUSE_BUBODUNREG_DEFAULT (_RMU_RSTCAUSE_BUBODUNREG_DEFAULT << 13) /**< Shifted mode DEFAULT for RMU_RSTCAUSE */
#define RMU_RSTCAUSE_BUBODREG (0x1UL << 14) /**< Backup Brown Out Detector Regulated Domain */
#define _RMU_RSTCAUSE_BUBODREG_SHIFT 14 /**< Shift value for RMU_BUBODREG */
#define _RMU_RSTCAUSE_BUBODREG_MASK 0x4000UL /**< Bit mask for RMU_BUBODREG */
#define _RMU_RSTCAUSE_BUBODREG_DEFAULT 0x00000000UL /**< Mode DEFAULT for RMU_RSTCAUSE */
#define RMU_RSTCAUSE_BUBODREG_DEFAULT (_RMU_RSTCAUSE_BUBODREG_DEFAULT << 14) /**< Shifted mode DEFAULT for RMU_RSTCAUSE */
#define RMU_RSTCAUSE_BUMODERST (0x1UL << 15) /**< Backup mode reset */
#define _RMU_RSTCAUSE_BUMODERST_SHIFT 15 /**< Shift value for RMU_BUMODERST */
#define _RMU_RSTCAUSE_BUMODERST_MASK 0x8000UL /**< Bit mask for RMU_BUMODERST */
#define _RMU_RSTCAUSE_BUMODERST_DEFAULT 0x00000000UL /**< Mode DEFAULT for RMU_RSTCAUSE */
#define RMU_RSTCAUSE_BUMODERST_DEFAULT (_RMU_RSTCAUSE_BUMODERST_DEFAULT << 15) /**< Shifted mode DEFAULT for RMU_RSTCAUSE */
/* Bit fields for RMU CMD */
#define _RMU_CMD_RESETVALUE 0x00000000UL /**< Default value for RMU_CMD */
#define _RMU_CMD_MASK 0x00000001UL /**< Mask for RMU_CMD */
#define RMU_CMD_RCCLR (0x1UL << 0) /**< Reset Cause Clear */
#define _RMU_CMD_RCCLR_SHIFT 0 /**< Shift value for RMU_RCCLR */
#define _RMU_CMD_RCCLR_MASK 0x1UL /**< Bit mask for RMU_RCCLR */
#define _RMU_CMD_RCCLR_DEFAULT 0x00000000UL /**< Mode DEFAULT for RMU_CMD */
#define RMU_CMD_RCCLR_DEFAULT (_RMU_CMD_RCCLR_DEFAULT << 0) /**< Shifted mode DEFAULT for RMU_CMD */
/** @} End of group EFM32LG_RMU */
/** @} End of group Parts */
| apache-2.0 |
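A hedged C sketch of conventional use of these definitions, assuming the device header exposes a peripheral instance named RMU (as EFM32 device headers do):

#include "em_device.h" /* assumed device header that provides the RMU instance */

void check_and_clear_reset_cause(void)
{
  uint32_t cause = RMU->RSTCAUSE;
  if (cause & RMU_RSTCAUSE_WDOGRST)
  {
    /* the last reset was triggered by the watchdog */
  }
  RMU->CMD = RMU_CMD_RCCLR; /* clear the latched causes before the next reset */
}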
ghchinoy/tensorflow | tensorflow/core/kernels/reduction_ops_gpu_int.cu.cc | 2716 | /* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#if GOOGLE_CUDA
#define EIGEN_USE_GPU
#include "tensorflow/core/kernels/reduction_gpu_kernels.cu.h"
namespace tensorflow {
namespace functor {
typedef Eigen::GpuDevice GPUDevice;
// Derive Index type. int (32-bit) or long (64-bit) depending on the
// compile-time configuration. "float" here is not relevant.
// TODO(zhifengc): Moves the definition to TTypes.
typedef TTypes<float>::Tensor::Index Index;
// T: the data type
// REDUCER: the reducer functor
// NUM_AXES: the number of axes to reduce
// IN_DIMS: the number of dimensions of the input tensor
#define DEFINE(T, REDUCER, IN_DIMS, NUM_AXES) \
template void ReduceFunctor<GPUDevice, REDUCER>::Reduce( \
OpKernelContext* ctx, TTypes<T, IN_DIMS - NUM_AXES>::Tensor out, \
TTypes<T, IN_DIMS>::ConstTensor in, \
const Eigen::array<Index, NUM_AXES>& reduction_axes, \
const REDUCER& reducer);
#define DEFINE_IDENTITY(T, REDUCER) \
template void ReduceFunctor<GPUDevice, REDUCER>::FillIdentity( \
const GPUDevice& d, TTypes<T>::Vec out, const REDUCER& reducer);
#define DEFINE_FOR_TYPE_AND_R(T, R) \
DEFINE(T, R, 1, 1); \
DEFINE(T, R, 2, 1); \
DEFINE(T, R, 3, 1); \
DEFINE(T, R, 3, 2); \
DEFINE_IDENTITY(T, R)
#define DEFINE_FOR_ALL_REDUCERS(T) \
DEFINE_FOR_TYPE_AND_R(T, Eigen::internal::SumReducer<T>); \
DEFINE_FOR_TYPE_AND_R(T, functor::MeanReducer<T>); \
DEFINE_FOR_TYPE_AND_R(T, functor::EuclideanNormReducer<T>); \
DEFINE_FOR_TYPE_AND_R(T, Eigen::internal::MinReducer<T>); \
DEFINE_FOR_TYPE_AND_R(T, Eigen::internal::MaxReducer<T>); \
DEFINE_FOR_TYPE_AND_R(T, Eigen::internal::ProdReducer<T>)
DEFINE_FOR_ALL_REDUCERS(int32);
DEFINE_FOR_ALL_REDUCERS(int64);
#undef DEFINE_FOR_ALL_REDUCERS
#undef DEFINE_FOR_TYPE_AND_R
#undef DEFINE
} // end namespace functor
} // end namespace tensorflow
#endif // GOOGLE_CUDA
| apache-2.0 |
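For readability, a sketch of what one DEFINE application above expands to (reformatted onto several lines; the real expansion is a single line):

// DEFINE(int32, Eigen::internal::SumReducer<int32>, 3, 2) emits this explicit instantiation:
template void ReduceFunctor<GPUDevice, Eigen::internal::SumReducer<int32>>::Reduce(
    OpKernelContext* ctx, TTypes<int32, 1>::Tensor out,  // IN_DIMS - NUM_AXES = 1 output dim
    TTypes<int32, 3>::ConstTensor in,
    const Eigen::array<Index, 2>& reduction_axes,
    const Eigen::internal::SumReducer<int32>& reducer);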
BiaoLiu/osharp-2015.8.28 | src/OSharp.Autofac/Properties/AssemblyInfo.cs | 1374 | using System.Reflection;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
// General information about an assembly is controlled through the following
// set of attributes. Change these attribute values to modify the information
// associated with an assembly.
[assembly: AssemblyTitle("OSharp.Autofac")]
[assembly: AssemblyDescription("")]
[assembly: AssemblyConfiguration("")]
[assembly: AssemblyCompany("柳柳软件")]
[assembly: AssemblyProduct("OSharp.Autofac")]
[assembly: AssemblyCopyright("Copyright © 柳柳软件 2015")]
[assembly: AssemblyTrademark("")]
[assembly: AssemblyCulture("")]
//Setting ComVisible to false makes the types in this assembly not visible
//to COM components. If you need to access a type in this assembly from
//COM, set the ComVisible attribute to true on that type.
[assembly: ComVisible(false)]
// The following GUID is for the ID of the typelib if this project is exposed to COM
[assembly: Guid("50a2b6a0-8e08-4554-9595-6801f627e16b")]
// Version information for an assembly consists of the following four values:
//
//      Major Version
//      Minor Version
//      Build Number
//      Revision
//
// You can specify all the values or you can default the Build and Revision Numbers
// by using the '*' as shown below:
// [assembly: AssemblyVersion("1.0.*")]
[assembly: AssemblyVersion("3.0.0.0")]
[assembly: AssemblyFileVersion("3.0.3.0")]
[assembly: AssemblyInformationalVersion("3.0.3")]
| apache-2.0 |
jeorme/OG-Platform | sesame/sesame-engine/src/main/java/com/opengamma/sesame/graph/FunctionIdProvider.java | 810 | /**
* Copyright (C) 2014 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.sesame.graph;
/**
* Provides IDs for function instances.
* <p>
* If two functions are logically equal they will be assigned the same ID. This is used by the caching mechanism
* to allow safe sharing of values calculated by different functions.
* <p>
* Two function instances are considered to be logically equal if their {@link FunctionModelNode} instances are
* equal.
*/
public interface FunctionIdProvider {
/**
* Returns the ID of a function instance.
*
* @param fn a function
* @return the function's ID
* @throws IllegalArgumentException if the function has no known ID
*/
FunctionId getFunctionId(Object fn);
}
| apache-2.0 |
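A hedged sketch of the caching contract described in the Javadoc above; idProvider, the two function instances, and the CacheKey/cache types are hypothetical stand-ins rather than OpenGamma classes:

FunctionId idA = idProvider.getFunctionId(fnA);
FunctionId idB = idProvider.getFunctionId(fnB); // fnB was built from an equal FunctionModelNode
assert idA.equals(idB);
// Equal IDs mean a value computed by fnA may safely be served to fnB from the cache:
cache.put(new CacheKey(idA, inputs), valueComputedByFnA); // CacheKey and cache are hypothetical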
indi60/hbase-pmc | target/hbase-0.94.1/hbase-0.94.1/src/test/java/org/apache/hadoop/hbase/client/TestMetaScanner.java | 3874 | /**
* Copyright 2010 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.client;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import static org.mockito.Mockito.*;
@Category(MediumTests.class)
public class TestMetaScanner {
final Log LOG = LogFactory.getLog(getClass());
private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
@BeforeClass
public static void setUpBeforeClass() throws Exception {
TEST_UTIL.startMiniCluster(1);
}
/**
* @throws java.lang.Exception
*/
@AfterClass
public static void tearDownAfterClass() throws Exception {
TEST_UTIL.shutdownMiniCluster();
}
@Test
public void testMetaScanner() throws Exception {
LOG.info("Starting testMetaScanner");
final byte[] TABLENAME = Bytes.toBytes("testMetaScanner");
final byte[] FAMILY = Bytes.toBytes("family");
TEST_UTIL.createTable(TABLENAME, FAMILY);
Configuration conf = TEST_UTIL.getConfiguration();
HTable table = new HTable(conf, TABLENAME);
TEST_UTIL.createMultiRegions(conf, table, FAMILY,
new byte[][]{
HConstants.EMPTY_START_ROW,
Bytes.toBytes("region_a"),
Bytes.toBytes("region_b")});
// Make sure all the regions are deployed
TEST_UTIL.countRows(table);
MetaScanner.MetaScannerVisitor visitor =
mock(MetaScanner.MetaScannerVisitor.class);
doReturn(true).when(visitor).processRow((Result)anyObject());
// Scanning the entire table should give us three rows
MetaScanner.metaScan(conf, visitor, TABLENAME);
verify(visitor, times(3)).processRow((Result)anyObject());
// Scanning the table with a specified empty start row should also
// give us three META rows
reset(visitor);
doReturn(true).when(visitor).processRow((Result)anyObject());
MetaScanner.metaScan(conf, visitor, TABLENAME, HConstants.EMPTY_BYTE_ARRAY, 1000);
verify(visitor, times(3)).processRow((Result)anyObject());
// Scanning the table starting in the middle should give us two rows:
// region_a and region_b
reset(visitor);
doReturn(true).when(visitor).processRow((Result)anyObject());
MetaScanner.metaScan(conf, visitor, TABLENAME, Bytes.toBytes("region_ac"), 1000);
verify(visitor, times(2)).processRow((Result)anyObject());
// Scanning with a limit of 1 should only give us one row
reset(visitor);
doReturn(true).when(visitor).processRow((Result)anyObject());
MetaScanner.metaScan(conf, visitor, TABLENAME, Bytes.toBytes("region_ac"), 1);
verify(visitor, times(1)).processRow((Result)anyObject());
table.close();
}
@org.junit.Rule
public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu =
new org.apache.hadoop.hbase.ResourceCheckerJUnitRule();
}
| apache-2.0 |
romartin/kie-wb-common | kie-wb-common-stunner/kie-wb-common-stunner-sets/kie-wb-common-stunner-bpmn/kie-wb-common-stunner-bpmn-project-backend/src/main/java/org/kie/workbench/common/stunner/bpmn/project/backend/service/ProjectOpenReusableSubprocessServiceImpl.java | 3469 | /*
* Copyright 2021 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kie.workbench.common.stunner.bpmn.project.backend.service;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Supplier;
import javax.enterprise.context.ApplicationScoped;
import javax.inject.Inject;
import org.jboss.errai.bus.server.annotations.Service;
import org.kie.soup.commons.util.Sets;
import org.kie.workbench.common.services.refactoring.model.index.terms.valueterms.ValueIndexTerm;
import org.kie.workbench.common.services.refactoring.model.index.terms.valueterms.ValueResourceIndexTerm;
import org.kie.workbench.common.services.refactoring.service.RefactoringQueryService;
import org.kie.workbench.common.services.refactoring.service.ResourceType;
import org.kie.workbench.common.stunner.bpmn.project.backend.query.FindBpmnProcessIdsQuery;
import org.kie.workbench.common.stunner.bpmn.project.service.ProjectOpenReusableSubprocessService;
import org.uberfire.backend.vfs.Path;
@ApplicationScoped
@Service
public class ProjectOpenReusableSubprocessServiceImpl implements ProjectOpenReusableSubprocessService {
private final RefactoringQueryService queryService;
private final Supplier<ResourceType> resourceType;
private final Supplier<String> queryName;
private final Set<ValueIndexTerm> queryTerms;
// CDI proxy.
protected ProjectOpenReusableSubprocessServiceImpl() {
this(null);
}
@Inject
public ProjectOpenReusableSubprocessServiceImpl(final RefactoringQueryService queryService) {
this.queryService = queryService;
this.resourceType = () -> ResourceType.BPMN2;
this.queryName = () -> FindBpmnProcessIdsQuery.NAME;
this.queryTerms = new Sets.Builder<ValueIndexTerm>()
.add(new ValueResourceIndexTerm("*",
resourceType.get(),
ValueIndexTerm.TermSearchType.WILDCARD))
.build();
}
String getQueryName() {
return queryName.get();
}
Set<ValueIndexTerm> createQueryTerms() {
return queryTerms;
}
@Override
@SuppressWarnings("unchecked")
public List<String> openReusableSubprocess(String processId) {
List<String> answer = new ArrayList<>();
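        // Each query-result row value maps process ids to the Path of the BPMN file declaring
        // them (an assumption based on FindBpmnProcessIdsQuery); keep the first row that
        // contains the requested id.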
Map<String, Path> subprocesses = queryService
.query(getQueryName(), createQueryTerms())
.stream()
.map(row -> (Map<String, Path>) row.getValue())
.filter(row -> row.get(processId) != null)
.findFirst()
.orElse(null);
if (subprocesses == null) {
return answer;
}
answer.add(subprocesses.get(processId).getFileName());
answer.add(subprocesses.get(processId).toURI());
return answer;
}
}
| apache-2.0 |
mnagy/origin | assets/test/integration/e2e.js | 2284 | require('jasmine-beforeall');
var h = require('./helpers.js');
describe('', function() {
afterAll(function(){
h.afterAllTeardown();
});
// This UI test suite expects to be run as part of hack/test-end-to-end.sh
// It requires the example project be created with all of its resources in order to pass
describe('unauthenticated user', function() {
beforeEach(function() {
h.commonSetup();
});
afterEach(function() {
h.commonTeardown();
});
it('should be able to log in', function() {
browser.get('/');
// The login page doesn't use angular, so we have to use the underlying WebDriver instance
var driver = browser.driver;
driver.wait(function() {
return driver.isElementPresent(by.name("username"));
}, 3000);
expect(browser.driver.getCurrentUrl()).toMatch(/\/login/);
expect(browser.driver.getTitle()).toMatch(/Login -/);
h.login(true);
expect(browser.getTitle()).toEqual("OpenShift Web Console");
expect(element(by.css(".navbar-utility .username")).getText()).toEqual("e2e-user");
});
});
describe('authenticated e2e-user', function() {
beforeEach(function() {
h.commonSetup();
h.login();
});
afterEach(function() {
h.commonTeardown();
});
describe('with test project', function() {
it('should be able to list the test project', function() {
browser.get('/').then(function() {
h.waitForPresence('h2.project', 'test');
});
});
it('should have access to the test project', function() {
h.goToPage('/project/test');
h.waitForPresence('h1', 'test');
h.waitForPresence('.component .service', 'database');
h.waitForPresence('.component .service', 'frontend');
h.waitForPresence('.component .route', 'www.example.com');
h.waitForPresence('.pod-template-build a', '#1');
h.waitForPresence('.deployment-trigger', 'from image change');
// Check the pod count inside the donut chart for each rc.
h.waitForPresence('#service-database .pod-count', '1');
h.waitForPresence('#service-frontend .pod-count', '2');
// TODO: validate correlated images, builds, source
});
});
});
});
| apache-2.0 |
rheinwein/panamax-api | app/controllers/search_controller.rb | 1231 | class SearchController < ApplicationController
using ArrayItemWrapper
def index
q, limit, type = search_params.values_at(:q, :limit, :type)
respond_with perform_search(q, limit, type).merge(q: q)
end
private
def perform_search(q, limit, *types)
types = %w(template local_image remote_image) if types.compact.empty?
{}.tap do |results|
results[:templates] = wrapped_templates(q, limit) if types.include? 'template'
results[:local_images] = wrapped_local_images(q, limit) if types.include? 'local_image'
if types.include? 'remote_image'
results[:remote_images], results[:errors] = wrapped_remote_images(q, limit)
end
end
end
def search_params
# Coerce limit to an integer
params[:limit] = params[:limit].to_i if params[:limit].present?
params.permit(:q, :type, :limit)
end
def wrapped_templates(q, limit)
Template.search(q, limit).wrap(TemplateSerializer)
end
def wrapped_local_images(q, limit)
LocalImage.search(q, limit).wrap(LocalImageSearchResultSerializer)
end
def wrapped_remote_images(q, limit)
images, errors = Registry.search(q, limit)
return images.wrap(RemoteImageSearchResultSerializer), errors
end
end
| apache-2.0 |
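For reference, a hedged sketch of the merged payload index responds with when all three types are requested; the keys follow the hash built in perform_search, the contents are illustrative:

# {
#   templates:     [...],  # TemplateSerializer-wrapped matches
#   local_images:  [...],  # LocalImageSearchResultSerializer-wrapped matches
#   remote_images: [...],  # RemoteImageSearchResultSerializer-wrapped matches
#   errors:        [...],  # registry errors, only populated for remote searches
#   q: "redis"
# }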
gale320/staff | das/common/src/ProviderFactory.cpp | 4315 | /*
* Copyright 2010 Utkin Dmitry
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* This file is part of the WSF Staff project.
* Please, visit http://code.google.com/p/staff for more information.
*/
#include <staff/utils/Log.h>
#include <staff/utils/SharedPtr.h>
#include <staff/utils/File.h>
#include <staff/utils/DynamicLibrary.h>
#include <staff/utils/PluginManager.h>
#include <staff/common/Runtime.h>
#include "ProviderFactory.h"
namespace staff
{
namespace das
{
class ProviderFactory::ProviderFactoryImpl
{
public:
typedef std::map<std::string, IProviderAllocator*> ProviderAllocatorMap;
public:
void Init()
{
const std::string sProvidersDir = Runtime::Inst().GetComponentHome("staff.das")
+ STAFF_PATH_SEPARATOR "providers";
StringList lsProviderDirs;
// find directories with providers
File(sProvidersDir).List(lsProviderDirs, "*", File::AttributeDirectory);
if (lsProviderDirs.size() == 0)
{
LogDebug() << "providers is not found";
}
for (StringList::const_iterator itDir = lsProviderDirs.begin();
itDir != lsProviderDirs.end(); ++itDir)
{
// finding libraries with providers
StringList lsProvidersLibs;
StringList lsProvidersNames;
const std::string& sProviderDir =
sProvidersDir + STAFF_PATH_SEPARATOR + *itDir + STAFF_PATH_SEPARATOR;
File(sProviderDir).List(lsProvidersLibs, "*" STAFF_LIBRARY_VEREXT, File::AttributeRegularFile);
for (StringList::const_iterator itProvider = lsProvidersLibs.begin();
itProvider != lsProvidersLibs.end(); ++itProvider)
{
const std::string& sProviderPluginPath = sProviderDir + *itProvider;
try
{
// loading provider
LogDebug() << "Loading DAS provider: " << sProviderPluginPath;
IProviderAllocator* pAllocator = m_tPluginManager.Load(sProviderPluginPath, true);
STAFF_ASSERT(pAllocator, "Can't get allocator for provider: " + *itProvider);
pAllocator->GetProvidersList(lsProvidersNames);
for (StringList::const_iterator itProviderName = lsProvidersNames.begin();
itProviderName != lsProvidersNames.end(); ++itProviderName)
{
LogDebug1() << "Setting DAS provider: " << *itProviderName;
m_mAllocators[*itProviderName] = pAllocator;
}
}
catch (const Exception& rEx)
{
LogWarning() << "Can't load provider: " << sProviderPluginPath << ": " << rEx.what();
continue;
}
}
}
}
public:
ProviderAllocatorMap m_mAllocators;
PluginManager<IProviderAllocator> m_tPluginManager;
};
ProviderFactory::ProviderFactory()
{
m_pImpl = new ProviderFactoryImpl;
try
{
m_pImpl->Init();
}
STAFF_CATCH_ALL
}
ProviderFactory::~ProviderFactory()
{
delete m_pImpl;
}
ProviderFactory& ProviderFactory::Inst()
{
static ProviderFactory tInst;
return tInst;
}
void ProviderFactory::GetProviders(StringList& rlsProviders)
{
rlsProviders.clear();
for (ProviderFactoryImpl::ProviderAllocatorMap::const_iterator itProvider = m_pImpl->m_mAllocators.begin();
itProvider != m_pImpl->m_mAllocators.end(); ++itProvider)
{
rlsProviders.push_back(itProvider->first);
}
}
PProvider ProviderFactory::Allocate(const std::string& sProvider)
{
ProviderFactoryImpl::ProviderAllocatorMap::iterator itProvider = m_pImpl->m_mAllocators.find(sProvider);
STAFF_ASSERT(itProvider != m_pImpl->m_mAllocators.end(), "Can't get allocator for " + sProvider);
return itProvider->second->Allocate(sProvider);
}
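    // A minimal usage sketch (illustrative only; the provider name below is
    // hypothetical and depends on which provider plugins are installed under
    // the providers directory):
    //
    //   PProvider tpProvider = ProviderFactory::Inst().Allocate("staff.das.Sqlite");
    //   // tpProvider now holds a provider instance created by the matching
    //   // IProviderAllocator plugin.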
}
}
| apache-2.0 |
zero-rp/miniblink49 | third_party/WebKit/Source/core/layout/LayoutMenuList.cpp | 22827 | /*
* This file is part of the select element layoutObject in WebCore.
*
* Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
* Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Apple Inc. All rights reserved.
* 2009 Torch Mobile Inc. All rights reserved. (http://www.torchmobile.com/)
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public License
* along with this library; see the file COPYING.LIB. If not, write to
* the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA 02110-1301, USA.
*
*/
#include "config.h"
#include "core/layout/LayoutMenuList.h"
#include "core/HTMLNames.h"
#include "core/css/CSSFontSelector.h"
#include "core/css/resolver/StyleResolver.h"
#include "core/dom/AXObjectCache.h"
#include "core/dom/NodeComputedStyle.h"
#include "core/frame/FrameHost.h"
#include "core/frame/FrameView.h"
#include "core/frame/LocalFrame.h"
#include "core/frame/Settings.h"
#include "core/html/HTMLOptGroupElement.h"
#include "core/html/HTMLOptionElement.h"
#include "core/html/HTMLSelectElement.h"
#include "core/layout/LayoutBR.h"
#include "core/layout/LayoutScrollbar.h"
#include "core/layout/LayoutTheme.h"
#include "core/layout/LayoutView.h"
#include "core/page/ChromeClient.h"
#include "platform/fonts/FontCache.h"
#include "platform/geometry/IntSize.h"
#include "platform/text/PlatformLocale.h"
#include <math.h>
namespace blink {
using namespace HTMLNames;
LayoutMenuList::LayoutMenuList(Element* element)
: LayoutFlexibleBox(element)
, m_buttonText(nullptr)
, m_innerBlock(nullptr)
, m_optionsChanged(true)
, m_isEmpty(false)
, m_hasUpdatedActiveOption(false)
, m_popupIsVisible(false)
, m_optionsWidth(0)
, m_lastActiveIndex(-1)
, m_indexToSelectOnCancel(-1)
{
ASSERT(isHTMLSelectElement(element));
}
LayoutMenuList::~LayoutMenuList()
{
ASSERT(!m_popup);
}
void LayoutMenuList::willBeDestroyed()
{
if (m_popup)
m_popup->disconnectClient();
m_popup = nullptr;
LayoutFlexibleBox::willBeDestroyed();
}
// FIXME: Instead of this hack we should add a ShadowRoot to <select> with no insertion point
// to prevent children from rendering.
bool LayoutMenuList::isChildAllowed(LayoutObject* object, const ComputedStyle&) const
{
return object->isAnonymous() && !object->isLayoutFullScreen();
}
void LayoutMenuList::createInnerBlock()
{
if (m_innerBlock) {
ASSERT(firstChild() == m_innerBlock);
ASSERT(!m_innerBlock->nextSibling());
return;
}
// Create an anonymous block.
ASSERT(!firstChild());
m_innerBlock = createAnonymousBlock();
m_buttonText = new LayoutText(&document(), StringImpl::empty());
// We need to set the text explicitly even though it was passed to the
// constructor, because LayoutText does not retain the constructor-supplied
// text when it is re-transformed.
m_buttonText->setStyle(mutableStyle());
m_innerBlock->addChild(m_buttonText);
adjustInnerStyle();
LayoutFlexibleBox::addChild(m_innerBlock);
}
void LayoutMenuList::adjustInnerStyle()
{
ComputedStyle& innerStyle = m_innerBlock->mutableStyleRef();
innerStyle.setFlexGrow(1);
innerStyle.setFlexShrink(1);
// min-width: 0; is needed for correct shrinking.
innerStyle.setMinWidth(Length(0, Fixed));
// Use margin:auto instead of align-items:center to get safe centering, i.e.
// when the content overflows, treat it the same as align-items: flex-start.
// But we only do that for the cases where html.css would otherwise use center.
if (style()->alignItemsPosition() == ItemPositionCenter) {
innerStyle.setMarginTop(Length());
innerStyle.setMarginBottom(Length());
innerStyle.setAlignSelfPosition(ItemPositionFlexStart);
}
innerStyle.setPaddingLeft(Length(LayoutTheme::theme().popupInternalPaddingLeft(styleRef()), Fixed));
innerStyle.setPaddingRight(Length(LayoutTheme::theme().popupInternalPaddingRight(styleRef()), Fixed));
innerStyle.setPaddingTop(Length(LayoutTheme::theme().popupInternalPaddingTop(styleRef()), Fixed));
innerStyle.setPaddingBottom(Length(LayoutTheme::theme().popupInternalPaddingBottom(styleRef()), Fixed));
if (m_optionStyle) {
if ((m_optionStyle->direction() != innerStyle.direction() || m_optionStyle->unicodeBidi() != innerStyle.unicodeBidi()))
m_innerBlock->setNeedsLayoutAndPrefWidthsRecalcAndFullPaintInvalidation(LayoutInvalidationReason::StyleChange);
innerStyle.setTextAlign(style()->isLeftToRightDirection() ? LEFT : RIGHT);
innerStyle.setDirection(m_optionStyle->direction());
innerStyle.setUnicodeBidi(m_optionStyle->unicodeBidi());
}
}
inline HTMLSelectElement* LayoutMenuList::selectElement() const
{
return toHTMLSelectElement(node());
}
void LayoutMenuList::addChild(LayoutObject* newChild, LayoutObject* beforeChild)
{
m_innerBlock->addChild(newChild, beforeChild);
ASSERT(m_innerBlock == firstChild());
if (AXObjectCache* cache = document().existingAXObjectCache())
cache->childrenChanged(this);
}
void LayoutMenuList::removeChild(LayoutObject* oldChild)
{
if (oldChild == m_innerBlock || !m_innerBlock) {
LayoutFlexibleBox::removeChild(oldChild);
m_innerBlock = nullptr;
} else {
m_innerBlock->removeChild(oldChild);
}
}
void LayoutMenuList::styleDidChange(StyleDifference diff, const ComputedStyle* oldStyle)
{
LayoutBlock::styleDidChange(diff, oldStyle);
if (!m_innerBlock)
createInnerBlock();
m_buttonText->setStyle(mutableStyle());
adjustInnerStyle();
bool fontChanged = !oldStyle || oldStyle->font() != style()->font();
if (fontChanged)
updateOptionsWidth();
}
void LayoutMenuList::updateOptionsWidth()
{
float maxOptionWidth = 0;
const WillBeHeapVector<RawPtrWillBeMember<HTMLElement>>& listItems = selectElement()->listItems();
int size = listItems.size();
for (int i = 0; i < size; ++i) {
HTMLElement* element = listItems[i];
if (!isHTMLOptionElement(*element))
continue;
String text = toHTMLOptionElement(element)->textIndentedToRespectGroupLabel();
applyTextTransform(style(), text, ' ');
if (LayoutTheme::theme().popupOptionSupportsTextIndent()) {
// Add in the option's text indent. We can't calculate percentage values for now.
float optionWidth = 0;
if (const ComputedStyle* optionStyle = element->computedStyle())
optionWidth += minimumValueForLength(optionStyle->textIndent(), 0);
if (!text.isEmpty())
optionWidth += style()->font().width(text);
maxOptionWidth = std::max(maxOptionWidth, optionWidth);
} else if (!text.isEmpty()) {
maxOptionWidth = std::max(maxOptionWidth, style()->font().width(text));
}
}
int width = static_cast<int>(ceilf(maxOptionWidth));
if (m_optionsWidth == width)
return;
m_optionsWidth = width;
if (parent())
setNeedsLayoutAndPrefWidthsRecalcAndFullPaintInvalidation(LayoutInvalidationReason::MenuWidthChanged);
}
void LayoutMenuList::updateFromElement()
{
if (m_optionsChanged) {
updateOptionsWidth();
m_optionsChanged = false;
}
if (m_popupIsVisible)
m_popup->updateFromElement();
updateText();
}
void LayoutMenuList::setIndexToSelectOnCancel(int listIndex)
{
m_indexToSelectOnCancel = listIndex;
updateText();
}
void LayoutMenuList::updateText()
{
if (m_indexToSelectOnCancel >= 0)
setTextFromOption(selectElement()->listToOptionIndex(m_indexToSelectOnCancel));
else if (selectElement()->suggestedIndex() >= 0)
setTextFromOption(selectElement()->suggestedIndex());
else
setTextFromOption(selectElement()->selectedIndex());
}
void LayoutMenuList::setTextFromOption(int optionIndex)
{
HTMLSelectElement* select = selectElement();
const WillBeHeapVector<RawPtrWillBeMember<HTMLElement>>& listItems = select->listItems();
const int size = listItems.size();
String text = emptyString();
m_optionStyle.clear();
if (multiple()) {
unsigned selectedCount = 0;
int firstSelectedIndex = -1;
for (int i = 0; i < size; ++i) {
Element* element = listItems[i];
if (!isHTMLOptionElement(*element))
continue;
if (toHTMLOptionElement(element)->selected()) {
if (++selectedCount == 1)
firstSelectedIndex = i;
}
}
if (selectedCount == 1) {
ASSERT(0 <= firstSelectedIndex);
ASSERT(firstSelectedIndex < size);
HTMLOptionElement* selectedOptionElement = toHTMLOptionElement(listItems[firstSelectedIndex]);
ASSERT(selectedOptionElement->selected());
text = selectedOptionElement->textIndentedToRespectGroupLabel();
m_optionStyle = selectedOptionElement->mutableComputedStyle();
} else {
Locale& locale = select->locale();
String localizedNumberString = locale.convertToLocalizedNumber(String::number(selectedCount));
text = locale.queryString(WebLocalizedString::SelectMenuListText, localizedNumberString);
ASSERT(!m_optionStyle);
}
} else {
const int i = select->optionToListIndex(optionIndex);
if (i >= 0 && i < size) {
Element* element = listItems[i];
if (isHTMLOptionElement(*element)) {
text = toHTMLOptionElement(element)->textIndentedToRespectGroupLabel();
m_optionStyle = element->mutableComputedStyle();
}
}
}
setText(text.stripWhiteSpace());
didUpdateActiveOption(optionIndex);
}
void LayoutMenuList::setText(const String& s)
{
if (s.isEmpty()) {
// FIXME: This is a hack. We need the select to have the same baseline positioning as
// any surrounding text. Without any content, we align the bottom of the select to the bottom
// of the text. With content (in this case the faked " ") we correctly align the middle of
// the select to the middle of the text. It should be possible to remove this, just set
// s.impl() into the text and have things align correctly ... crbug.com/485982
m_isEmpty = true;
m_buttonText->setText(StringImpl::create(" ", 1), true);
} else {
m_isEmpty = false;
m_buttonText->setText(s.impl(), true);
}
adjustInnerStyle();
}
String LayoutMenuList::text() const
{
return m_buttonText && !m_isEmpty ? m_buttonText->text() : String();
}
LayoutRect LayoutMenuList::controlClipRect(const LayoutPoint& additionalOffset) const
{
// Clip to the intersection of the outer content box and the inner box's content box.
// This will leave room for the arrows which sit in the inner box padding,
// and if the inner box ever spills out of the outer box, that will get clipped too.
LayoutRect outerBox = contentBoxRect();
outerBox.moveBy(additionalOffset);
LayoutRect innerBox(additionalOffset + m_innerBlock->location()
+ LayoutSize(m_innerBlock->paddingLeft(), m_innerBlock->paddingTop())
, m_innerBlock->contentSize());
return intersection(outerBox, innerBox);
}
void LayoutMenuList::computeIntrinsicLogicalWidths(LayoutUnit& minLogicalWidth, LayoutUnit& maxLogicalWidth) const
{
maxLogicalWidth = std::max(m_optionsWidth, LayoutTheme::theme().minimumMenuListSize(styleRef())) + m_innerBlock->paddingLeft() + m_innerBlock->paddingRight();
if (!style()->width().hasPercent())
minLogicalWidth = maxLogicalWidth;
}
void LayoutMenuList::showPopup()
{
if (m_popupIsVisible)
return;
if (document().frameHost()->chromeClient().hasOpenedPopup())
return;
if (!m_popup)
m_popup = document().frameHost()->chromeClient().openPopupMenu(*document().frame(), this);
m_popupIsVisible = true;
FloatQuad quad(localToAbsoluteQuad(FloatQuad(borderBoundingBox())));
IntSize size = pixelSnappedIntRect(frameRect()).size();
HTMLSelectElement* select = selectElement();
m_popup->show(quad, size, select->optionToListIndex(select->selectedIndex()));
if (AXObjectCache* cache = document().existingAXObjectCache())
cache->didShowMenuListPopup(this);
}
void LayoutMenuList::hidePopup()
{
if (m_popup)
m_popup->hide();
}
void LayoutMenuList::valueChanged(unsigned listIndex, bool fireOnChange)
{
// Check to ensure a page navigation has not occurred while
// the popup was up.
Document& doc = toElement(node())->document();
if (&doc != doc.frame()->document())
return;
setIndexToSelectOnCancel(-1);
HTMLSelectElement* select = selectElement();
select->optionSelectedByUser(select->listToOptionIndex(listIndex), fireOnChange);
}
void LayoutMenuList::listBoxSelectItem(int listIndex, bool allowMultiplySelections, bool shift, bool fireOnChangeNow)
{
selectElement()->listBoxSelectItem(listIndex, allowMultiplySelections, shift, fireOnChangeNow);
}
bool LayoutMenuList::multiple() const
{
return selectElement()->multiple();
}
IntRect LayoutMenuList::elementRectRelativeToViewport() const
{
// We don't use absoluteBoundingBoxRect() because it can return an IntRect
// larger than the actual size by 1px.
return selectElement()->document().view()->contentsToViewport(roundedIntRect(absoluteBoundingBoxFloatRect()));
}
Element& LayoutMenuList::ownerElement() const
{
return *selectElement();
}
const ComputedStyle* LayoutMenuList::computedStyleForItem(Element& element) const
{
return element.computedStyle() ? element.computedStyle() : element.ensureComputedStyle();
}
void LayoutMenuList::didSetSelectedIndex(int listIndex)
{
didUpdateActiveOption(selectElement()->listToOptionIndex(listIndex));
}
void LayoutMenuList::didUpdateActiveOption(int optionIndex)
{
if (!document().existingAXObjectCache())
return;
if (m_lastActiveIndex == optionIndex)
return;
m_lastActiveIndex = optionIndex;
HTMLSelectElement* select = selectElement();
int listIndex = select->optionToListIndex(optionIndex);
if (listIndex < 0 || listIndex >= static_cast<int>(select->listItems().size()))
return;
// We skip sending accessibility notifications for the very first option, otherwise
// we get extra focus and select events that are undesired.
if (!m_hasUpdatedActiveOption) {
m_hasUpdatedActiveOption = true;
return;
}
document().existingAXObjectCache()->handleUpdateActiveMenuOption(this, optionIndex);
}
String LayoutMenuList::itemText(unsigned listIndex) const
{
HTMLSelectElement* select = selectElement();
const WillBeHeapVector<RawPtrWillBeMember<HTMLElement>>& listItems = select->listItems();
if (listIndex >= listItems.size())
return String();
String itemString;
Element* element = listItems[listIndex];
if (isHTMLOptGroupElement(*element))
itemString = toHTMLOptGroupElement(*element).groupLabelText();
else if (isHTMLOptionElement(*element))
itemString = toHTMLOptionElement(*element).textIndentedToRespectGroupLabel();
applyTextTransform(style(), itemString, ' ');
return itemString;
}
String LayoutMenuList::itemAccessibilityText(unsigned listIndex) const
{
// Allow the accessible name be changed if necessary.
const WillBeHeapVector<RawPtrWillBeMember<HTMLElement>>& listItems = selectElement()->listItems();
if (listIndex >= listItems.size())
return String();
return listItems[listIndex]->fastGetAttribute(aria_labelAttr);
}
String LayoutMenuList::itemToolTip(unsigned listIndex) const
{
const WillBeHeapVector<RawPtrWillBeMember<HTMLElement>>& listItems = selectElement()->listItems();
if (listIndex >= listItems.size())
return String();
return listItems[listIndex]->title();
}
bool LayoutMenuList::itemIsEnabled(unsigned listIndex) const
{
const WillBeHeapVector<RawPtrWillBeMember<HTMLElement>>& listItems = selectElement()->listItems();
if (listIndex >= listItems.size())
return false;
HTMLElement* element = listItems[listIndex];
if (!isHTMLOptionElement(*element))
return false;
bool groupEnabled = true;
if (Element* parentElement = element->parentElement()) {
if (isHTMLOptGroupElement(*parentElement))
groupEnabled = !parentElement->isDisabledFormControl();
}
if (!groupEnabled)
return false;
return !element->isDisabledFormControl();
}
PopupMenuStyle LayoutMenuList::itemStyle(unsigned listIndex) const
{
const WillBeHeapVector<RawPtrWillBeMember<HTMLElement>>& listItems = selectElement()->listItems();
if (listIndex >= listItems.size()) {
// If we are making an out-of-bounds access, then we want to use the style
// of a different option element (index 0). However, if there isn't an option
// element at index 0, we fall back to the menu's style.
if (!listIndex)
return menuStyle();
// Try to retrieve the style of an option element we know exists (index 0).
listIndex = 0;
}
HTMLElement* element = listItems[listIndex];
Color itemBackgroundColor;
bool itemHasCustomBackgroundColor;
getItemBackgroundColor(listIndex, itemBackgroundColor, itemHasCustomBackgroundColor);
const ComputedStyle* style = element->computedStyle() ? element->computedStyle() : element->ensureComputedStyle();
return style ? PopupMenuStyle(resolveColor(*style, CSSPropertyColor), itemBackgroundColor, style->font(), style->visibility() == VISIBLE,
isHTMLOptionElement(*element) ? toHTMLOptionElement(*element).isDisplayNone() : style->display() == NONE,
style->textIndent(), style->direction(), isOverride(style->unicodeBidi()),
itemHasCustomBackgroundColor ? PopupMenuStyle::CustomBackgroundColor : PopupMenuStyle::DefaultBackgroundColor) : menuStyle();
}
void LayoutMenuList::getItemBackgroundColor(unsigned listIndex, Color& itemBackgroundColor, bool& itemHasCustomBackgroundColor) const
{
const WillBeHeapVector<RawPtrWillBeMember<HTMLElement>>& listItems = selectElement()->listItems();
if (listIndex >= listItems.size()) {
itemBackgroundColor = resolveColor(CSSPropertyBackgroundColor);
itemHasCustomBackgroundColor = false;
return;
}
HTMLElement* element = listItems[listIndex];
Color backgroundColor;
if (const ComputedStyle* style = element->computedStyle())
backgroundColor = resolveColor(*style, CSSPropertyBackgroundColor);
itemHasCustomBackgroundColor = backgroundColor.alpha();
// If the item has an opaque background color, return that.
if (!backgroundColor.hasAlpha()) {
itemBackgroundColor = backgroundColor;
return;
}
// Otherwise, the item's background is overlaid on top of the menu background.
backgroundColor = resolveColor(CSSPropertyBackgroundColor).blend(backgroundColor);
if (!backgroundColor.hasAlpha()) {
itemBackgroundColor = backgroundColor;
return;
}
// If the menu background is not opaque, then add an opaque white background behind.
itemBackgroundColor = Color(Color::white).blend(backgroundColor);
}
PopupMenuStyle LayoutMenuList::menuStyle() const
{
const LayoutObject* o = m_innerBlock ? m_innerBlock : this;
const ComputedStyle& style = o->styleRef();
return PopupMenuStyle(o->resolveColor(CSSPropertyColor), o->resolveColor(CSSPropertyBackgroundColor), style.font(), style.visibility() == VISIBLE,
style.display() == NONE, style.textIndent(), style.direction(), isOverride(style.unicodeBidi()));
}
LayoutUnit LayoutMenuList::clientPaddingLeft() const
{
return paddingLeft() + m_innerBlock->paddingLeft();
}
const int endOfLinePadding = 2;
LayoutUnit LayoutMenuList::clientPaddingRight() const
{
if (style()->appearance() == MenulistPart || style()->appearance() == MenulistButtonPart) {
// For these appearance values, the theme applies padding to leave room for the
// drop-down button. But leaving room for the button inside the popup menu itself
// looks strange, so we return a small default padding to avoid having a large empty
// space appear on the side of the popup menu.
return endOfLinePadding;
}
// If the appearance isn't MenulistPart, then the select is styled (non-native), so
// we want to return the user specified padding.
return paddingRight() + m_innerBlock->paddingRight();
}
int LayoutMenuList::listSize() const
{
return selectElement()->listItems().size();
}
int LayoutMenuList::selectedIndex() const
{
HTMLSelectElement* select = selectElement();
return select->optionToListIndex(select->selectedIndex());
}
void LayoutMenuList::popupDidHide()
{
m_popupIsVisible = false;
if (AXObjectCache* cache = document().existingAXObjectCache())
cache->didHideMenuListPopup(this);
}
void LayoutMenuList::popupDidCancel()
{
if (m_indexToSelectOnCancel >= 0)
valueChanged(m_indexToSelectOnCancel);
}
bool LayoutMenuList::itemIsSeparator(unsigned listIndex) const
{
const WillBeHeapVector<RawPtrWillBeMember<HTMLElement>>& listItems = selectElement()->listItems();
return listIndex < listItems.size() && isHTMLHRElement(*listItems[listIndex]);
}
bool LayoutMenuList::itemIsLabel(unsigned listIndex) const
{
const WillBeHeapVector<RawPtrWillBeMember<HTMLElement>>& listItems = selectElement()->listItems();
return listIndex < listItems.size() && isHTMLOptGroupElement(*listItems[listIndex]);
}
bool LayoutMenuList::itemIsSelected(unsigned listIndex) const
{
const WillBeHeapVector<RawPtrWillBeMember<HTMLElement>>& listItems = selectElement()->listItems();
if (listIndex >= listItems.size())
return false;
HTMLElement* element = listItems[listIndex];
return isHTMLOptionElement(*element) && toHTMLOptionElement(*element).selected();
}
void LayoutMenuList::provisionalSelectionChanged(unsigned listIndex)
{
setIndexToSelectOnCancel(listIndex);
}
} // namespace blink
| apache-2.0 |
pkletsko/camel | components/camel-dropbox/src/test/java/org/apache/camel/component/dropbox/integration/producer/DropboxProducerGetFolderTest.java | 5497 | /**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.dropbox.integration.producer;
import java.util.List;
import org.apache.camel.Exchange;
import org.apache.camel.Processor;
import org.apache.camel.builder.RouteBuilder;
import org.apache.camel.component.dropbox.integration.DropboxTestSupport;
import org.apache.camel.component.dropbox.util.DropboxConstants;
import org.apache.camel.component.dropbox.util.DropboxResultHeader;
import org.apache.camel.component.mock.MockEndpoint;
import org.junit.Test;
public class DropboxProducerGetFolderTest extends DropboxTestSupport {
public DropboxProducerGetFolderTest() throws Exception { }
@Test
public void testCamelDropbox() throws Exception {
template.send("direct:start", new Processor() {
@Override
public void process(Exchange exchange) throws Exception {
exchange.getIn().setHeader("test", "test");
}
});
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedMinimumMessageCount(1);
assertMockEndpointsSatisfied();
List<Exchange> exchanges = mock.getReceivedExchanges();
Exchange exchange = exchanges.get(0);
Object header = exchange.getIn().getHeader(DropboxResultHeader.DOWNLOADED_FILES.name());
Object body = exchange.getIn().getBody();
assertNotNull(header);
assertNotNull(body);
}
@Test
public void testCamelDropboxWithOptionInHeader() throws Exception {
template.send("direct:start2", new Processor() {
@Override
public void process(Exchange exchange) throws Exception {
exchange.getIn().setHeader("test", "test");
}
});
template.send("direct:start3", new Processor() {
@Override
public void process(Exchange exchange) throws Exception {
exchange.getIn().setHeader("test", "test");
exchange.getIn().setHeader(DropboxConstants.HEADER_REMOTE_PATH, "/XXX");
}
});
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedMinimumMessageCount(2);
assertMockEndpointsSatisfied();
List<Exchange> exchanges = mock.getReceivedExchanges();
Exchange exchange = exchanges.get(0);
Object header = exchange.getIn().getHeader(DropboxResultHeader.DOWNLOADED_FILES.name());
Object body = exchange.getIn().getBody();
assertNotNull(header);
assertNotNull(body);
exchange = exchanges.get(1);
header = exchange.getIn().getHeader(DropboxResultHeader.DOWNLOADED_FILES.name());
body = exchange.getIn().getBody();
assertNotNull(header);
assertNotNull(body);
}
@Test
public void testCamelDropboxHeaderHasPriorityOnParameter() throws Exception {
template.send("direct:start4", new Processor() {
@Override
public void process(Exchange exchange) throws Exception {
exchange.getIn().setHeader("test", "test");
}
});
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedMinimumMessageCount(1);
assertMockEndpointsSatisfied();
List<Exchange> exchanges = mock.getReceivedExchanges();
Exchange exchange = exchanges.get(0);
Object header = exchange.getIn().getHeader(DropboxResultHeader.DOWNLOADED_FILES.name());
Object body = exchange.getIn().getBody();
assertNotNull(header);
assertNotNull(body);
}
@Override
protected RouteBuilder createRouteBuilder() throws Exception {
return new RouteBuilder() {
public void configure() {
from("direct:start")
.to("dropbox://get?accessToken={{accessToken}}&clientIdentifier={{clientIdentifier}}&remotePath=/XXX")
.to("mock:result");
from("direct:start2")
.setHeader(DropboxConstants.HEADER_REMOTE_PATH, constant("/XXX"))
.to("dropbox://get?accessToken={{accessToken}}&clientIdentifier={{clientIdentifier}}")
.to("mock:result");
from("direct:start3")
.to("dropbox://get?accessToken={{accessToken}}&clientIdentifier={{clientIdentifier}}")
.to("mock:result");
from("direct:start4")
.setHeader(DropboxConstants.HEADER_REMOTE_PATH, constant("/XXX"))
.to("dropbox://get?accessToken={{accessToken}}&clientIdentifier={{clientIdentifier}}&remotePath=/aWrongPath")
.to("mock:result");
}
};
}
}
| apache-2.0 |
bogdandrutu/opencensus-java | contrib/appengine_standard_util/src/main/java/io/opencensus/contrib/appengine/standard/util/AppEngineCloudTraceContextUtils.java | 3787 | /*
* Copyright 2018, OpenCensus Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.opencensus.contrib.appengine.standard.util;
import static com.google.common.base.Preconditions.checkNotNull;
import com.google.apphosting.api.CloudTraceContext;
import com.google.common.annotations.VisibleForTesting;
import io.opencensus.trace.SpanContext;
import io.opencensus.trace.SpanId;
import io.opencensus.trace.TraceId;
import io.opencensus.trace.TraceOptions;
import io.opencensus.trace.Tracestate;
import java.nio.ByteBuffer;
/**
* Utility class to convert between {@link io.opencensus.trace.SpanContext} and {@link
* CloudTraceContext}.
*
* @since 0.14
*/
public final class AppEngineCloudTraceContextUtils {
private static final byte[] INVALID_TRACE_ID =
TraceIdProto.newBuilder().setHi(0).setLo(0).build().toByteArray();
private static final long INVALID_SPAN_ID = 0L;
private static final long INVALID_TRACE_MASK = 0L;
private static final Tracestate TRACESTATE_DEFAULT = Tracestate.builder().build();
@VisibleForTesting
static final CloudTraceContext INVALID_CLOUD_TRACE_CONTEXT =
new CloudTraceContext(INVALID_TRACE_ID, INVALID_SPAN_ID, INVALID_TRACE_MASK);
/**
* Converts AppEngine {@code CloudTraceContext} to {@code SpanContext}.
*
* @param cloudTraceContext the AppEngine {@code CloudTraceContext}.
* @return the converted {@code SpanContext}.
* @since 0.14
*/
public static SpanContext fromCloudTraceContext(CloudTraceContext cloudTraceContext) {
checkNotNull(cloudTraceContext, "cloudTraceContext");
try {
// Extract the trace ID from the binary protobuf CloudTraceContext#traceId.
TraceIdProto traceIdProto = TraceIdProto.parseFrom(cloudTraceContext.getTraceId());
ByteBuffer traceIdBuf = ByteBuffer.allocate(TraceId.SIZE);
traceIdBuf.putLong(traceIdProto.getHi());
traceIdBuf.putLong(traceIdProto.getLo());
ByteBuffer spanIdBuf = ByteBuffer.allocate(SpanId.SIZE);
spanIdBuf.putLong(cloudTraceContext.getSpanId());
return SpanContext.create(
TraceId.fromBytes(traceIdBuf.array()),
SpanId.fromBytes(spanIdBuf.array()),
TraceOptions.builder().setIsSampled(cloudTraceContext.isTraceEnabled()).build(),
TRACESTATE_DEFAULT);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw new RuntimeException(e);
}
}
/**
* Converts {@code SpanContext} to AppEngine {@code CloudTraceContext}.
*
* @param spanContext the {@code SpanContext}.
* @return the converted AppEngine {@code CloudTraceContext}.
* @since 0.14
*/
public static CloudTraceContext toCloudTraceContext(SpanContext spanContext) {
checkNotNull(spanContext, "spanContext");
ByteBuffer traceIdBuf = ByteBuffer.wrap(spanContext.getTraceId().getBytes());
TraceIdProto traceIdProto =
TraceIdProto.newBuilder().setHi(traceIdBuf.getLong()).setLo(traceIdBuf.getLong()).build();
ByteBuffer spanIdBuf = ByteBuffer.wrap(spanContext.getSpanId().getBytes());
return new CloudTraceContext(
traceIdProto.toByteArray(),
spanIdBuf.getLong(),
spanContext.getTraceOptions().isSampled() ? 1L : 0L);
}
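  // A minimal round-trip sketch (illustrative; "spanContext" stands for some
  // existing OpenCensus SpanContext and is not defined in this class):
  //
  //   CloudTraceContext ctc = toCloudTraceContext(spanContext);
  //   SpanContext roundTripped = fromCloudTraceContext(ctc);
  //   // roundTripped carries the same trace ID, span ID and sampling bit,
  //   // paired with the default (empty) tracestate.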
private AppEngineCloudTraceContextUtils() {}
}
| apache-2.0 |
kingthorin/zap-extensions | addOns/ascanrulesBeta/src/main/java/org/zaproxy/zap/extension/ascanrulesBeta/HexString.java | 1851 | /*
* Zed Attack Proxy (ZAP) and its related class files.
*
* ZAP is an HTTP/HTTPS proxy for assessing web application security.
*
* Copyright 2019 The ZAP Development Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.zaproxy.zap.extension.ascanrulesBeta;
import java.nio.charset.StandardCharsets;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/** Duplicated from Replacer addon. */
class HexString {
private static final Pattern HEX_VALUE = Pattern.compile("\\\\?\\\\x\\p{XDigit}{2}");
private static final String ESCAPED_ESCAPE_CHAR = "\\\\";
static String compile(String binaryRegex) {
Matcher matcher = HEX_VALUE.matcher(binaryRegex);
StringBuffer sb = new StringBuffer();
while (matcher.find()) {
String value = matcher.group();
if (!value.startsWith(ESCAPED_ESCAPE_CHAR)) {
value = convertByte(value.substring(2));
}
matcher.appendReplacement(sb, value);
}
matcher.appendTail(sb);
return sb.toString();
}
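    // Illustrative behaviour (hypothetical inputs, shown as runtime string
    // values rather than Java literals):
    //   compile("\x48\x69!")  -> "Hi!"  (each \xNN becomes its ASCII character)
    //   compile("\\x48")      -> kept as-is, because the extra backslash
    //                            escapes the sequence.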
private static String convertByte(String value) {
return Matcher.quoteReplacement(
new String(
new byte[] {(byte) Integer.parseInt(value, 16)},
StandardCharsets.US_ASCII));
}
}
| apache-2.0 |
vitesse-ftian/dgtools | tpcds/tpcds_tools/tools/s_subcategory.h | 2134 | /*
* Legal Notice
*
* This document and associated source code (the "Work") is a part of a
* benchmark specification maintained by the TPC.
*
* The TPC reserves all right, title, and interest to the Work as provided
* under U.S. and international laws, including without limitation all patent
* and trademark rights therein.
*
* No Warranty
*
* 1.1 TO THE MAXIMUM EXTENT PERMITTED BY APPLICABLE LAW, THE INFORMATION
* CONTAINED HEREIN IS PROVIDED "AS IS" AND WITH ALL FAULTS, AND THE
* AUTHORS AND DEVELOPERS OF THE WORK HEREBY DISCLAIM ALL OTHER
* WARRANTIES AND CONDITIONS, EITHER EXPRESS, IMPLIED OR STATUTORY,
* INCLUDING, BUT NOT LIMITED TO, ANY (IF ANY) IMPLIED WARRANTIES,
* DUTIES OR CONDITIONS OF MERCHANTABILITY, OF FITNESS FOR A PARTICULAR
* PURPOSE, OF ACCURACY OR COMPLETENESS OF RESPONSES, OF RESULTS, OF
* WORKMANLIKE EFFORT, OF LACK OF VIRUSES, AND OF LACK OF NEGLIGENCE.
* ALSO, THERE IS NO WARRANTY OR CONDITION OF TITLE, QUIET ENJOYMENT,
* QUIET POSSESSION, CORRESPONDENCE TO DESCRIPTION OR NON-INFRINGEMENT
* WITH REGARD TO THE WORK.
* 1.2 IN NO EVENT WILL ANY AUTHOR OR DEVELOPER OF THE WORK BE LIABLE TO
* ANY OTHER PARTY FOR ANY DAMAGES, INCLUDING BUT NOT LIMITED TO THE
* COST OF PROCURING SUBSTITUTE GOODS OR SERVICES, LOST PROFITS, LOSS
* OF USE, LOSS OF DATA, OR ANY INCIDENTAL, CONSEQUENTIAL, DIRECT,
* INDIRECT, OR SPECIAL DAMAGES WHETHER UNDER CONTRACT, TORT, WARRANTY,
* OR OTHERWISE, ARISING IN ANY WAY OUT OF THIS OR ANY OTHER AGREEMENT
* RELATING TO THE WORK, WHETHER OR NOT SUCH AUTHOR OR DEVELOPER HAD
* ADVANCE NOTICE OF THE POSSIBILITY OF SUCH DAMAGES.
*
* Contributors:
* Gradient Systems
*/
#ifndef S_SUBCATEGORY_H
#define S_SUBCATEGORY_H
#define RS_S_SBCT_NAME 30
#define RS_S_SBCT_DESC 100
struct S_SUBCATEGORY_TBL {
ds_key_t kID;
ds_key_t kCategoryID;
char szName[RS_S_SBCT_NAME + 1];
char szDesc[RS_S_SBCT_DESC + 1];
};
int mk_s_subcategory(void *pDest, ds_key_t kIndex);
int pr_s_subcategory(void *pSrc);
int ld_s_subcategory(void *pSrc);
#endif
| apache-2.0 |
deltaprojects/druid | processing/src/main/java/org/apache/druid/query/aggregation/post/ExpressionPostAggregator.java | 8546 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.druid.query.aggregation.post;
import com.fasterxml.jackson.annotation.JacksonInject;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.google.common.base.Function;
import com.google.common.base.Preconditions;
import com.google.common.base.Supplier;
import com.google.common.base.Suppliers;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import org.apache.druid.java.util.common.guava.Comparators;
import org.apache.druid.math.expr.Expr;
import org.apache.druid.math.expr.ExprMacroTable;
import org.apache.druid.math.expr.Parser;
import org.apache.druid.query.aggregation.AggregatorFactory;
import org.apache.druid.query.aggregation.PostAggregator;
import org.apache.druid.query.cache.CacheKeyBuilder;
import org.apache.druid.utils.CollectionUtils;
import javax.annotation.Nullable;
import java.util.Comparator;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
public class ExpressionPostAggregator implements PostAggregator
{
private static final Comparator<Comparable> DEFAULT_COMPARATOR = Comparator.nullsFirst(
(Comparable o1, Comparable o2) -> {
if (o1 instanceof Long && o2 instanceof Long) {
return Long.compare((long) o1, (long) o2);
} else if (o1 instanceof Number && o2 instanceof Number) {
return Double.compare(((Number) o1).doubleValue(), ((Number) o2).doubleValue());
} else {
return o1.compareTo(o2);
}
}
);
private final String name;
private final String expression;
private final Comparator<Comparable> comparator;
private final String ordering;
private final ExprMacroTable macroTable;
private final Map<String, Function<Object, Object>> finalizers;
private final Supplier<Expr> parsed;
private final Supplier<Set<String>> dependentFields;
/**
* Constructor for serialization.
*/
@JsonCreator
public ExpressionPostAggregator(
@JsonProperty("name") String name,
@JsonProperty("expression") String expression,
@JsonProperty("ordering") @Nullable String ordering,
@JacksonInject ExprMacroTable macroTable
)
{
this(name, expression, ordering, macroTable, ImmutableMap.of());
}
/**
* Constructor for {@link #decorate(Map)}.
*/
private ExpressionPostAggregator(
final String name,
final String expression,
@Nullable final String ordering,
final ExprMacroTable macroTable,
final Map<String, Function<Object, Object>> finalizers
)
{
this(
name,
expression,
ordering,
macroTable,
finalizers,
Suppliers.memoize(() -> Parser.parse(expression, macroTable))
);
}
private ExpressionPostAggregator(
final String name,
final String expression,
@Nullable final String ordering,
final ExprMacroTable macroTable,
final Map<String, Function<Object, Object>> finalizers,
final Supplier<Expr> parsed
)
{
this(
name,
expression,
ordering,
macroTable,
finalizers,
parsed,
Suppliers.memoize(() -> parsed.get().analyzeInputs().getRequiredBindings()));
}
private ExpressionPostAggregator(
final String name,
final String expression,
@Nullable final String ordering,
final ExprMacroTable macroTable,
final Map<String, Function<Object, Object>> finalizers,
final Supplier<Expr> parsed,
final Supplier<Set<String>> dependentFields
)
{
Preconditions.checkArgument(expression != null, "expression cannot be null");
this.name = name;
this.expression = expression;
this.ordering = ordering;
this.comparator = ordering == null ? DEFAULT_COMPARATOR : Ordering.valueOf(ordering);
this.macroTable = macroTable;
this.finalizers = finalizers;
this.parsed = parsed;
this.dependentFields = dependentFields;
}
@Override
public Set<String> getDependentFields()
{
return dependentFields.get();
}
@Override
public Comparator getComparator()
{
return comparator;
}
@Override
public Object compute(Map<String, Object> values)
{
// Maps.transformEntries is lazy, will only finalize values we actually read.
final Map<String, Object> finalizedValues = Maps.transformEntries(
values,
(String k, Object v) -> {
final Function<Object, Object> finalizer = finalizers.get(k);
return finalizer != null ? finalizer.apply(v) : v;
}
);
return parsed.get().eval(Parser.withMap(finalizedValues)).value();
}
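  // A small sketch of what compute() sees (hypothetical field names and
  // values, with no finalizers registered):
  //   expression = "sum / count", values = {"sum" -> 25.0, "count" -> 10L}
  //   compute(values) evaluates the memoized parsed expression against the
  //   (lazily finalized) map and returns 2.5.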
@Override
@JsonProperty
public String getName()
{
return name;
}
@Override
public ExpressionPostAggregator decorate(final Map<String, AggregatorFactory> aggregators)
{
return new ExpressionPostAggregator(
name,
expression,
ordering,
macroTable,
CollectionUtils.mapValues(aggregators, aggregatorFactory -> obj -> aggregatorFactory.finalizeComputation(obj)),
parsed,
dependentFields
);
}
@JsonProperty("expression")
public String getExpression()
{
return expression;
}
@JsonProperty("ordering")
public String getOrdering()
{
return ordering;
}
@Override
public String toString()
{
return "ExpressionPostAggregator{" +
"name='" + name + '\'' +
", expression='" + expression + '\'' +
", ordering=" + ordering +
'}';
}
@Override
public byte[] getCacheKey()
{
return new CacheKeyBuilder(PostAggregatorIds.EXPRESSION)
.appendString(expression)
.appendString(ordering)
.build();
}
public enum Ordering implements Comparator<Comparable>
{
/**
* Ensures the following order: numeric > NaN > Infinite.
*
* The name may be referenced via Ordering.valueOf(String) in the constructor {@link
* ExpressionPostAggregator#ExpressionPostAggregator(String, String, String, ExprMacroTable, Map)}.
*/
@SuppressWarnings("unused")
numericFirst {
@Override
public int compare(Comparable lhs, Comparable rhs)
{
if (lhs instanceof Long && rhs instanceof Long) {
return Long.compare(((Number) lhs).longValue(), ((Number) rhs).longValue());
} else if (lhs instanceof Number && rhs instanceof Number) {
double d1 = ((Number) lhs).doubleValue();
double d2 = ((Number) rhs).doubleValue();
if (Double.isFinite(d1) && !Double.isFinite(d2)) {
return 1;
}
if (!Double.isFinite(d1) && Double.isFinite(d2)) {
return -1;
}
return Double.compare(d1, d2);
} else {
return Comparators.<Comparable>naturalNullsFirst().compare(lhs, rhs);
}
}
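      // Worked examples of the ordering above (illustrative):
      //   compare(1.0, Double.NaN) > 0                       // finite beats NaN
      //   compare(Double.NaN, Double.POSITIVE_INFINITY) > 0  // via Double.compare
      // so an ascending sort places infinities first, then NaN, then finite values.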
}
}
@Override
public boolean equals(Object o)
{
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
ExpressionPostAggregator that = (ExpressionPostAggregator) o;
if (!comparator.equals(that.comparator)) {
return false;
}
if (!Objects.equals(name, that.name)) {
return false;
}
if (!Objects.equals(expression, that.expression)) {
return false;
}
if (!Objects.equals(ordering, that.ordering)) {
return false;
}
return true;
}
@Override
public int hashCode()
{
int result = name != null ? name.hashCode() : 0;
result = 31 * result + expression.hashCode();
result = 31 * result + comparator.hashCode();
result = 31 * result + (ordering != null ? ordering.hashCode() : 0);
return result;
}
}
| apache-2.0 |
DoudTechData/ignite | modules/yardstick/src/main/java/org/apache/ignite/yardstick/IgniteNode.java | 7875 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.yardstick;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.Map;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.IgniteSpring;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.eviction.lru.LruEvictionPolicy;
import org.apache.ignite.configuration.CacheConfiguration;
import org.apache.ignite.configuration.ConnectorConfiguration;
import org.apache.ignite.configuration.IgniteConfiguration;
import org.apache.ignite.configuration.NearCacheConfiguration;
import org.apache.ignite.configuration.TransactionConfiguration;
import org.apache.ignite.internal.util.IgniteUtils;
import org.apache.ignite.lang.IgniteBiTuple;
import org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi;
import org.springframework.beans.BeansException;
import org.springframework.beans.factory.xml.XmlBeanDefinitionReader;
import org.springframework.context.ApplicationContext;
import org.springframework.context.support.GenericApplicationContext;
import org.springframework.core.io.UrlResource;
import org.yardstickframework.BenchmarkConfiguration;
import org.yardstickframework.BenchmarkServer;
import org.yardstickframework.BenchmarkUtils;
import static org.apache.ignite.cache.CacheMemoryMode.OFFHEAP_VALUES;
import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_MARSHALLER;
/**
* Standalone Ignite node.
*/
public class IgniteNode implements BenchmarkServer {
/** Grid instance. */
private Ignite ignite;
/** Client mode. */
private boolean clientMode;
/** */
public IgniteNode() {
// No-op.
}
/** */
public IgniteNode(boolean clientMode) {
this.clientMode = clientMode;
}
/** */
public IgniteNode(boolean clientMode, Ignite ignite) {
this.clientMode = clientMode;
this.ignite = ignite;
}
/** {@inheritDoc} */
@Override public void start(BenchmarkConfiguration cfg) throws Exception {
IgniteBenchmarkArguments args = new IgniteBenchmarkArguments();
BenchmarkUtils.jcommander(cfg.commandLineArguments(), args, "<ignite-node>");
IgniteBiTuple<IgniteConfiguration, ? extends ApplicationContext> tup = loadConfiguration(args.configuration());
IgniteConfiguration c = tup.get1();
assert c != null;
ApplicationContext appCtx = tup.get2();
assert appCtx != null;
for (CacheConfiguration cc : c.getCacheConfiguration()) {
// IgniteNode cannot run in CLIENT_ONLY mode,
// except when it's used inside IgniteAbstractBenchmark.
boolean cl = args.isClientOnly() && (args.isNearCache() || clientMode);
if (cl)
c.setClientMode(true);
if (args.isNearCache()) {
NearCacheConfiguration nearCfg = new NearCacheConfiguration();
if (args.getNearCacheSize() != 0)
nearCfg.setNearEvictionPolicy(new LruEvictionPolicy(args.getNearCacheSize()));
cc.setNearConfiguration(nearCfg);
}
cc.setWriteSynchronizationMode(args.syncMode());
if (args.orderMode() != null)
cc.setAtomicWriteOrderMode(args.orderMode());
cc.setBackups(args.backups());
if (args.restTcpPort() != 0) {
ConnectorConfiguration ccc = new ConnectorConfiguration();
ccc.setPort(args.restTcpPort());
if (args.restTcpHost() != null)
ccc.setHost(args.restTcpHost());
c.setConnectorConfiguration(ccc);
}
if (args.isOffHeap()) {
cc.setOffHeapMaxMemory(0);
if (args.isOffheapValues())
cc.setMemoryMode(OFFHEAP_VALUES);
else
cc.setEvictionPolicy(new LruEvictionPolicy(50000));
}
cc.setReadThrough(args.isStoreEnabled());
cc.setWriteThrough(args.isStoreEnabled());
cc.setWriteBehindEnabled(args.isWriteBehind());
BenchmarkUtils.println(cfg, "Cache configured with the following parameters: " + cc);
}
TransactionConfiguration tc = c.getTransactionConfiguration();
tc.setDefaultTxConcurrency(args.txConcurrency());
tc.setDefaultTxIsolation(args.txIsolation());
TcpCommunicationSpi commSpi = (TcpCommunicationSpi)c.getCommunicationSpi();
if (commSpi == null)
commSpi = new TcpCommunicationSpi();
c.setCommunicationSpi(commSpi);
ignite = IgniteSpring.start(c, appCtx);
BenchmarkUtils.println("Configured marshaller: " + ignite.cluster().localNode().attribute(ATTR_MARSHALLER));
}
/**
* @param springCfgPath Spring configuration file path.
* @return Tuple with grid configuration and Spring application context.
* @throws Exception If failed.
*/
private static IgniteBiTuple<IgniteConfiguration, ? extends ApplicationContext> loadConfiguration(String springCfgPath)
throws Exception {
URL url;
try {
url = new URL(springCfgPath);
}
catch (MalformedURLException e) {
url = IgniteUtils.resolveIgniteUrl(springCfgPath);
if (url == null)
throw new IgniteCheckedException("Spring XML configuration path is invalid: " + springCfgPath +
". Note that this path should be either absolute or a relative local file system path, " +
"relative to META-INF in classpath or valid URL to IGNITE_HOME.", e);
}
GenericApplicationContext springCtx;
try {
springCtx = new GenericApplicationContext();
new XmlBeanDefinitionReader(springCtx).loadBeanDefinitions(new UrlResource(url));
springCtx.refresh();
}
catch (BeansException e) {
throw new Exception("Failed to instantiate Spring XML application context [springUrl=" +
url + ", err=" + e.getMessage() + ']', e);
}
Map<String, IgniteConfiguration> cfgMap;
try {
cfgMap = springCtx.getBeansOfType(IgniteConfiguration.class);
}
catch (BeansException e) {
throw new Exception("Failed to instantiate bean [type=" + IgniteConfiguration.class + ", err=" +
e.getMessage() + ']', e);
}
if (cfgMap == null || cfgMap.isEmpty())
throw new Exception("Failed to find ignite configuration in: " + url);
return new IgniteBiTuple<>(cfgMap.values().iterator().next(), springCtx);
}
/** {@inheritDoc} */
@Override public void stop() throws Exception {
Ignition.stopAll(true);
}
/** {@inheritDoc} */
@Override public String usage() {
return BenchmarkUtils.usage(new IgniteBenchmarkArguments());
}
/**
* @return Ignite.
*/
public Ignite ignite() {
return ignite;
}
}
| apache-2.0 |
eclipse/gemini.blueprint | core/src/main/java/org/eclipse/gemini/blueprint/service/importer/support/internal/aop/StaticServiceReferenceProxy.java | 2728 | /******************************************************************************
* Copyright (c) 2006, 2010 VMware Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* and Apache License v2.0 which accompanies this distribution.
* The Eclipse Public License is available at
* http://www.eclipse.org/legal/epl-v10.html and the Apache License v2.0
* is available at http://www.opensource.org/licenses/apache2.0.php.
* You may elect to redistribute this code under either of these licenses.
*
* Contributors:
* VMware Inc.
*****************************************************************************/
package org.eclipse.gemini.blueprint.service.importer.support.internal.aop;
import org.eclipse.gemini.blueprint.service.importer.ServiceReferenceProxy;
import org.eclipse.gemini.blueprint.service.importer.support.internal.util.ServiceComparatorUtil;
import org.osgi.framework.Bundle;
import org.osgi.framework.ServiceReference;
import org.springframework.util.Assert;
/**
 * Simple {@link ServiceReference} proxy which simply does delegation, without any extra features. Its main purpose is
 * to allow consistent behaviour between dynamic and static proxies.
*
* @author Costin Leau
*
*/
public class StaticServiceReferenceProxy implements ServiceReferenceProxy {
private static final int HASH_CODE = StaticServiceReferenceProxy.class.hashCode() * 13;
private final ServiceReference target;
/**
* Constructs a new <code>StaticServiceReferenceProxy</code> instance.
*
* @param target service reference
*/
public StaticServiceReferenceProxy(ServiceReference target) {
Assert.notNull(target);
this.target = target;
}
public Bundle getBundle() {
return target.getBundle();
}
public Object getProperty(String key) {
return target.getProperty(key);
}
public String[] getPropertyKeys() {
return target.getPropertyKeys();
}
public Bundle[] getUsingBundles() {
return target.getUsingBundles();
}
public boolean isAssignableTo(Bundle bundle, String className) {
return target.isAssignableTo(bundle, className);
}
public ServiceReference getTargetServiceReference() {
return target;
}
public boolean equals(Object obj) {
if (obj instanceof StaticServiceReferenceProxy) {
StaticServiceReferenceProxy other = (StaticServiceReferenceProxy) obj;
return (target.equals(other.target));
}
return false;
}
public int hashCode() {
return HASH_CODE + target.hashCode();
}
public int compareTo(Object other) {
return ServiceComparatorUtil.compare(target, other);
}
} | apache-2.0 |
jkbradley/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/sources/RateStreamMicroBatchStream.scala | 7562 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming.sources
import java.io._
import java.nio.charset.StandardCharsets
import java.util.concurrent.TimeUnit
import org.apache.commons.io.IOUtils
import org.apache.spark.internal.Logging
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.util.DateTimeUtils
import org.apache.spark.sql.connector.read.{InputPartition, PartitionReader, PartitionReaderFactory}
import org.apache.spark.sql.connector.read.streaming.{MicroBatchStream, Offset}
import org.apache.spark.sql.execution.streaming._
import org.apache.spark.sql.util.CaseInsensitiveStringMap
import org.apache.spark.util.{ManualClock, SystemClock}
class RateStreamMicroBatchStream(
rowsPerSecond: Long,
// The default values here are used in tests.
rampUpTimeSeconds: Long = 0,
numPartitions: Int = 1,
options: CaseInsensitiveStringMap,
checkpointLocation: String)
extends MicroBatchStream with Logging {
import RateStreamProvider._
private[sources] val clock = {
// The option to use a manual clock is provided only for unit testing purposes.
if (options.getBoolean("useManualClock", false)) new ManualClock else new SystemClock
}
private val maxSeconds = Long.MaxValue / rowsPerSecond
if (rampUpTimeSeconds > maxSeconds) {
throw new ArithmeticException(
s"Integer overflow. Max offset with $rowsPerSecond rowsPerSecond" +
s" is $maxSeconds, but 'rampUpTimeSeconds' is $rampUpTimeSeconds.")
}
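  // Illustrative bound (hypothetical figures): with rowsPerSecond = 1000,
  // maxSeconds = Long.MaxValue / 1000 ~= 9.2e15, so any offset or ramp-up
  // beyond that second count would overflow rowsPerSecond * seconds and is
  // rejected eagerly here and again in planInputPartitions.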
private[sources] val creationTimeMs = {
val session = SparkSession.getActiveSession.orElse(SparkSession.getDefaultSession)
require(session.isDefined)
val metadataLog =
new HDFSMetadataLog[LongOffset](session.get, checkpointLocation) {
override def serialize(metadata: LongOffset, out: OutputStream): Unit = {
val writer = new BufferedWriter(new OutputStreamWriter(out, StandardCharsets.UTF_8))
writer.write("v" + VERSION + "\n")
writer.write(metadata.json)
writer.flush
}
override def deserialize(in: InputStream): LongOffset = {
val content = IOUtils.toString(new InputStreamReader(in, StandardCharsets.UTF_8))
// HDFSMetadataLog guarantees that it never creates a partial file.
assert(content.length != 0)
if (content(0) == 'v') {
val indexOfNewLine = content.indexOf("\n")
if (indexOfNewLine > 0) {
validateVersion(content.substring(0, indexOfNewLine), VERSION)
LongOffset(SerializedOffset(content.substring(indexOfNewLine + 1)))
} else {
throw new IllegalStateException(
s"Log file was malformed: failed to detect the log file version line.")
}
} else {
throw new IllegalStateException(
s"Log file was malformed: failed to detect the log file version line.")
}
}
}
metadataLog.get(0).getOrElse {
val offset = LongOffset(clock.getTimeMillis())
metadataLog.add(0, offset)
logInfo(s"Start time: $offset")
offset
}.offset
}
@volatile private var lastTimeMs: Long = creationTimeMs
override def initialOffset(): Offset = LongOffset(0L)
override def latestOffset(): Offset = {
val now = clock.getTimeMillis()
if (lastTimeMs < now) {
lastTimeMs = now
}
LongOffset(TimeUnit.MILLISECONDS.toSeconds(lastTimeMs - creationTimeMs))
}
override def deserializeOffset(json: String): Offset = {
LongOffset(json.toLong)
}
override def planInputPartitions(start: Offset, end: Offset): Array[InputPartition] = {
val startSeconds = start.asInstanceOf[LongOffset].offset
val endSeconds = end.asInstanceOf[LongOffset].offset
assert(startSeconds <= endSeconds, s"startSeconds($startSeconds) > endSeconds($endSeconds)")
if (endSeconds > maxSeconds) {
throw new ArithmeticException("Integer overflow. Max offset with " +
s"$rowsPerSecond rowsPerSecond is $maxSeconds, but it's $endSeconds now.")
}
// Fix "lastTimeMs" for recovery
if (lastTimeMs < TimeUnit.SECONDS.toMillis(endSeconds) + creationTimeMs) {
lastTimeMs = TimeUnit.SECONDS.toMillis(endSeconds) + creationTimeMs
}
val rangeStart = valueAtSecond(startSeconds, rowsPerSecond, rampUpTimeSeconds)
val rangeEnd = valueAtSecond(endSeconds, rowsPerSecond, rampUpTimeSeconds)
logDebug(s"startSeconds: $startSeconds, endSeconds: $endSeconds, " +
s"rangeStart: $rangeStart, rangeEnd: $rangeEnd")
if (rangeStart == rangeEnd) {
return Array.empty
}
val localStartTimeMs = creationTimeMs + TimeUnit.SECONDS.toMillis(startSeconds)
val relativeMsPerValue =
TimeUnit.SECONDS.toMillis(endSeconds - startSeconds).toDouble / (rangeEnd - rangeStart)
(0 until numPartitions).map { p =>
RateStreamMicroBatchInputPartition(
p, numPartitions, rangeStart, rangeEnd, localStartTimeMs, relativeMsPerValue)
}.toArray
}
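  // Worked example (illustrative, not part of the original class): with
  // rampUpTimeSeconds == 0, valueAtSecond(s, rowsPerSecond, 0) == s * rowsPerSecond,
  // so relativeMsPerValue == 1000.0 / rowsPerSecond. For rowsPerSecond = 10 and
  // numPartitions = 2, partition 1 emits every second value of the range, and
  // value v is stamped at localStartTimeMs + round((v - rangeStart) * 100) ms.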
override def createReaderFactory(): PartitionReaderFactory = {
RateStreamMicroBatchReaderFactory
}
override def commit(end: Offset): Unit = {}
override def stop(): Unit = {}
override def toString: String = s"RateStreamV2[rowsPerSecond=$rowsPerSecond, " +
s"rampUpTimeSeconds=$rampUpTimeSeconds, " +
s"numPartitions=${options.getOrDefault(NUM_PARTITIONS, "default")}"
}
case class RateStreamMicroBatchInputPartition(
partitionId: Int,
numPartitions: Int,
rangeStart: Long,
rangeEnd: Long,
localStartTimeMs: Long,
relativeMsPerValue: Double) extends InputPartition
object RateStreamMicroBatchReaderFactory extends PartitionReaderFactory {
override def createReader(partition: InputPartition): PartitionReader[InternalRow] = {
val p = partition.asInstanceOf[RateStreamMicroBatchInputPartition]
new RateStreamMicroBatchPartitionReader(p.partitionId, p.numPartitions, p.rangeStart,
p.rangeEnd, p.localStartTimeMs, p.relativeMsPerValue)
}
}
class RateStreamMicroBatchPartitionReader(
partitionId: Int,
numPartitions: Int,
rangeStart: Long,
rangeEnd: Long,
localStartTimeMs: Long,
relativeMsPerValue: Double) extends PartitionReader[InternalRow] {
private var count: Long = 0
override def next(): Boolean = {
rangeStart + partitionId + numPartitions * count < rangeEnd
}
override def get(): InternalRow = {
val currValue = rangeStart + partitionId + numPartitions * count
count += 1
val relative = math.round((currValue - rangeStart) * relativeMsPerValue)
InternalRow(DateTimeUtils.fromMillis(relative + localStartTimeMs), currValue)
}
override def close(): Unit = {}
}
| apache-2.0 |
kruzda/elasticsearch | cookbooks/rax-elasticsearch/README.md | 198 | rax-elasticsearch Cookbook
==========================
An Elasticsearch cookbook wrapper for Heat deployment.
License and Authors
-------------------
Authors: Jason Boyles <[email protected]>
| apache-2.0 |
hfp/tensorflow-xsmm | tensorflow/python/kernel_tests/conv_ops_3d_test.py | 27016 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for 3d convolutional operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
def GetTestConfigs():
"""Get all the valid tests configs to run.
Returns:
all the valid test configs as tuples of data_format and use_gpu.
"""
test_configs = [("NDHWC", False), ("NDHWC", True)]
if test.is_gpu_available(cuda_only=True):
# "NCDHW" format is only supported on CUDA.
test_configs += [("NCDHW", True)]
return test_configs
class Conv3DTest(test.TestCase):
def _DtypesToTest(self, use_gpu):
if use_gpu:
if not test_util.CudaSupportsHalfMatMulAndConv():
return [dtypes.float64, dtypes.float32]
else:
# It is important that float32 comes before float16 here,
# as we will be using its gradients as reference for fp16 gradients.
return [dtypes.float64, dtypes.float32, dtypes.float16]
else:
return [dtypes.float64, dtypes.float32, dtypes.float16]
def _SetupValuesForDevice(self, tensor_in_sizes, filter_in_sizes, stride,
padding, data_format, dtype, use_gpu):
total_size_tensor = 1
total_size_filter = 1
for s in tensor_in_sizes:
total_size_tensor *= s
for s in filter_in_sizes:
total_size_filter *= s
# Initializes the input tensor with array containing numbers from 0 to 1.
# We keep the input tensor values fairly small to avoid overflowing float16
# during the conv3d.
x1 = [f * 1.0 / total_size_tensor for f in range(1, total_size_tensor + 1)]
x2 = [f * 1.0 / total_size_filter for f in range(1, total_size_filter + 1)]
with self.cached_session(use_gpu=use_gpu):
t1 = constant_op.constant(x1, shape=tensor_in_sizes, dtype=dtype)
t2 = constant_op.constant(x2, shape=filter_in_sizes, dtype=dtype)
if isinstance(stride, collections.Iterable):
strides = [1] + list(stride) + [1]
else:
strides = [1, stride, stride, stride, 1]
if data_format == "NCDHW":
t1 = test_util.NHWCToNCHW(t1)
strides = test_util.NHWCToNCHW(strides)
conv = nn_ops.conv3d(t1, t2, strides, padding=padding,
data_format=data_format)
if data_format == "NCDHW":
conv = test_util.NCHWToNHWC(conv)
return conv
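  # For example (illustrative): tensor_in_sizes=[1, 2, 3, 1, 3] gives
  # total_size_tensor = 18, so x1 = [1/18, 2/18, ..., 18/18]. Keeping every
  # input element in (0, 1] is what lets the float16 configurations above
  # avoid overflow while accumulating products inside conv3d.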
def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, stride, padding,
expected):
results = []
for data_format, use_gpu in GetTestConfigs():
for dtype in self._DtypesToTest(use_gpu):
result = self._SetupValuesForDevice(
tensor_in_sizes,
filter_in_sizes,
stride,
padding,
data_format,
dtype,
use_gpu=use_gpu)
results.append(result)
    with self.cached_session():
values = self.evaluate(results)
for value in values:
print("expected = ", expected)
print("actual = ", value)
tol = 1e-6
if value.dtype == np.float16:
tol = 1e-3
self.assertAllClose(expected, value.flatten(), atol=tol, rtol=tol)
def _ComputeReferenceDilatedConv(self, tensor_in_sizes, filter_in_sizes,
stride, dilation, padding, data_format,
use_gpu):
total_size_tensor = 1
total_size_filter = 1
for s in tensor_in_sizes:
total_size_tensor *= s
for s in filter_in_sizes:
total_size_filter *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x1 = [f * 1.0 for f in range(1, total_size_tensor + 1)]
x2 = [f * 1.0 for f in range(1, total_size_filter + 1)]
with self.cached_session(use_gpu=use_gpu):
t1 = constant_op.constant(x1, shape=tensor_in_sizes)
t2 = constant_op.constant(x2, shape=filter_in_sizes)
if isinstance(stride, collections.Iterable):
strides = list(stride)
else:
strides = [stride, stride, stride]
if data_format == "NCDHW":
t1 = test_util.NHWCToNCHW(t1)
full_strides = [1, 1] + strides
full_dilation = [1, 1] + dilation
else:
full_strides = [1] + strides + [1]
full_dilation = [1] + dilation + [1]
expected = nn_ops.convolution(
t1,
t2,
padding=padding,
strides=strides,
dilation_rate=dilation,
data_format=data_format)
computed = nn_ops.conv3d(
t1,
t2,
strides=full_strides,
dilations=full_dilation,
padding=padding,
data_format=data_format)
if data_format == "NCDHW":
expected = test_util.NCHWToNHWC(expected)
computed = test_util.NCHWToNHWC(computed)
return expected, computed
def _VerifyDilatedConvValues(self, tensor_in_sizes, filter_in_sizes, stride,
padding, dilations):
expected_results = []
computed_results = []
default_dilations = (
dilations[0] == 1 and dilations[1] == 1 and dilations[2] == 1)
for data_format, use_gpu in GetTestConfigs():
# If any dilation rate is larger than 1, only do test on the GPU
# because we currently do not have a CPU implementation for arbitrary
# dilation rates.
if default_dilations or use_gpu:
expected, computed = self._ComputeReferenceDilatedConv(
tensor_in_sizes, filter_in_sizes, stride, dilations, padding,
data_format, use_gpu)
expected_results.append(expected)
computed_results.append(computed)
tolerance = 1e-2 if use_gpu else 1e-5
        with self.cached_session():
expected_values = self.evaluate(expected_results)
computed_values = self.evaluate(computed_results)
for e_value, c_value in zip(expected_values, computed_values):
print("expected = ", e_value)
print("actual = ", c_value)
self.assertAllClose(
e_value.flatten(), c_value.flatten(), atol=tolerance, rtol=1e-6)
def testConv3D1x1x1Filter(self):
expected_output = [
0.18518519, 0.22222222, 0.25925926, 0.40740741, 0.5, 0.59259259,
0.62962963, 0.77777778, 0.92592593, 0.85185185, 1.05555556, 1.25925926,
1.07407407, 1.33333333, 1.59259259, 1.2962963, 1.61111111, 1.92592593
]
# These are equivalent to the Conv2D1x1 case.
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 1, 3],
filter_in_sizes=[1, 1, 1, 3, 3],
stride=1,
padding="VALID",
expected=expected_output)
self._VerifyValues(
tensor_in_sizes=[1, 2, 1, 3, 3],
filter_in_sizes=[1, 1, 1, 3, 3],
stride=1,
padding="VALID",
expected=expected_output)
self._VerifyValues(
tensor_in_sizes=[1, 1, 2, 3, 3],
filter_in_sizes=[1, 1, 1, 3, 3],
stride=1,
padding="VALID",
expected=expected_output)
def testConv3D1x1x1Filter2x1x1Dilation(self):
if test.is_gpu_available(cuda_only=True):
self._VerifyDilatedConvValues(
tensor_in_sizes=[1, 3, 6, 1, 1],
filter_in_sizes=[1, 1, 1, 1, 1],
stride=1,
padding="VALID",
dilations=[2, 1, 1])
# Expected values computed using scipy's correlate function.
def testConv3D2x2x2Filter(self):
expected_output = [
3.77199074, 3.85069444, 3.92939815, 4.2650463, 4.35763889, 4.45023148,
6.73032407, 6.89236111, 7.05439815, 7.22337963, 7.39930556, 7.57523148,
9.68865741, 9.93402778, 10.17939815, 10.18171296, 10.44097222,
10.70023148
]
    # expected_shape = [1, 3, 1, 2, 3]
self._VerifyValues(
tensor_in_sizes=[1, 4, 2, 3, 3], # b, z, y, x, fin
filter_in_sizes=[2, 2, 2, 3, 3], # z, y, x, fin, fout
stride=1,
padding="VALID",
expected=expected_output)
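  # A minimal sketch (not from the original test) of reproducing a VALID 3-D
  # correlation by hand for the single-channel case, mirroring how the
  # expected values above were derived with scipy:
  #
  #   import numpy as np
  #   x = np.arange(1.0, 28.0).reshape(3, 3, 3)  # 3x3x3 input volume
  #   w = np.arange(1.0, 9.0).reshape(2, 2, 2)   # 2x2x2 filter
  #   out = np.zeros((2, 2, 2))
  #   for z in range(2):
  #     for y in range(2):
  #       for c in range(2):
  #         out[z, y, c] = np.sum(x[z:z + 2, y:y + 2, c:c + 2] * w)
  #   # out matches scipy.signal.correlate(x, w, mode="valid")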
def testConv3D2x2x2Filter1x2x1Dilation(self):
if test.is_gpu_available(cuda_only=True):
self._VerifyDilatedConvValues(
tensor_in_sizes=[1, 4, 6, 3, 1],
filter_in_sizes=[2, 2, 2, 1, 1],
stride=1,
padding="VALID",
dilations=[1, 2, 1])
def testConv3DStrides(self):
expected_output = [
0.06071429, 0.08988095, 0.10238095, 0.11488095, 0.12738095, 0.13988095,
0.08452381, 0.26071429, 0.35238095, 0.36488095, 0.37738095, 0.38988095,
0.40238095, 0.23452381, 0.46071429, 0.61488095, 0.62738095, 0.63988095,
0.65238095, 0.66488095, 0.38452381, 1.12738095, 1.48988095, 1.50238095,
1.51488095, 1.52738095, 1.53988095, 0.88452381, 1.32738095, 1.75238095,
1.76488095, 1.77738095, 1.78988095, 1.80238095, 1.03452381, 1.52738095,
2.01488095, 2.02738095, 2.03988095, 2.05238095, 2.06488095, 1.18452381,
2.19404762, 2.88988095, 2.90238095, 2.91488095, 2.92738095, 2.93988095,
1.68452381, 2.39404762, 3.15238095, 3.16488095, 3.17738095, 3.18988095,
3.20238095, 1.83452381, 2.59404762, 3.41488095, 3.42738095, 3.43988095,
3.45238095, 3.46488095, 1.98452381
]
self._VerifyValues(
tensor_in_sizes=[1, 5, 8, 7, 1],
filter_in_sizes=[1, 2, 3, 1, 1],
stride=[2, 3, 1], # different stride for each spatial dimension
padding="SAME",
expected=expected_output)
def testConv3D2x2x2FilterStride2(self):
expected_output = [
3.77199074, 3.85069444, 3.92939815, 9.68865741, 9.93402778, 10.17939815
]
self._VerifyValues(
tensor_in_sizes=[1, 4, 2, 3, 3],
filter_in_sizes=[2, 2, 2, 3, 3],
stride=2,
padding="VALID",
expected=expected_output)
def testConv3DStride3(self):
expected_output = [
1.51140873, 1.57167659, 1.63194444, 1.56349206, 1.62673611, 1.68998016,
1.6155754, 1.68179563, 1.74801587, 1.9280754, 2.01215278, 2.09623016,
1.98015873, 2.0672123, 2.15426587, 2.03224206, 2.12227183, 2.21230159,
4.4280754, 4.65500992, 4.88194444, 4.48015873, 4.71006944, 4.93998016,
4.53224206, 4.76512897, 4.99801587, 4.84474206, 5.09548611, 5.34623016,
4.8968254, 5.15054563, 5.40426587, 4.94890873, 5.20560516, 5.46230159
]
self._VerifyValues(
tensor_in_sizes=[1, 6, 7, 8, 2],
filter_in_sizes=[3, 2, 1, 2, 3],
stride=3,
padding="VALID",
expected=expected_output)
def testConv3D2x2x2FilterStride2Same(self):
expected_output = [
3.77199074, 3.85069444, 3.92939815, 2.0162037, 2.06597222, 2.11574074,
9.68865741, 9.93402778, 10.17939815, 4.59953704, 4.73263889, 4.86574074
]
self._VerifyValues(
tensor_in_sizes=[1, 4, 2, 3, 3],
filter_in_sizes=[2, 2, 2, 3, 3],
stride=2,
padding="SAME",
expected=expected_output)
def testKernelSmallerThanStride(self):
expected_output = [
0.03703704, 0.11111111, 0.25925926, 0.33333333, 0.7037037, 0.77777778,
0.92592593, 1.
]
self._VerifyValues(
tensor_in_sizes=[1, 3, 3, 3, 1],
filter_in_sizes=[1, 1, 1, 1, 1],
stride=2,
padding="SAME",
expected=expected_output)
self._VerifyValues(
tensor_in_sizes=[1, 3, 3, 3, 1],
filter_in_sizes=[1, 1, 1, 1, 1],
stride=2,
padding="VALID",
expected=expected_output)
expected_output = [
0.54081633, 0.58017493, 0.28061224, 0.81632653, 0.85568513, 0.40306122,
0.41873178, 0.4340379, 0.19642857, 2.46938776, 2.50874636, 1.1377551,
2.74489796, 2.78425656, 1.26020408, 1.16873178, 1.1840379, 0.51785714,
1.09511662, 1.10604956, 0.44642857, 1.17164723, 1.18258017, 0.47704082,
0.3691691, 0.37244898, 0.125
]
self._VerifyValues(
tensor_in_sizes=[1, 7, 7, 7, 1],
filter_in_sizes=[2, 2, 2, 1, 1],
stride=3,
padding="SAME",
expected=expected_output)
expected_output = [
0.540816, 0.580175, 0.816327, 0.855685, 2.469388, 2.508746, 2.744898,
2.784257
]
self._VerifyValues(
tensor_in_sizes=[1, 7, 7, 7, 1],
filter_in_sizes=[2, 2, 2, 1, 1],
stride=3,
padding="VALID",
expected=expected_output)
def testKernelSizeMatchesInputSize(self):
self._VerifyValues(
tensor_in_sizes=[1, 2, 1, 2, 1],
filter_in_sizes=[2, 1, 2, 1, 2],
stride=1,
padding="VALID",
expected=[1.5625, 1.875])
def _ConstructAndTestGradientForConfig(
self, batch, input_shape, filter_shape, in_depth, out_depth, stride,
padding, test_input, data_format, use_gpu):
input_planes, input_rows, input_cols = input_shape
filter_planes, filter_rows, filter_cols = filter_shape
input_shape = [batch, input_planes, input_rows, input_cols, in_depth]
filter_shape = [
filter_planes, filter_rows, filter_cols, in_depth, out_depth
]
if isinstance(stride, collections.Iterable):
strides = [1] + list(stride) + [1]
else:
strides = [1, stride, stride, stride, 1]
if padding == "VALID":
output_planes = int(
math.ceil((input_planes - filter_planes + 1.0) / strides[1]))
output_rows = int(
math.ceil((input_rows - filter_rows + 1.0) / strides[2]))
output_cols = int(
math.ceil((input_cols - filter_cols + 1.0) / strides[3]))
else:
output_planes = int(math.ceil(float(input_planes) / strides[1]))
output_rows = int(math.ceil(float(input_rows) / strides[2]))
output_cols = int(math.ceil(float(input_cols) / strides[3]))
output_shape = [batch, output_planes, output_rows, output_cols, out_depth]
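    # e.g. (illustrative): VALID with input_planes=3, filter_planes=3 and
    # stride 1 gives output_planes = ceil((3 - 3 + 1) / 1) = 1, while SAME
    # with stride 2 on input_rows=5 gives output_rows = ceil(5 / 2) = 3.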
input_size = 1
for x in input_shape:
input_size *= x
filter_size = 1
for x in filter_shape:
filter_size *= x
input_data = [x * 1.0 / input_size for x in range(0, input_size)]
filter_data = [x * 1.0 / filter_size for x in range(0, filter_size)]
for data_type in self._DtypesToTest(use_gpu=use_gpu):
# TODO(mjanusz): Modify gradient_checker to also provide max relative
# error and synchronize the tolerance levels between the tests for forward
# and backward computations.
if data_type == dtypes.float64:
tolerance = 1e-8
elif data_type == dtypes.float32:
tolerance = 5e-3
elif data_type == dtypes.float16:
tolerance = 1e-3
with self.cached_session(use_gpu=use_gpu):
orig_input_tensor = constant_op.constant(
input_data, shape=input_shape, dtype=data_type, name="input")
filter_tensor = constant_op.constant(
filter_data, shape=filter_shape, dtype=data_type, name="filter")
if data_format == "NCDHW":
input_tensor = test_util.NHWCToNCHW(orig_input_tensor)
new_strides = test_util.NHWCToNCHW(strides)
else:
input_tensor = orig_input_tensor
new_strides = strides
conv = nn_ops.conv3d(
input_tensor,
filter_tensor,
new_strides,
padding,
data_format=data_format,
name="conv")
if data_format == "NCDHW":
conv = test_util.NCHWToNHWC(conv)
self.assertEqual(conv.shape, tensor_shape.TensorShape(output_shape))
if test_input:
jacob_t, jacob_n = gradient_checker.compute_gradient(
orig_input_tensor, input_shape, conv, output_shape)
else:
jacob_t, jacob_n = gradient_checker.compute_gradient(
filter_tensor, filter_shape, conv, output_shape)
if data_type != dtypes.float16:
reference_jacob_t = jacob_t
err = np.fabs(jacob_t - jacob_n).max()
else:
# Compare fp16 theoretical gradients to fp32 theoretical gradients,
# since fp16 numerical gradients are too imprecise.
err = np.fabs(jacob_t - reference_jacob_t).max()
print("conv3d gradient error = ", err)
self.assertLess(err, tolerance)
def ConstructAndTestGradient(self, **kwargs):
for data_format, use_gpu in GetTestConfigs():
self._ConstructAndTestGradientForConfig(data_format=data_format,
use_gpu=use_gpu, **kwargs)
@test_util.run_deprecated_v1
def testInputGradientValidPaddingStrideOne(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(3, 5, 4),
filter_shape=(3, 3, 3),
in_depth=2,
out_depth=3,
stride=1,
padding="VALID",
test_input=True)
@test_util.run_deprecated_v1
def testFilterGradientValidPaddingStrideOne(self):
self.ConstructAndTestGradient(
batch=4,
input_shape=(4, 6, 5),
filter_shape=(2, 2, 2),
in_depth=2,
out_depth=3,
stride=1,
padding="VALID",
test_input=False)
@test_util.run_deprecated_v1
def testInputGradientValidPaddingStrideTwo(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(6, 3, 5),
filter_shape=(3, 3, 3),
in_depth=2,
out_depth=3,
stride=2,
padding="VALID",
test_input=True)
@test_util.run_deprecated_v1
def testFilterGradientValidPaddingStrideTwo(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(7, 6, 5),
filter_shape=(2, 2, 2),
in_depth=2,
out_depth=3,
stride=2,
padding="VALID",
test_input=False)
@test_util.run_deprecated_v1
def testInputGradientValidPaddingStrideThree(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(3, 7, 6),
filter_shape=(3, 3, 3),
in_depth=2,
out_depth=3,
stride=3,
padding="VALID",
test_input=True)
@test_util.run_deprecated_v1
def testFilterGradientValidPaddingStrideThree(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(4, 4, 7),
filter_shape=(4, 4, 4),
in_depth=2,
out_depth=3,
stride=3,
padding="VALID",
test_input=False)
@test_util.run_deprecated_v1
def testInputGradientSamePaddingStrideOne(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(3, 2, 2),
filter_shape=(3, 2, 1),
in_depth=2,
out_depth=1,
stride=1,
padding="SAME",
test_input=True)
@test_util.run_deprecated_v1
def testFilterGradientSamePaddingStrideOne(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(3, 6, 5),
filter_shape=(2, 2, 2),
in_depth=2,
out_depth=3,
stride=1,
padding="SAME",
test_input=False)
@test_util.run_deprecated_v1
def testInputGradientSamePaddingStrideTwo(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(6, 3, 4),
filter_shape=(3, 3, 3),
in_depth=2,
out_depth=3,
stride=2,
padding="SAME",
test_input=True)
@test_util.run_deprecated_v1
def testFilterGradientSamePaddingStrideTwo(self):
self.ConstructAndTestGradient(
batch=4,
input_shape=(7, 3, 5),
filter_shape=(2, 2, 2),
in_depth=2,
out_depth=3,
stride=2,
padding="SAME",
test_input=False)
@test_util.run_deprecated_v1
def testInputGradientSamePaddingStrideThree(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(9, 3, 6),
filter_shape=(3, 3, 3),
in_depth=2,
out_depth=3,
stride=3,
padding="SAME",
test_input=True)
@test_util.run_deprecated_v1
def testFilterGradientSamePaddingStrideThree(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(9, 4, 7),
filter_shape=(4, 4, 4),
in_depth=2,
out_depth=3,
stride=3,
padding="SAME",
test_input=False)
@test_util.run_deprecated_v1
def testInputGradientSamePaddingDifferentStrides(self):
self.ConstructAndTestGradient(
batch=1,
input_shape=(5, 8, 7),
filter_shape=(1, 2, 3),
in_depth=2,
out_depth=3,
stride=[2, 3, 1],
padding="SAME",
test_input=True)
@test_util.run_deprecated_v1
def testFilterGradientKernelSizeMatchesInputSize(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(5, 4, 3),
filter_shape=(5, 4, 3),
in_depth=2,
out_depth=3,
stride=1,
padding="VALID",
test_input=False)
@test_util.run_deprecated_v1
def testInputGradientKernelSizeMatchesInputSize(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(5, 4, 3),
filter_shape=(5, 4, 3),
in_depth=2,
out_depth=3,
stride=1,
padding="VALID",
test_input=True)
def disabledtestFilterGradientSamePaddingDifferentStrides(self):
self.ConstructAndTestGradient(
batch=1,
input_shape=(5, 8, 7),
filter_shape=(1, 2, 3),
in_depth=2,
out_depth=3,
stride=[2, 3, 1],
padding="SAME",
test_input=False)
# Test the fast path in gemm_pack_rhs/mkldnn_gemm_pack, when channel
# dimension is a multiple of packet size.
@test_util.run_deprecated_v1
def testInputGradientValidPaddingStrideOneFastPath(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(3, 5, 4),
filter_shape=(2, 2, 2),
in_depth=8,
out_depth=2,
stride=1,
padding="VALID",
test_input=True)
@test_util.run_deprecated_v1
def testFilterGradientValidPaddingStrideOneFastPath(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(4, 6, 5),
filter_shape=(2, 2, 2),
in_depth=8,
out_depth=2,
stride=1,
padding="VALID",
test_input=False)
# Testing for backprops
def _RunAndVerifyBackprop(self, input_sizes, filter_sizes, output_sizes,
strides, dilations, padding, data_format, use_gpu,
err, mode):
total_input_size = 1
total_filter_size = 1
for s in input_sizes:
total_input_size *= s
for s in filter_sizes:
total_filter_size *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x1 = [f * 1.0 for f in range(1, total_input_size + 1)]
x2 = [f * 1.0 for f in range(1, total_filter_size + 1)]
default_dilations = (
dilations[0] == 1 and dilations[1] == 1 and dilations[2] == 1)
# If any dilation rate is larger than 1, only do test on the GPU
# because we currently do not have a CPU implementation for arbitrary
# dilation rates.
if default_dilations or use_gpu:
      with self.cached_session(use_gpu=use_gpu):
if data_format == "NCDHW":
input_sizes = test_util.NHWCToNCHW(input_sizes)
t1 = constant_op.constant(x1, shape=input_sizes)
t2 = constant_op.constant(x2, shape=filter_sizes)
full_strides = [1] + strides + [1]
full_dilations = [1] + dilations + [1]
if data_format == "NCDHW":
full_strides = test_util.NHWCToNCHW(full_strides)
full_dilations = test_util.NHWCToNCHW(full_dilations)
actual = nn_ops.conv3d(
t1,
t2,
strides=full_strides,
dilations=full_dilations,
padding=padding,
data_format=data_format)
expected = nn_ops.convolution(
t1,
t2,
padding=padding,
strides=strides,
dilation_rate=dilations,
data_format=data_format)
if data_format == "NCDHW":
actual = test_util.NCHWToNHWC(actual)
expected = test_util.NCHWToNHWC(expected)
actual_grad = gradients_impl.gradients(actual, t1
if mode == "input" else t2)[0]
expected_grad = gradients_impl.gradients(expected, t1
if mode == "input" else t2)[0]
# "values" consists of two tensors for two backprops
actual_value = self.evaluate(actual_grad)
expected_value = self.evaluate(expected_grad)
self.assertShapeEqual(actual_value, actual_grad)
self.assertShapeEqual(expected_value, expected_grad)
print("expected = ", expected_value)
print("actual = ", actual_value)
self.assertArrayNear(expected_value.flatten(), actual_value.flatten(),
err)
def testConv3D2x2Depth3ValidBackpropFilterStride1x1Dilation2x1(self):
if test.is_gpu_available(cuda_only=True):
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackprop(
input_sizes=[1, 3, 6, 1, 1],
filter_sizes=[2, 2, 1, 1, 1],
output_sizes=[1, 1, 5, 1, 1],
strides=[1, 1, 1],
dilations=[2, 1, 1],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5,
mode="filter")
def testConv3D2x2Depth3ValidBackpropInputStride1x1Dilation2x1(self):
if test.is_gpu_available(cuda_only=True):
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackprop(
input_sizes=[1, 3, 6, 1, 1],
filter_sizes=[2, 2, 1, 1, 1],
output_sizes=[1, 1, 5, 1, 1],
strides=[1, 1, 1],
dilations=[2, 1, 1],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5,
mode="input")
if __name__ == "__main__":
test.main()
| apache-2.0 |
AzureAutomationTeam/azure-powershell | src/ResourceManager/Sql/Commands.Sql/AdvancedThreatProtection/Services/SqlAdvancedThreatProtectionAdapter.cs | 4342 | // ----------------------------------------------------------------------------------
//
// Copyright Microsoft Corporation
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------------------------------------------------------------
using Microsoft.Azure.Commands.Common.Authentication.Abstractions;
using Microsoft.Azure.Commands.Sql.AdvancedThreatProtection.Model;
using Microsoft.Azure.Commands.Sql.Common;
using Microsoft.Azure.Commands.Sql.ThreatDetection.Model;
using Microsoft.Azure.Commands.Sql.ThreatDetection.Services;
using Microsoft.Azure.Management.Sql.LegacySdk.Models;
using Microsoft.Azure.Management.Sql.Models;
using System.Linq;
namespace Microsoft.Azure.Commands.Sql.AdvancedThreatProtection.Services
{
/// <summary>
    /// The SqlAdvancedThreatProtectionAdapter class is responsible for transforming the data that was received from the endpoints to the cmdlets model of the AdvancedThreatProtection policy and vice versa
/// </summary>
public class SqlAdvancedThreatProtectionAdapter
{
/// <summary>
/// Gets or sets the Azure subscription
/// </summary>
private IAzureSubscription Subscription { get; set; }
/// <summary>
/// The Threat Detection endpoints communicator used by this adapter
/// </summary>
private SqlThreatDetectionAdapter SqlThreatDetectionAdapter { get; set; }
/// <summary>
/// The Azure endpoints communicator used by this adapter
/// </summary>
private AzureEndpointsCommunicator AzureCommunicator { get; set; }
/// <summary>
/// Gets or sets the Azure profile
/// </summary>
public IAzureContext Context { get; set; }
public SqlAdvancedThreatProtectionAdapter(IAzureContext context)
{
Context = context;
Subscription = context.Subscription;
SqlThreatDetectionAdapter = new SqlThreatDetectionAdapter(Context);
}
/// <summary>
        /// Provides the server-level Advanced Threat Protection policy model for the given server
/// </summary>
public ServerAdvancedThreatProtectionPolicyModel GetServerAdvancedThreatProtectionPolicy(string resourceGroup, string serverName)
{
            // Currently the Advanced Threat Protection policy is backed by a Threat Detection policy until the backend supports Advanced Threat Protection APIs
var threatDetectionPolicy = SqlThreatDetectionAdapter.GetServerThreatDetectionPolicy(resourceGroup, serverName);
var serverAdvancedThreatProtectionPolicyModel = new ServerAdvancedThreatProtectionPolicyModel()
{
ResourceGroupName = resourceGroup,
ServerName = serverName,
IsEnabled = (threatDetectionPolicy.ThreatDetectionState == ThreatDetectionStateType.Enabled)
};
return serverAdvancedThreatProtectionPolicyModel;
}
/// <summary>
        /// Sets the server-level Advanced Threat Protection policy model for the given server
/// </summary>
public ServerAdvancedThreatProtectionPolicyModel SetServerAdvancedThreatProtection(ServerAdvancedThreatProtectionPolicyModel model)
{
            // Currently the Advanced Threat Protection policy is backed by a Threat Detection policy until the backend supports Advanced Threat Protection APIs
var threatDetectionPolicy = SqlThreatDetectionAdapter.GetServerThreatDetectionPolicy(model.ResourceGroupName, model.ServerName);
threatDetectionPolicy.ThreatDetectionState = model.IsEnabled ? ThreatDetectionStateType.Enabled : ThreatDetectionStateType.Disabled;
SqlThreatDetectionAdapter.SetServerThreatDetectionPolicy(threatDetectionPolicy, AzureEnvironment.Endpoint.StorageEndpointSuffix);
return model;
}
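        // Illustrative usage sketch (an assumption, not from the original source):
        //
        //   var adapter = new SqlAdvancedThreatProtectionAdapter(context);
        //   var policy = adapter.GetServerAdvancedThreatProtectionPolicy("myRg", "myServer");
        //   policy.IsEnabled = true;
        //   adapter.SetServerAdvancedThreatProtection(policy);
        //
        // i.e. a read-modify-write of the server-level policy, persisted through
        // the underlying Threat Detection policy.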
}
}
| apache-2.0 |
10045125/spring-boot | spring-boot-actuator/src/main/java/org/springframework/boot/actuate/endpoint/PublicMetrics.java | 1069 | /*
* Copyright 2012-2013 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.actuate.endpoint;
import java.util.Collection;
import org.springframework.boot.actuate.metrics.Metric;
/**
* Interface to expose specific {@link Metric}s via a {@link MetricsEndpoint}.
*
* @author Dave Syer
* @see VanillaPublicMetrics
* @see SystemPublicMetrics
*/
public interface PublicMetrics {
/**
* @return an indication of current state through metrics
*/
Collection<Metric<?>> metrics();
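	// Illustrative implementation sketch (an assumption, not part of Spring Boot):
	//
	//   public class QueueDepthMetrics implements PublicMetrics {
	//       @Override
	//       public Collection<Metric<?>> metrics() {
	//           return Collections.<Metric<?>> singleton(
	//                   new Metric<Integer>("queue.depth", 42));
	//       }
	//   }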
}
| apache-2.0 |
jeorme/OG-Platform | projects/OG-Engine/src/main/java/com/opengamma/engine/view/compilation/ViewCompilationContext.java | 5308 | /**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.engine.view.compilation;
import java.util.ArrayList;
import java.util.Collection;
import java.util.LinkedList;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.threeten.bp.Instant;
import com.google.common.collect.Sets;
import com.opengamma.engine.ComputationTargetResolver;
import com.opengamma.engine.depgraph.DependencyGraph;
import com.opengamma.engine.depgraph.DependencyGraphBuilder;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.function.resolver.CompiledFunctionResolver;
import com.opengamma.engine.function.resolver.ComputationTargetResults;
import com.opengamma.engine.function.resolver.DefaultCompiledFunctionResolver;
import com.opengamma.engine.function.resolver.ResolutionRule;
import com.opengamma.engine.target.ComputationTargetReference;
import com.opengamma.engine.view.ViewCalculationConfiguration;
import com.opengamma.engine.view.ViewDefinition;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
/**
* Holds context relating to the partially-completed compilation of a view definition, for passing to different stages of the compilation.
*/
/* package */class ViewCompilationContext {
private final ViewDefinition _viewDefinition;
private final ViewCompilationServices _services;
private final Collection<DependencyGraphBuilder> _builders;
private final VersionCorrection _resolverVersionCorrection;
private final Collection<DependencyGraph> _graphs;
private final ConcurrentMap<ComputationTargetReference, UniqueId> _activeResolutions;
private final CompiledFunctionResolver _functions;
private final Collection<ResolutionRule> _rules;
private final ComputationTargetResolver.AtVersionCorrection _targetResolver;
private Set<UniqueId> _expiredResolutions;
/* package */ViewCompilationContext(final ViewDefinition viewDefinition, final ViewCompilationServices compilationServices,
final Instant valuationTime, final VersionCorrection resolverVersionCorrection, final ConcurrentMap<ComputationTargetReference, UniqueId> resolutions) {
_viewDefinition = viewDefinition;
_services = compilationServices;
_builders = new LinkedList<DependencyGraphBuilder>();
_expiredResolutions = Sets.newSetFromMap(new ConcurrentHashMap<UniqueId, Boolean>());
_functions = compilationServices.getFunctionResolver().compile(valuationTime);
_rules = _functions.getAllResolutionRules();
_targetResolver = TargetResolutionLogger.of(compilationServices.getFunctionCompilationContext().getRawComputationTargetResolver().atVersionCorrection(resolverVersionCorrection), resolutions,
_expiredResolutions);
for (final ViewCalculationConfiguration calcConfig : viewDefinition.getAllCalculationConfigurations()) {
_builders.add(createBuilder(calcConfig));
}
_resolverVersionCorrection = resolverVersionCorrection;
_graphs = new ArrayList<DependencyGraph>(_builders.size());
_activeResolutions = resolutions;
}
public DependencyGraphBuilder createBuilder(final ViewCalculationConfiguration calcConfig) {
final DependencyGraphBuilder builder = _services.getDependencyGraphBuilder().newInstance();
builder.setCalculationConfigurationName(calcConfig.getName());
builder.setMarketDataAvailabilityProvider(_services.getMarketDataAvailabilityProvider());
final FunctionCompilationContext compilationContext = _services.getFunctionCompilationContext().clone();
compilationContext.setViewCalculationConfiguration(calcConfig);
compilationContext.setComputationTargetResolver(_targetResolver);
final Collection<ResolutionRule> transformedRules = calcConfig.getResolutionRuleTransform().transform(_rules);
compilationContext.setComputationTargetResults(new ComputationTargetResults(transformedRules));
final DefaultCompiledFunctionResolver functionResolver = new DefaultCompiledFunctionResolver(compilationContext, transformedRules);
functionResolver.compileRules();
builder.setFunctionResolver(functionResolver);
compilationContext.init();
builder.setCompilationContext(compilationContext);
return builder;
}
public ViewDefinition getViewDefinition() {
return _viewDefinition;
}
public ViewCompilationServices getServices() {
return _services;
}
public CompiledFunctionResolver getCompiledFunctionResolver() {
return _functions;
}
public Collection<DependencyGraphBuilder> getBuilders() {
return _builders;
}
public Collection<DependencyGraph> getGraphs() {
return _graphs;
}
public VersionCorrection getResolverVersionCorrection() {
return _resolverVersionCorrection;
}
public ConcurrentMap<ComputationTargetReference, UniqueId> getActiveResolutions() {
return _activeResolutions;
}
public boolean hasExpiredResolutions() {
return !_expiredResolutions.isEmpty();
}
public Set<UniqueId> takeExpiredResolutions() {
final Set<UniqueId> result = _expiredResolutions;
_expiredResolutions = Sets.newSetFromMap(new ConcurrentHashMap<UniqueId, Boolean>());
return result;
}
}
| apache-2.0 |
shakamunyi/hadoop-20 | src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockWithChecksumFileReader.java | 24443 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import java.io.BufferedInputStream;
import java.io.DataInputStream;
import java.io.File;
import java.io.FileDescriptor;
import java.io.FileInputStream;
import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.Socket;
import java.net.SocketException;
import java.nio.channels.FileChannel;
import java.util.Arrays;
import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.SocketOutputStream;
import org.apache.hadoop.util.ChecksumUtil;
import org.apache.hadoop.util.CrcConcat;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.StringUtils;
/**
* Read from blocks with separate checksum files.
* Block file name:
* blk_(blockId)
*
* Checksum file name:
* blk_(blockId)_(generation_stamp).meta
*
* The on disk file format is:
* Data file keeps just data in the block:
*
* +---------------+
* | |
* | Data |
* | . |
* | . |
* | . |
* | . |
* | . |
* | . |
* | |
* +---------------+
*
* Checksum file:
* +----------------------+
* | Checksum Header |
* +----------------------+
* | Checksum for Chunk 1 |
* +----------------------+
* | Checksum for Chunk 2 |
* +----------------------+
* | . |
* | . |
* | . |
* +----------------------+
* | Checksum for last |
* | Chunk (Partial) |
* +----------------------+
*
*/
public class BlockWithChecksumFileReader extends DatanodeBlockReader {
private InputStreamWithChecksumFactory streamFactory;
private DataInputStream checksumIn; // checksum datastream
private BlockDataFile.Reader blockDataFileReader;
boolean useTransferTo = false;
MemoizedBlock memoizedBlock;
BlockWithChecksumFileReader(int namespaceId, Block block,
boolean isFinalized, boolean ignoreChecksum,
boolean verifyChecksum, boolean corruptChecksumOk,
InputStreamWithChecksumFactory streamFactory) throws IOException {
super(namespaceId, block, isFinalized, ignoreChecksum, verifyChecksum,
corruptChecksumOk);
this.streamFactory = streamFactory;
this.checksumIn = streamFactory.getChecksumStream();
this.block = block;
}
@Override
public void fadviseStream(int advise, long offset, long len)
throws IOException {
blockDataFileReader.posixFadviseIfPossible(offset, len, advise);
}
private void initializeNullChecksum() {
checksumIn = null;
// This only decides the buffer size. Use BUFFER_SIZE?
checksum = DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_NULL,
16 * 1024);
}
public DataChecksum getChecksumToSend(long blockLength) throws IOException {
if (!corruptChecksumOk || checksumIn != null) {
// read and handle the common header here. For now just a version
try {
BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
short version = header.getVersion();
if (version != FSDataset.FORMAT_VERSION_NON_INLINECHECKSUM) {
LOG.warn("Wrong version (" + version + ") for metadata file for "
+ block + " ignoring ...");
}
checksum = header.getChecksum();
} catch (IOException ioe) {
if (blockLength == 0) {
initializeNullChecksum();
} else {
throw ioe;
}
}
} else {
LOG.warn("Could not find metadata file for " + block);
initializeNullChecksum();
}
super.getChecksumInfo(blockLength);
return checksum;
}
public void initialize(long offset, long blockLength)
throws IOException {
// seek to the right offsets
if (offset > 0) {
long checksumSkip = (offset / bytesPerChecksum) * checksumSize;
// note blockInStream is seeked when created below
if (checksumSkip > 0) {
// Should we use seek() for checksum file as well?
IOUtils.skipFully(checksumIn, checksumSkip);
}
}
blockDataFileReader = streamFactory.getBlockDataFileReader();
memoizedBlock = new MemoizedBlock(blockLength, streamFactory, block);
}
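  // Worked example (illustrative): with the common CRC32 configuration of
  // bytesPerChecksum = 512 and checksumSize = 4, a read starting at block
  // offset 1536 begins at chunk 1536 / 512 = 3, so initialize() skips
  // 3 * 4 = 12 checksum bytes past the meta file header before streaming.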
public boolean prepareTransferTo() throws IOException {
useTransferTo = true;
return useTransferTo;
}
@Override
public void sendChunks(OutputStream out, byte[] buf, long offset,
int checksumOff, int numChunks, int len, BlockCrcUpdater crcUpdater,
int packetVersion) throws IOException {
if (packetVersion != DataTransferProtocol.PACKET_VERSION_CHECKSUM_FIRST) {
throw new IOException("packet version " + packetVersion
+ " is not supported by non-inline checksum blocks.");
}
int checksumLen = numChunks * checksumSize;
if (checksumSize > 0 && checksumIn != null) {
try {
checksumIn.readFully(buf, checksumOff, checksumLen);
if (dnData != null) {
dnData.recordReadChunkCheckSumTime();
}
if (crcUpdater != null) {
long tempOffset = offset;
long remain = len;
for (int i = 0; i < checksumLen; i += checksumSize) {
long chunkSize = (remain > bytesPerChecksum) ? bytesPerChecksum
: remain;
crcUpdater.updateBlockCrc(tempOffset, (int) chunkSize,
DataChecksum.getIntFromBytes(buf, checksumOff + i));
remain -= chunkSize;
}
}
} catch (IOException e) {
LOG.warn(" Could not read or failed to veirfy checksum for data"
+ " at offset " + offset + " for block " + block + " got : "
+ StringUtils.stringifyException(e));
IOUtils.closeStream(checksumIn);
checksumIn = null;
if (corruptChecksumOk) {
if (checksumOff < checksumLen) {
// Just fill the array with zeros.
Arrays.fill(buf, checksumOff, checksumLen, (byte) 0);
if (dnData != null) {
dnData.recordReadChunkCheckSumTime();
}
}
} else {
throw e;
}
}
}
int dataOff = checksumOff + checksumLen;
if (!useTransferTo) {
// normal transfer
blockDataFileReader.readFully(buf, dataOff, len, offset, true);
if (dnData != null) {
dnData.recordReadChunkDataTime();
}
if (verifyChecksum) {
int dOff = dataOff;
int cOff = checksumOff;
int dLeft = len;
for (int i = 0; i < numChunks; i++) {
checksum.reset();
int dLen = Math.min(dLeft, bytesPerChecksum);
checksum.update(buf, dOff, dLen);
if (!checksum.compare(buf, cOff)) {
throw new ChecksumException("Checksum failed at "
+ (offset + len - dLeft), len);
}
dLeft -= dLen;
dOff += dLen;
cOff += checksumSize;
}
if (dnData != null) {
dnData.recordVerifyCheckSumTime();
}
}
// only recompute checksum if we can't trust the meta data due to
// concurrent writes
if (memoizedBlock.hasBlockChanged(len, offset)) {
ChecksumUtil.updateChunkChecksum(buf, checksumOff, dataOff, len,
checksum);
if (dnData != null) {
dnData.recordUpdateChunkCheckSumTime();
}
}
try {
out.write(buf, 0, dataOff + len);
if (dnData != null) {
dnData.recordSendChunkToClientTime();
}
} catch (IOException e) {
if (LOG.isDebugEnabled()) {
LOG.debug("IOException when reading block " + block + " offset " + offset, e);
}
throw BlockSender.ioeToSocketException(e);
}
} else {
try {
// use transferTo(). Checks on out and blockIn are already done.
SocketOutputStream sockOut = (SocketOutputStream) out;
if (memoizedBlock.hasBlockChanged(len, offset)) {
blockDataFileReader.readFully(buf, dataOff, len, offset, true);
if (dnData != null) {
dnData.recordReadChunkDataTime();
}
ChecksumUtil.updateChunkChecksum(buf, checksumOff, dataOff, len,
checksum);
if (dnData != null) {
dnData.recordUpdateChunkCheckSumTime();
}
sockOut.write(buf, 0, dataOff + len);
if (dnData != null) {
dnData.recordSendChunkToClientTime();
}
} else {
// first write the packet
sockOut.write(buf, 0, dataOff);
// no need to flush. since we know out is not a buffered stream.
blockDataFileReader.transferToSocketFully(sockOut,offset, len);
if (dnData != null) {
dnData.recordTransferChunkToClientTime();
}
}
} catch (IOException e) {
if (LOG.isDebugEnabled()) {
LOG.debug("IOException when reading block " + block + " offset "
+ offset, e);
}
/*
* exception while writing to the client (well, with transferTo(), it
* could also be while reading from the local file).
*/
throw BlockSender.ioeToSocketException(e);
}
}
}
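  // Packet layout produced by sendChunks() above (PACKET_VERSION_CHECKSUM_FIRST),
  // illustrated for numChunks = 2, bytesPerChecksum = 512, checksumSize = 4:
  //   buf[checksumOff, checksumOff + 8)  : the two 4-byte chunk CRCs
  //   buf[dataOff, dataOff + len)        : up to 1024 bytes of chunk data
  // i.e. every chunk checksum in a packet is sent before the data it covers.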
@Override
public int getPreferredPacketVersion() {
return DataTransferProtocol.PACKET_VERSION_CHECKSUM_FIRST;
}
public void close() throws IOException {
IOException ioe = null;
// close checksum file
if (checksumIn != null) {
try {
checksumIn.close();
} catch (IOException e) {
ioe = e;
}
checksumIn = null;
}
// throw IOException if there is any
if (ioe != null) {
throw ioe;
}
}
/**
* helper class used to track if a block's meta data is verifiable or not
*/
class MemoizedBlock {
// visible block length
private long blockLength;
private final Block block;
private final InputStreamWithChecksumFactory isf;
private MemoizedBlock(long blockLength,
InputStreamWithChecksumFactory isf, Block block) {
this.blockLength = blockLength;
this.isf = isf;
this.block = block;
}
// logic: if we are starting or ending on a partial chunk and the block
// has more data than we were told at construction, the block has 'changed'
    // in a way that we care about (i.e., we can't trust the crc data)
boolean hasBlockChanged(long dataLen, long offset) throws IOException {
if (isFinalized) {
      // We would treat it as an error case if a finalized block that was open
      // at one size has an unmatched size when closing. There might be false
      // positives for the append() case; we made that trade-off to avoid
      // false negatives. Always return false here so the stored checksums are
      // trusted and data integrity is guaranteed by checksum checking.
return false;
}
// check if we are using transferTo since we tell if the file has changed
// (blockInPosition >= 0 => we are using transferTo and File Channels
if (useTransferTo) {
long currentLength = blockDataFileReader.size();
return (offset % bytesPerChecksum != 0 || dataLen
% bytesPerChecksum != 0)
&& currentLength > blockLength;
} else {
FSDatasetInterface ds = null;
if (isf instanceof DatanodeBlockReader.BlockInputStreamFactory) {
ds = ((DatanodeBlockReader.BlockInputStreamFactory) isf).getDataset();
}
// offset is the offset into the block
return (offset % bytesPerChecksum != 0 || dataLen % bytesPerChecksum != 0)
&& ds != null
&& ds.getOnDiskLength(namespaceId, block) > blockLength;
}
}
}
public static interface InputStreamWithChecksumFactory extends
BlockSender.InputStreamFactory {
public InputStream createStream(long offset) throws IOException;
public DataInputStream getChecksumStream() throws IOException;
}
  /** Find the meta file for the specified block name in the given directory
   * listing, and return the generation stamp parsed from the meta file name.
   */
static long getGenerationStampFromSeperateChecksumFile(String[] listdir, String blockName) {
for (int j = 0; j < listdir.length; j++) {
String path = listdir[j];
if (!path.startsWith(blockName)) {
continue;
}
String[] vals = StringUtils.split(path, '_');
if (vals.length != 3) { // blk, blkid, genstamp.meta
continue;
}
String[] str = StringUtils.split(vals[2], '.');
if (str.length != 2) {
continue;
}
return Long.parseLong(str[0]);
}
DataNode.LOG.warn("Block " + blockName +
" does not have a metafile!");
return Block.GRANDFATHER_GENERATION_STAMP;
}
  /**
   * Parse the generation stamp out of the meta file name, given the
   * corresponding block file.
   * @param blockFile the block data file
   * @param metaFile the checksum (meta) file for the block
   * @return the generation stamp encoded in the meta file name
   * @throws IOException if the generation stamp cannot be parsed
   */
static long parseGenerationStampInMetaFile(File blockFile, File metaFile
) throws IOException {
String metaname = metaFile.getName();
String gs = metaname.substring(blockFile.getName().length() + 1,
metaname.length() - FSDataset.METADATA_EXTENSION.length());
try {
return Long.parseLong(gs);
} catch(NumberFormatException nfe) {
throw (IOException)new IOException("blockFile=" + blockFile
+ ", metaFile=" + metaFile).initCause(nfe);
}
}
/**
* This class provides the input stream and length of the metadata
* of a block
*
*/
static class MetaDataInputStream extends FilterInputStream {
MetaDataInputStream(InputStream stream, long len) {
super(stream);
length = len;
}
private long length;
public long getLength() {
return length;
}
}
static protected File getMetaFile(FSDatasetInterface dataset, int namespaceId,
Block b) throws IOException {
return BlockWithChecksumFileWriter.getMetaFile(dataset.getBlockFile(namespaceId, b), b);
}
/**
* Does the meta file exist for this block?
* @param namespaceId - parent namespace id
* @param b - the block
* @return true of the metafile for specified block exits
* @throws IOException
*/
static public boolean metaFileExists(FSDatasetInterface dataset, int namespaceId, Block b) throws IOException {
return getMetaFile(dataset, namespaceId, b).exists();
}
/**
* Returns metaData of block b as an input stream (and its length)
* @param namespaceId - parent namespace id
* @param b - the block
* @return the metadata input stream;
* @throws IOException
*/
static public MetaDataInputStream getMetaDataInputStream(
FSDatasetInterface dataset, int namespace, Block b) throws IOException {
File checksumFile = getMetaFile(dataset, namespace, b);
return new MetaDataInputStream(new FileInputStream(checksumFile),
checksumFile.length());
}
static byte[] getMetaData(FSDatasetInterface dataset, int namespaceId,
Block block) throws IOException {
MetaDataInputStream checksumIn = null;
try {
checksumIn = getMetaDataInputStream(dataset, namespaceId, block);
long fileSize = checksumIn.getLength();
if (fileSize >= 1L << 31 || fileSize <= 0) {
throw new IOException("Unexpected size for checksumFile of block"
+ block);
}
byte[] buf = new byte[(int) fileSize];
IOUtils.readFully(checksumIn, buf, 0, buf.length);
return buf;
} finally {
IOUtils.closeStream(checksumIn);
}
}
/**
   * Calculate the CRC checksum of the whole block, implemented by
   * concatenating the stored checksums of all of its chunks.
   *
   * @param datanode the data node that hosts the replica
   * @param ri the replica to read
   * @param namespaceId the namespace the block belongs to
   * @param block the block whose CRC is calculated
   * @return the CRC32 checksum of the whole block
   * @throws IOException if the metadata cannot be read or is not CRC32
*/
static public int getBlockCrc(DataNode datanode, ReplicaToRead ri,
int namespaceId, Block block) throws IOException {
InputStream rawStreamIn = null;
DataInputStream streamIn = null;
try {
int bytesPerCRC;
int checksumSize;
long crcPerBlock;
rawStreamIn = BlockWithChecksumFileReader.getMetaDataInputStream(
datanode.data, namespaceId, block);
streamIn = new DataInputStream(new BufferedInputStream(rawStreamIn,
FSConstants.BUFFER_SIZE));
final BlockMetadataHeader header = BlockMetadataHeader
.readHeader(streamIn);
final DataChecksum checksum = header.getChecksum();
if (checksum.getChecksumType() != DataChecksum.CHECKSUM_CRC32) {
throw new IOException("File Checksum now is only supported for CRC32");
}
bytesPerCRC = checksum.getBytesPerChecksum();
checksumSize = checksum.getChecksumSize();
crcPerBlock = (((BlockWithChecksumFileReader.MetaDataInputStream) rawStreamIn)
.getLength() - BlockMetadataHeader.getHeaderSize()) / checksumSize;
int blockCrc = 0;
byte[] buffer = new byte[checksumSize];
for (int i = 0; i < crcPerBlock; i++) {
IOUtils.readFully(streamIn, buffer, 0, buffer.length);
int intChecksum = ((buffer[0] & 0xff) << 24)
| ((buffer[1] & 0xff) << 16) | ((buffer[2] & 0xff) << 8)
| ((buffer[3] & 0xff));
if (i == 0) {
blockCrc = intChecksum;
} else {
int chunkLength;
if (i != crcPerBlock - 1 || ri.getBytesVisible() % bytesPerCRC == 0) {
chunkLength = bytesPerCRC;
} else {
chunkLength = (int) ri.getBytesVisible() % bytesPerCRC;
}
blockCrc = CrcConcat.concatCrc(blockCrc, intChecksum, chunkLength);
}
}
return blockCrc;
} finally {
if (streamIn != null) {
IOUtils.closeStream(streamIn);
}
if (rawStreamIn != null) {
IOUtils.closeStream(rawStreamIn);
}
}
}
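  // Illustrative note: CrcConcat.concatCrc(crcA, crcB, lenB) above combines a
  // running block CRC with the next chunk's CRC as if the two byte ranges had
  // been checksummed in one pass, so a 1536-byte block with 512-byte chunks
  // folds three stored chunk CRCs into one whole-block CRC32 without rereading
  // the data file.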
static long readBlockAccelerator(Socket s, File dataFile, Block block,
long startOffset, long length, DataNode datanode) throws IOException {
File checksumFile = BlockWithChecksumFileWriter.getMetaFile(dataFile, block);
FileInputStream datain = new FileInputStream(dataFile);
FileInputStream metain = new FileInputStream(checksumFile);
FileChannel dch = datain.getChannel();
FileChannel mch = metain.getChannel();
// read in type of crc and bytes-per-checksum from metadata file
int versionSize = 2; // the first two bytes in meta file is the version
byte[] cksumHeader = new byte[versionSize + DataChecksum.HEADER_LEN];
int numread = metain.read(cksumHeader);
if (numread != versionSize + DataChecksum.HEADER_LEN) {
String msg = "readBlockAccelerator: metafile header should be atleast " +
(versionSize + DataChecksum.HEADER_LEN) + " bytes " +
" but could read only " + numread + " bytes.";
LOG.warn(msg);
throw new IOException(msg);
}
DataChecksum ckHdr = DataChecksum.newDataChecksum(cksumHeader, versionSize);
int type = ckHdr.getChecksumType();
int bytesPerChecksum = ckHdr.getBytesPerChecksum();
long cheaderSize = DataChecksum.getChecksumHeaderSize();
// align the startOffset with the previous bytesPerChecksum boundary.
long delta = startOffset % bytesPerChecksum;
startOffset -= delta;
length += delta;
// align the length to encompass the entire last checksum chunk
delta = length % bytesPerChecksum;
if (delta != 0) {
delta = bytesPerChecksum - delta;
length += delta;
}
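    // e.g. (illustrative): with bytesPerChecksum = 512, a request for
    // startOffset = 700, length = 100 is widened to startOffset = 512,
    // length = 512, so the client receives whole checksum chunks that it can
    // verify independently.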
// find the offset in the metafile
long startChunkNumber = startOffset / bytesPerChecksum;
long numChunks = length / bytesPerChecksum;
long checksumSize = ckHdr.getChecksumSize();
long startMetaOffset = versionSize + cheaderSize + startChunkNumber * checksumSize;
long metaLength = numChunks * checksumSize;
// get a connection back to the client
SocketOutputStream out = new SocketOutputStream(s, datanode.socketWriteTimeout);
try {
// write out the checksum type and bytesperchecksum to client
// skip the first two bytes that describe the version
long val = mch.transferTo(versionSize, cheaderSize, out);
if (val != cheaderSize) {
String msg = "readBlockAccelerator for block " + block +
" at offset " + 0 +
" but could not transfer checksum header.";
LOG.warn(msg);
throw new IOException(msg);
}
if (LOG.isDebugEnabled()) {
LOG.debug("readBlockAccelerator metaOffset " + startMetaOffset +
" mlength " + metaLength);
}
// write out the checksums back to the client
val = mch.transferTo(startMetaOffset, metaLength, out);
if (val != metaLength) {
String msg = "readBlockAccelerator for block " + block +
" at offset " + startMetaOffset +
" but could not transfer checksums of size " +
metaLength + ". Transferred only " + val;
LOG.warn(msg);
throw new IOException(msg);
}
if (LOG.isDebugEnabled()) {
LOG.debug("readBlockAccelerator dataOffset " + startOffset +
" length " + length);
}
// send data block back to client
long read = dch.transferTo(startOffset, length, out);
if (read != length) {
String msg = "readBlockAccelerator for block " + block +
" at offset " + startOffset +
" but block size is only " + length +
" and could transfer only " + read;
LOG.warn(msg);
throw new IOException(msg);
}
return read;
} catch ( SocketException ignored ) {
      // It's OK for the remote side to close the connection at any time.
datanode.myMetrics.blocksRead.inc();
return -1;
} catch ( IOException ioe ) {
      /* What exactly should we do here?
       * Earlier versions shut down the datanode if there was a disk error.
       */
LOG.warn(datanode.getDatanodeInfo() +
":readBlockAccelerator:Got exception while serving " +
block + " to " +
s.getInetAddress() + ":\n" +
StringUtils.stringifyException(ioe) );
throw ioe;
} finally {
IOUtils.closeStream(out);
IOUtils.closeStream(datain);
IOUtils.closeStream(metain);
}
}
public static boolean isMetaFilename(String name) {
return name.startsWith(Block.BLOCK_FILE_PREFIX)
&& name.endsWith(Block.METADATA_EXTENSION);
}
/**
* Returns array of two longs: the first one is the block id, and the second
* one is genStamp. The method workds under assumption that metafile name has
* the following format: "blk_<blkid>_<gensmp>.meta"
*/
public static long[] parseMetafileName(String path) {
String[] groundSeparated = StringUtils.split(path, '_');
if (groundSeparated.length != 3) { // blk, blkid, genstamp.meta
throw new IllegalArgumentException("Not a valid meta file name");
}
String[] dotSeparated = StringUtils.split(groundSeparated[2], '.');
if (dotSeparated.length != 2) {
throw new IllegalArgumentException("Not a valid meta file name");
}
return new long[] { Long.parseLong(groundSeparated[1]),
Long.parseLong(dotSeparated[0]) };
}
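  // Example (illustrative): parseMetafileName("blk_4567_89.meta") returns
  // { 4567L, 89L }, i.e. block id 4567 with generation stamp 89.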
}
| apache-2.0 |
jeorme/OG-Platform | projects/OG-Financial/src/main/java/com/opengamma/financial/analytics/model/forex/forward/FXForwardPointsMethodPresentValueFunction.java | 6310 | /**
* Copyright (C) 2013 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.forex.forward;
import java.util.Collections;
import java.util.Set;
import com.google.common.collect.Iterables;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.analytics.financial.forex.derivative.Forex;
import com.opengamma.analytics.financial.forex.method.ForexForwardPointsMethod;
import com.opengamma.analytics.financial.interestrate.YieldCurveBundle;
import com.opengamma.analytics.math.curve.DoublesCurve;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.function.FunctionExecutionContext;
import com.opengamma.engine.function.FunctionInputs;
import com.opengamma.engine.value.ComputedValue;
import com.opengamma.engine.value.ValueProperties;
import com.opengamma.engine.value.ValueProperties.Builder;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.financial.analytics.fxforwardcurve.FXForwardCurveDefinition;
import com.opengamma.financial.analytics.model.CalculationPropertyNamesAndValues;
import com.opengamma.financial.analytics.model.forex.ForexVisitors;
import com.opengamma.financial.analytics.model.fx.FXForwardPointsPVFunction;
import com.opengamma.financial.security.FinancialSecurity;
import com.opengamma.util.money.CurrencyAmount;
import com.opengamma.util.money.MultipleCurrencyAmount;
/**
* Calculates the present value of an FX forward using the FX forward rates directly.
* @deprecated Use {@link FXForwardPointsPVFunction}
*/
@Deprecated
public class FXForwardPointsMethodPresentValueFunction extends FXForwardPointsMethodFunction {
private static final ForexForwardPointsMethod CALCULATOR = ForexForwardPointsMethod.getInstance();
public FXForwardPointsMethodPresentValueFunction() {
super(ValueRequirementNames.PRESENT_VALUE);
}
@Override
protected Set<ComputedValue> getResult(final Forex fxForward, final YieldCurveBundle data, final DoublesCurve forwardPoints, final ComputationTarget target,
final Set<ValueRequirement> desiredValues, final FunctionInputs inputs, final FunctionExecutionContext executionContext,
final FXForwardCurveDefinition fxForwardCurveDefinition) {
final MultipleCurrencyAmount mca = CALCULATOR.presentValue(fxForward, data, forwardPoints);
if (mca.size() != 1) {
throw new OpenGammaRuntimeException("Expecting a single value for present value");
}
final CurrencyAmount ca = mca.getCurrencyAmounts()[0];
final String currency = ((FinancialSecurity) target.getSecurity()).accept(ForexVisitors.getReceiveCurrencyVisitor()).getCode();
if (!ca.getCurrency().getCode().equals(currency)) {
throw new OpenGammaRuntimeException("Property currency did not match result currency");
}
final ValueProperties properties = getResultProperties(Iterables.getOnlyElement(desiredValues), currency).get();
final ValueSpecification spec = new ValueSpecification(ValueRequirementNames.PRESENT_VALUE, target.toSpecification(), properties);
return Collections.singleton(new ComputedValue(spec, ca.getAmount()));
}
@Override
protected ValueProperties.Builder getResultProperties(final ComputationTarget target) {
return createValueProperties()
.with(ValuePropertyNames.CALCULATION_METHOD, CalculationPropertyNamesAndValues.FORWARD_POINTS)
.withAny(ValuePropertyNames.PAY_CURVE)
.withAny(ValuePropertyNames.RECEIVE_CURVE)
.withAny(ValuePropertyNames.PAY_CURVE_CALCULATION_CONFIG)
.withAny(ValuePropertyNames.RECEIVE_CURVE_CALCULATION_CONFIG)
.withAny(ValuePropertyNames.FORWARD_CURVE_NAME)
.withAny(ValuePropertyNames.CURRENCY);
}
@Override
protected ValueProperties.Builder getResultProperties(final ComputationTarget target, final String payCurveName, final String receiveCurveName,
final String payCurveCalculationConfig, final String receiveCurveCalculationConfig, final String forwardCurveName) {
return createValueProperties()
.with(ValuePropertyNames.CALCULATION_METHOD, CalculationPropertyNamesAndValues.FORWARD_POINTS)
.with(ValuePropertyNames.PAY_CURVE, payCurveName)
.with(ValuePropertyNames.RECEIVE_CURVE, receiveCurveName)
.with(ValuePropertyNames.PAY_CURVE_CALCULATION_CONFIG, payCurveCalculationConfig)
.with(ValuePropertyNames.RECEIVE_CURVE_CALCULATION_CONFIG, receiveCurveCalculationConfig)
.with(ValuePropertyNames.FORWARD_CURVE_NAME, forwardCurveName)
.with(ValuePropertyNames.CURRENCY, ((FinancialSecurity) target.getSecurity()).accept(ForexVisitors.getReceiveCurrencyVisitor()).getCode());
}
protected ValueProperties.Builder getResultProperties(final ValueRequirement desiredValue, final String currency) {
final String payCurveName = desiredValue.getConstraint(ValuePropertyNames.PAY_CURVE);
final String receiveCurveName = desiredValue.getConstraint(ValuePropertyNames.RECEIVE_CURVE);
final String payCurveCalculationConfig = desiredValue.getConstraint(ValuePropertyNames.PAY_CURVE_CALCULATION_CONFIG);
final String receiveCurveCalculationConfig = desiredValue.getConstraint(ValuePropertyNames.RECEIVE_CURVE_CALCULATION_CONFIG);
final String forwardCurveName = desiredValue.getConstraint(ValuePropertyNames.FORWARD_CURVE_NAME);
return createValueProperties()
.with(ValuePropertyNames.CALCULATION_METHOD, CalculationPropertyNamesAndValues.FORWARD_POINTS)
.with(ValuePropertyNames.PAY_CURVE, payCurveName)
.with(ValuePropertyNames.RECEIVE_CURVE, receiveCurveName)
.with(ValuePropertyNames.PAY_CURVE_CALCULATION_CONFIG, payCurveCalculationConfig)
.with(ValuePropertyNames.RECEIVE_CURVE_CALCULATION_CONFIG, receiveCurveCalculationConfig)
.with(ValuePropertyNames.FORWARD_CURVE_NAME, forwardCurveName)
.with(ValuePropertyNames.CURRENCY, currency);
}
@Override
protected Builder getResultProperties(final ValueRequirement desiredValue, final ComputationTarget target) {
throw new UnsupportedOperationException();
}
}
| apache-2.0 |
sunpy1106/SpringBeanLifeCycle | src/main/java/org/springframework/beans/factory/xml/NamespaceHandlerSupport.java | 5803 | /*
* Copyright 2002-2012 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.beans.factory.xml;
import java.util.HashMap;
import java.util.Map;
import org.w3c.dom.Attr;
import org.w3c.dom.Element;
import org.w3c.dom.Node;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.config.BeanDefinitionHolder;
/**
* Support class for implementing custom {@link NamespaceHandler NamespaceHandlers}.
* Parsing and decorating of individual {@link Node Nodes} is done via {@link BeanDefinitionParser}
* and {@link BeanDefinitionDecorator} strategy interfaces, respectively.
*
* <p>Provides the {@link #registerBeanDefinitionParser} and {@link #registerBeanDefinitionDecorator}
* methods for registering a {@link BeanDefinitionParser} or {@link BeanDefinitionDecorator}
* to handle a specific element.
*
* @author Rob Harrop
* @author Juergen Hoeller
* @since 2.0
* @see #registerBeanDefinitionParser(String, BeanDefinitionParser)
* @see #registerBeanDefinitionDecorator(String, BeanDefinitionDecorator)
*/
public abstract class NamespaceHandlerSupport implements NamespaceHandler {
/**
* Stores the {@link BeanDefinitionParser} implementations keyed by the
* local name of the {@link Element Elements} they handle.
*/
private final Map<String, BeanDefinitionParser> parsers =
new HashMap<String, BeanDefinitionParser>();
/**
* Stores the {@link BeanDefinitionDecorator} implementations keyed by the
* local name of the {@link Element Elements} they handle.
*/
private final Map<String, BeanDefinitionDecorator> decorators =
new HashMap<String, BeanDefinitionDecorator>();
/**
* Stores the {@link BeanDefinitionDecorator} implementations keyed by the local
* name of the {@link Attr Attrs} they handle.
*/
private final Map<String, BeanDefinitionDecorator> attributeDecorators =
new HashMap<String, BeanDefinitionDecorator>();
/**
* Parses the supplied {@link Element} by delegating to the {@link BeanDefinitionParser} that is
* registered for that {@link Element}.
*/
@Override
public BeanDefinition parse(Element element, ParserContext parserContext) {
return findParserForElement(element, parserContext).parse(element, parserContext);
}
/**
* Locates the {@link BeanDefinitionParser} from the register implementations using
* the local name of the supplied {@link Element}.
*/
private BeanDefinitionParser findParserForElement(Element element, ParserContext parserContext) {
String localName = parserContext.getDelegate().getLocalName(element);
BeanDefinitionParser parser = this.parsers.get(localName);
if (parser == null) {
parserContext.getReaderContext().fatal(
"Cannot locate BeanDefinitionParser for element [" + localName + "]", element);
}
return parser;
}
/**
* Decorates the supplied {@link Node} by delegating to the {@link BeanDefinitionDecorator} that
* is registered to handle that {@link Node}.
*/
@Override
public BeanDefinitionHolder decorate(
Node node, BeanDefinitionHolder definition, ParserContext parserContext) {
return findDecoratorForNode(node, parserContext).decorate(node, definition, parserContext);
}
/**
* Locates the {@link BeanDefinitionParser} from the register implementations using
* the local name of the supplied {@link Node}. Supports both {@link Element Elements}
* and {@link Attr Attrs}.
*/
private BeanDefinitionDecorator findDecoratorForNode(Node node, ParserContext parserContext) {
BeanDefinitionDecorator decorator = null;
String localName = parserContext.getDelegate().getLocalName(node);
if (node instanceof Element) {
decorator = this.decorators.get(localName);
}
else if (node instanceof Attr) {
decorator = this.attributeDecorators.get(localName);
}
else {
parserContext.getReaderContext().fatal(
"Cannot decorate based on Nodes of type [" + node.getClass().getName() + "]", node);
}
if (decorator == null) {
parserContext.getReaderContext().fatal("Cannot locate BeanDefinitionDecorator for " +
(node instanceof Element ? "element" : "attribute") + " [" + localName + "]", node);
}
return decorator;
}
/**
* Subclasses can call this to register the supplied {@link BeanDefinitionParser} to
* handle the specified element. The element name is the local (non-namespace qualified)
* name.
*/
protected final void registerBeanDefinitionParser(String elementName, BeanDefinitionParser parser) {
this.parsers.put(elementName, parser);
}
/**
* Subclasses can call this to register the supplied {@link BeanDefinitionDecorator} to
* handle the specified element. The element name is the local (non-namespace qualified)
* name.
*/
protected final void registerBeanDefinitionDecorator(String elementName, BeanDefinitionDecorator dec) {
this.decorators.put(elementName, dec);
}
/**
* Subclasses can call this to register the supplied {@link BeanDefinitionDecorator} to
* handle the specified attribute. The attribute name is the local (non-namespace qualified)
* name.
*/
protected final void registerBeanDefinitionDecoratorForAttribute(String attrName, BeanDefinitionDecorator dec) {
this.attributeDecorators.put(attrName, dec);
}
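	// A minimal sketch (the handler and parser names are hypothetical, not part
	// of Spring) of how a subclass typically wires its parsers from
	// NamespaceHandler.init():
	//
	//   public class MyNamespaceHandler extends NamespaceHandlerSupport {
	//       @Override
	//       public void init() {
	//           registerBeanDefinitionParser("component", new ComponentParser());
	//           registerBeanDefinitionDecoratorForAttribute("cacheable", new CacheableDecorator());
	//       }
	//   }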
}
| apache-2.0 |
lewismc/yax | lib/fop-1.0/docs/dev/tools.html | 17729 | <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html>
<head>
<META http-equiv="Content-Type" content="text/html; charset=UTF-8">
<meta content="Apache Forrest" name="Generator">
<meta name="Forrest-version" content="0.8">
<meta name="Forrest-skin-name" content="pelt">
<title>FOP Development: Developer Tools</title>
<link type="text/css" href="../skin/basic.css" rel="stylesheet">
<link media="screen" type="text/css" href="../skin/screen.css" rel="stylesheet">
<link media="print" type="text/css" href="../skin/print.css" rel="stylesheet">
<link type="text/css" href="../skin/profile.css" rel="stylesheet">
<script src="../skin/getBlank.js" language="javascript" type="text/javascript"></script><script src="../skin/getMenu.js" language="javascript" type="text/javascript"></script><script src="../skin/fontsize.js" language="javascript" type="text/javascript"></script>
<link rel="shortcut icon" href="../">
</head>
<body onload="init()">
<script type="text/javascript">ndeSetTextSize();</script>
<div id="top">
<!--+
|header
+-->
<div class="header">
<!--+
|start group logo
+-->
<div class="grouplogo">
<a href="http://xmlgraphics.apache.org/"><img class="logoImage" alt="Apache XML Graphics" src="../images/group-logo.gif" title="Apache XML Graphics is responsible for the creation and maintenance of software for managing the conversion of XML formats to graphical output, and the creation and maintenance of related software components, based on software licensed to the Foundation"></a>
</div>
<!--+
|end group logo
+-->
<!--+
|start Project Logo
+-->
<div class="projectlogo">
<a href="http://xmlgraphics.apache.org/fop/"><img class="logoImage" alt="Apache FOP" src="../images/logo.jpg" title="Apache FOP (Formatting Objects Processor) is the world's first output independent formatter. Output formats currently supported include PDF, PCL, PS, SVG, XML (area tree representation), Print, AWT, MIF and TXT. The primary output target is PDF."></a>
</div>
<!--+
|end Project Logo
+-->
<!--+
|start Search
+-->
<div class="searchbox">
<form action="http://www.google.com/search" method="get" class="roundtopsmall">
<input value="xmlgraphics.apache.org" name="sitesearch" type="hidden"><input onFocus="getBlank (this, 'Search the site with google');" size="25" name="q" id="query" type="text" value="Search the site with google">
<input name="Search" value="Search" type="submit">
</form>
</div>
<!--+
|end search
+-->
<!--+
|start Tabs
+-->
<ul id="tabs">
<li>
<a class="unselected" href="../index.html">Home</a>
</li>
<li>
<a class="unselected" href="../0.95/index.html">Version 0.95</a>
</li>
<li>
<a class="unselected" href="../1.0/index.html">Version 1.0</a>
</li>
<li>
<a class="unselected" href="../trunk/index.html">FOP Trunk</a>
</li>
<li class="current">
<a class="selected" href="../dev/index.html">Development</a>
</li>
</ul>
<!--+
|end Tabs
+-->
</div>
</div>
<div id="main">
<div id="publishedStrip">
<!--+
|start Subtabs
+-->
<div id="level2tabs"></div>
<!--+
|end Endtabs
+-->
<script type="text/javascript"><!--
document.write("Last Published: " + document.lastModified);
// --></script>
</div>
<!--+
|breadtrail
+-->
<div class="breadtrail">
<a href="http://www.apache.org/">apache.org</a> > <a href="http://xml.apache.org/">XML Federation</a> > <a href="http://xmlgraphics.apache.org/">xmlgraphics.apache.org</a><script src="../skin/breadcrumbs.js" language="JavaScript" type="text/javascript"></script>
</div>
<!--+
|start Menu, mainarea
+-->
<!--+
|start Menu
+-->
<div id="menu">
<div onclick="SwitchMenu('menu_1.1', '../skin/')" id="menu_1.1Title" class="menutitle">About</div>
<div id="menu_1.1" class="menuitemgroup">
<div class="menuitem">
<a href="../dev/index.html">Basics</a>
</div>
</div>
<div onclick="SwitchMenu('menu_1.2', '../skin/')" id="menu_1.2Title" class="menutitle">Design</div>
<div id="menu_1.2" class="menuitemgroup">
<div onclick="SwitchMenu('menu_1.2.1', '../skin/')" id="menu_1.2.1Title" class="menutitle">About</div>
<div id="menu_1.2.1" class="menuitemgroup">
<div class="menuitem">
<a href="../dev/design/index.html">Introduction</a>
</div>
</div>
<div onclick="SwitchMenu('menu_1.2.2', '../skin/')" id="menu_1.2.2Title" class="menutitle">Core Process</div>
<div id="menu_1.2.2" class="menuitemgroup">
<div class="menuitem">
<a href="../dev/design/startup.html">Startup</a>
</div>
<div class="menuitem">
<a href="../dev/design/parsing.html">XML Parsing</a>
</div>
<div class="menuitem">
<a href="../dev/design/fotree.html">FO Tree</a>
</div>
<div class="menuitem">
<a href="../dev/design/properties.html">Properties</a>
</div>
<div class="menuitem">
<a href="../dev/design/layout.html">Layout</a>
</div>
<div class="menuitem">
<a href="../dev/design/breakpos.html">Break Possibility</a>
</div>
<div class="menuitem">
<a href="../dev/design/areas.html">Area Tree</a>
</div>
<div class="menuitem">
<a href="../dev/design/renderers.html">Renderers</a>
</div>
</div>
<div onclick="SwitchMenu('menu_1.2.3', '../skin/')" id="menu_1.2.3Title" class="menutitle">Miscellaneous</div>
<div id="menu_1.2.3" class="menuitemgroup">
<div class="menuitem">
<a href="../dev/design/images.html">Images</a>
</div>
<div class="menuitem">
<a href="../dev/design/pdf-library.html">PDF Library</a>
</div>
<div class="menuitem">
<a href="../dev/design/svg.html">SVG</a>
</div>
<div class="menuitem">
<a href="../dev/design/embedding.html">Embedding</a>
</div>
<div class="menuitem">
<a href="../dev/design/extending.html">Extending</a>
</div>
<div class="menuitem">
<a href="../dev/design/optimise.html">Optimisations</a>
</div>
<div class="menuitem">
<a href="../dev/design/useragent.html">User Agent</a>
</div>
</div>
<div class="menuitem">
<a href="http://wiki.apache.org/xmlgraphics-fop/FOPProjectPages">Unresolved (Wiki)</a>
</div>
<div class="menuitem">
<a href="../dev/svg.html">SVG</a>
</div>
<div class="menuitem">
<a href="../dev/extensions.html">Extensions</a>
</div>
</div>
<div onclick="SwitchMenu('menu_1.3', '../skin/')" id="menu_1.3Title" class="menutitle">Develop</div>
<div id="menu_1.3" class="menuitemgroup">
<div class="menuitem">
<a href="../dev/api-doc.html">API Doc</a>
</div>
<div class="menuitem">
<a href="../dev/implement.html">Walk-Thru</a>
</div>
<div class="menuitem">
<a href="http://issues.apache.org/bugzilla/buglist.cgi?bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&email1=&emailtype1=substring&emailassigned_to1=1&email2=&emailtype2=substring&emailreporter2=1&bugidtype=include&bug_id=&changedin=&votes=&chfieldfrom=&chfieldto=Now&chfieldvalue=&product=Fop&short_desc=%5BPATCH%5D&short_desc_type=allwordssubstr&long_desc=&long_desc_type=allwordssubstr&bug_file_loc=&bug_file_loc_type=allwordssubstr&keywords=&keywords_type=anywords&field0-0-0=noop&type0-0-0=noop&value0-0-0=&namedcmd=Fop+all&newqueryname=fop+patch+queue&tofooter=1&order=Reuse+same+sort+as+last+time">Patch Queue</a>
</div>
<div class="menuitem">
<a href="../dev/conventions.html">Conventions</a>
</div>
</div>
<div onclick="SwitchMenu('menu_1.4', '../skin/')" id="menu_1.4Title" class="menutitle">Test</div>
<div id="menu_1.4" class="menuitemgroup">
<div class="menuitem">
<a href="../dev/testing.html">Testing</a>
</div>
</div>
<div onclick="SwitchMenu('menu_1.5', '../skin/')" id="menu_1.5Title" class="menutitle">Deploy</div>
<div id="menu_1.5" class="menuitemgroup">
<div class="menuitem">
<a href="../dev/doc.html">Doc Mgmt</a>
</div>
<div class="menuitem">
<a href="../dev/release.html">Release</a>
</div>
<div class="menuitem">
<a href="http://issues.apache.org/bugzilla/buglist.cgi?bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&email1=&emailtype1=substring&emailassigned_to1=1&email2=&emailtype2=substring&emailreporter2=1&bugidtype=include&bug_id=&changedin=&votes=&chfieldfrom=&chfieldto=Now&chfieldvalue=&product=Fop&short_desc=&short_desc_type=allwordssubstr&long_desc=&long_desc_type=allwordssubstr&bug_file_loc=&bug_file_loc_type=allwordssubstr&keywords=&keywords_type=anywords&field0-0-0=noop&type0-0-0=noop&value0-0-0=&order=bug_severity%2Cpriority%20DESC">Bugs</a>
</div>
</div>
<div onclick="SwitchMenu('menu_selected_1.6', '../skin/')" id="menu_selected_1.6Title" class="menutitle" style="background-image: url('../skin/images/chapter_open.gif');">Resources</div>
<div id="menu_selected_1.6" class="selectedmenuitemgroup" style="display: block;">
<div class="menuitem">
<a href="../dev/faq.html">FAQs</a>
</div>
<div class="menupage">
<div class="menupagetitle">Tools</div>
</div>
<div class="menuitem">
<a href="http://svn.apache.org/viewvc/xmlgraphics/fop">ViewVC</a>
</div>
</div>
<div onclick="SwitchMenu('menu_1.7', '../skin/')" id="menu_1.7Title" class="menutitle">SubPackages</div>
<div id="menu_1.7" class="menuitemgroup">
<div class="menuitem">
<a href="../dev/rtflib.html">RTFlib</a>
</div>
</div>
<div id="credit"></div>
<div id="roundbottom">
<img style="display: none" class="corner" height="15" width="15" alt="" src="../skin/images/rc-b-l-15-1body-2menu-3menu.png"></div>
<!--+
|alternative credits
+-->
<div id="credit2"></div>
</div>
<!--+
|end Menu
+-->
<!--+
|start content
+-->
<div id="content">
<div title="raw XML" class="xmllink">
<a class="dida" href="tools.xml"><img alt="XML - icon" src="../skin/images/xmldoc.gif" class="skin"><br>
XML</a>
</div>
<div title="Portable Document Format" class="pdflink">
<a class="dida" href="tools.pdf"><img alt="PDF -icon" src="../skin/images/pdfdoc.gif" class="skin"><br>
PDF</a>
</div>
<div class="trail">Font size:
<input value="Reset" class="resetfont" title="Reset text" onclick="ndeSetTextSize('reset'); return false;" type="button">
<input value="-a" class="smallerfont" title="Shrink text" onclick="ndeSetTextSize('decr'); return false;" type="button">
<input value="+a" class="biggerfont" title="Enlarge text" onclick="ndeSetTextSize('incr'); return false;" type="button">
</div>
<h1>FOP Development: Developer Tools</h1>
<div id="minitoc-area">
<ul class="minitoc">
<li>
<a href="#checklist">Developer Checklist</a>
</li>
<li>
<a href="#general">General Developer Information</a>
</li>
<li>
<a href="#svn">Subversion (SVN)</a>
<ul class="minitoc">
<li>
<a href="#svn_general">General</a>
</li>
<li>
<a href="#svn_download">Step-by-step instructions for downloading FOP using the SVN command-line client</a>
</li>
<li>
<a href="#tortoisesvn_download">Step-by-step instructions for downloading FOP using TortoiseSVN (on Windows)</a>
</li>
<li>
<a href="#patches">Creating Patches</a>
</li>
<li>
<a href="#svn-doc">Documentation</a>
</li>
</ul>
</li>
<li>
<a href="#ide">Integrated Development Environments (IDEs)</a>
</li>
</ul>
</div>
<p>
This page documents items that may be helpful to other developers,
especially to those who are new to FOP. Exhaustive treatment of these
topics is better suited to other fora, but the information presented
here is intended to deal with FOP-specific issues related to these
tools, especially "gotchas", and to help developers get jump-started.
</p>
<a name="N10014"></a><a name="checklist"></a>
<h2 class="underlined_10">Developer Checklist</h2>
<div class="section">
<p>
Here is a (probably not comprehensive) list of tools you will need
to be a successful FOP developer:
</p>
<ul>
<li>A java IDE (see <a href="#ide">IDE</a>).</li>
<li>A Subversion client (see <a href="#svn">Subversion</a>).</li>
<li>Ant (see <a href="../trunk/compiling.html">Building FOP</a>).</li>
<li>checkstyle (see <a href="conventions.html#java-checkstyle">Checkstyle</a> on the conventions page).</li>
<li>JUnit (see <a href="testing.html#basic">Basic Testing</a>).</li>
</ul>
</div>
<a name="N10044"></a><a name="general"></a>
<h2 class="underlined_10">General Developer Information</h2>
<div class="section">
<p>
See <a class="external" href="http://www.apache.org/dev/contributors.html">the Apache Contributors Tech Guide</a>
for useful information and links for Apache developers, including help
with tools and procedures.
</p>
</div>
<a name="N10052"></a><a name="svn"></a>
<h2 class="underlined_10">Subversion (SVN)</h2>
<div class="section">
<a name="N10058"></a><a name="svn_general"></a>
<h3 class="underlined_5">General</h3>
<p>
Visit <a href="http://xmlgraphics.apache.org/repo.html">Apache XML Graphics Code Repositories</a>
for useful information.
</p>
<p>
You will need a SVN client to be able to gain access to the FOP repository.
For general SVN information, visit
<a class="external" href="http://subversion.tigris.org">Subversion Home</a>.
A comprehensive list of clients for all operating systems and many IDEs
can be found at
<a class="external" href="http://subversion.tigris.org/project_links.html">the Subversion Links page</a>.
For Microsoft Windows we recommend <a class="external" href="http://tortoisesvn.tigris.org">TortoiseSVN</a>.
The command-line client that comes with Subversion is also very easy to use.
</p>
<a name="N10075"></a><a name="svn_download"></a>
<h3 class="underlined_5">Step-by-step instructions for downloading FOP using the SVN command-line client</h3>
<p>
On the command-line (Windows or Unix), simply run:
</p>
<pre class="code">
svn co http://svn.apache.org/repos/asf/xmlgraphics/fop/trunk/ fop-trunk
</pre>
<p>
This will download the FOP trunk into the directory "fop-trunk".
</p>
<a name="N10086"></a><a name="tortoisesvn_download"></a>
<h3 class="underlined_5">Step-by-step instructions for downloading FOP using TortoiseSVN (on Windows)</h3>
<ul>
<li>Create a new, empty directory in a place of your choice.</li>
<li>Right-click the new directory and select "SVN Checkout..." from the context menu.</li>
<li>Enter <span class="codefrag">http://svn.apache.org/repos/asf/xmlgraphics/fop/trunk/</span> as the URL of the repository.</li>
<li>Click "OK" and the download should begin.</li>
</ul>
<a name="N1009F"></a><a name="patches"></a>
<h3 class="underlined_5">Creating Patches</h3>
<ul>
<li>
<span class="codefrag">cd</span> to a directory that contains all of the changes
that you wish to include in the patch. To comprehend the entire
distribution, <span class="codefrag">cd</span> to the top directory where you
checked out the sources.
</li>
<li>
Run: <span class="codefrag">svn up</span> to make sure the diff is created against the latest sources.
</li>
<li>
Run: <span class="codefrag">svn diff >mypatch.diff</span>
<br>This will write the patch to the file "mypatch.diff". A sketch of applying such a patch follows below.
</li>
<li>If you are running TortoiseSVN, you can select "Create Patch..." in the TortoiseSVN context menu.</li>
</ul>
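<p>
A sketch of applying such a patch locally (the file name is the one from the
example above; adjust the <span class="codefrag">-p</span> level to match how
the diff was created):
</p>
<pre class="code">
patch -p0 &lt;mypatch.diff
</pre>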
<a name="N100C3"></a><a name="svn-doc"></a>
<h3 class="underlined_5">Documentation</h3>
<ul>
<li>[online resource] <a class="external" href="http://subversion.tigris.org">The Subversion Home Page</a>.</li>
<li>[electronic manual] <a class="external" href="http://svnbook.red-bean.com">Version Control with Subversion</a> (official Subversion manual).
Note that this manual applies to the command-line version of SVN.</li>
<li>[online resource] <a class="external" href="http://subversion.tigris.org/project_links.html">Comprehensive list of links to documentation and Subversion clients and plugins.</a>
</li>
</ul>
</div>
<a name="N100E2"></a><a name="ide"></a>
<h2 class="underlined_10">Integrated Development Environments (IDEs)</h2>
<div class="section">
<p>An IDE is not required, but will generally be found to be helpful, especially for serious debugging and refactoring.</p>
<p>Borland's JBuilder 7/8 does not support Ant builds unless you have the Enterprise Edition (which is quite expensive). This causes problems with any code that is generated by the Ant build. First, you must run the Ant build before you can use the IDE. Second, when you are editing in the IDE, you must be editing the generated files, which means that you must make any changes to the source files in another editor. This is less serious for development on the trunk, but in the maintenance branch, all source files were "generated".</p>
<p>Sun ONE Studio Four does support Ant, but seems to use a built-in version, and as of this writing chokes on the FOP build file, saying that it is not valid. This is awkward because there is no official DTD for Ant, and the failure may be merely an Ant version issue.</p>
<p>Additional notes on setting up FOP within an IDE (e.g. Eclipse) can be found in the <a class="external" href="http://wiki.apache.org/xmlgraphics-fop/FOPIDESetupGuide">Wiki</a>.</p>
</div>
<span class="version">
version 627324</span>
</div>
<!--+
|end content
+-->
<div class="clearboth">&nbsp;</div>
</div>
<div id="footer">
<!--+
|start bottomstrip
+-->
<div class="lastmodified">
<script type="text/javascript"><!--
document.write("Last Published: " + document.lastModified);
// --></script>
</div>
<div class="copyright">
Copyright &copy;
1999-2009 <a href="http://www.apache.org/licenses/">The Apache Software Foundation.</a>
</div>
<!--+
|end bottomstrip
+-->
</div>
</body>
</html>
| apache-2.0 |
scnakandala/derby | java/testing/org/apache/derbyTesting/functionTests/tests/lang/PrecedenceTest.java | 2454 | /**
* Derby - Class org.apache.derbyTesting.functionTests.tests.lang.PrecedenceTest
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.derbyTesting.functionTests.tests.lang;
import java.sql.SQLException;
import java.sql.Statement;
import junit.framework.Test;
import org.apache.derbyTesting.junit.BaseJDBCTestCase;
import org.apache.derbyTesting.junit.JDBC;
import org.apache.derbyTesting.junit.TestConfiguration;
/**
 * Test case for precedence.sql. It tests the precedence of operators (other
 * than and, or, and not) that return boolean values.
*/
public class PrecedenceTest extends BaseJDBCTestCase {
public PrecedenceTest(String name) {
super(name);
}
public static Test suite(){
return TestConfiguration.defaultSuite(PrecedenceTest.class);
}
public void testPrecedence() throws SQLException{
String sql = "create table t1(c11 int)";
Statement st = createStatement();
st.executeUpdate(sql);
sql = "insert into t1 values(1)";
assertEquals(1, st.executeUpdate(sql));
sql = "select c11 from t1 where 1 in (1,2,3) = (1=1)";
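        // Presumably parses as (1 in (1,2,3)) = (1=1), i.e. true = true,
        // so the single row in t1 qualifies.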
JDBC.assertSingleValueResultSet(st.executeQuery(sql), "1");
sql = "select c11 from t1 where 'acme widgets' " +
"like 'acme%' in ('1=1')";
JDBC.assertSingleValueResultSet(st.executeQuery(sql), "1");
sql = "select c11 from t1 where 1 between -100 " +
"and 100 is not null";
JDBC.assertSingleValueResultSet(st.executeQuery(sql), "1");
sql = "select c11 from t1 where exists(select *" +
" from (values 1) as t) not in ('1=2')";
JDBC.assertEmpty(st.executeQuery(sql));
st.close();
}
}
| apache-2.0 |
jerryscript-project/jerryscript | tests/jerry/regression-test-issue-3935.js | 716 | // Copyright JS Foundation and other contributors, http://js.foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
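// Regression test for jerryscript issue #3935: a parenthesized identifier
// with an initializer, as in { a : (o) = 1 }, must be accepted as a
// destructuring assignment target. obj has no property 'a', so o receives
// the default value 1, and the assignment expression evaluates to obj.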
const obj = {};
var a = { a : (o) = 1 } = obj;
assert (a === obj);
assert (o === 1);
| apache-2.0 |
songweijia/fffs | sources/hadoop-2.4.1-src/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/Jets3tNativeFileSystemStore.java | 14116 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.s3native;
import static org.apache.hadoop.fs.s3native.NativeS3FileSystem.PATH_DELIMITER;
import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.net.URI;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.s3.S3Credentials;
import org.apache.hadoop.fs.s3.S3Exception;
import org.jets3t.service.S3Service;
import org.jets3t.service.S3ServiceException;
import org.jets3t.service.ServiceException;
import org.jets3t.service.StorageObjectsChunk;
import org.jets3t.service.impl.rest.httpclient.RestS3Service;
import org.jets3t.service.model.MultipartPart;
import org.jets3t.service.model.MultipartUpload;
import org.jets3t.service.model.S3Bucket;
import org.jets3t.service.model.S3Object;
import org.jets3t.service.model.StorageObject;
import org.jets3t.service.security.AWSCredentials;
import org.jets3t.service.utils.MultipartUtils;
@InterfaceAudience.Private
@InterfaceStability.Unstable
class Jets3tNativeFileSystemStore implements NativeFileSystemStore {
private S3Service s3Service;
private S3Bucket bucket;
private long multipartBlockSize;
private boolean multipartEnabled;
private long multipartCopyBlockSize;
static final long MAX_PART_SIZE = (long)5 * 1024 * 1024 * 1024;
public static final Log LOG =
LogFactory.getLog(Jets3tNativeFileSystemStore.class);
@Override
public void initialize(URI uri, Configuration conf) throws IOException {
S3Credentials s3Credentials = new S3Credentials();
s3Credentials.initialize(uri, conf);
try {
AWSCredentials awsCredentials =
new AWSCredentials(s3Credentials.getAccessKey(),
s3Credentials.getSecretAccessKey());
this.s3Service = new RestS3Service(awsCredentials);
} catch (S3ServiceException e) {
handleS3ServiceException(e);
}
multipartEnabled =
conf.getBoolean("fs.s3n.multipart.uploads.enabled", false);
multipartBlockSize = Math.min(
conf.getLong("fs.s3n.multipart.uploads.block.size", 64 * 1024 * 1024),
MAX_PART_SIZE);
multipartCopyBlockSize = Math.min(
conf.getLong("fs.s3n.multipart.copy.block.size", MAX_PART_SIZE),
MAX_PART_SIZE);
bucket = new S3Bucket(uri.getHost());
}
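  // Illustrative configuration for the keys read above (the values here are
  // examples, not the defaults):
  //
  //   fs.s3n.multipart.uploads.enabled    = true
  //   fs.s3n.multipart.uploads.block.size = 67108864    (64 MB parts)
  //   fs.s3n.multipart.copy.block.size    = 5368709120  (5 GB, the single-part ceiling)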
@Override
public void storeFile(String key, File file, byte[] md5Hash)
throws IOException {
if (multipartEnabled && file.length() >= multipartBlockSize) {
storeLargeFile(key, file, md5Hash);
return;
}
BufferedInputStream in = null;
try {
in = new BufferedInputStream(new FileInputStream(file));
S3Object object = new S3Object(key);
object.setDataInputStream(in);
object.setContentType("binary/octet-stream");
object.setContentLength(file.length());
if (md5Hash != null) {
object.setMd5Hash(md5Hash);
}
s3Service.putObject(bucket, object);
} catch (S3ServiceException e) {
handleS3ServiceException(e);
} finally {
if (in != null) {
try {
in.close();
} catch (IOException e) {
// ignore
}
}
}
}
public void storeLargeFile(String key, File file, byte[] md5Hash)
throws IOException {
S3Object object = new S3Object(key);
object.setDataInputFile(file);
object.setContentType("binary/octet-stream");
object.setContentLength(file.length());
if (md5Hash != null) {
object.setMd5Hash(md5Hash);
}
List<StorageObject> objectsToUploadAsMultipart =
new ArrayList<StorageObject>();
objectsToUploadAsMultipart.add(object);
MultipartUtils mpUtils = new MultipartUtils(multipartBlockSize);
try {
mpUtils.uploadObjects(bucket.getName(), s3Service,
objectsToUploadAsMultipart, null);
} catch (ServiceException e) {
handleServiceException(e);
} catch (Exception e) {
throw new S3Exception(e);
}
}
@Override
public void storeEmptyFile(String key) throws IOException {
try {
S3Object object = new S3Object(key);
object.setDataInputStream(new ByteArrayInputStream(new byte[0]));
object.setContentType("binary/octet-stream");
object.setContentLength(0);
s3Service.putObject(bucket, object);
} catch (S3ServiceException e) {
handleS3ServiceException(e);
}
}
@Override
public FileMetadata retrieveMetadata(String key) throws IOException {
StorageObject object = null;
try {
if(LOG.isDebugEnabled()) {
LOG.debug("Getting metadata for key: " + key + " from bucket:" + bucket.getName());
}
object = s3Service.getObjectDetails(bucket.getName(), key);
return new FileMetadata(key, object.getContentLength(),
object.getLastModifiedDate().getTime());
} catch (ServiceException e) {
// Following is brittle. Is there a better way?
if ("NoSuchKey".equals(e.getErrorCode())) {
return null; //return null if key not found
}
handleServiceException(e);
return null; //never returned - keep compiler happy
} finally {
if (object != null) {
object.closeDataInputStream();
}
}
}
/**
* @param key
* The key is the object name that is being retrieved from the S3 bucket
* @return
* This method returns null if the key is not found
* @throws IOException
*/
@Override
public InputStream retrieve(String key) throws IOException {
try {
if(LOG.isDebugEnabled()) {
LOG.debug("Getting key: " + key + " from bucket:" + bucket.getName());
}
S3Object object = s3Service.getObject(bucket.getName(), key);
return object.getDataInputStream();
} catch (ServiceException e) {
handleServiceException(key, e);
return null; //return null if key not found
}
}
/**
*
* @param key
* The key is the object name that is being retrieved from the S3 bucket
* @return
* This method returns null if the key is not found
* @throws IOException
*/
@Override
public InputStream retrieve(String key, long byteRangeStart)
throws IOException {
try {
if(LOG.isDebugEnabled()) {
LOG.debug("Getting key: " + key + " from bucket:" + bucket.getName() + " with byteRangeStart: " + byteRangeStart);
}
S3Object object = s3Service.getObject(bucket, key, null, null, null,
null, byteRangeStart, null);
return object.getDataInputStream();
} catch (ServiceException e) {
handleServiceException(key, e);
return null; //return null if key not found
}
}
@Override
public PartialListing list(String prefix, int maxListingLength)
throws IOException {
return list(prefix, maxListingLength, null, false);
}
@Override
public PartialListing list(String prefix, int maxListingLength, String priorLastKey,
boolean recurse) throws IOException {
return list(prefix, recurse ? null : PATH_DELIMITER, maxListingLength, priorLastKey);
}
/**
*
* @return
* This method returns null if the list could not be populated
   *   due to S3 raising a ServiceException
* @throws IOException
*/
private PartialListing list(String prefix, String delimiter,
int maxListingLength, String priorLastKey) throws IOException {
try {
if (prefix.length() > 0 && !prefix.endsWith(PATH_DELIMITER)) {
prefix += PATH_DELIMITER;
}
StorageObjectsChunk chunk = s3Service.listObjectsChunked(bucket.getName(),
prefix, delimiter, maxListingLength, priorLastKey);
FileMetadata[] fileMetadata =
new FileMetadata[chunk.getObjects().length];
for (int i = 0; i < fileMetadata.length; i++) {
StorageObject object = chunk.getObjects()[i];
fileMetadata[i] = new FileMetadata(object.getKey(),
object.getContentLength(), object.getLastModifiedDate().getTime());
}
return new PartialListing(chunk.getPriorLastKey(), fileMetadata,
chunk.getCommonPrefixes());
} catch (S3ServiceException e) {
handleS3ServiceException(e);
return null; //never returned - keep compiler happy
} catch (ServiceException e) {
handleServiceException(e);
return null; //return null if list could not be populated
}
}
@Override
public void delete(String key) throws IOException {
try {
if(LOG.isDebugEnabled()) {
        LOG.debug("Deleting key: " + key + " from bucket: " + bucket.getName());
}
s3Service.deleteObject(bucket, key);
} catch (ServiceException e) {
handleServiceException(key, e);
}
}
public void rename(String srcKey, String dstKey) throws IOException {
try {
s3Service.renameObject(bucket.getName(), srcKey, new S3Object(dstKey));
} catch (ServiceException e) {
handleServiceException(e);
}
}
@Override
public void copy(String srcKey, String dstKey) throws IOException {
try {
if(LOG.isDebugEnabled()) {
        LOG.debug("Copying srcKey: " + srcKey + " to dstKey: " + dstKey + " in bucket: " + bucket.getName());
}
if (multipartEnabled) {
S3Object object = s3Service.getObjectDetails(bucket, srcKey, null,
null, null, null);
if (multipartCopyBlockSize > 0 &&
object.getContentLength() > multipartCopyBlockSize) {
copyLargeFile(object, dstKey);
return;
}
}
s3Service.copyObject(bucket.getName(), srcKey, bucket.getName(),
new S3Object(dstKey), false);
} catch (ServiceException e) {
handleServiceException(srcKey, e);
}
}
public void copyLargeFile(S3Object srcObject, String dstKey) throws IOException {
try {
long partCount = srcObject.getContentLength() / multipartCopyBlockSize +
(srcObject.getContentLength() % multipartCopyBlockSize > 0 ? 1 : 0);
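      // Ceiling division: e.g. a 130 MB object with 64 MB parts gives two
      // full parts plus a 2 MB remainder part, so partCount == 3.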
MultipartUpload multipartUpload = s3Service.multipartStartUpload
(bucket.getName(), dstKey, srcObject.getMetadataMap());
List<MultipartPart> listedParts = new ArrayList<MultipartPart>();
for (int i = 0; i < partCount; i++) {
long byteRangeStart = i * multipartCopyBlockSize;
long byteLength;
if (i < partCount - 1) {
byteLength = multipartCopyBlockSize;
} else {
byteLength = srcObject.getContentLength() % multipartCopyBlockSize;
if (byteLength == 0) {
byteLength = multipartCopyBlockSize;
}
}
MultipartPart copiedPart = s3Service.multipartUploadPartCopy
(multipartUpload, i + 1, bucket.getName(), srcObject.getKey(),
null, null, null, null, byteRangeStart,
byteRangeStart + byteLength - 1, null);
listedParts.add(copiedPart);
}
Collections.reverse(listedParts);
s3Service.multipartCompleteUpload(multipartUpload, listedParts);
} catch (ServiceException e) {
handleServiceException(e);
}
}
@Override
public void purge(String prefix) throws IOException {
try {
S3Object[] objects = s3Service.listObjects(bucket.getName(), prefix, null);
for (S3Object object : objects) {
s3Service.deleteObject(bucket, object.getKey());
}
} catch (S3ServiceException e) {
handleS3ServiceException(e);
}
}
@Override
public void dump() throws IOException {
StringBuilder sb = new StringBuilder("S3 Native Filesystem, ");
sb.append(bucket.getName()).append("\n");
try {
S3Object[] objects = s3Service.listObjects(bucket.getName());
for (S3Object object : objects) {
sb.append(object.getKey()).append("\n");
}
} catch (S3ServiceException e) {
handleS3ServiceException(e);
}
System.out.println(sb);
}
private void handleServiceException(String key, ServiceException e) throws IOException {
if ("NoSuchKey".equals(e.getErrorCode())) {
throw new FileNotFoundException("Key '" + key + "' does not exist in S3");
} else {
handleServiceException(e);
}
}
private void handleS3ServiceException(S3ServiceException e) throws IOException {
if (e.getCause() instanceof IOException) {
throw (IOException) e.getCause();
}
else {
if(LOG.isDebugEnabled()) {
LOG.debug("S3 Error code: " + e.getS3ErrorCode() + "; S3 Error message: " + e.getS3ErrorMessage());
}
throw new S3Exception(e);
}
}
private void handleServiceException(ServiceException e) throws IOException {
if (e.getCause() instanceof IOException) {
throw (IOException) e.getCause();
}
else {
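      // Note: other ServiceExceptions are not rethrown here; they are only
      // logged (in debug mode) and callers fall through, typically returning
      // null as if the key were not found.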
if(LOG.isDebugEnabled()) {
        LOG.debug("Got ServiceException with Error code: " + e.getErrorCode() + "; and Error message: " + e.getErrorMessage());
}
}
}
}
| apache-2.0 |
nmldiegues/tuner-icac14 | tuner-selective/kmeans/kmeans.c | 12898 | /* =============================================================================
*
* kmeans.c
*
* =============================================================================
*
* Description:
*
* Takes as input a file:
* ascii file: containing 1 data point per line
* binary file: first int is the number of objects
* 2nd int is the no. of features of each object
*
 *    This example performs a fuzzy c-means clustering on the data. Fuzzy clustering
 *    is performed using min to max clusters, and the clustering that gets the best
 *    score according to a compactness and separation criterion is returned.
*
*
* Author:
*
* Wei-keng Liao
* ECE Department Northwestern University
* email: [email protected]
*
*
* Edited by:
*
* Jay Pisharath
* Northwestern University
*
* Chi Cao Minh
* Stanford University
*
* =============================================================================
*
* For the license of bayes/sort.h and bayes/sort.c, please see the header
* of the files.
*
* ------------------------------------------------------------------------
*
* For the license of kmeans, please see kmeans/LICENSE.kmeans
*
* ------------------------------------------------------------------------
*
* For the license of ssca2, please see ssca2/COPYRIGHT
*
* ------------------------------------------------------------------------
*
* For the license of lib/mt19937ar.c and lib/mt19937ar.h, please see the
* header of the files.
*
* ------------------------------------------------------------------------
*
* For the license of lib/rbtree.h and lib/rbtree.c, please see
* lib/LEGALNOTICE.rbtree and lib/LICENSE.rbtree
*
* ------------------------------------------------------------------------
*
* Unless otherwise noted, the following license applies to STAMP files:
*
* Copyright (c) 2007, Stanford University
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of Stanford University nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY STANFORD UNIVERSITY ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL STANFORD UNIVERSITY BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
* =============================================================================
*/
#include <assert.h>
#include <fcntl.h>
#include <getopt.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include "cluster.h"
#include "common.h"
#include "thread.h"
#include "tm.h"
#include "util.h"
#define MAX_LINE_LENGTH 1000000 /* max input is 400000 one digit input + spaces */
extern double global_time;
/* =============================================================================
* usage
* =============================================================================
*/
void
usage (char* argv0)
{
char* help =
"Usage: %s [switches] -i filename\n"
" -i filename: file containing data to be clustered\n"
" -b input file is in binary format\n"
" -m max_clusters: maximum number of clusters allowed\n"
" -n min_clusters: minimum number of clusters allowed\n"
" -z : don't zscore transform data\n"
" -t threshold : threshold value\n"
" -p nproc : number of threads\n";
fprintf(stderr, help, argv0);
exit(-1);
}
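/*
 * Illustrative invocation (the input file name is hypothetical):
 *
 *   ./kmeans -p 4 -m 15 -n 5 -t 0.05 -i inputs/random-n2048-d16-c16.txt
 *
 * This clusters the points in the input file with 4 threads, trying 5 to 15
 * clusters with a convergence threshold of 0.05.
 */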
/* =============================================================================
* main
* =============================================================================
*/
MAIN(argc, argv)
{
int max_nclusters = 13;
int min_nclusters = 4;
char* filename = 0;
float* buf;
float** attributes;
float** cluster_centres = NULL;
int i;
int j;
int best_nclusters;
int* cluster_assign;
int numAttributes;
int numObjects;
int use_zscore_transform = 1;
char* line;
int isBinaryFile = 0;
int nloops;
int len;
int nthreads;
float threshold = 0.001;
int opt;
GOTO_REAL();
line = (char*)malloc(MAX_LINE_LENGTH); /* reserve memory line */
nthreads = 1;
while ((opt = getopt(argc,(char**)argv,"p:i:m:n:t:bz")) != EOF) {
switch (opt) {
case 'i': filename = optarg;
break;
case 'b': isBinaryFile = 1;
break;
case 't': threshold = atof(optarg);
break;
case 'm': max_nclusters = atoi(optarg);
break;
case 'n': min_nclusters = atoi(optarg);
break;
case 'z': use_zscore_transform = 0;
break;
case 'p': nthreads = atoi(optarg);
break;
case '?': usage((char*)argv[0]);
break;
default: usage((char*)argv[0]);
break;
}
}
if (filename == 0) {
usage((char*)argv[0]);
}
if (max_nclusters < min_nclusters) {
fprintf(stderr, "Error: max_clusters must be >= min_clusters\n");
usage((char*)argv[0]);
}
SIM_GET_NUM_CPU(nthreads);
numAttributes = 0;
numObjects = 0;
/*
* From the input file, get the numAttributes and numObjects
*/
if (isBinaryFile) {
int infile;
if ((infile = open(filename, O_RDONLY, "0600")) == -1) {
fprintf(stderr, "Error: no such file (%s)\n", filename);
exit(1);
}
read(infile, &numObjects, sizeof(int));
read(infile, &numAttributes, sizeof(int));
/* Allocate space for attributes[] and read attributes of all objects */
buf = (float*)malloc(numObjects * numAttributes * sizeof(float));
assert(buf);
attributes = (float**)malloc(numObjects * sizeof(float*));
assert(attributes);
attributes[0] = (float*)malloc(numObjects * numAttributes * sizeof(float));
assert(attributes[0]);
for (i = 1; i < numObjects; i++) {
attributes[i] = attributes[i-1] + numAttributes;
}
read(infile, buf, (numObjects * numAttributes * sizeof(float)));
close(infile);
} else {
FILE *infile;
if ((infile = fopen(filename, "r")) == NULL) {
fprintf(stderr, "Error: no such file (%s)\n", filename);
exit(1);
}
while (fgets(line, MAX_LINE_LENGTH, infile) != NULL) {
if (strtok(line, " \t\n") != 0) {
numObjects++;
}
}
rewind(infile);
while (fgets(line, MAX_LINE_LENGTH, infile) != NULL) {
if (strtok(line, " \t\n") != 0) {
/* Ignore the id (first attribute): numAttributes = 1; */
while (strtok(NULL, " ,\t\n") != NULL) {
numAttributes++;
}
break;
}
}
/* Allocate space for attributes[] and read attributes of all objects */
buf = (float*)malloc(numObjects * numAttributes * sizeof(float));
assert(buf);
attributes = (float**)malloc(numObjects * sizeof(float*));
assert(attributes);
attributes[0] = (float*)malloc(numObjects * numAttributes * sizeof(float));
assert(attributes[0]);
for (i = 1; i < numObjects; i++) {
attributes[i] = attributes[i-1] + numAttributes;
}
rewind(infile);
i = 0;
while (fgets(line, MAX_LINE_LENGTH, infile) != NULL) {
if (strtok(line, " \t\n") == NULL) {
continue;
}
for (j = 0; j < numAttributes; j++) {
buf[i] = atof(strtok(NULL, " ,\t\n"));
i++;
}
}
fclose(infile);
}
TM_STARTUP(nthreads);
thread_startup(nthreads);
/*
* The core of the clustering
*/
cluster_assign = (int*)malloc(numObjects * sizeof(int));
assert(cluster_assign);
nloops = 1;
len = max_nclusters - min_nclusters + 1;
for (i = 0; i < nloops; i++) {
/*
* Since zscore transform may perform in cluster() which modifies the
* contents of attributes[][], we need to re-store the originals
*/
memcpy(attributes[0], buf, (numObjects * numAttributes * sizeof(float)));
cluster_centres = NULL;
cluster_exec(nthreads,
numObjects,
numAttributes,
attributes, /* [numObjects][numAttributes] */
use_zscore_transform, /* 0 or 1 */
min_nclusters, /* pre-define range from min to max */
max_nclusters,
threshold,
&best_nclusters, /* return: number between min and max */
&cluster_centres, /* return: [best_nclusters][numAttributes] */
cluster_assign); /* return: [numObjects] cluster id for each object */
}
#ifdef GNUPLOT_OUTPUT
{
FILE** fptr;
char outFileName[1024];
fptr = (FILE**)malloc(best_nclusters * sizeof(FILE*));
for (i = 0; i < best_nclusters; i++) {
sprintf(outFileName, "group.%d", i);
fptr[i] = fopen(outFileName, "w");
}
for (i = 0; i < numObjects; i++) {
fprintf(fptr[cluster_assign[i]],
"%6.4f %6.4f\n",
attributes[i][0],
attributes[i][1]);
}
for (i = 0; i < best_nclusters; i++) {
fclose(fptr[i]);
}
free(fptr);
}
#endif /* GNUPLOT_OUTPUT */
#ifdef OUTPUT_TO_FILE
{
/* Output: the coordinates of the cluster centres */
FILE* cluster_centre_file;
FILE* clustering_file;
char outFileName[1024];
sprintf(outFileName, "%s.cluster_centres", filename);
cluster_centre_file = fopen(outFileName, "w");
for (i = 0; i < best_nclusters; i++) {
fprintf(cluster_centre_file, "%d ", i);
for (j = 0; j < numAttributes; j++) {
fprintf(cluster_centre_file, "%f ", cluster_centres[i][j]);
}
fprintf(cluster_centre_file, "\n");
}
fclose(cluster_centre_file);
/* Output: the closest cluster centre to each of the data points */
sprintf(outFileName, "%s.cluster_assign", filename);
clustering_file = fopen(outFileName, "w");
for (i = 0; i < numObjects; i++) {
fprintf(clustering_file, "%d %d\n", i, cluster_assign[i]);
}
fclose(clustering_file);
}
#endif /* OUTPUT TO_FILE */
#ifdef OUTPUT_TO_STDOUT
{
/* Output: the coordinates of the cluster centres */
for (i = 0; i < best_nclusters; i++) {
printf("%d ", i);
for (j = 0; j < numAttributes; j++) {
printf("%f ", cluster_centres[i][j]);
}
printf("\n");
}
}
#endif /* OUTPUT TO_STDOUT */
printf("Time: %lg seconds\n", global_time);
free(cluster_assign);
free(attributes);
free(cluster_centres[0]);
free(cluster_centres);
free(buf);
TM_SHUTDOWN();
GOTO_SIM();
thread_shutdown();
MAIN_RETURN(0);
}
/* =============================================================================
*
* End of kmeans.c
*
* =============================================================================
*/
| apache-2.0 |
llvm-mirror/libcxx | test/libcxx/iterators/prev.debug1.pass.cpp | 1215 | //===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// Can't test the system lib because this test enables debug mode
// MODULES_DEFINES: _LIBCPP_DEBUG=1
// UNSUPPORTED: with_system_cxx_lib
// UNSUPPORTED: c++98, c++03
// UNSUPPORTED: windows
// <list>
// Call prev(forward_iterator, -1)
#define _LIBCPP_DEBUG 0
#include <iterator>
#include "test_macros.h"
#include "debug_mode_helper.h"
#include "test_iterators.h"
int main(int, char**)
{
int a[] = {1, 2, 3};
bidirectional_iterator<int *> bidi(a+1);
std::prev(bidi, -1); // should work fine
std::prev(bidi, 0); // should work fine
std::prev(bidi, 1); // should work fine
forward_iterator<int *> it(a+1);
std::prev(it, -1); // should work fine
std::prev(it, 0); // should work fine
EXPECT_DEATH( std::prev(it, 1) ); // can't go backwards on a FwdIter
return 0;
}
| apache-2.0 |
CanonicalBootStack/charm-hacluster | tests/charmhelpers/__init__.py | 1285 | # Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Bootstrap charm-helpers, installing its dependencies if necessary using
# only standard libraries.
import subprocess
import sys
try:
import six # flake8: noqa
except ImportError:
if sys.version_info.major == 2:
subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
else:
subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
import six # flake8: noqa
try:
import yaml # flake8: noqa
except ImportError:
if sys.version_info.major == 2:
subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
else:
subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
import yaml # flake8: noqa
| apache-2.0 |
straceX/Ectoplasm | linux-0.0.1/linux/kernel/traps.c | 4771 | /*
* 'Traps.c' handles hardware traps and faults after we have saved some
* state in 'asm.s'. Currently mostly a debugging-aid, will be extended
* to mainly kill the offending process (probably by giving it a signal,
* but possibly by killing it outright if necessary).
*/
#include <string.h>
#include <linux/head.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <asm/system.h>
#include <asm/segment.h>
#define get_seg_byte(seg,addr) ({ \
register char __res; \
__asm__("push %%fs;mov %%ax,%%fs;movb %%fs:%2,%%al;pop %%fs" \
:"=a" (__res):"0" (seg),"m" (*(addr))); \
__res;})
#define get_seg_long(seg,addr) ({ \
register unsigned long __res; \
__asm__("push %%fs;mov %%ax,%%fs;movl %%fs:%2,%%eax;pop %%fs" \
:"=a" (__res):"0" (seg),"m" (*(addr))); \
__res;})
#define _fs() ({ \
register unsigned short __res; \
__asm__("mov %%fs,%%ax":"=a" (__res):); \
__res;})
int do_exit(long code);
void page_exception(void);
void divide_error(void);
void debug(void);
void nmi(void);
void int3(void);
void overflow(void);
void bounds(void);
void invalid_op(void);
void device_not_available(void);
void double_fault(void);
void coprocessor_segment_overrun(void);
void invalid_TSS(void);
void segment_not_present(void);
void stack_segment(void);
void general_protection(void);
void page_fault(void);
void coprocessor_error(void);
void reserved(void);
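/*
 * Dump the registers and a little code/stack context of the faulting
 * task, then terminate it via do_exit().
 */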
static void die(char * str,long esp_ptr,long nr)
{
long * esp = (long *) esp_ptr;
int i;
printk("%s: %04x\n\r",str,nr&0xffff);
printk("EIP:\t%04x:%p\nEFLAGS:\t%p\nESP:\t%04x:%p\n",
esp[1],esp[0],esp[2],esp[4],esp[3]);
printk("fs: %04x\n",_fs());
printk("base: %p, limit: %p\n",get_base(current->ldt[1]),get_limit(0x17));
if (esp[4] == 0x17) {
printk("Stack: ");
for (i=0;i<4;i++)
printk("%p ",get_seg_long(0x17,i+(long *)esp[3]));
printk("\n");
}
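	/* str(i) expands to the sched.h macro that derives the current task
	   number from the task register - it is not a call through the
	   'str' string parameter above. */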
str(i);
printk("Pid: %d, process nr: %d\n\r",current->pid,0xffff & i);
for(i=0;i<10;i++)
printk("%02x ",0xff & get_seg_byte(esp[1],(i+(char *)esp[0])));
printk("\n\r");
do_exit(11); /* play segment exception */
}
void do_double_fault(long esp, long error_code)
{
die("double fault",esp,error_code);
}
void do_general_protection(long esp, long error_code)
{
die("general protection",esp,error_code);
}
void do_divide_error(long esp, long error_code)
{
die("divide error",esp,error_code);
}
void do_int3(long * esp, long error_code,
long fs,long es,long ds,
long ebp,long esi,long edi,
long edx,long ecx,long ebx,long eax)
{
int tr;
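	/* Read the task register (TSS selector) of the interrupted task. */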
__asm__("str %%ax":"=a" (tr):"0" (0));
printk("eax\t\tebx\t\tecx\t\tedx\n\r%8x\t%8x\t%8x\t%8x\n\r",
eax,ebx,ecx,edx);
printk("esi\t\tedi\t\tebp\t\tesp\n\r%8x\t%8x\t%8x\t%8x\n\r",
esi,edi,ebp,(long) esp);
printk("\n\rds\tes\tfs\ttr\n\r%4x\t%4x\t%4x\t%4x\n\r",
ds,es,fs,tr);
printk("EIP: %8x CS: %4x EFLAGS: %8x\n\r",esp[0],esp[1],esp[2]);
}
void do_nmi(long esp, long error_code)
{
die("nmi",esp,error_code);
}
void do_debug(long esp, long error_code)
{
die("debug",esp,error_code);
}
void do_overflow(long esp, long error_code)
{
die("overflow",esp,error_code);
}
void do_bounds(long esp, long error_code)
{
die("bounds",esp,error_code);
}
void do_invalid_op(long esp, long error_code)
{
die("invalid operand",esp,error_code);
}
void do_device_not_available(long esp, long error_code)
{
die("device not available",esp,error_code);
}
void do_coprocessor_segment_overrun(long esp, long error_code)
{
die("coprocessor segment overrun",esp,error_code);
}
void do_invalid_TSS(long esp,long error_code)
{
die("invalid TSS",esp,error_code);
}
void do_segment_not_present(long esp,long error_code)
{
die("segment not present",esp,error_code);
}
void do_stack_segment(long esp,long error_code)
{
die("stack segment",esp,error_code);
}
void do_coprocessor_error(long esp, long error_code)
{
die("coprocessor error",esp,error_code);
}
void do_reserved(long esp, long error_code)
{
die("reserved (15,17-31) error",esp,error_code);
}
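/*
 * Install gates for the Intel-reserved exception vectors 0-16; the
 * still-unused vectors 15 and 17-31 all point at the reserved() stub.
 */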
void trap_init(void)
{
int i;
	set_trap_gate(0,&divide_error);
set_trap_gate(1,&debug);
set_trap_gate(2,&nmi);
set_system_gate(3,&int3); /* int3-5 can be called from all */
set_system_gate(4,&overflow);
set_system_gate(5,&bounds);
set_trap_gate(6,&invalid_op);
set_trap_gate(7,&device_not_available);
set_trap_gate(8,&double_fault);
set_trap_gate(9,&coprocessor_segment_overrun);
set_trap_gate(10,&invalid_TSS);
set_trap_gate(11,&segment_not_present);
set_trap_gate(12,&stack_segment);
set_trap_gate(13,&general_protection);
set_trap_gate(14,&page_fault);
set_trap_gate(15,&reserved);
set_trap_gate(16,&coprocessor_error);
for (i=17;i<32;i++)
set_trap_gate(i,&reserved);
/* __asm__("movl $0x3ff000,%%eax\n\t"
"movl %%eax,%%db0\n\t"
"movl $0x000d0303,%%eax\n\t"
"movl %%eax,%%db7"
:::"ax");*/
}
| apache-2.0 |
MuShiiii/commons-io | src/test/java/org/apache/commons/io/output/TeeOutputStreamTest.java | 4322 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.commons.io.output;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import junit.framework.TestCase;
import org.junit.Assert;
/**
* @version $Id$
*/
public class TeeOutputStreamTest extends TestCase {
private static class ExceptionOnCloseByteArrayOutputStream extends ByteArrayOutputStream {
@Override
public void close() throws IOException {
throw new IOException();
}
}
private static class RecordCloseByteArrayOutputStream extends ByteArrayOutputStream {
boolean closed;
@Override
public void close() throws IOException {
super.close();
closed = true;
}
}
public TeeOutputStreamTest(final String name) {
super(name);
}
/**
* Tests that the branch {@code OutputStream} is closed when closing the main {@code OutputStream} throws an
* exception on {@link TeeOutputStream#close()}.
*/
public void testCloseBranchIOException() {
final ByteArrayOutputStream badOs = new ExceptionOnCloseByteArrayOutputStream();
final RecordCloseByteArrayOutputStream goodOs = new RecordCloseByteArrayOutputStream();
final TeeOutputStream tos = new TeeOutputStream(goodOs, badOs);
try {
tos.close();
Assert.fail("Expected " + IOException.class.getName());
} catch (final IOException e) {
Assert.assertTrue(goodOs.closed);
}
}
/**
* Tests that the main {@code OutputStream} is closed when closing the branch {@code OutputStream} throws an
* exception on {@link TeeOutputStream#close()}.
*/
public void testCloseMainIOException() {
final ByteArrayOutputStream badOs = new ExceptionOnCloseByteArrayOutputStream();
final RecordCloseByteArrayOutputStream goodOs = new RecordCloseByteArrayOutputStream();
final TeeOutputStream tos = new TeeOutputStream(badOs, goodOs);
try {
tos.close();
Assert.fail("Expected " + IOException.class.getName());
} catch (final IOException e) {
Assert.assertTrue(goodOs.closed);
}
}
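    /**
     * Tests that bytes written through the tee (single byte, whole array and
     * array slice) arrive identically at both underlying streams.
     */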
public void testTee() throws IOException {
final ByteArrayOutputStream baos1 = new ByteArrayOutputStream();
final ByteArrayOutputStream baos2 = new ByteArrayOutputStream();
final TeeOutputStream tos = new TeeOutputStream(baos1, baos2);
for (int i = 0; i < 20; i++) {
tos.write(i);
}
assertByteArrayEquals("TeeOutputStream.write(int)", baos1.toByteArray(), baos2.toByteArray());
final byte[] array = new byte[10];
for (int i = 20; i < 30; i++) {
array[i - 20] = (byte) i;
}
tos.write(array);
assertByteArrayEquals("TeeOutputStream.write(byte[])", baos1.toByteArray(), baos2.toByteArray());
for (int i = 25; i < 35; i++) {
array[i - 25] = (byte) i;
}
tos.write(array, 5, 5);
assertByteArrayEquals("TeeOutputStream.write(byte[], int, int)", baos1.toByteArray(), baos2.toByteArray());
tos.flush();
tos.close();
}
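    /** Asserts that two byte arrays have the same length and contents. */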
private void assertByteArrayEquals(final String msg, final byte[] array1, final byte[] array2) {
assertEquals(msg + ": array size mismatch", array1.length, array2.length);
for (int i = 0; i < array1.length; i++) {
assertEquals(msg + ": array[ " + i + "] mismatch", array1[i], array2[i]);
}
}
}
| apache-2.0 |