code
stringlengths
3
1.05M
repo_name
stringlengths
4
116
path
stringlengths
3
942
language
stringclasses
30 values
license
stringclasses
15 values
size
int32
3
1.05M
/* Theme Name:The Project - Responsive Website Template Author:HtmlCoder Author URI:http://www.htmlcoder.me Author e-mail:[email protected] Version:1.4.0 Created:May 2015 License URI:http://support.wrapbootstrap.com/ File Description:Brown Skin */ /* 1- Typography ------------------------------------------------- -------------------------------------------------*/ a { color: #91633c; } a:hover { color: #795332; } a:focus, a:active { color: #795332; } mark, .mark { background-color: #91633c; color: #ffffff; } .text-default { color: #91633c; } /* Tables ---------------------------------- */ .table.table-colored { border-bottom-color: #91633c; } .table-colored > thead > tr > th { border-color: #91633c; background-color: #91633c; color: #ffffff; } /* 2- Layout ------------------------------------------------- -------------------------------------------------*/ .header-top.dark { background-color: #37322e; border-bottom: 1px solid #2a2623; } .header.dark { background-color: #453f39; border-top-color: #453f39; } .header-top.colored { background-color: #91633c; border-bottom: 1px solid #91633c; } .default-bg { background-color: #91633c; color: #ffffff; } .default-bg-50 { background-color: rgba(145, 99, 60, 0.5); color: #ffffff; } /*Transparent Header*/ .transparent-header header.header.dark.transparent-header-on { background-color: rgba(69, 63, 57, 0.8); border-top-color: rgba(42, 38, 35, 0.5); } /*Fixed Navigation*/ .fixed-header-on .dark.header.fixed { background-color: rgba(69, 63, 57, 0.95); } /* Small devices (tablets, 768px and up) */ @media (min-width: 768px) { .fixed-header-on .dark.header .dropdown-menu { border: 1px solid #37322e; } } /*Translucent Backgrounds*/ .default-translucent-bg:after { background-color: rgba(145, 99, 60, 0.7); } .default-translucent-bg.hovered:hover:after { background-color: rgba(145, 99, 60, 0.8); } .default-hovered:hover:after, .default-hovered:hover { background-color: #91633c; } /* 4 Pages 
------------------------------------------------- -------------------------------------------------*/ /* Blog Timeline layout ---------------------------------- */ .timeline-date-label:after { background-color: #91633c; } /* 5 Components ------------------------------------------------- -------------------------------------------------*/ /* Buttons ---------------------------------- */ /*Default Button*/ .btn-default { color: #ffffff; background-color: #91633c; border-color: #795332; } .btn-default:hover, .btn-default:focus, .btn-default.focus, .btn-default:active, .btn-default.active, .open > .dropdown-toggle.btn-default { color: #ffffff; background-color: #795332; border-color: #795332; } .btn-default.disabled, .btn-default[disabled], fieldset[disabled] .btn-default { background-color: #91633c; border-color: #795332; } .btn-default-transparent { color: #777777; background-color: transparent; border-color: #91633c; } .btn-default-transparent:hover, .btn-default-transparent:focus, .btn-default-transparent.focus, .btn-default-transparent:active, .btn-default-transparent.active, .open > .btn-default-transparent.dropdown-toggle { color: #ffffff; background-color: #795332; border-color: #66452a; } .default-bg .btn-default, .default-bg .btn-default-transparent, .default-translucent-bg .btn-default, .default-translucent-bg .btn-default-transparent { color: #91633c; background-color: #ffffff; border-color: #ffffff; } .default-bg .btn-default:hover, .default-bg .btn-default:focus, .default-bg .btn-default.focus, .default-bg .btn-default:active, .default-bg .btn-default.active, .default-bg .open > .dropdown-toggle.btn-default, .default-bg .btn-default-transparent:hover, .default-bg .btn-default-transparent:focus, .default-bg .btn-default-transparent.focus, .default-bg .btn-default-transparent:active, .default-bg .btn-default-transparent.active, .default-bg .open > .dropdown-toggle.btn-default-transparent, .default-translucent-bg .btn-default:hover, .default-translucent-bg 
.btn-default:focus, .default-translucent-bg .btn-default.focus, .default-translucent-bg .btn-default:active, .default-translucent-bg .btn-default.active, .default-translucent-bg .open > .dropdown-toggle.btn-default, .default-translucent-bg .btn-default-transparent:hover, .default-translucent-bg .btn-default-transparent:focus, .default-translucent-bg .btn-default-transparent.focus, .default-translucent-bg .btn-default-transparent:active, .default-translucent-bg .btn-default-transparent.active, .default-translucent-bg .open > .dropdown-toggle.btn-default-transparent { color: #ffffff; background-color: transparent; border-color: #ffffff; } /*White Button*/ .btn-white { color: #777777; background-color: #ffffff; border-color: #ffffff; } .btn-white:hover, .btn-white:focus, .btn-white.focus, .btn-white:active, .btn-white.active, .open > .dropdown-toggle.btn-white { color: #795332; background-color: #ffffff; border-color: #ffffff; } /*Background Transition*/ .hvr-sweep-to-right:hover, .hvr-sweep-to-left:hover, .hvr-sweep-to-bottom:hover, .hvr-sweep-to-top:hover, .hvr-bounce-to-right:hover, .hvr-bounce-to-left:hover, .hvr-bounce-to-bottom:hover, .hvr-bounce-to-top:hover, .hvr-radial-out:hover, .hvr-rectangle-out:hover, .hvr-shutter-out-horizontal:hover, .hvr-shutter-out-vertical:hover { background: transparent; } .hvr-radial-in, .hvr-rectangle-in, .hvr-shutter-in-horizontal, .hvr-shutter-in-vertical { background: #795332; } .hvr-radial-in:hover, .hvr-rectangle-in:hover, .hvr-shutter-in-horizontal:hover, .hvr-shutter-in-vertical:hover { background: #795332; } .hvr-radial-in:before, .hvr-rectangle-in:before, .hvr-shutter-in-horizontal:before, .hvr-shutter-in-vertical:before { background: #ffffff; } .hvr-sweep-to-right:before, .hvr-sweep-to-left:before, .hvr-sweep-to-bottom:before, .hvr-sweep-to-top:before, .hvr-bounce-to-right:before, .hvr-bounce-to-left:before, .hvr-bounce-to-bottom:before, .hvr-bounce-to-top:before, .hvr-radial-out:before, .hvr-rectangle-out:before, 
.hvr-shutter-out-horizontal:before, .hvr-shutter-out-vertical:before { background: #795332; } /*ie9*/ .no-csstransitions .hvr-radial-in, .no-csstransitions .hvr-rectangle-in, .no-csstransitions .hvr-shutter-in-horizontal, .no-csstransitions .hvr-shutter-in-vertical { background: transparent; } .no-csstransitions .hvr-sweep-to-right:hover, .no-csstransitions .hvr-sweep-to-left:hover, .no-csstransitions .hvr-sweep-to-bottom:hover, .no-csstransitions .hvr-sweep-to-top:hover, .no-csstransitions .hvr-bounce-to-right:hover, .no-csstransitions .hvr-bounce-to-left:hover, .no-csstransitions .hvr-bounce-to-bottom:hover, .no-csstransitions .hvr-bounce-to-top:hover, .no-csstransitions .hvr-radial-out:hover, .no-csstransitions .hvr-rectangle-out:hover, .no-csstransitions .hvr-shutter-out-horizontal:hover, .no-csstransitions .hvr-shutter-out-vertical:hover, .no-csstransitions .hvr-radial-in:hover, .no-csstransitions .hvr-rectangle-in:hover, .no-csstransitions .hvr-shutter-in-horizontal:hover, .no-csstransitions .hvr-shutter-in-vertical:hover { background: #795332; } /* Image overlay ---------------------------------- */ .overlay-top a, .overlay-bottom a, .overlay-to-top a { color: #ffffff; } /* Pager ---------------------------------- */ ul.pagination li > a:hover, ul.pagination li > a:focus { background-color: transparent; color: #ffffff; background-color: #795332; border-color: #795332; } ul.pagination li.active a, .pagination > .active > a:hover, .pagination > .active > a:focus { color: #ffffff; background-color: #91633c; border-color: #91633c; } /* Breadcrumb ---------------------------------- */ .banner .breadcrumb > li a:hover { color: #795332; } /* Nav pills ---------------------------------- */ .nav-pills.style-2 > li.active > a, .nav-pills.style-2 > li.active > a:hover, .nav-pills.style-2 > li.active > a:focus, .nav-pills.style-2 > li > a:hover, .nav-pills.style-2 > li > a:focus { color: #91633c; background-color: #fafafa; border: 1px solid #eaeaea; } 
.nav-pills.nav-stacked > li.active > a, .nav-pills.nav-stacked > li.active > a:hover, .nav-pills.nav-stacked > li.active > a:focus, .nav-pills.nav-stacked > li > a:hover, .nav-pills.nav-stacked > li > a:focus { color: #91633c; background-color: transparent; border-color: transparent; } .nav-pills > li.active > a:after { color: #91633c; } .nav-pills.nav-stacked:not(.list-style-icons) > li.active > a:hover:after, .nav-pills.nav-stacked:not(.list-style-icons) > li.active > a:focus:after, .nav-pills.nav-stacked:not(.list-style-icons) > li > a:hover:after, .nav-pills.nav-stacked:not(.list-style-icons) > li > a:focus:after { color: #795332; } .nav-pills.nav-stacked.list-style-icons > li > a > i { color: #cdcdcd; } .nav-pills.nav-stacked.list-style-icons > li.active > a > i, .nav-pills.nav-stacked.list-style-icons > li > a:hover > i, .nav-pills.nav-stacked.list-style-icons > li > a:focus > i { color: #795332; } /*footer*/ .dark .footer-content .nav-pills > li > a, .dark .footer-content .nav-pills > li.active > a, .dark .footer-content .nav-pills > li.active > a:focus, .dark .footer-content .nav-pills > li > a:focus { color: #999999; } .dark .footer-content .nav-pills > li.active > a:hover, .dark .footer-content .nav-pills > li > a:hover { color: #795332; } /* Collapse ---------------------------------- */ /*Style 2*/ .collapse-style-2 .panel-heading a { color: #ffffff; background-color: #91633c; } .panel-heading a { color: inherit; } /* Progress bars ---------------------------------- */ .progress-bar-default { background-color: #91633c; } /* Forms ---------------------------------- */ .default-bg .form-control { background-color: rgba(0, 0, 0, 0.2); border-color: #795332; -webkit-box-shadow: inset 0 2px 3px rgba(0, 0, 0, 0.35); box-shadow: inset 0 2px 3px rgba(0, 0, 0, 0.35); color: #ffffff; } .default-bg .has-success .form-control:focus, .default-bg .has-warning .form-control:focus, .default-bg .has-error .form-control:focus { -webkit-box-shadow: inset 0 2px 3px rgba(0, 
0, 0, 0.35); box-shadow: inset 0 2px 3px rgba(0, 0, 0, 0.35); } .default-bg .input-group-addon { background-color: rgba(0, 0, 0, 0.2); border-color: #795332; } .default-bg .form-control:focus { background-color: rgba(0, 0, 0, 0.1); } .default-bg .form-control-feedback { color: #ffffff; } .dark.header .form-control, .dark.header-top .form-control { border-color: #37322e; } /*Icons ---------------------------------- */ .icon.default-bg { background-color: #91633c; color: #ffffff; } .icon.light-bg { background-color: #ffffff; color: #91633c; } /*Listing Items ---------------------------------- */ .listing-item .overlay-container .badge { border: 1px solid #91633c; background-color: rgba(255, 255, 255, 0.95); color: #91633c; } /*Modals ---------------------------------- */ .modal-content .modal-header { background-color: #91633c; } .modal-content .modal-title { color: #ffffff; } /*Breadcrumb ---------------------------------- */ .banner .dark.breadcrumb-container { background-color: rgba(69, 63, 57, 0.6); } /*Pace (Page loader) ---------------------------------- */ .page-loader-1 .pace { border: 1px solid #91633c; } .page-loader-1 .pace .pace-progress { background: #91633c; } .page-loader-2 .pace .pace-progress:before { background: #91633c; } .page-loader-2 .pace .pace-activity { border: 5px solid #91633c; } .page-loader-2 .pace .pace-activity:after { border: 5px solid #91633c; } .page-loader-2 .pace .pace-activity:before { border: 5px solid #91633c; } .page-loader-3 .pace .pace-progress { background: #91633c; } .page-loader-4 .pace .pace-progress { background: #91633c; } .page-loader-5 .pace .pace-progress:after { color: #91633c; } .page-loader-6 .pace .pace-activity { background: #91633c; } /* 6 Navigations ------------------------------------------------- -------------------------------------------------*/ /* 6.1 Light Version ---------------------------------- */ /* first level menu item on hover/focus */ .navbar-default .navbar-nav > li > a:hover, .navbar-default 
.navbar-nav > li > a:focus, .navbar-default .navbar-nav > .active > a, .navbar-default .navbar-nav > .active > a:hover, .navbar-default .navbar-nav > .active > a:focus { background-color: transparent; color: #91633c; } .transparent-header .header:not(.dark) .navbar-nav > li.open > a, .transparent-header .header:not(.dark) .navbar-nav > li > a:hover, .transparent-header .header:not(.dark) .navbar-nav > li > a:focus { color: #91633c; } /* first level menu item when opened */ .main-navigation .navbar-nav > .open > a, .main-navigation .navbar-nav > .open > a:hover, .main-navigation .navbar-nav > .open > a:focus, .main-navigation .navbar-nav > .dropdown > a:focus { background-color: #ffffff; color: #91633c; border-bottom-color: #ffffff; border-right: 1px solid #f5f5f5; border-left: 1px solid #f5f5f5; } /* second level menu item on focus/hover and when opened */ .dropdown-menu > li > a:hover, .dropdown-menu > li > a:focus, .nav .open > a, .nav .open > a:hover, .nav .open > a:focus, .dropdown-menu > .active > a, .dropdown-menu > .active > a:hover, .dropdown-menu > .active > a:focus, .dropdown-menu .menu > .active > a, .dropdown-menu .menu > .active > a:hover, .dropdown-menu .menu > .active > a:focus { background-color: #fcfcfc; color: #795332; border-color: #f7f7f7; } /* Mega Menu ------------------------------ */ header:not(.dark) .mega-menu .menu > li > a:hover i, header:not(.dark) .dropdown-menu > li > a:hover i, header:not(.dark) .mega-menu .menu > li.active > a i, header:not(.dark) .dropdown-menu > li.active > a i { color: #91633c; } /* mega menu menu item on focus/hover*/ .mega-menu .menu > li > a:hover, .mega-menu .menu > li > a:focus { background-color: #fcfcfc; color: #795332; border-color: #f7f7f7; text-decoration: none; } /* Arrow for parent menu item ------------------------------ */ .header:not(.dark) .active.dropdown > a:before, .header:not(.dark).centered .active.dropdown > a:before { color: #91633c; } .transparent-header .header:not(.dark) 
.dropdown:not(.open):not(.active) > a:before { color: #777777; } .transparent-header .header:not(.dark) .navbar-default .navbar-nav > .dropdown.open > a:before { color: #91633c; } .dropdown .dropdown.open > a:before, .header.centered:not(.dark) .dropdown .dropdown.open > a:before { color: #91633c; } /* Mobile Menu ------------------------------ */ /* mobile menu toggle button on hover/focus */ .navbar-default .navbar-toggle { border-color: #ccc; } .navbar-default .navbar-toggle:hover, .navbar-default .navbar-toggle:focus { background-color: #fafafa; border-color: #795332; } .navbar-default .navbar-toggle .icon-bar { background-color: #ccc; } .navbar-default .navbar-toggle:hover .icon-bar, .navbar-default .navbar-toggle:focus .icon-bar { background-color: #795332; } /* Small devices (tablets, phones less than 767px) */ @media (max-width: 767px) { /* Mobile menu ------------------------------ */ /* active item */ .navbar-default .navbar-nav .open .dropdown-menu > .active > a, .navbar-default .navbar-nav .open .dropdown-menu > .active > a:hover, .navbar-default .navbar-nav .open .dropdown-menu > .active > a:focus { color: #795332; background-color: transparent; } /* first level item hover and focus states */ .navbar-default .navbar-nav > li > a:hover, .navbar-default .navbar-nav > li > a:focus, .navbar-default .navbar-nav > .active > a, .navbar-default .navbar-nav > .active > a:hover, .navbar-default .navbar-nav > .active > a:focus { color: #795332; background-color: #ffffff; border-bottom-color: transparent; } /* second level item on hover/focus */ .navbar-default .navbar-nav .open .dropdown-menu > li > a:hover, .navbar-default .navbar-nav .open .dropdown-menu > li > a:focus { color: #795332; } /* Arrow for parent menu item */ .navbar-default .navbar-nav > .dropdown.open > a:before { color: #91633c; } } /* 6.2 Dark Version ------------------------------ */ /* first level active menu item when opened */ .dark.header .main-navigation .navbar-nav > .open > a, 
.dark.header .main-navigation .navbar-nav > .open > a:hover, .dark.header .main-navigation .navbar-nav > .open > a:focus, .dark.header .main-navigation .navbar-nav > .dropdown > a:focus { border-bottom-color: #37322e; } /* second level menu item */ .dark.header .dropdown-menu { background-color: #37322e; border: 1px solid rgba(69, 63, 57, 0.8); } .dark.header .dropdown-menu .divider { background-color: #453f39; } .dark.header .dropdown-menu > li > a, .dark.header .mega-menu .menu > li > a { border-bottom: 1px solid rgba(69, 63, 57, 0.8); } .dark.header .mega-menu .menu > li > a:hover, .dark.header .mega-menu .menu > li > a:focus { border-color: rgba(69, 63, 57, 0.8); } .dark.header .dropdown-menu > li > a:hover, .dark.header .dropdown-menu > li > a:focus, .dark.header .nav .open > a, .dark.header .nav .open > a:hover, .dark.header .nav .open > a:focus, .dark.header .dropdown-menu > .active > a, .dark.header .dropdown-menu > .active > a:hover, .dark.header .dropdown-menu > .active > a:focus, .dark.header .dropdown-menu .menu > .active > a, .dark.header .dropdown-menu .menu > .active > a:hover, .dark.header .dropdown-menu .menu > .active > a:focus { border-color: rgba(69, 63, 57, 0.8); } /* Small devices (tablets, phones less than 767px) */ @media (max-width: 767px) { .transparent-header .dark .main-navigation .navbar.navbar-default { background-color: rgba(69, 63, 57, 0.9); border-color: rgba(42, 38, 35, 0.5); } } /* 6.3 Subfooter navigation ------------------------------ */ .subfooter .navbar-default .nav > li > a:hover, .subfooter .navbar-default .nav > .active > a:hover, .subfooter .navbar-default .nav > .active > a:focus { background-color: transparent; text-decoration: underline; color: #795332; } /* 6.3 Offcanvas navigation ------------------------------ */ #offcanvas .nav .open > a, #offcanvas .nav .open > a:hover, #offcanvas .nav .open > a:focus, #offcanvas .nav > li > a:hover, #offcanvas .nav > li > a:focus, #offcanvas .navbar-nav > li.active > a { color: 
#795332; background-color: #ffffff; } /*Nav arrows*/ #offcanvas .dropdown > a:before { color: #777777; } #offcanvas .dropdown.open > a:before { color: #91633c; } /* 7 Blocks/Widgets ------------------------------------------------- -------------------------------------------------*/ /* Social icons block ------------------------------ */ .social-links li a { border: 1px solid #e7e7e7; color: #cdcdcd; } .social-links li a:hover { background-color: transparent; border-color: #795332; color: #795332; } .dark.social-links li a { background-color: rgba(0, 0, 0, 0.6); border: 1px solid rgba(0, 0, 0, 0.1); color: #ffffff; } .social-links.animated-effect-1 li a:hover { color: #ffffff !important; } .social-links.animated-effect-1 li a:after { background-color: #91633c; } .default.social-links li a { background-color: #91633c; border: 1px solid #795332; color: #ffffff; } .default.social-links li a:hover { background-color: #ffffff; color: #777777; } /*Header Top Dropdowns*/ .header-top:not(.dark) .social-links .dropdown.open > button > i:before { color: #91633c; } .header-top.colored .social-links .dropdown > button > i:before, .header-top.colored .social-links .dropdown.open > button > i:before { color: #ffffff; } .dark.header-top .dropdown-menu { border: 1px solid #2a2623; border-top: none; background: #37322e; } .header-dropdown-buttons .btn-group .dropdown-menu { border: none; } /* Full Width Content ---------------------------------- */ /* Medium devices (tablets, phones) */ @media (max-width: 1199px) { .full-width-section:not(.no-image) .full-text-container.default-bg { background-color: rgba(145, 99, 60, 0.6); } } /*Header Dropdowns (search, cart etc) ---------------------------------- */ .header-dropdown-buttons .btn-group > .btn { background-color: #f2f2f2; border: 1px solid #e9e9e9; } .header-dropdown-buttons .btn-group > .btn:hover { background-color: #91633c; color: #ffffff; border-color: #795332; } .header-dropdown-buttons .btn-group.open > .btn { 
background-color: #91633c; color: #ffffff; border-color: #795332; } .colored.header-top .dropdown-menu { border: 1px solid #91633c; background: #91633c; } /* Media ---------------------------------- */ .media:hover .icon { background-color: transparent; color: #91633c; border: 1px solid #91633c; } /* Pricing tables ---------------------------------- */ .popover-title { background-color: #91633c; color: #ffffff; } /* Tags cloud block ---------------------------------- */ .tag a { color: #ffffff; background-color: #91633c; border: 1px solid #795332; } .tag a:hover { color: #91633c; background-color: #ffffff; border-color: #795332; text-decoration: none; } /* 8 Main Slideshow ------------------------------------------------- -------------------------------------------------*/ .tp-bannertimer { background-color: rgba(145, 99, 60, 0.8); } /* 9 Owl Carousel ------------------------------------------------- -------------------------------------------------*/ .content-slider-with-large-controls .owl-nav .owl-prev:after, .content-slider-with-large-controls-autoplay .owl-nav .owl-prev:after { color: #91633c; } .content-slider-with-large-controls .owl-nav .owl-next:after, .content-slider-with-large-controls-autoplay .owl-nav .owl-next:after { color: #91633c; } /* 10 Full Page ------------------------------------------------- -------------------------------------------------*/ #fp-nav ul li a span, .fp-slidesNav ul li a span { background: rgba(145, 99, 60, 0.8); }
myured/ledex
css/skins/brown.css
CSS
apache-2.0
21,990
/* * Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights * Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ package com.amazonaws.services.machinelearning.model.transform; import java.io.ByteArrayInputStream; import java.util.Collections; import java.util.Map; import java.util.List; import java.util.regex.Pattern; import com.amazonaws.AmazonClientException; import com.amazonaws.Request; import com.amazonaws.DefaultRequest; import com.amazonaws.http.HttpMethodName; import com.amazonaws.services.machinelearning.model.*; import com.amazonaws.transform.Marshaller; import com.amazonaws.util.BinaryUtils; import com.amazonaws.util.StringUtils; import com.amazonaws.util.IdempotentUtils; import com.amazonaws.util.StringInputStream; import com.amazonaws.protocol.json.*; /** * UpdateEvaluationRequest Marshaller */ public class UpdateEvaluationRequestMarshaller implements Marshaller<Request<UpdateEvaluationRequest>, UpdateEvaluationRequest> { private final SdkJsonProtocolFactory protocolFactory; public UpdateEvaluationRequestMarshaller( SdkJsonProtocolFactory protocolFactory) { this.protocolFactory = protocolFactory; } public Request<UpdateEvaluationRequest> marshall( UpdateEvaluationRequest updateEvaluationRequest) { if (updateEvaluationRequest == null) { throw new AmazonClientException( "Invalid argument passed to marshall(...)"); } Request<UpdateEvaluationRequest> request = new DefaultRequest<UpdateEvaluationRequest>( updateEvaluationRequest, "AmazonMachineLearning"); request.addHeader("X-Amz-Target", 
"AmazonML_20141212.UpdateEvaluation"); request.setHttpMethod(HttpMethodName.POST); request.setResourcePath(""); try { final StructuredJsonGenerator jsonGenerator = protocolFactory .createGenerator(); jsonGenerator.writeStartObject(); if (updateEvaluationRequest.getEvaluationId() != null) { jsonGenerator.writeFieldName("EvaluationId").writeValue( updateEvaluationRequest.getEvaluationId()); } if (updateEvaluationRequest.getEvaluationName() != null) { jsonGenerator.writeFieldName("EvaluationName").writeValue( updateEvaluationRequest.getEvaluationName()); } jsonGenerator.writeEndObject(); byte[] content = jsonGenerator.getBytes(); request.setContent(new ByteArrayInputStream(content)); request.addHeader("Content-Length", Integer.toString(content.length)); request.addHeader("Content-Type", jsonGenerator.getContentType()); } catch (Throwable t) { throw new AmazonClientException( "Unable to marshall request to JSON: " + t.getMessage(), t); } return request; } }
flofreud/aws-sdk-java
aws-java-sdk-machinelearning/src/main/java/com/amazonaws/services/machinelearning/model/transform/UpdateEvaluationRequestMarshaller.java
Java
apache-2.0
3,458
/** @file * @brief Internal APIs for Bluetooth L2CAP handling. */ /* * Copyright (c) 2015 Intel Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <bluetooth/l2cap.h> #define BT_L2CAP_CID_ATT 0x0004 #define BT_L2CAP_CID_LE_SIG 0x0005 #define BT_L2CAP_CID_SMP 0x0006 struct bt_l2cap_hdr { uint16_t len; uint16_t cid; } __packed; struct bt_l2cap_sig_hdr { uint8_t code; uint8_t ident; uint16_t len; } __packed; #define BT_L2CAP_REJ_NOT_UNDERSTOOD 0x0000 #define BT_L2CAP_REJ_MTU_EXCEEDED 0x0001 #define BT_L2CAP_REJ_INVALID_CID 0x0002 #define BT_L2CAP_CMD_REJECT 0x01 struct bt_l2cap_cmd_reject { uint16_t reason; uint8_t data[0]; } __packed; #define BT_L2CAP_DISCONN_REQ 0x06 struct bt_l2cap_disconn_req { uint16_t dcid; uint16_t scid; } __packed; #define BT_L2CAP_DISCONN_RSP 0x07 struct bt_l2cap_disconn_rsp { uint16_t dcid; uint16_t scid; } __packed; #define BT_L2CAP_CONN_PARAM_REQ 0x12 struct bt_l2cap_conn_param_req { uint16_t min_interval; uint16_t max_interval; uint16_t latency; uint16_t timeout; } __packed; #define BT_L2CAP_CONN_PARAM_ACCEPTED 0x0000 #define BT_L2CAP_CONN_PARAM_REJECTED 0x0001 #define BT_L2CAP_CONN_PARAM_RSP 0x13 struct bt_l2cap_conn_param_rsp { uint16_t result; } __packed; #define BT_L2CAP_LE_CONN_REQ 0x14 struct bt_l2cap_le_conn_req { uint16_t psm; uint16_t scid; uint16_t mtu; uint16_t mps; uint16_t credits; } __packed; #define BT_L2CAP_SUCCESS 0x0000 #define BT_L2CAP_ERR_PSM_NOT_SUPP 0x0002 #define BT_L2CAP_ERR_NO_RESOURCES 0x0004 #define 
BT_L2CAP_ERR_AUTHENTICATION 0x0005 #define BT_L2CAP_ERR_AUTHORIZATION 0x0006 #define BT_L2CAP_ERR_KEY_SIZE 0x0007 #define BT_L2CAP_ERR_ENCRYPTION 0x0008 #define BT_L2CAP_ERR_INVALID_SCID 0x0009 #define BT_L2CAP_ERR_SCID_IN_USE 0x000A #define BT_L2CAP_LE_CONN_RSP 0x15 struct bt_l2cap_le_conn_rsp { uint16_t dcid; uint16_t mtu; uint16_t mps; uint16_t credits; uint16_t result; }; #define BT_L2CAP_LE_CREDITS 0x16 struct bt_l2cap_le_credits { uint16_t cid; uint16_t credits; } __packed; #define BT_L2CAP_SDU_HDR_LEN 2 /* Helper to calculate needed outgoing buffer size */ #define BT_L2CAP_BUF_SIZE(mtu) (CONFIG_BLUETOOTH_HCI_SEND_RESERVE + \ sizeof(struct bt_hci_acl_hdr) + \ sizeof(struct bt_l2cap_hdr) + (mtu)) struct bt_l2cap_fixed_chan { uint16_t cid; int (*accept)(struct bt_conn *conn, struct bt_l2cap_chan **chan); struct bt_l2cap_fixed_chan *_next; }; /* Register a fixed L2CAP channel for L2CAP */ void bt_l2cap_fixed_chan_register(struct bt_l2cap_fixed_chan *chan); /* Notify L2CAP channels of a new connection */ void bt_l2cap_connected(struct bt_conn *conn); /* Notify L2CAP channels of a disconnect event */ void bt_l2cap_disconnected(struct bt_conn *conn); /* Notify L2CAP channels of a change in encryption state */ void bt_l2cap_encrypt_change(struct bt_conn *conn); /* Prepare an L2CAP PDU to be sent over a connection */ struct net_buf *bt_l2cap_create_pdu(struct nano_fifo *fifo); /* Send L2CAP PDU over a connection */ void bt_l2cap_send(struct bt_conn *conn, uint16_t cid, struct net_buf *buf); /* Receive a new L2CAP PDU from a connection */ void bt_l2cap_recv(struct bt_conn *conn, struct net_buf *buf); /* Perform connection parameter update request */ int bt_l2cap_update_conn_param(struct bt_conn *conn, const struct bt_le_conn_param *param); /* Initialize L2CAP and supported channels */ void bt_l2cap_init(void); /* Lookup channel by Transmission CID */ struct bt_l2cap_chan *bt_l2cap_lookup_tx_cid(struct bt_conn *conn, uint16_t cid); /* Lookup channel by Receiver CID */ 
struct bt_l2cap_chan *bt_l2cap_lookup_rx_cid(struct bt_conn *conn, uint16_t cid);
coldnew/zephyr-project-fork
net/bluetooth/l2cap_internal.h
C
apache-2.0
4,214
package gov.va.medora.mdws.emrsvc; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlType; /** * <p>Java class for ConsultTO complex type. * * <p>The following schema fragment specifies the expected content contained within this class. * * <pre> * &lt;complexType name="ConsultTO"> * &lt;complexContent> * &lt;extension base="{http://mdws.medora.va.gov/EmrSvc}OrderTO"> * &lt;sequence> * &lt;element name="toService" type="{http://mdws.medora.va.gov/EmrSvc}TaggedText" minOccurs="0"/> * &lt;element name="title" type="{http://www.w3.org/2001/XMLSchema}string" minOccurs="0"/> * &lt;/sequence> * &lt;/extension> * &lt;/complexContent> * &lt;/complexType> * </pre> * * */ @XmlAccessorType(XmlAccessType.FIELD) @XmlType(name = "ConsultTO", propOrder = { "toService", "title" }) public class ConsultTO extends OrderTO { protected TaggedText toService; protected String title; /** * Gets the value of the toService property. * * @return * possible object is * {@link TaggedText } * */ public TaggedText getToService() { return toService; } /** * Sets the value of the toService property. * * @param value * allowed object is * {@link TaggedText } * */ public void setToService(TaggedText value) { this.toService = value; } /** * Gets the value of the title property. * * @return * possible object is * {@link String } * */ public String getTitle() { return title; } /** * Sets the value of the title property. * * @param value * allowed object is * {@link String } * */ public void setTitle(String value) { this.title = value; } }
VHAINNOVATIONS/TheDailyPlan
LegacyApp/tdpWeb/src/main/java/gov/va/medora/mdws/emrsvc/ConsultTO.java
Java
apache-2.0
2,068
mongoimport -d blog -c posts --drop < posts.json
hemmerling/nosql-mongodb2013
src/m101p/week05/hw5-1/hemmerling_week5_hw1.bat
Batchfile
apache-2.0
48
# frozen_string_literal: true # Module related to container's helpers stuff module Containers # Checks whether it's running inside of a Docker container or not def self.dockerized? @dockerized ||= File.read("/proc/1/cgroup").include?("docker") end end
mssola/Portus
spec/support/containers.rb
Ruby
apache-2.0
263
/****************************************************************/ /* Parallel Combinatorial BLAS Library (for Graph Computations) */ /* version 1.4 -------------------------------------------------*/ /* date: 1/17/2014 ---------------------------------------------*/ /* authors: Aydin Buluc ([email protected]), Adam Lugowski --------*/ /****************************************************************/ /* Copyright (c) 2010-2014, The Regents of the University of California Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ #ifndef _SP_TUPLES_H #define _SP_TUPLES_H #include <iostream> #include <fstream> #include <cmath> #include <cassert> #include "CombBLAS.h" #include "SpMat.h" #include "SpDefs.h" #include "StackEntry.h" #include "Compare.h" using namespace std; template <class IU, class NU> class SpDCCols; template <class IU, class NU> class Dcsc; /** * Triplets are represented using the boost::tuple class of the Boost library * Number of entries are 64-bit addressible, but each entry is only <class IT> addressible * Therefore, size is int64_t but nrows/ncols (representing range of first two entries in tuple) is of type IT * \remarks Indices start from 0 in this class * \remarks Sorted with respect to columns (Column-sorted triples) */ template <class IT, class NT> class SpTuples: public SpMat<IT, NT, SpTuples<IT,NT> > { public: // Constructors SpTuples (int64_t size, IT nRow, IT nCol); SpTuples (int64_t size, IT nRow, IT nCol, tuple<IT, IT, NT> * mytuples, bool sorted = false); SpTuples (int64_t maxnnz, IT nRow, IT nCol, vector<IT> & edges, bool removeloops = true); // Graph500 contructor SpTuples (int64_t size, IT nRow, IT nCol, StackEntry<NT, pair<IT,IT> > * & multstack); SpTuples (const SpTuples<IT,NT> & rhs); // Actual Copy constructor SpTuples (const SpDCCols<IT,NT> & rhs); // Copy constructor for conversion from SpDCCols ~SpTuples(); SpTuples<IT,NT> & operator=(const SpTuples<IT,NT> & rhs); IT & rowindex (IT i) { return joker::get<0>(tuples[i]); } IT & colindex (IT i) { return joker::get<1>(tuples[i]); } NT & numvalue (IT i) { return joker::get<2>(tuples[i]); } IT rowindex (IT i) const { return joker::get<0>(tuples[i]); } IT colindex (IT i) const { return joker::get<1>(tuples[i]); } NT numvalue (IT i) const { return joker::get<2>(tuples[i]); } template <typename BINFUNC> void RemoveDuplicates(BINFUNC BinOp); void SortRowBased() { RowLexiCompare<IT,NT> rowlexicogcmp; if(!SpHelper::is_sorted(tuples, tuples+nnz, rowlexicogcmp)) sort(tuples , tuples+nnz, rowlexicogcmp); // 
Default "operator<" for tuples uses lexicographical ordering // However, cray compiler complains about it, so we use rowlexicogcmp } void SortColBased() { ColLexiCompare<IT,NT> collexicogcmp; if(!SpHelper::is_sorted(tuples, tuples+nnz, collexicogcmp)) sort(tuples , tuples+nnz, collexicogcmp ); } /** * @pre {should only be called on diagonal processors (others will remove non-loop nonzeros)} **/ IT RemoveLoops() { IT loop = 0; for(IT i=0; i< nnz; ++i) { if(joker::get<0>(tuples[i]) == joker::get<1>(tuples[i])) ++loop; } tuple<IT, IT, NT> * ntuples = new tuple<IT,IT,NT>[nnz-loop]; IT ni = 0; for(IT i=0; i< nnz; ++i) { if(joker::get<0>(tuples[i]) != joker::get<1>(tuples[i])) { ntuples[ni++] = tuples[i]; } } delete [] tuples; tuples = ntuples; nnz = nnz-loop; return loop; } pair<IT,IT> RowLimits() { if(nnz > 0) { RowCompare<IT,NT> rowcmp; tuple<IT,IT,NT> * maxit = max_element(tuples, tuples+nnz, rowcmp); tuple<IT,IT,NT> * minit = min_element(tuples, tuples+nnz, rowcmp); return make_pair(joker::get<0>(*minit), joker::get<0>(*maxit)); } else return make_pair(0,0); } pair<IT,IT> ColLimits() { if(nnz > 0) { ColCompare<IT,NT> colcmp; tuple<IT,IT,NT> * maxit = max_element(tuples, tuples+nnz, colcmp); tuple<IT,IT,NT> * minit = min_element(tuples, tuples+nnz, colcmp); return make_pair(joker::get<1>(*minit), joker::get<1>(*maxit)); } else return make_pair(0,0); } tuple<IT, IT, NT> front() { return tuples[0]; }; tuple<IT, IT, NT> back() { return tuples[nnz-1]; }; // Performs a balanced merge of the array of SpTuples template<typename SR, typename IU, typename NU> friend SpTuples<IU,NU> MergeAll(const vector<SpTuples<IU,NU> *> & ArrSpTups, IU mstar, IU nstar, bool delarrs); template<typename SR, typename IU, typename NU> friend SpTuples<IU,NU> * MergeAllRec(const vector<SpTuples<IU,NU> *> & ArrSpTups, IU mstar, IU nstar); ofstream& putstream (ofstream& outfile) const; ofstream& put (ofstream& outfile) const { return putstream(outfile); } ifstream& getstream (ifstream& infile); 
ifstream& get (ifstream& infile) { return getstream(infile); } bool isZero() const { return (nnz == 0); } IT getnrow() const { return m; } IT getncol() const { return n; } int64_t getnnz() const { return nnz; } void PrintInfo(); tuple<IT, IT, NT> * tuples; private: IT m; IT n; int64_t nnz; SpTuples (){}; // Default constructor does nothing, hide it void FillTuples (Dcsc<IT,NT> * mydcsc); template <class IU, class NU> friend class SpDCCols; }; // At this point, complete type of of SpTuples is known, safe to declare these specialization (but macros won't work as they are preprocessed) template <> struct promote_trait< SpTuples<int,int> , SpTuples<int,int> > { typedef SpTuples<int,int> T_promote; }; template <> struct promote_trait< SpTuples<int,float> , SpTuples<int,float> > { typedef SpTuples<int,float> T_promote; }; template <> struct promote_trait< SpTuples<int,double> , SpTuples<int,double> > { typedef SpTuples<int,double> T_promote; }; template <> struct promote_trait< SpTuples<int,bool> , SpTuples<int,int> > { typedef SpTuples<int,int> T_promote; }; template <> struct promote_trait< SpTuples<int,int> , SpTuples<int,bool> > { typedef SpTuples<int,int> T_promote; }; template <> struct promote_trait< SpTuples<int,int> , SpTuples<int,float> > { typedef SpTuples<int,float> T_promote; }; template <> struct promote_trait< SpTuples<int,float> , SpTuples<int,int> > { typedef SpTuples<int,float> T_promote; }; template <> struct promote_trait< SpTuples<int,int> , SpTuples<int,double> > { typedef SpTuples<int,double> T_promote; }; template <> struct promote_trait< SpTuples<int,double> , SpTuples<int,int> > { typedef SpTuples<int,double> T_promote; }; template <> struct promote_trait< SpTuples<int,unsigned> , SpTuples<int,bool> > { typedef SpTuples<int,unsigned> T_promote; }; template <> struct promote_trait< SpTuples<int,bool> , SpTuples<int,unsigned> > { typedef SpTuples<int,unsigned> T_promote; }; template <> struct promote_trait< SpTuples<int,bool> , 
SpTuples<int,double> > { typedef SpTuples<int,double> T_promote; }; template <> struct promote_trait< SpTuples<int,bool> , SpTuples<int,float> > { typedef SpTuples<int,float> T_promote; }; template <> struct promote_trait< SpTuples<int,double> , SpTuples<int,bool> > { typedef SpTuples<int,double> T_promote; }; template <> struct promote_trait< SpTuples<int,float> , SpTuples<int,bool> > { typedef SpTuples<int,float> T_promote; }; template <> struct promote_trait< SpTuples<int64_t,int> , SpTuples<int64_t,int> > { typedef SpTuples<int64_t,int> T_promote; }; template <> struct promote_trait< SpTuples<int64_t,float> , SpTuples<int64_t,float> > { typedef SpTuples<int64_t,float> T_promote; }; template <> struct promote_trait< SpTuples<int64_t,double> , SpTuples<int64_t,double> > { typedef SpTuples<int64_t,double> T_promote; }; template <> struct promote_trait< SpTuples<int64_t,int64_t> , SpTuples<int64_t,int64_t> > { typedef SpTuples<int64_t,int64_t> T_promote; }; template <> struct promote_trait< SpTuples<int64_t,bool> , SpTuples<int64_t,int> > { typedef SpTuples<int64_t,int> T_promote; }; template <> struct promote_trait< SpTuples<int64_t,int> , SpTuples<int64_t,bool> > { typedef SpTuples<int64_t,int> T_promote; }; template <> struct promote_trait< SpTuples<int64_t,int> , SpTuples<int64_t,float> > { typedef SpTuples<int64_t,float> T_promote; }; template <> struct promote_trait< SpTuples<int64_t,float> , SpTuples<int64_t,int> > { typedef SpTuples<int64_t,float> T_promote; }; template <> struct promote_trait< SpTuples<int64_t,int> , SpTuples<int64_t,double> > { typedef SpTuples<int64_t,double> T_promote; }; template <> struct promote_trait< SpTuples<int64_t,double> , SpTuples<int64_t,int> > { typedef SpTuples<int64_t,double> T_promote; }; template <> struct promote_trait< SpTuples<int64_t,unsigned> , SpTuples<int64_t,bool> > { typedef SpTuples<int64_t,unsigned> T_promote; }; template <> struct promote_trait< SpTuples<int64_t,bool> , SpTuples<int64_t,unsigned> > { typedef 
SpTuples<int64_t,unsigned> T_promote; }; template <> struct promote_trait< SpTuples<int64_t,bool> , SpTuples<int64_t,double> > { typedef SpTuples<int64_t,double> T_promote; }; template <> struct promote_trait< SpTuples<int64_t,bool> , SpTuples<int64_t,float> > { typedef SpTuples<int64_t,float> T_promote; }; template <> struct promote_trait< SpTuples<int64_t,double> , SpTuples<int64_t,bool> > { typedef SpTuples<int64_t,double> T_promote; }; template <> struct promote_trait< SpTuples<int64_t,float> , SpTuples<int64_t,bool> > { typedef SpTuples<int64_t,float> T_promote; }; #include "SpTuples.cpp" #endif
ParBLiSS/parconnect
ext/CombBLAS/SpTuples.h
C
apache-2.0
13,007
package kjkrol.voronoidiagram; import javafx.geometry.Point2D; import org.junit.Test; import java.util.Arrays; /** * @author Karol Krol */ public class VoronoiDiagramTest { @Test public void startTest() { final VoronoiDiagram voronoiDiagram = VoronoiDiagram.builder() .width(10) .height(10) .givenPoints(Arrays.asList(new Point2D(2, 2), new Point2D(5, 5), new Point2D(8, 8))) .build(); voronoiDiagram.start(); voronoiDiagram.getRegions().stream().peek(System.out::println).count(); } }
kjkrol/voronoi-diagram
src/test/java/kjkrol/voronoidiagram/VoronoiDiagramTest.java
Java
apache-2.0
592
# Copyright 2016 Capital One Services, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import, division, print_function, unicode_literals import itertools from c7n.actions import Action from c7n.manager import resources from c7n.query import QueryResourceManager from c7n.utils import local_session, chunks, type_schema @resources.register('simpledb') class SimpleDB(QueryResourceManager): class resource_type(object): service = "sdb" enum_spec = ("list_domains", "DomainNames", None) id = name = "DomainName" dimension = None permissions = ('sdb:DomainMetadata',) def augment(self, resources): def _augment(resource_set): client = local_session(self.session_factory).client('sdb') results = [] for r in resources: info = client.domain_metadata(DomainName=r) info.pop('ResponseMetadata') info['DomainName'] = r results.append(info) return results with self.executor_factory(max_workers=3) as w: return list(itertools.chain( *w.map(_augment, chunks(resources, 20)))) @SimpleDB.action_registry.register('delete') class Delete(Action): schema = type_schema('delete') permissions = ('sdb:DeleteDomain',) def process(self, resources): client = local_session(self.manager.session_factory).client('sdb') for r in resources: client.delete_domain(DomainName=r['DomainName'])
VeritasOS/cloud-custodian
c7n/resources/simpledb.py
Python
apache-2.0
2,048
package com.thed.service.soap; import java.util.ArrayList; import java.util.List; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlType; /** * <p>Java class for getUsersByCriteria complex type. * * <p>The following schema fragment specifies the expected content contained within this class. * * <pre> * &lt;complexType name="getUsersByCriteria"> * &lt;complexContent> * &lt;restriction base="{http://www.w3.org/2001/XMLSchema}anyType"> * &lt;sequence> * &lt;element name="searchCriterias" type="{http://soap.service.thed.com/}remoteCriteria" maxOccurs="unbounded" minOccurs="0"/> * &lt;element name="returnAllDataFlag" type="{http://www.w3.org/2001/XMLSchema}boolean" minOccurs="0"/> * &lt;element name="token" type="{http://www.w3.org/2001/XMLSchema}string" minOccurs="0"/> * &lt;/sequence> * &lt;/restriction> * &lt;/complexContent> * &lt;/complexType> * </pre> * * */ @XmlAccessorType(XmlAccessType.FIELD) @XmlType(name = "getUsersByCriteria", propOrder = { "searchCriterias", "returnAllDataFlag", "token" }) public class GetUsersByCriteria { protected List<RemoteCriteria> searchCriterias; protected Boolean returnAllDataFlag; protected String token; /** * Gets the value of the searchCriterias property. * * <p> * This accessor method returns a reference to the live list, * not a snapshot. Therefore any modification you make to the * returned list will be present inside the JAXB object. * This is why there is not a <CODE>set</CODE> method for the searchCriterias property. 
* * <p> * For example, to add a new item, do as follows: * <pre> * getSearchCriterias().add(newItem); * </pre> * * * <p> * Objects of the following type(s) are allowed in the list * {@link RemoteCriteria } * * */ public List<RemoteCriteria> getSearchCriterias() { if (searchCriterias == null) { searchCriterias = new ArrayList<RemoteCriteria>(); } return this.searchCriterias; } /** * Gets the value of the returnAllDataFlag property. * * @return * possible object is * {@link Boolean } * */ public Boolean isReturnAllDataFlag() { return returnAllDataFlag; } /** * Sets the value of the returnAllDataFlag property. * * @param value * allowed object is * {@link Boolean } * */ public void setReturnAllDataFlag(Boolean value) { this.returnAllDataFlag = value; } /** * Gets the value of the token property. * * @return * possible object is * {@link String } * */ public String getToken() { return token; } /** * Sets the value of the token property. * * @param value * allowed object is * {@link String } * */ public void setToken(String value) { this.token = value; } }
zeedeveloper/zee-jenkins
src/main/java/com/thed/service/soap/GetUsersByCriteria.java
Java
apache-2.0
3,180
// Copyright © 2017-2019 The OpenEBS Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package volumecontroller import ( "fmt" "os" "reflect" "testing" "time" "github.com/openebs/maya/cmd/cstor-volume-mgmt/controller/common" apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1" openebsFakeClientset "github.com/openebs/maya/pkg/client/generated/clientset/versioned/fake" informers "github.com/openebs/maya/pkg/client/generated/informers/externalversions" "github.com/openebs/maya/pkg/client/k8s" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" kubeinformers "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/fake" ) func fakeStrToQuantity(capacity string) resource.Quantity { qntCapacity, _ := resource.ParseQuantity(capacity) return qntCapacity } // TestGetVolumeResource checks if volume resource created is successfully got. func TestGetVolumeResource(t *testing.T) { fakeKubeClient := fake.NewSimpleClientset() fakeOpenebsClient := openebsFakeClientset.NewSimpleClientset() kubeInformerFactory := kubeinformers.NewSharedInformerFactory(fakeKubeClient, time.Second*30) openebsInformerFactory := informers.NewSharedInformerFactory(fakeOpenebsClient, time.Second*30) // Instantiate the cStor Volume controllers. 
volumeController := NewCStorVolumeController(fakeKubeClient, fakeOpenebsClient, kubeInformerFactory, openebsInformerFactory) testVolumeResource := map[string]struct { expectedVolumeName string test *apis.CStorVolume }{ "img1VolumeResource": { expectedVolumeName: "abc", test: &apis.CStorVolume{ TypeMeta: metav1.TypeMeta{}, ObjectMeta: metav1.ObjectMeta{ Name: "volume1", UID: types.UID("abc"), Namespace: string(common.DefaultNameSpace), }, Spec: apis.CStorVolumeSpec{ TargetIP: "0.0.0.0", Capacity: fakeStrToQuantity("5G"), Status: "init", }, Status: apis.CStorVolumeStatus{}, }, }, "img2VolumeResource": { expectedVolumeName: "abcd", test: &apis.CStorVolume{ TypeMeta: metav1.TypeMeta{}, ObjectMeta: metav1.ObjectMeta{ Name: "volume2", UID: types.UID("abcd"), Namespace: string(common.DefaultNameSpace), }, Spec: apis.CStorVolumeSpec{ TargetIP: "0.0.0.0", Capacity: fakeStrToQuantity("15G"), Status: "init", }, Status: apis.CStorVolumeStatus{}, }, }, } for desc, ut := range testVolumeResource { // Create Volume resource _, err := volumeController.clientset.OpenebsV1alpha1().CStorVolumes(string(common.DefaultNameSpace)).Create(ut.test) if err != nil { t.Fatalf("Desc:%v, Unable to create resource : %v", desc, ut.test.ObjectMeta.Name) } // Get the created volume resource using name cStorVolumeObtained, err := volumeController.getVolumeResource(ut.test.ObjectMeta.Name) if string(cStorVolumeObtained.ObjectMeta.UID) != ut.expectedVolumeName { t.Fatalf("Desc:%v, VolumeName mismatch, Expected:%v, Got:%v", desc, ut.expectedVolumeName, string(cStorVolumeObtained.ObjectMeta.UID)) } } } // TestIsValidCStorVolumeMgmt is to check if right sidecar does operation with env match. 
func TestIsValidCStorVolumeMgmt(t *testing.T) { testVolumeResource := map[string]struct { expectedOutput bool test *apis.CStorVolume }{ "img2VolumeResource": { expectedOutput: true, test: &apis.CStorVolume{ TypeMeta: metav1.TypeMeta{}, ObjectMeta: metav1.ObjectMeta{ Name: "volume2", UID: types.UID("abcd"), Finalizers: []string{"cstorvolume.openebs.io/finalizer"}, Namespace: string(common.DefaultNameSpace), }, Spec: apis.CStorVolumeSpec{ TargetIP: "0.0.0.0", Capacity: fakeStrToQuantity("15G"), Status: "init", }, Status: apis.CStorVolumeStatus{}, }, }, } for desc, ut := range testVolumeResource { os.Setenv("OPENEBS_IO_CSTOR_VOLUME_ID", string(ut.test.UID)) obtainedOutput := IsValidCStorVolumeMgmt(ut.test) if obtainedOutput != ut.expectedOutput { t.Fatalf("Desc:%v, Expected:%v, Got:%v", desc, ut.expectedOutput, obtainedOutput) } os.Setenv("OPENEBS_IO_CSTOR_VOLUME_ID", "") } } // TestIsValidCStorVolumeMgmtNegative is to check if right sidecar does operation with env match. func TestIsValidCStorVolumeMgmtNegative(t *testing.T) { testVolumeResource := map[string]struct { expectedOutput bool test *apis.CStorVolume }{ "img2VolumeResource": { expectedOutput: false, test: &apis.CStorVolume{ TypeMeta: metav1.TypeMeta{}, ObjectMeta: metav1.ObjectMeta{ Name: "volume2", UID: types.UID("abcd"), Finalizers: []string{"cstorvolume.openebs.io/finalizer"}, Namespace: string(common.DefaultNameSpace), }, Spec: apis.CStorVolumeSpec{ TargetIP: "0.0.0.0", Capacity: fakeStrToQuantity("15G"), Status: "init", }, Status: apis.CStorVolumeStatus{}, }, }, } for desc, ut := range testVolumeResource { os.Setenv("OPENEBS_IO_CSTOR_VOLUME_ID", string("awer")) obtainedOutput := IsValidCStorVolumeMgmt(ut.test) if obtainedOutput != ut.expectedOutput { t.Fatalf("Desc:%v, Expected:%v, Got:%v", desc, ut.expectedOutput, obtainedOutput) } os.Setenv("OPENEBS_IO_CSTOR_VOLUME_ID", "") } } func TestCreateEventObj(t *testing.T) { tests := map[string]struct { cstorVolume *apis.CStorVolume event *v1.Event podName, 
nodeName string clientset kubernetes.Interface }{ "event": { cstorVolume: &apis.CStorVolume{ ObjectMeta: metav1.ObjectMeta{ Name: "csv-1", UID: types.UID("abcd"), Namespace: string(common.DefaultNameSpace), ResourceVersion: "1111", }, TypeMeta: metav1.TypeMeta{ Kind: "CstorVolume", APIVersion: "v1alpha1", }, Status: apis.CStorVolumeStatus{ Phase: apis.CStorVolumePhase(common.CVStatusHealthy), }, }, event: &v1.Event{ ObjectMeta: metav1.ObjectMeta{ Name: "csv-1.Healthy", Namespace: string(common.DefaultNameSpace), }, InvolvedObject: v1.ObjectReference{ Kind: string(k8s.CStorVolumeCRKK), APIVersion: string(k8s.OEV1alpha1KA), Name: "csv-1", Namespace: string(common.DefaultNameSpace), UID: types.UID("abcd"), ResourceVersion: "1111", }, FirstTimestamp: metav1.Time{Time: time.Now()}, LastTimestamp: metav1.Time{Time: time.Now()}, Count: 1, Message: fmt.Sprintf(common.EventMsgFormatter, "Healthy"), Reason: "Healthy", Type: getEventType(common.CStorVolumeStatus("Healthy")), Source: v1.EventSource{ Component: "mypod", Host: "mynode", }, }, podName: "mypod", nodeName: "mynode", clientset: fake.NewSimpleClientset(), }, } for desc, ut := range tests { t.Run(desc, func(t *testing.T) { os.Setenv("POD_NAME", ut.podName) os.Setenv("NODE_NAME", ut.nodeName) cvController := CStorVolumeController{kubeclientset: ut.clientset} event := cvController.createEventObj(ut.cstorVolume) if !reflect.DeepEqual(event.ObjectMeta, ut.event.ObjectMeta) { t.Errorf("Failed to create event, invalid ObjectMeta: want=%v got=%v", ut.event.ObjectMeta, event.ObjectMeta) } if !reflect.DeepEqual(event.InvolvedObject, ut.event.InvolvedObject) { t.Errorf("Failed to create event, invalid InvolvedObject: want=%v got=%v", ut.event.InvolvedObject, event.InvolvedObject) } if !reflect.DeepEqual(event.Count, ut.event.Count) { t.Errorf("Failed to create event, invalid Count: want=%v got=%v", ut.event.Count, event.Count) } if !reflect.DeepEqual(event.Message, ut.event.Message) { t.Errorf("Failed to create event, invalid 
Message: want=%v got=%v", ut.event.Message, event.Message) } if !reflect.DeepEqual(event.Reason, ut.event.Reason) { t.Errorf("Failed to create event, invalid Reason: want=%v got=%v", ut.event.Reason, event.Reason) } if !reflect.DeepEqual(event.Type, ut.event.Type) { t.Errorf("Failed to create event, invalid Type: want=%v got=%v", ut.event.Type, event.Type) } if !reflect.DeepEqual(event.Source, ut.event.Source) { t.Errorf("Failed to create event, invalid Source: want=%v got=%v", ut.event.Source, event.Source) } // = %v, want %v", "got, ut.event os.Unsetenv("POD_NAME") os.Unsetenv("NODE_NAME") }) } } func TestGetEventType(t *testing.T) { tests := map[string]struct { phase common.CStorVolumeStatus eventType string }{ "Normal event Init": {phase: common.CVStatusInit, eventType: v1.EventTypeNormal}, "Normal event Healthy": {phase: common.CVStatusHealthy, eventType: v1.EventTypeNormal}, "Normal event Degraded": {phase: common.CVStatusDegraded, eventType: v1.EventTypeNormal}, "Warning event Error": {phase: common.CVStatusError, eventType: v1.EventTypeWarning}, "Warning event Invalid": {phase: common.CVStatusInvalid, eventType: v1.EventTypeWarning}, "Warning event Offline": {phase: common.CVStatusOffline, eventType: v1.EventTypeWarning}, } for desc, ut := range tests { t.Run(desc, func(t *testing.T) { if got := getEventType(ut.phase); got != ut.eventType { t.Errorf("Incorrect event type= %v, want %v", got, ut.eventType) } }) } }
AmitKumarDas/maya
cmd/cstor-volume-mgmt/controller/volume-controller/handler_test.go
GO
apache-2.0
9,976
import static org.junit.Assert.assertEquals; import org.junit.Test; import org.springframework.test.context.ContextConfiguration; /** * Basic integration test for DMS sample when security has been added. * * @author Ben Alex * */ @ContextConfiguration(locations={"classpath:applicationContext-dms-shared.xml", "classpath:applicationContext-dms-secure.xml"}) public class SecureDmsIntegrationTests extends DmsIntegrationTests { @Test public void testBasePopulation() { assertEquals(9, jdbcTemplate.queryForInt("select count(id) from DIRECTORY")); assertEquals(90, jdbcTemplate.queryForInt("select count(id) from FILE")); assertEquals(4, jdbcTemplate.queryForInt("select count(id) from ACL_SID")); // 3 users + 1 role assertEquals(2, jdbcTemplate.queryForInt("select count(id) from ACL_CLASS")); // Directory and File assertEquals(100, jdbcTemplate.queryForInt("select count(id) from ACL_OBJECT_IDENTITY")); assertEquals(115, jdbcTemplate.queryForInt("select count(id) from ACL_ENTRY")); } public void testMarissaRetrieval() { process("rod", "koala", true); } public void testScottRetrieval() { process("scott", "wombat", true); } public void testDianneRetrieval() { process("dianne", "emu", true); } }
izeye/spring-security
samples/dms-xml/src/test/java/SecureDmsIntegrationTests.java
Java
apache-2.0
1,323
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package test.poa.POAManagerProxyPackage; // // IDL:POAManagerProxy/State:1.0 // final public class StateHolder implements org.omg.CORBA.portable.Streamable { public State value; public StateHolder() { } public StateHolder(State initial) { value = initial; } public void _read(org.omg.CORBA.portable.InputStream in) { value = StateHelper.read(in); } public void _write(org.omg.CORBA.portable.OutputStream out) { StateHelper.write(out, value); } public org.omg.CORBA.TypeCode _type() { return StateHelper.type(); } }
apache/geronimo-yoko
yoko-core/src/test/java/test/poa/POAManagerProxyPackage/StateHolder.java
Java
apache-2.0
1,446
/* * This file is part of the Heritrix web crawler (crawler.archive.org). * * Licensed to the Internet Archive (IA) by one or more individual * contributors. * * The IA licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.archive.io.arc; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.logging.Level; import java.util.logging.Logger; import java.util.regex.Matcher; import org.apache.commons.httpclient.Header; import org.apache.commons.httpclient.StatusLine; import org.apache.commons.httpclient.util.EncodingUtil; import org.apache.commons.lang.StringUtils; import org.archive.io.ArchiveRecord; import org.archive.io.ArchiveRecordHeader; import org.archive.io.RecoverableIOException; import org.archive.util.InetAddressUtil; import org.archive.util.LaxHttpParser; import org.archive.util.TextUtils; /** * An ARC file record. * Does not compass the ARCRecord metadata line, just the record content. * @author stack */ public class ARCRecord extends ArchiveRecord implements ARCConstants { /** * Http status line object. * * May be null if record is not http. */ private StatusLine httpStatus = null; /** * Http header bytes. * * If non-null and bytes available, give out its contents before we * go back to the underlying stream. 
*/ private InputStream httpHeaderStream = null; /** * Http headers. * * Only populated after reading of headers. */ private Header [] httpHeaders = null; /** * Array of field names. * * Used to initialize <code>headerFieldNameKeys</code>. */ private final String [] headerFieldNameKeysArray = { URL_FIELD_KEY, IP_HEADER_FIELD_KEY, DATE_FIELD_KEY, MIMETYPE_FIELD_KEY, LENGTH_FIELD_KEY }; /** * An array of the header field names found in the ARC file header on * the 3rd line. * * We used to read these in from the arc file first record 3rd line but * now we hardcode them for sake of improved performance. */ private final List<String> headerFieldNameKeys = Arrays.asList(this.headerFieldNameKeysArray); /** * Http header bytes read while trying to read http header */ public long httpHeaderBytesRead = -1; /** * record length from metadata line */ public long recordDeclaredLength; /** * null if source was not compressed */ public long compressedBytes; /** * actual payload data (not including trailing newline), * should match record-declared-length */ public long uncompressedBytes; /** * content-length header, iff HTTP and present, null otherwise */ public long httpPayloadDeclaredLength; /** * actual http payload length, should match http-payload-declared-length */ public long httpPayloadActualLength; /** * errors encountered reading record */ public List<ArcRecordErrors> errors = new ArrayList<ArcRecordErrors>(); /** * verbatim ARC record header string */ private String headerString; public String getHeaderString() { return this.headerString; } /** * Constructor. * * @param in Stream cue'd up to be at the start of the record this instance * is to represent. * @param metaData Meta data. * @throws IOException */ public ARCRecord(InputStream in, ArchiveRecordHeader metaData) throws IOException { this(in, metaData, 0, true, false, true); } /** * Constructor. * * @param in Stream cue'd up to be at the start of the record this instance * is to represent. 
     * @param metaData Meta data.
     * @param bodyOffset Offset into the body.  Usually 0.
     * @param digest True if we're to calculate digest for this record.  Not
     * digesting saves about ~15% of cpu during an ARC parse.
     * @param strict Be strict parsing (Parsing stops if ARC improperly
     * formatted).
     * @param parseHttpHeaders True if we are to parse HTTP headers.  Costs
     * about ~20% of CPU during an ARC parse.
     * @throws IOException
     */
    public ARCRecord(InputStream in, ArchiveRecordHeader metaData,
            int bodyOffset, boolean digest, boolean strict,
            final boolean parseHttpHeaders)
    throws IOException {
        super(in, metaData, bodyOffset, digest, strict);
        // Optionally consume the HTTP response header up front so subsequent
        // reads return body content only.
        if (parseHttpHeaders) {
            this.httpHeaderStream = readHttpHeader();
        }
    }

    /**
     * Constructor.
     *
     * @param in Stream cue'd up to be at the start of the records metadata
     * this instance is to represent.
     * @param identifier Identifier for this the hosting Reader.
     * @param offset Current offset into <code>in</code> (Used to keep
     * <code>position</code> properly aligned).  Usually 0.
     * @param digest True if we're to calculate digest for this record.  Not
     * digesting saves about ~15% of cpu during an ARC parse.
     * @param strict Be strict parsing (Parsing stops if ARC improperly
     * formatted).
     * @param parseHttpHeaders True if we are to parse HTTP headers.  Costs
     * about ~20% of CPU during an ARC parse.
     * @param isAlignedOnFirstRecord True if this is the first record to be
     * read from an archive
     * @param version Version information to be returned to the
     * ARCReader constructing this record
     *
     * @throws IOException
     */
    public ARCRecord(InputStream in, final String identifier,
            final long offset, boolean digest, boolean strict,
            final boolean parseHttpHeaders,
            final boolean isAlignedOnFirstRecord, String version)
    throws IOException {
        // No pre-built header here: it is parsed out of the stream itself.
        super(in, null, 0, digest, strict);
        setHeader(parseHeaders(in, identifier, offset, strict,
            isAlignedOnFirstRecord, version));
        if (parseHttpHeaders) {
            this.httpHeaderStream = readHttpHeader();
        }
    }

    /**
     * Constructor.
     *
     * Convenience form: not aligned on first record and no version info.
     *
     * @param in Stream cue'd up to be at the start of the records metadata
     * this instance is to represent.
     * @param identifier Identifier for this the hosting Reader.
     * @param offset Current offset into <code>in</code> (Used to keep
     * <code>position</code> properly aligned).  Usually 0.
     * @param digest True if we're to calculate digest for this record.  Not
     * digesting saves about ~15% of cpu during an ARC parse.
     * @param strict Be strict parsing (Parsing stops if ARC improperly
     * formatted).
     * @param parseHttpHeaders True if we are to parse HTTP headers.  Costs
     * about ~20% of CPU during an ARC parse.
     *
     * @throws IOException
     */
    public ARCRecord(InputStream in, final String identifier,
            final long offset, boolean digest, boolean strict,
            final boolean parseHttpHeaders)
    throws IOException {
        this(in, identifier, offset, digest, strict, parseHttpHeaders,
            false, null);
    }

    /**
     * Parse the ARC metadata line(s) for the record at the current stream
     * position and build the record header from them.  When positioned at
     * offset zero of an aligned reader, also consumes the 3-line ARC file
     * meta record to pick up version and origin.
     *
     * @param in Stream positioned at the start of the record metadata.
     * @param identifier Identifier for the hosting Reader.
     * @param offset Current offset into <code>in</code>.
     * @param strict Be strict parsing.
     * @param isAlignedOnFirstRecord True if this is the first record read.
     * @param version Version string, may be overwritten from file meta.
     * @return Parsed header for this record.
     * @throws IOException
     */
    private ArchiveRecordHeader parseHeaders(final InputStream in,
            final String identifier, final long offset, final boolean strict,
            final boolean isAlignedOnFirstRecord, String version)
    throws IOException {
        ArrayList<String> firstLineValues = new ArrayList<String>(20);
        getTokenizedHeaderLine(in, firstLineValues);
        int bodyOffset = 0;
        String origin = "";
        if (offset == 0 && isAlignedOnFirstRecord) {
            // If offset is zero and we were aligned at first record on
            // creation (See #alignedOnFirstRecord for more on this), then no
            // records have been read yet and we're reading our first one, the
            // record of ARC file meta info.  Its special.  In ARC versions
            // 1.x, first record has three lines of meta info. We've just read
            // the first line. There are two more.  The second line has misc.
            // info.  We're only interested in the first field, the version
            // number.  The third line is the list of field names.  Here's what
            // ARC file version 1.x meta content looks like:
            //
            // filedesc://testIsBoundary-JunitIAH200401070157520.arc 0.0.0.0 \\
            //      20040107015752 text/plain 77
            // 1 0 InternetArchive
            // URL IP-address Archive-date Content-type Archive-length
            //
            ArrayList<String> secondLineValues = new ArrayList<String>(20);
            bodyOffset += getTokenizedHeaderLine(in, secondLineValues);
            version = ((String)secondLineValues.get(0) + "." +
                (String)secondLineValues.get(1));
            origin = (String)secondLineValues.get(2);
            // Just read over the 3rd line.  We used to parse it and use
            // values found here but now we just hardcode them to avoid
            // having to read this 3rd line even for random arc file accesses.
            bodyOffset += getTokenizedHeaderLine(in, null);
            // this.position = bodyOffset;
        }
        setBodyOffset(bodyOffset);
        return computeMetaData(this.headerFieldNameKeys, firstLineValues,
            version, origin, offset, identifier);
    }

    /**
     * Get a record header line as list of tokens.
     *
     * We keep reading till we find a LINE_SEPARATOR or we reach the end
     * of file w/o finding a LINE_SEPARATOR or the line length is crazy.
     *
     * @param stream InputStream to read from.
     * @param list Empty list that gets filled w/ string tokens.
     * @return Count of characters read.
     * @exception IOException If problem reading stream or no line separator
     * found or EOF before EOL or we didn't get minimum header fields.
     */
    private int getTokenizedHeaderLine(final InputStream stream,
            List<String> list) throws IOException {
        // Preallocate usual line size.
        StringBuilder buffer = new StringBuilder(2048 + 20);
        int read = 0;
        int previous = -1;
        for (int c = -1; true;) {
            previous = c;
            c = stream.read();
            if (c == -1) {
                throw new RecoverableIOException("Hit EOF before header EOL.");
            }
            c &= 0xff;
            read++;
            if (read > MAX_HEADER_LINE_LENGTH) {
                throw new IOException("Header line longer than max allowed " +
                    " -- " + String.valueOf(MAX_HEADER_LINE_LENGTH) +
                    " -- or passed buffer doesn't contain a line (Read: " +
                    buffer.length() + ").  Here's" +
                    " some of what was read: " +
                    buffer.substring(0, Math.min(buffer.length(), 256)));
            }
            if (c == LINE_SEPARATOR) {
                if (buffer.length() == 0) {
                    // Empty line at start of buffer.  Skip it and try again.
                    continue;
                }
                if (list != null) {
                    list.add(buffer.toString());
                }
                // LOOP TERMINATION.
                break;
            } else if (c == HEADER_FIELD_SEPARATOR) {
                if (!isStrict() && previous == HEADER_FIELD_SEPARATOR) {
                    // Early ARCs sometimes had multiple spaces between fields.
                    continue;
                }
                if (list != null) {
                    list.add(buffer.toString());
                }
                // reset to empty
                buffer.setLength(0);
            } else {
                buffer.append((char)c);
            }
        }
        // List must have at least 3 elements in it and no more than 100.  If
        // it has other than this, then bogus parse.
        if (list != null && (list.size() < 3 || list.size() > 100)) {
            throw new IOException("Unparseable header line: " + list);
        }
        // save verbatim header String
        this.headerString = StringUtils.join(list," ");
        return read;
    }

    /**
     * Compute metadata fields.
     *
     * Here we check the meta field has right number of items in it.
     * When not strict, attempts two repairs on malformed lines: spaces in
     * the URL field, and a mimetype field that is split or empty.
     *
     * @param keys Keys to use composing headerFields map.
     * @param values Values to set into the headerFields map.
     * @param v The version of this ARC file.
     * @param origin Origin field from the file meta record.
     * @param offset Offset into arc file.
     * @param identifier Identifier for the hosting Reader.
     *
     * @return Metadata structure for this record.
     *
     * @exception IOException If no. of keys doesn't match no. of values.
     */
    private ARCRecordMetaData computeMetaData(List<String> keys,
            List<String> values, String v, String origin, long offset,
            final String identifier)
    throws IOException {
        if (keys.size() != values.size()) {
            List<String> originalValues = values;
            if (!isStrict()) {
                values = fixSpaceInURL(values, keys.size());
                // If values still doesn't match key size, try and do
                // further repair.
                if (keys.size() != values.size()) {
                    // Early ARCs had a space in mimetype.
                    if (values.size() == (keys.size() + 1) &&
                            values.get(4).toLowerCase().
                                startsWith("charset=")) {
                        // Merge the split "type; charset=..." back into one
                        // mimetype field.
                        List<String> nuvalues =
                            new ArrayList<String>(keys.size());
                        nuvalues.add(0, values.get(0));
                        nuvalues.add(1, values.get(1));
                        nuvalues.add(2, values.get(2));
                        nuvalues.add(3, values.get(3) + values.get(4));
                        nuvalues.add(4, values.get(5));
                        values = nuvalues;
                    } else if((values.size() + 1) == keys.size() &&
                            isLegitimateIPValue(values.get(1)) &&
                            isDate(values.get(2)) && isNumber(values.get(3))) {
                        // Mimetype is empty.
                        List<String> nuvalues =
                            new ArrayList<String>(keys.size());
                        nuvalues.add(0, values.get(0));
                        nuvalues.add(1, values.get(1));
                        nuvalues.add(2, values.get(2));
                        nuvalues.add(3, "-");
                        nuvalues.add(4, values.get(3));
                        values = nuvalues;
                    }
                }
            }
            if (keys.size() != values.size()) {
                throw new IOException("Size of field name keys does" +
                    " not match count of field values: " + values);
            }
            // Note that field was fixed on stderr.
            System.err.println(Level.WARNING.toString() +
                "Fixed spaces in metadata line at " + "offset " + offset +
                " Original: " + originalValues + ", New: " + values);
        }

        Map<String, Object> headerFields =
            new HashMap<String, Object>(keys.size() + 2);
        for (int i = 0; i < keys.size(); i++) {
            headerFields.put(keys.get(i), values.get(i));
        }

        // Add a check for tabs in URLs.  If any, replace with '%09'.
        // See https://sourceforge.net/tracker/?group_id=73833&atid=539099&func=detail&aid=1010966,
        // [ 1010966 ] crawl.log has URIs with spaces in them.
        String url = (String)headerFields.get(URL_FIELD_KEY);
        if (url != null && url.indexOf('\t') >= 0) {
            headerFields.put(URL_FIELD_KEY,
                TextUtils.replaceAll("\t", url, "%09"));
        }

        headerFields.put(VERSION_FIELD_KEY, v);
        headerFields.put(ORIGIN_FIELD_KEY, origin);
        headerFields.put(ABSOLUTE_OFFSET_KEY, new Long(offset));

        return new ARCRecordMetaData(identifier, headerFields);
    }

    /**
     * Fix space in URLs.
     * The ARCWriter used to write into the ARC URLs with spaces in them.
* See <a * href="https://sourceforge.net/tracker/?group_id=73833&atid=539099&func=detail&aid=1010966">[ 1010966 ] * crawl.log has URIs with spaces in them</a>. * This method does fix up on such headers converting all spaces found * to '%20'. * @param values List of metadata values. * @param requiredSize Expected size of resultant values list. * @return New list if we successfully fixed up values or original if * fixup failed. */ private List<String> fixSpaceInURL(List<String> values, int requiredSize) { // Do validity check. 3rd from last is a date of 14 numeric // characters. The 4th from last is IP, all before the IP // should be concatenated together with a '%20' joiner. // In the below, '4' is 4th field from end which has the IP. if (!(values.size() > requiredSize) || values.size() < 4) { return values; } // Test 3rd field is valid date. if (!isDate((String) values.get(values.size() - 3))) { return values; } // Test 4th field is valid IP. if (!isLegitimateIPValue((String) values.get(values.size() - 4))) { return values; } List<String> newValues = new ArrayList<String>(requiredSize); StringBuffer url = new StringBuffer(); for (int i = 0; i < (values.size() - 4); i++) { if (i > 0) { url.append("%20"); } url.append(values.get(i)); } newValues.add(url.toString()); for (int i = values.size() - 4; i < values.size(); i++) { newValues.add(values.get(i)); } return newValues; } private boolean isDate(final String date) { if (date.length() != 14) { return false; } return isNumber(date); } private boolean isNumber(final String n) { for (int i = 0; i < n.length(); i++) { if (!Character.isDigit(n.charAt(i))) { return false; } } return true; } private boolean isLegitimateIPValue(final String ip) { if ("-".equals(ip)) { return true; } Matcher m = InetAddressUtil.IPV4_QUADS.matcher(ip); return m != null && m.matches(); } /** * Skip over the the http header if one present. * * Subsequent reads will get the body. 
     *
     * <p>Calling this method in the midst of reading the header
     * will make for strange results.  Otherwise, safe to call
     * at any time though before reading any of the arc record
     * content is only time that it makes sense.
     *
     * <p>After calling this method, you can call
     * {@link #getHttpHeaders()} to get the read http header.
     *
     * @throws IOException
     */
    public void skipHttpHeader() throws IOException {
        if (this.httpHeaderStream != null) {
            // Empty the httpHeaderStream
            for (int available = this.httpHeaderStream.available();
                    this.httpHeaderStream != null &&
                        (available = this.httpHeaderStream.available()) > 0;) {
                // We should be in this loop once only we should only do this
                // buffer allocation once.
                byte [] buffer = new byte[available];
                // The read nulls out httpHeaderStream when done with it so
                // need check for null in the loop control line.
                read(buffer, 0, available);
            }
        }
    }

    /**
     * Drain the buffered HTTP header (if any) to STDOUT.
     *
     * @throws IOException
     */
    public void dumpHttpHeader() throws IOException {
        if (this.httpHeaderStream == null) {
            return;
        }
        // Dump the httpHeaderStream to STDOUT
        for (int available = this.httpHeaderStream.available();
                this.httpHeaderStream != null &&
                    (available = this.httpHeaderStream.available()) > 0;) {
            // We should be in this loop only once and should do this
            // buffer allocation once.
            byte[] buffer = new byte[available];
            // The read nulls out httpHeaderStream when done with it so
            // need check for null in the loop control line.
            int read = read(buffer, 0, available);
            System.out.write(buffer, 0, read);
        }
    }

    /**
     * Read http header if present. Technique borrowed from HttpClient HttpParse
     * class. set errors when found.
     *
     * @return ByteArrayInputStream with the http header in it or null if no
     * http header.
     * @throws IOException
     */
    private InputStream readHttpHeader() throws IOException {
        // this can be helpful when simply iterating over records,
        // looking for problems.
        Logger logger = Logger.getLogger(this.getClass().getName());
        ArchiveRecordHeader h = this.getHeader();

        // If judged a record that doesn't have an http header, return
        // immediately.
        String url = getHeader().getUrl();
        if(!url.startsWith("http") ||
            getHeader().getLength() <= MIN_HTTP_HEADER_LENGTH) {
            return null;
        }

        String statusLine;
        byte[] statusBytes;
        int eolCharCount = 0;
        int errOffset = 0;

        // Read status line, skipping any errant http headers found before it
        // This allows a larger number of 'corrupt' arcs -- where headers were accidentally
        // inserted before the status line to be readable
        while (true) {
            statusBytes = LaxHttpParser.readRawLine(getIn());
            eolCharCount = getEolCharsCount(statusBytes);
            if (eolCharCount <= 0) {
                throw new RecoverableIOException(
                    "Failed to read http status where one " +
                    " was expected: "
                    + ((statusBytes == null) ? "" : new String(statusBytes)));
            }

            statusLine = EncodingUtil.getString(statusBytes, 0,
                statusBytes.length - eolCharCount, ARCConstants.DEFAULT_ENCODING);

            // If a null or DELETED break immediately
            if ((statusLine == null) || statusLine.startsWith("DELETED")) {
                break;
            }

            // If it's actually the status line, break, otherwise continue skipping any
            // previous header values
            if (!statusLine.contains(":") && StatusLine.startsWithHTTP(statusLine)) {
                break;
            }

            // Add bytes read to error "offset" to add to position
            errOffset += statusBytes.length;
        }

        if (errOffset > 0) {
            this.incrementPosition(errOffset);
        }

        // NOTE(review): if statusLine is null here, the startsWith("DELETED")
        // call below throws NPE before the invalid-status-line error can be
        // recorded -- confirm whether LaxHttpParser can yield a null line.
        if ((statusLine == null) ||
                !StatusLine.startsWithHTTP(statusLine)) {
            if (statusLine.startsWith("DELETED")) {
                // Some old ARCs have deleted records like following:
                // http://vireo.gatech.edu:80/ebt-bin/nph-dweb/dynaweb/SGI_Developer/SGITCL_PG/@Generic__BookTocView/11108%3Btd%3D2 130.207.168.42 19991010131803 text/html 29202
                // DELETED_TIME=20000425001133_DELETER=Kurt_REASON=alexalist
                // (follows ~29K spaces)
                // For now, throw a RecoverableIOException so if iterating over
                // records, we keep going.  TODO: Later make a legitimate
                // ARCRecord from the deleted record rather than throw
                // exception.
                throw new DeletedARCRecordIOException(statusLine);
            } else {
                this.errors.add(ArcRecordErrors.HTTP_STATUS_LINE_INVALID);
            }
        }

        try {
            this.httpStatus = new StatusLine(statusLine);
        } catch(IOException e) {
            logger.warning(e.getMessage() + " at offset: " + h.getOffset());
            this.errors.add(ArcRecordErrors.HTTP_STATUS_LINE_EXCEPTION);
        }

        // Save off all bytes read.  Keep them as bytes rather than
        // convert to strings so we don't have to worry about encodings
        // though this should never be a problem doing http headers since
        // its all supposed to be ascii.
        ByteArrayOutputStream baos =
            new ByteArrayOutputStream(statusBytes.length + 4 * 1024);
        baos.write(statusBytes);

        // Now read rest of the header lines looking for the separation
        // between header and body.
        for (byte [] lineBytes = null; true;) {
            lineBytes = LaxHttpParser.readRawLine(getIn());
            eolCharCount = getEolCharsCount(lineBytes);
            if (eolCharCount <= 0) {
                if (getIn().available() == 0) {
                    httpHeaderBytesRead += statusBytes.length;
                    logger.warning("HTTP header truncated at offset: " + h.getOffset());
                    this.errors.add(ArcRecordErrors.HTTP_HEADER_TRUNCATED);
                    this.setEor(true);
                    break;
                } else {
                    throw new IOException("Failed reading http headers: " +
                        ((lineBytes != null)? new String(lineBytes): null));
                }
            } else {
                httpHeaderBytesRead += lineBytes.length;
            }
            // Save the bytes read.
            baos.write(lineBytes);
            if ((lineBytes.length - eolCharCount) <= 0) {
                // We've finished reading the http header.
                break;
            }
        }

        byte [] headerBytes = baos.toByteArray();
        // Save off where body starts.
        this.getMetaData().setContentBegin(headerBytes.length);
        ByteArrayInputStream bais =
            new ByteArrayInputStream(headerBytes);
        if (!bais.markSupported()) {
            throw new IOException("ByteArrayInputStream does not support mark");
        }
        bais.mark(headerBytes.length);
        // Read the status line.  Don't let it into the parseHeaders function.
        // It doesn't know what to do with it.
        // NOTE(review): the return value of this read is ignored; a short
        // read would misalign the header parse -- presumably safe for an
        // in-memory stream, but worth confirming.
        bais.read(statusBytes, 0, statusBytes.length);
        this.httpHeaders = LaxHttpParser.parseHeaders(bais,
            ARCConstants.DEFAULT_ENCODING);
        this.getMetaData().setStatusCode(Integer.toString(getStatusCode()));
        bais.reset();
        return bais;
    }

    /**
     * Marker exception for old-style deleted ARC records; recoverable so
     * iteration over a file can continue past them.
     */
    private static class DeletedARCRecordIOException
            extends RecoverableIOException {
        private static final long serialVersionUID = 1L;

        public DeletedARCRecordIOException(final String reason) {
            super(reason);
        }
    }

    /**
     * Return status code for this record.
     *
     * This method will return -1 until the http header has been read.
     * @return Status code.
     */
    public int getStatusCode() {
        return (this.httpStatus == null)? -1: this.httpStatus.getStatusCode();
    }

    /**
     * @param bytes Array of bytes to examine for an EOL.
     * @return Count of end-of-line characters or zero if none.
     */
    private int getEolCharsCount(byte [] bytes) {
        int count = 0;
        if (bytes != null && bytes.length >=1 &&
                bytes[bytes.length - 1] == '\n') {
            count++;
            if (bytes.length >=2 && bytes[bytes.length -2] == '\r') {
                count++;
            }
        }
        return count;
    }

    /**
     * @return Meta data for this record.
     */
    public ARCRecordMetaData getMetaData() {
        return (ARCRecordMetaData)getHeader();
    }

    /**
     * @return http headers (Only available after header has been read).
     */
    public Header [] getHttpHeaders() {
        return this.httpHeaders;
    }

    /**
     * @return ArcRecordErrors encountered when reading
     */
    public List<ArcRecordErrors> getErrors() {
        return this.errors;
    }

    /**
     * @return true if ARC record errors found
     */
    public boolean hasErrors() {
        return !this.errors.isEmpty();
    }

    /**
     * @return Next character in this ARCRecord's content else -1 if at end of
     * this record.
     * @throws IOException
     */
    public int read() throws IOException {
        int c = -1;
        if (this.httpHeaderStream != null &&
                (this.httpHeaderStream.available() > 0)) {
            // If http header, return bytes from it before we go to underlying
            // stream.
            c = this.httpHeaderStream.read();
            // If done with the header stream, null it out.
            if (this.httpHeaderStream.available() <= 0) {
                this.httpHeaderStream = null;
            }
            incrementPosition();
        } else {
            c = super.read();
        }
        return c;
    }

    /**
     * Bulk read; drains the buffered http header (if any) before falling
     * through to the underlying record stream.
     *
     * @param b Destination buffer.
     * @param offset Offset into <code>b</code> to start writing at.
     * @param length Maximum number of bytes to read.
     * @return Count of bytes read or -1 at end of record.
     * @throws IOException
     */
    public int read(byte [] b, int offset, int length) throws IOException {
        int read = -1;
        if (this.httpHeaderStream != null &&
                (this.httpHeaderStream.available() > 0)) {
            // If http header, return bytes from it before we go to underlying
            // stream.
            read = Math.min(length, this.httpHeaderStream.available());
            if (read == 0) {
                read = -1;
            } else {
                read = this.httpHeaderStream.read(b, offset, read);
            }
            // If done with the header stream, null it out.
            if (this.httpHeaderStream.available() <= 0) {
                this.httpHeaderStream = null;
            }
            incrementPosition(read);
        } else {
            read = super.read(b, offset, length);
        }
        return read;
    }

    /**
     * @return Offset at which the body begins (Only known after
     * header has been read) or -1 if none or if we haven't read
     * headers yet.  Usually length of HTTP headers (does not include ARC
     * metadata line length).
     */
    public int getBodyOffset() {
        return this.getMetaData().getContentBegin();
    }

    @Override
    protected String getIp4Cdx(ArchiveRecordHeader h) {
        // Prefer the IP recorded in the ARC metadata line if present.
        String result = null;
        if (h instanceof ARCRecordMetaData) {
            result = ((ARCRecordMetaData)h).getIp();
        }
        return (result != null)? result: super.getIp4Cdx(h);
    }

    @Override
    protected String getStatusCode4Cdx(ArchiveRecordHeader h) {
        // Prefer the status code captured during header parse if present.
        String result = null;
        if (h instanceof ARCRecordMetaData) {
            result = ((ARCRecordMetaData) h).getStatusCode();
        }
        return (result != null) ? result: super.getStatusCode4Cdx(h);
    }

    @Override
    protected String getDigest4Cdx(ArchiveRecordHeader h) {
        // Prefer the digest recorded in the metadata if present.
        String result = null;
        if (h instanceof ARCRecordMetaData) {
            result = ((ARCRecordMetaData) h).getDigest();
        }
        return (result != null) ? result: super.getDigest4Cdx(h);
    }
}
ukwa/webarchive-commons
src/main/java/org/archive/io/arc/ARCRecord.java
Java
apache-2.0
32,130
/* * ==================================================================== * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. * ==================================================================== * * This software consists of voluntary contributions made by many * individuals on behalf of the Apache Software Foundation. For more * information on the Apache Software Foundation, please see * <http://www.apache.org/>. * */ package ch.boye.httpclientandroidlib.message; import java.util.NoSuchElementException; import ch.boye.httpclientandroidlib.Header; import ch.boye.httpclientandroidlib.HeaderIterator; /** * Basic implementation of a {@link HeaderIterator}. * * @since 4.0 */ public class BasicHeaderIterator implements HeaderIterator { /** * An array of headers to iterate over. * Not all elements of this array are necessarily part of the iteration. * This array will never be modified by the iterator. * Derived implementations are expected to adhere to this restriction. */ protected final Header[] allHeaders; /** * The position of the next header in {@link #allHeaders allHeaders}. * Negative if the iteration is over. */ protected int currentIndex; /** * The header name to filter by. * <code>null</code> to iterate over all headers in the array. 
*/ protected String headerName; /** * Creates a new header iterator. * * @param headers an array of headers over which to iterate * @param name the name of the headers over which to iterate, or * <code>null</code> for any */ public BasicHeaderIterator(Header[] headers, String name) { if (headers == null) { throw new IllegalArgumentException ("Header array must not be null."); } this.allHeaders = headers; this.headerName = name; this.currentIndex = findNext(-1); } /** * Determines the index of the next header. * * @param from one less than the index to consider first, * -1 to search for the first header * * @return the index of the next header that matches the filter name, * or negative if there are no more headers */ protected int findNext(int from) { if (from < -1) return -1; final int to = this.allHeaders.length-1; boolean found = false; while (!found && (from < to)) { from++; found = filterHeader(from); } return found ? from : -1; } /** * Checks whether a header is part of the iteration. * * @param index the index of the header to check * * @return <code>true</code> if the header should be part of the * iteration, <code>false</code> to skip */ protected boolean filterHeader(int index) { return (this.headerName == null) || this.headerName.equalsIgnoreCase(this.allHeaders[index].getName()); } // non-javadoc, see interface HeaderIterator public boolean hasNext() { return (this.currentIndex >= 0); } /** * Obtains the next header from this iteration. * * @return the next header in this iteration * * @throws NoSuchElementException if there are no more headers */ public Header nextHeader() throws NoSuchElementException { final int current = this.currentIndex; if (current < 0) { throw new NoSuchElementException("Iteration already finished."); } this.currentIndex = findNext(current); return this.allHeaders[current]; } /** * Returns the next header. * Same as {@link #nextHeader nextHeader}, but not type-safe. 
* * @return the next header in this iteration * * @throws NoSuchElementException if there are no more headers */ public final Object next() throws NoSuchElementException { return nextHeader(); } /** * Removing headers is not supported. * * @throws UnsupportedOperationException always */ public void remove() throws UnsupportedOperationException { throw new UnsupportedOperationException ("Removing headers is not supported."); } }
sergecodd/FireFox-OS
B2G/gecko/mobile/android/base/httpclientandroidlib/message/BasicHeaderIterator.java
Java
apache-2.0
5,137
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import axios, { AxiosResponse } from 'axios';

import { Api } from '../singletons';

import { assemble } from './general';
import { RowColumn } from './query-cursor';

// Message used for errors raised by user-initiated query cancellation.
const CANCELED_MESSAGE = 'Query canceled by user.';

// Shape of the error payload Druid returns in HTTP error responses.
export interface DruidErrorResponse {
  error?: string;
  errorMessage?: string;
  errorClass?: string;
  host?: string;
}

// A one-click fix offered to the user; `fn` returns the corrected query
// text or undefined when the fix no longer applies.
export interface QuerySuggestion {
  label: string;
  fn: (query: string) => string | undefined;
}

/**
 * Extract the error message embedded in an HTML error page (between the
 * `</h3><pre>` marker and the first Java stack-trace line), un-escaping
 * the HTML entities it contains. Returns undefined if the markers are
 * not found.
 */
export function parseHtmlError(htmlStr: string): string | undefined {
  const startIndex = htmlStr.indexOf('</h3><pre>');
  const endIndex = htmlStr.indexOf('\n\tat');
  if (startIndex === -1 || endIndex === -1) return;

  return htmlStr
    .substring(startIndex + 10, endIndex)
    .replace(/&quot;/g, '"')
    .replace(/&apos;/g, `'`)
    .replace(/&gt;/g, '>');
}

/**
 * Produce a human readable message from an axios error: joins the fields
 * of a structured Druid error response, or extracts the message from an
 * HTML error page, falling back to the raw error's message.
 */
export function getDruidErrorMessage(e: any): string {
  const data: DruidErrorResponse | string = (e.response || {}).data || {};
  switch (typeof data) {
    case 'object':
      return (
        assemble(
          data.error,
          data.errorMessage,
          data.errorClass,
          data.host ? `on host ${data.host}` : undefined,
        ).join(' / ') || e.message
      );

    case 'string':
      const htmlResp = parseHtmlError(data);
      return htmlResp ? `HTML Error: ${htmlResp}` : e.message;

    default:
      return e.message;
  }
}

/**
 * Rich error wrapper for failed Druid queries: parses out the error
 * position within the SQL text and, for known error patterns, offers an
 * automatic fix suggestion.
 */
export class DruidError extends Error {
  /**
   * Parse a "line X, column Y" (optionally ranged) position out of a
   * Druid error message into zero-based row/column coordinates.
   */
  static parsePosition(errorMessage: string): RowColumn | undefined {
    const range = String(errorMessage).match(
      /from line (\d+), column (\d+) to line (\d+), column (\d+)/i,
    );
    if (range) {
      return {
        match: range[0],
        row: Number(range[1]) - 1,
        column: Number(range[2]) - 1,
        endRow: Number(range[3]) - 1,
        endColumn: Number(range[4]), // No -1 because we need to include the last char
      };
    }

    const single = String(errorMessage).match(/at line (\d+), column (\d+)/i);
    if (single) {
      return {
        match: single[0],
        row: Number(single[1]) - 1,
        column: Number(single[2]) - 1,
      };
    }

    return;
  }

  /**
   * Convert a 1-based line/column position into a character index within
   * the query string.
   */
  static positionToIndex(str: string, line: number, column: number): number {
    const lines = str.split('\n').slice(0, line);
    const lastLineIndex = lines.length - 1;
    lines[lastLineIndex] = lines[lastLineIndex].slice(0, column - 1);
    return lines.join('\n').length;
  }

  /**
   * Match known error-message patterns and return a suggested fix for the
   * query text, or undefined when no pattern applies.
   */
  static getSuggestion(errorMessage: string): QuerySuggestion | undefined {
    // == is used instead of =
    // ex: Encountered "= =" at line 3, column 15. Was expecting one of
    const matchEquals = errorMessage.match(/Encountered "= =" at line (\d+), column (\d+)./);
    if (matchEquals) {
      const line = Number(matchEquals[1]);
      const column = Number(matchEquals[2]);
      return {
        label: `Replace == with =`,
        fn: str => {
          const index = DruidError.positionToIndex(str, line, column);
          // Bail out if the query has changed and '==' is no longer there.
          if (!str.slice(index).startsWith('==')) return;
          return `${str.slice(0, index)}=${str.slice(index + 2)}`;
        },
      };
    }

    // Incorrect quoting on table
    // ex: org.apache.calcite.runtime.CalciteContextException: From line 3, column 17 to line 3, column 31: Column '#ar.wikipedia' not found in any table
    const matchQuotes = errorMessage.match(
      /org.apache.calcite.runtime.CalciteContextException: From line (\d+), column (\d+) to line \d+, column \d+: Column '([^']+)' not found in any table/,
    );
    if (matchQuotes) {
      const line = Number(matchQuotes[1]);
      const column = Number(matchQuotes[2]);
      const literalString = matchQuotes[3];
      return {
        label: `Replace "${literalString}" with '${literalString}'`,
        fn: str => {
          const index = DruidError.positionToIndex(str, line, column);
          // Bail out if the double-quoted literal is no longer at the
          // reported position.
          if (!str.slice(index).startsWith(`"${literalString}"`)) return;
          return `${str.slice(0, index)}'${literalString}'${str.slice(
            index + literalString.length + 2,
          )}`;
        },
      };
    }

    // , before FROM
    const matchComma = errorMessage.match(/Encountered "(FROM)" at/i);
    if (matchComma) {
      const fromKeyword = matchComma[1];
      return {
        label: `Remove , before ${fromKeyword}`,
        fn: str => {
          const newQuery = str.replace(/,(\s+FROM)/gim, '$1');
          // No change means the pattern did not apply; offer nothing.
          if (newQuery === str) return;
          return newQuery;
        },
      };
    }

    return;
  }

  public canceled?: boolean;
  public error?: string;
  public errorMessage?: string;
  public errorMessageWithoutExpectation?: string;
  public expectation?: string;
  public position?: RowColumn;
  public errorClass?: string;
  public host?: string;
  public suggestion?: QuerySuggestion;

  constructor(e: any) {
    super(axios.isCancel(e) ? CANCELED_MESSAGE : getDruidErrorMessage(e));
    if (axios.isCancel(e)) {
      this.canceled = true;
    } else {
      const data: DruidErrorResponse | string = (e.response || {}).data || {};
      let druidErrorResponse: DruidErrorResponse;
      switch (typeof data) {
        case 'object':
          druidErrorResponse = data;
          break;

        case 'string':
          // Body was an HTML page rather than structured JSON.
          druidErrorResponse = {
            errorClass: 'HTML error',
          };
          break;

        default:
          druidErrorResponse = {};
          break;
      }
      Object.assign(this, druidErrorResponse);

      if (this.errorMessage) {
        this.position = DruidError.parsePosition(this.errorMessage);
        this.suggestion = DruidError.getSuggestion(this.errorMessage);

        // Split the parser's "Was expecting one of ..." tail off the message.
        const expectationIndex = this.errorMessage.indexOf('Was expecting one of');
        if (expectationIndex >= 0) {
          this.errorMessageWithoutExpectation = this.errorMessage.slice(0, expectationIndex).trim();
          this.expectation = this.errorMessage.slice(expectationIndex).trim();
        } else {
          this.errorMessageWithoutExpectation = this.errorMessage;
        }
      }
    }
  }
}

/**
 * POST a native (rune) query to Druid and return the response data.
 * Failures are re-thrown with a readable Druid error message.
 */
export async function queryDruidRune(runeQuery: Record<string, any>): Promise<any> {
  let runeResultResp: AxiosResponse<any>;
  try {
    runeResultResp = await Api.instance.post('/druid/v2', runeQuery);
  } catch (e) {
    throw new Error(getDruidErrorMessage(e));
  }
  return runeResultResp.data;
}

/**
 * POST a SQL query to Druid and return the result rows.
 * Failures are re-thrown with a readable Druid error message.
 */
export async function queryDruidSql<T = any>(sqlQueryPayload: Record<string, any>): Promise<T[]> {
  let sqlResultResp: AxiosResponse<any>;
  try {
    sqlResultResp = await Api.instance.post('/druid/v2/sql', sqlQueryPayload);
  } catch (e) {
    throw new Error(getDruidErrorMessage(e));
  }
  return sqlResultResp.data;
}

// Parsed form of a single `DruidQueryRel(...)` plan entry.
export interface BasicQueryExplanation {
  query: any;
  signature: string | null;
}

// Parsed form of a `DruidSemiJoin(...)` plan entry (a query plus its
// right-hand sub-query).
export interface SemiJoinQueryExplanation {
  mainQuery: BasicQueryExplanation;
  subQueryRight: BasicQueryExplanation;
}

/**
 * Split a plan fragment into its `query=` JSON (parsed when possible)
 * and `signature=` parts. Falls back to returning the raw text as the
 * query when the JSON cannot be parsed.
 */
function parseQueryPlanResult(queryPlanResult: string): BasicQueryExplanation {
  if (!queryPlanResult) {
    return {
      query: null,
      signature: null,
    };
  }

  const queryAndSignature = queryPlanResult.split(', signature=');
  const queryValue = new RegExp(/query=(.+)/).exec(queryAndSignature[0]);
  const signatureValue = queryAndSignature[1];

  let parsedQuery: any;

  if (queryValue && queryValue[1]) {
    try {
      parsedQuery = JSON.parse(queryValue[1]);
    } catch (e) {
      // Ignore parse failure; fall back to the raw plan text below.
    }
  }

  return {
    query: parsedQuery || queryPlanResult,
    signature: signatureValue || null,
  };
}

/**
 * Parse the textual EXPLAIN plan returned by Druid into structured form.
 * Returns the raw string unchanged for plan shapes we do not understand.
 */
export function parseQueryPlan(
  raw: string,
): BasicQueryExplanation | SemiJoinQueryExplanation | string {
  let plan: string = raw;
  plan = plan.replace(/\n/g, '');

  if (plan.includes('DruidOuterQueryRel(')) {
    return plan; // don't know how to parse this
  }

  let queryArgs: string;
  const queryRelFnStart = 'DruidQueryRel(';
  const semiJoinFnStart = 'DruidSemiJoin(';

  if (plan.startsWith(queryRelFnStart)) {
    queryArgs = plan.substring(queryRelFnStart.length, plan.length - 1);
  } else if (plan.startsWith(semiJoinFnStart)) {
    queryArgs = plan.substring(semiJoinFnStart.length, plan.length - 1);
    const leftExpressionsArgs = ', leftExpressions=';
    const keysArgumentIdx = queryArgs.indexOf(leftExpressionsArgs);
    if (keysArgumentIdx !== -1) {
      // Semi-join: parse the main query and recurse into the right side.
      return {
        mainQuery: parseQueryPlanResult(queryArgs.substring(0, keysArgumentIdx)),
        subQueryRight: parseQueryPlan(queryArgs.substring(queryArgs.indexOf(queryRelFnStart))),
      } as SemiJoinQueryExplanation;
    }
  } else {
    return plan;
  }

  return parseQueryPlanResult(queryArgs);
}
gianm/druid
web-console/src/utils/druid-query.ts
TypeScript
apache-2.0
9,335
/* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ (function () { 'use strict'; angular .module('refstackApp') .controller('GuidelinesController', GuidelinesController); GuidelinesController.$inject = ['$filter', '$http', '$uibModal', 'refstackApiUrl']; /** * RefStack Guidelines Controller * This controller is for the '/guidelines' page where a user can browse * through tests belonging to Interop WG defined capabilities. */ function GuidelinesController($filter ,$http, $uibModal, refstackApiUrl) { var ctrl = this; ctrl.getVersionList = getVersionList; ctrl.update = update; ctrl.updateTargetCapabilities = updateTargetCapabilities; ctrl.filterStatus = filterStatus; ctrl.getObjectLength = getObjectLength; ctrl.openTestListModal = openTestListModal; ctrl.updateVersionList = updateVersionList; ctrl.gl_type = 'powered'; /** The target OpenStack marketing program to show capabilities for. */ ctrl.target = 'platform'; /** The various possible capability statuses. */ ctrl.status = { required: true, advisory: false, deprecated: false, removed: false }; /** * The template to load for displaying capability details. */ ctrl.detailsTemplate = 'components/guidelines/partials/' + 'guidelineDetails.html'; /** * Update the array of dictionary objects which stores data * pertaining to each guideline, sorting them in descending * order by guideline name. After these are sorted, the * function to update the capabilities is called. 
         */
        function updateVersionList() {
            let gl_files = ctrl.guidelineData[ctrl.gl_type];
            // Sort guideline files by name, descending (newest first).
            ctrl.versionList = $filter('orderBy')(gl_files, 'name', true);
            // Default to the first approved guideline which is expected
            // to be at index 1.
            ctrl.version = ctrl.versionList[1];
            update();
        }

        /**
         * Retrieve a dictionary object comprised of available guideline types
         * and an array of dictionary objects containing file info about
         * each guideline file pertaining to that particular guideline type.
         * After a successful API call, the function to sort and update the
         * version list is called.
         */
        function getVersionList() {
            var content_url = refstackApiUrl + '/guidelines';
            // NOTE(review): $http .success/.error are deprecated AngularJS
            // promise methods -- consider migrating to .then/.catch.
            ctrl.versionsRequest =
                $http.get(content_url).success(function (data) {
                    ctrl.guidelineData = data;
                    updateVersionList();
                }).error(function (error) {
                    ctrl.showError = true;
                    ctrl.error =
                        'Error retrieving version list: ' +
                        angular.toJson(error);
                });
        }

        /**
         * This will contact the Refstack API server to retrieve the JSON
         * content of the guideline file corresponding to the selected
         * version.
*/ function update() { ctrl.content_url = refstackApiUrl + '/guidelines/' + ctrl.version.file; let get_params = {'gl_file': ctrl.version.file}; ctrl.capsRequest = $http.get(ctrl.content_url, get_params).success( function (data) { ctrl.guidelines = data; if ('metadata' in data && data.metadata.schema >= '2.0') { ctrl.schema = data.metadata.schema; ctrl.criteria = data.metadata.scoring.criteria; ctrl.releases = data.metadata.os_trademark_approval.releases; ctrl.guidelineStatus = data.metadata.os_trademark_approval.status; } else { ctrl.schema = data.schema; ctrl.criteria = data.criteria; ctrl.releases = data.releases; ctrl.guidelineStatus = data.status; } ctrl.updateTargetCapabilities(); }).error(function (error) { ctrl.showError = true; ctrl.guidelines = null; ctrl.error = 'Error retrieving guideline content: ' + angular.toJson(error); }); } /** * This will update the scope's 'targetCapabilities' object with * capabilities belonging to the selected OpenStack marketing program * (programs typically correspond to 'components' in the Interop WG * schema). Each capability will have its status mapped to it. */ function updateTargetCapabilities() { ctrl.targetCapabilities = {}; var components = ctrl.guidelines.components; var targetCaps = ctrl.targetCapabilities; var targetComponents = null; var old_type = ctrl.gl_type; if (ctrl.target === 'dns' || ctrl.target === 'orchestration') { ctrl.gl_type = ctrl.target; } else { ctrl.gl_type = 'powered'; } // If it has not been updated since the last program type change, // will need to update the list if (old_type !== ctrl.gl_type) { updateVersionList(); return; } // The 'platform' target is comprised of multiple components, so // we need to get the capabilities belonging to each of its // components. 
if (ctrl.target === 'platform' || ctrl.schema >= '2.0') { if ('add-ons' in ctrl.guidelines) { targetComponents = ['os_powered_' + ctrl.target]; } else if (ctrl.schema >= '2.0') { var platformsMap = { 'platform': 'OpenStack Powered Platform', 'compute': 'OpenStack Powered Compute', 'object': 'OpenStack Powered Storage' }; targetComponents = ctrl.guidelines.platforms[ platformsMap[ctrl.target]].components.map( function(c) { return c.name; } ); } else { targetComponents = ctrl.guidelines.platform.required; } // This will contain status priority values, where lower // values mean higher priorities. var statusMap = { required: 1, advisory: 2, deprecated: 3, removed: 4 }; // For each component required for the platform program. angular.forEach(targetComponents, function (component) { // Get each capability list belonging to each status. var componentList = components[component]; if (ctrl.schema >= '2.0') { componentList = componentList.capabilities; } angular.forEach(componentList, function (caps, status) { // For each capability. angular.forEach(caps, function(cap) { // If the capability has already been added. if (cap in targetCaps) { // If the status priority value is less // than the saved priority value, update // the value. if (statusMap[status] < statusMap[targetCaps[cap]]) { targetCaps[cap] = status; } } else { targetCaps[cap] = status; } }); }); }); } else { angular.forEach(components[ctrl.target], function (caps, status) { angular.forEach(caps, function(cap) { targetCaps[cap] = status; }); }); } } /** * This filter will check if a capability's status corresponds * to a status that is checked/selected in the UI. This filter * is meant to be used with the ng-repeat directive. 
* @param {Object} capability * @returns {Boolean} True if capability's status is selected */ function filterStatus(capability) { var caps = ctrl.targetCapabilities; return ctrl.status.required && caps[capability.id] === 'required' || ctrl.status.advisory && caps[capability.id] === 'advisory' || ctrl.status.deprecated && caps[capability.id] === 'deprecated' || ctrl.status.removed && caps[capability.id] === 'removed'; } /** * This function will get the length of an Object/dict based on * the number of keys it has. * @param {Object} object * @returns {Number} length of object */ function getObjectLength(object) { return Object.keys(object).length; } /** * This will open the modal that will show a list of all tests * belonging to capabilities with the selected status(es). */ function openTestListModal() { $uibModal.open({ templateUrl: '/components/guidelines/partials' + '/testListModal.html', backdrop: true, windowClass: 'modal', animation: true, controller: 'TestListModalController as modal', size: 'lg', resolve: { version: function () { return ctrl.version.name.slice(0, -5); }, version_file: function() { return ctrl.version.file; }, target: function () { return ctrl.target; }, status: function () { return ctrl.status; } } }); } ctrl.getVersionList(); } angular .module('refstackApp') .controller('TestListModalController', TestListModalController); TestListModalController.$inject = [ '$uibModalInstance', '$http', 'version', 'version_file', 'target', 'status', 'refstackApiUrl' ]; /** * Test List Modal Controller * This controller is for the modal that appears if a user wants to see the * test list corresponding to Interop WG capabilities with the selected * statuses. 
*/ function TestListModalController($uibModalInstance, $http, version, version_file, target, status, refstackApiUrl) { var ctrl = this; ctrl.version = version; ctrl.version_file = version_file; ctrl.target = target; ctrl.status = status; ctrl.close = close; ctrl.updateTestListString = updateTestListString; ctrl.aliases = true; ctrl.flagged = false; // Check if the API URL is absolute or relative. if (refstackApiUrl.indexOf('http') > -1) { ctrl.url = refstackApiUrl; } else { ctrl.url = location.protocol + '//' + location.host + refstackApiUrl; } /** * This function will close/dismiss the modal. */ function close() { $uibModalInstance.dismiss('exit'); } /** * This function will return a list of statuses based on which ones * are selected. */ function getStatusList() { var statusList = []; angular.forEach(ctrl.status, function(value, key) { if (value) { statusList.push(key); } }); return statusList; } /** * This will get the list of tests from the API and update the * controller's test list string variable. */ function updateTestListString() { var statuses = getStatusList(); if (!statuses.length) { ctrl.error = 'No tests matching selected criteria.'; return; } ctrl.testListUrl = [ ctrl.url, '/guidelines/', ctrl.version_file, '/tests?', 'target=', ctrl.target, '&', 'type=', statuses.join(','), '&', 'alias=', ctrl.aliases.toString(), '&', 'flag=', ctrl.flagged.toString() ].join(''); ctrl.testListRequest = $http.get(ctrl.testListUrl). 
then(function successCallback(response) { ctrl.error = null; ctrl.testListString = response.data; if (!ctrl.testListString) { ctrl.testListCount = 0; } else { ctrl.testListCount = ctrl.testListString.split('\n').length; } }, function errorCallback(response) { ctrl.testListString = null; ctrl.testListCount = null; if (angular.isObject(response.data) && response.data.message) { ctrl.error = 'Error retrieving test list: ' + response.data.message; } else { ctrl.error = 'Unknown error retrieving test list.'; } }); } updateTestListString(); } })();
stackforge/refstack
refstack-ui/app/components/guidelines/guidelinesController.js
JavaScript
apache-2.0
15,200
package net.spy.memcached;

import net.spy.memcached.compat.SpyObject;
import net.spy.memcached.transcoders.Transcoder;

/**
 * Object that provides mutation via CAS over a given memcache client.
 *
 * <p>Example usage (reinventing incr):</p>
 *
 * <pre>
 * // Get or create a client.
 * MemcachedClient client=[...];
 *
 * // Get a Transcoder.
 * Transcoder&lt;Long&gt; tc = new LongTranscoder();
 *
 * // Get a mutator instance that uses that client.
 * CASMutator&lt;Long&gt; mutator=new CASMutator&lt;Long&gt;(client, tc);
 *
 * // Get a mutation that knows what to do when a value is found.
 * CASMutation&lt;Long&gt; mutation=new CASMutation&lt;Long&gt;() {
 *     public Long getNewValue(Long current) {
 *         return current + 1;
 *     }
 * };
 *
 * // Do a mutation.
 * long currentValue=mutator.cas(someKey, 0L, 0, mutation);
 * </pre>
 */
public class CASMutator<T> extends SpyObject {

    // Default upper bound on CAS attempts before giving up.
    private static final int MAX_TRIES=8192;

    // Client used for gets/cas/add round trips.
    private final MemcachedClientIF client;
    // Transcoder used to (de)serialize values of type T.
    private final Transcoder<T> transcoder;
    // Configured maximum number of CAS attempts for this mutator.
    private final int max;

    /**
     * Construct a CASMutator that uses the given client.
     *
     * @param c the client
     * @param tc the Transcoder to use
     * @param max_tries the maximum number of attempts to get a CAS to succeed
     */
    public CASMutator(MemcachedClientIF c, Transcoder<T> tc, int max_tries) {
        super();
        client=c;
        transcoder=tc;
        max=max_tries;
    }

    /**
     * Construct a CASMutator that uses the given client with the default
     * maximum of {@code MAX_TRIES} attempts.
     *
     * @param c the client
     * @param tc the Transcoder to use
     */
    public CASMutator(MemcachedClientIF c, Transcoder<T> tc) {
        this(c, tc, MAX_TRIES);
    }

    /**
     * CAS a new value in for a key.
     *
     * <p>
     * Note that if initial is null, this method will only update existing
     * values.
     * </p>
     *
     * @param key the key to be CASed
     * @param initial the value to use when the object is not cached
     * @param initialExp the expiration time to use when initializing
     *        (note: it is also used for the expiration on each CAS update)
     * @param m the mutation to perform on an object if a value exists for the
     *        key
     * @return the new value that was set
     * @throws Exception propagated from the mutation callback or the
     *         underlying client operations
     */
    public T cas(final String key, final T initial, int initialExp,
            final CASMutation<T> m) throws Exception {
        T rv=initial;

        boolean done=false;
        // Retry loop: each iteration re-fetches the current value (with its
        // CAS token) and attempts either a cas (value exists) or an add
        // (value missing), until one succeeds or attempts are exhausted.
        for(int i=0; !done && i<max; i++) {
            CASValue<T> casval=client.gets(key, transcoder);
            T current=null;
            // If there were a CAS value, check to see if it's compatible.
            if(casval != null) {
                T tmp = casval.getValue();
                current=tmp;
            }
            // If we have anything mutate and CAS, else add.
            if(current != null) {
                // Declaring this impossible since the only way current can
                // be non-null is if casval was set.
                assert casval != null : "casval was null with a current value";
                rv=m.getNewValue(current);
                // There are three possibilities here:
                //  1) It worked and we're done.
                //  2) It collided and we need to reload and try again.
                //  3) It disappeared between our fetch and our cas.
                // We're ignoring #3 because it's *extremely* unlikely and the
                // behavior will be fine in this code -- we'll do another gets
                // and follow it up with either an add or another cas depending
                // on whether it exists the next time.
                if(client.cas(key, casval.getCas(), initialExp, rv,
                        transcoder) == CASResponse.OK) {
                    done=true;
                }
            } else {
                // No value found, try an add.
                if(initial == null) {
                    // Caller asked for update-only semantics; nothing to do.
                    done = true;
                    rv = null;
                } else if(client.add(key, initialExp, initial,
                        transcoder).get()) {
                    // add() returns a future; get() blocks until the server
                    // confirms whether the add won the race.
                    done=true;
                    rv=initial;
                }
            }
        }
        if(!done) {
            throw new RuntimeException("Couldn't get a CAS in " + max
                + " attempts");
        }

        return rv;
    }
}
whchoi83/arcus-java-client
src/main/java/net/spy/memcached/CASMutator.java
Java
apache-2.0
3,648
/*
 * Copyright 2020 NAVER Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package test.pinpoint.plugin.kafka;

import kafka.server.KafkaConfig;
import kafka.server.KafkaServerStartable;
import org.apache.commons.io.FileUtils;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;

import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.util.Properties;

/**
 * Copy of https://github.com/chbatey/kafka-unit/blob/master/src/main/java/info/batey/kafka/unit/KafkaUnit.java
 * Some code has been modified for testing from the copied code.
 *
 * Starts an embedded single-broker Kafka (plus an embedded ZooKeeper) on
 * localhost for integration tests.
 */
public class KafkaUnitServer {

    private static final Logger logger = LogManager.getLogger(KafkaUnitServer.class);

    // "localhost:<zkPort>" connect string handed to the broker config.
    private String zookeeperString;
    // "localhost:<brokerPort>"; kept for callers that need the broker address.
    private String brokerString;
    private int zkPort;
    private int brokerPort;
    // Broker properties, populated in startup().
    private Properties kafkaBrokerConfig;
    // Max concurrent client connections allowed by the embedded ZooKeeper.
    private int zkMaxConnections;
    private KafkaServerStartable broker;
    private ZookeeperUnitServer zookeeper;
    // Temp directory used as the broker's log.dir; removed on shutdown.
    private File logDir;

    public KafkaUnitServer(int zkPort, int brokerPort) {
        // 16 is the default ZooKeeper max-connections limit.
        this(zkPort, brokerPort, 16);
    }

    public KafkaUnitServer(int zkPort, int brokerPort, int zkMaxConnections) {
        this.kafkaBrokerConfig = new Properties();
        this.zkPort = zkPort;
        this.brokerPort = brokerPort;
        this.zookeeperString = "localhost:" + zkPort;
        this.brokerString = "localhost:" + brokerPort;
        this.zkMaxConnections = zkMaxConnections;
    }

    /**
     * Starts ZooKeeper first, then a single Kafka broker configured with a
     * fresh temporary log directory.
     */
    public void startup() {
        zookeeper = new ZookeeperUnitServer(zkPort, zkMaxConnections);
        zookeeper.startup();

        try {
            logDir = Files.createTempDirectory("kafka").toFile();
        } catch (IOException e) {
            throw new RuntimeException("Unable to start Kafka", e);
        }
        // Two cleanup mechanisms: deleteOnExit only removes an empty dir,
        // so a shutdown hook also deletes the directory recursively.
        logDir.deleteOnExit();
        Runtime.getRuntime().addShutdownHook(new Thread(getDeleteLogDirectoryAction()));

        kafkaBrokerConfig.setProperty("zookeeper.connect", zookeeperString);
        kafkaBrokerConfig.setProperty("broker.id", "1");
        kafkaBrokerConfig.setProperty("host.name", "localhost");
        kafkaBrokerConfig.setProperty("port", Integer.toString(brokerPort));
        kafkaBrokerConfig.setProperty("log.dir", logDir.getAbsolutePath());
        // Flush after every message so tests observe writes immediately.
        kafkaBrokerConfig.setProperty("log.flush.interval.messages", String.valueOf(1));
        kafkaBrokerConfig.setProperty("delete.topic.enable", String.valueOf(true));
        // Single-broker cluster: offsets topic cannot be replicated.
        kafkaBrokerConfig.setProperty("offsets.topic.replication.factor", String.valueOf(1));
        kafkaBrokerConfig.setProperty("auto.create.topics.enable", String.valueOf(true));

        broker = new KafkaServerStartable(new KafkaConfig(kafkaBrokerConfig));
        broker.startup();
    }

    /**
     * Stops the broker (waiting for it to finish) before stopping ZooKeeper.
     * Safe to call when startup() failed part-way: both fields are
     * null-checked.
     */
    public void shutdown() {
        if (broker != null) {
            broker.shutdown();
            broker.awaitShutdown();
        }
        if (zookeeper != null) {
            zookeeper.shutdown();
        }
    }

    // Recursive delete of the broker log directory, run from a JVM
    // shutdown hook; failures are logged but not propagated.
    private Runnable getDeleteLogDirectoryAction() {
        return new Runnable() {
            @Override
            public void run() {
                if (logDir != null) {
                    try {
                        FileUtils.deleteDirectory(logDir);
                    } catch (IOException e) {
                        logger.warn("Problems deleting temporary directory " + logDir.getAbsolutePath(), e);
                    }
                }
            }
        };
    }
}
emeroad/pinpoint
plugins-it/kafka-it/src/test/java/test/pinpoint/plugin/kafka/KafkaUnitServer.java
Java
apache-2.0
3,986
"""
Support for Ubiquiti's UVC cameras.

For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/camera.uvc/
"""
import logging
import socket

import requests
import voluptuous as vol

from homeassistant.const import CONF_PORT
from homeassistant.components.camera import Camera, PLATFORM_SCHEMA
import homeassistant.helpers.config_validation as cv

REQUIREMENTS = ['uvcclient==0.10.0']

_LOGGER = logging.getLogger(__name__)

CONF_NVR = 'nvr'
CONF_KEY = 'key'

DEFAULT_PORT = 7080

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_NVR): cv.string,
    vol.Required(CONF_KEY): cv.string,
    vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
})


def setup_platform(hass, config, add_devices, discovery_info=None):
    """Discover cameras on a Unifi NVR."""
    addr = config[CONF_NVR]
    key = config[CONF_KEY]
    port = config[CONF_PORT]

    from uvcclient import nvr
    nvrconn = nvr.UVCRemote(addr, port, key)
    try:
        cameras = nvrconn.index()
    except nvr.NotAuthorized:
        _LOGGER.error("Authorization failure while connecting to NVR")
        return False
    except nvr.NvrError:
        _LOGGER.error("NVR refuses to talk to me")
        return False
    except requests.exceptions.ConnectionError as ex:
        _LOGGER.error("Unable to connect to NVR: %s", str(ex))
        return False

    # NVR 3.2.0+ keys cameras by 'id'; older servers use 'uuid'.
    # (Replaces the fragile `cond and 'id' or 'uuid'` idiom.)
    identifier = 'id' if nvrconn.server_version >= (3, 2, 0) else 'uuid'
    # Filter out airCam models, which are not supported in the latest
    # version of UnifiVideo and which are EOL by Ubiquiti
    cameras = [
        camera for camera in cameras
        if 'airCam' not in nvrconn.get_camera(camera[identifier])['model']]

    add_devices([UnifiVideoCamera(nvrconn,
                                  camera[identifier],
                                  camera['name'])
                 for camera in cameras])
    return True


class UnifiVideoCamera(Camera):
    """A Ubiquiti Unifi Video Camera."""

    def __init__(self, nvr, uuid, name):
        """Initialize an Unifi camera."""
        super(UnifiVideoCamera, self).__init__()
        self._nvr = nvr
        self._uuid = uuid
        self._name = name
        self.is_streaming = False
        # Address that last accepted a login; reused on later requests.
        self._connect_addr = None
        # Lazily-created uvcclient camera client (set by _login).
        self._camera = None

    @property
    def name(self):
        """Return the name of this camera."""
        return self._name

    @property
    def is_recording(self):
        """Return true if the camera is recording."""
        caminfo = self._nvr.get_camera(self._uuid)
        return caminfo['recordingSettings']['fullTimeRecordEnabled']

    @property
    def brand(self):
        """Return the brand of this camera."""
        return 'Ubiquiti'

    @property
    def model(self):
        """Return the model of this camera."""
        caminfo = self._nvr.get_camera(self._uuid)
        return caminfo['model']

    def _login(self):
        """Login to the camera.

        Tries the previously working address first (if any), otherwise both
        the external and internal addresses reported by the NVR. Returns
        True on success and False on failure.
        """
        from uvcclient import camera as uvc_camera
        from uvcclient import store as uvc_store

        caminfo = self._nvr.get_camera(self._uuid)
        if self._connect_addr:
            addrs = [self._connect_addr]
        else:
            addrs = [caminfo['host'], caminfo['internalHost']]

        store = uvc_store.get_info_store()
        password = store.get_camera_password(self._uuid)
        if password is None:
            _LOGGER.debug("Logging into camera %(name)s with default password",
                          dict(name=self._name))
            # 'ubnt' is the factory-default camera password.
            password = 'ubnt'

        if self._nvr.server_version >= (3, 2, 0):
            client_cls = uvc_camera.UVCCameraClientV320
        else:
            client_cls = uvc_camera.UVCCameraClient

        camera = None
        for addr in addrs:
            try:
                camera = client_cls(addr,
                                    caminfo['username'],
                                    password)
                camera.login()
                _LOGGER.debug("Logged into UVC camera %(name)s via %(addr)s",
                              dict(name=self._name, addr=addr))
                self._connect_addr = addr
                break
            except (socket.error,
                    uvc_camera.CameraConnectError,
                    uvc_camera.CameraAuthError):
                # Try the next candidate address; the camera may only be
                # reachable on one of them.
                pass
        if not self._connect_addr:
            _LOGGER.error("Unable to login to camera")
            # Return False (not None) for consistency with the True
            # success path; both are falsy for existing callers.
            return False

        self._camera = camera
        return True

    def camera_image(self):
        """Return the image of this camera."""
        from uvcclient import camera as uvc_camera

        if not self._camera:
            if not self._login():
                return

        def _get_image(retry=True):
            try:
                return self._camera.get_snapshot()
            except uvc_camera.CameraConnectError:
                _LOGGER.error("Unable to contact camera")
            except uvc_camera.CameraAuthError:
                # Session may have expired; re-login once and retry.
                if retry:
                    self._login()
                    return _get_image(retry=False)
                else:
                    _LOGGER.error(
                        "Unable to log into camera, unable to get snapshot")
                    raise

        return _get_image()
JshWright/home-assistant
homeassistant/components/camera/uvc.py
Python
apache-2.0
5,259
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

"""Add ``map_index`` column to TaskInstance to identify task-mapping,
and a ``task_map`` table to track mapping values from XCom.

Revision ID: e655c0453f75
Revises: f9da662e7089
Create Date: 2021-12-13 22:59:41.052584
"""

from alembic import op
from sqlalchemy import CheckConstraint, Column, ForeignKeyConstraint, Integer, text

from airflow.models.base import StringID
from airflow.utils.sqlalchemy import ExtendedJSON

# Revision identifiers, used by Alembic.
revision = "e655c0453f75"
down_revision = "f9da662e7089"
branch_labels = None
depends_on = None
airflow_version = '2.3.0'


def upgrade():
    """
    Add ``map_index`` column to TaskInstance to identify task-mapping,
    and a ``task_map`` table to track mapping values from XCom.
    """
    # We need to first remove constraints on task_reschedule since they depend on task_instance.
    with op.batch_alter_table("task_reschedule") as batch_op:
        batch_op.drop_constraint("task_reschedule_ti_fkey", "foreignkey")
        batch_op.drop_index("idx_task_reschedule_dag_task_run")

    # Change task_instance's primary key.
    with op.batch_alter_table("task_instance") as batch_op:
        # I think we always use this name for TaskInstance after 7b2661a43ba3?
        batch_op.drop_constraint("task_instance_pkey", type_="primary")
        # server_default=-1 marks pre-existing, unmapped task instances.
        batch_op.add_column(Column("map_index", Integer, nullable=False, server_default=text("-1")))
        batch_op.create_primary_key("task_instance_pkey", ["dag_id", "task_id", "run_id", "map_index"])

    # Re-create task_reschedule's constraints.
    with op.batch_alter_table("task_reschedule") as batch_op:
        batch_op.add_column(Column("map_index", Integer, nullable=False, server_default=text("-1")))
        batch_op.create_foreign_key(
            "task_reschedule_ti_fkey",
            "task_instance",
            ["dag_id", "task_id", "run_id", "map_index"],
            ["dag_id", "task_id", "run_id", "map_index"],
            ondelete="CASCADE",
        )
        batch_op.create_index(
            "idx_task_reschedule_dag_task_run",
            ["dag_id", "task_id", "run_id", "map_index"],
            unique=False,
        )

    # Create task_map.
    op.create_table(
        "task_map",
        Column("dag_id", StringID(), primary_key=True),
        Column("task_id", StringID(), primary_key=True),
        Column("run_id", StringID(), primary_key=True),
        Column("map_index", Integer, primary_key=True),
        Column("length", Integer, nullable=False),
        Column("keys", ExtendedJSON, nullable=True),
        CheckConstraint("length >= 0", name="task_map_length_not_negative"),
        # Rows are owned by their task instance; cascade on TI deletion.
        ForeignKeyConstraint(
            ["dag_id", "task_id", "run_id", "map_index"],
            [
                "task_instance.dag_id",
                "task_instance.task_id",
                "task_instance.run_id",
                "task_instance.map_index",
            ],
            name="task_map_task_instance_fkey",
            ondelete="CASCADE",
        ),
    )


def downgrade():
    """Remove TaskMap and map_index on TaskInstance."""
    op.drop_table("task_map")

    # Drop dependent constraints before touching task_instance's key.
    with op.batch_alter_table("task_reschedule") as batch_op:
        batch_op.drop_constraint("task_reschedule_ti_fkey", "foreignkey")
        batch_op.drop_index("idx_task_reschedule_dag_task_run")
        batch_op.drop_column("map_index", mssql_drop_default=True)

    # Mapped task instances have no representation without map_index;
    # they must be removed before the column (and the wider PK) go away.
    op.execute("DELETE FROM task_instance WHERE map_index != -1")

    with op.batch_alter_table("task_instance") as batch_op:
        batch_op.drop_constraint("task_instance_pkey", type_="primary")
        batch_op.drop_column("map_index", mssql_drop_default=True)
        batch_op.create_primary_key("task_instance_pkey", ["dag_id", "task_id", "run_id"])

    # Restore the original (three-column) task_reschedule constraints.
    with op.batch_alter_table("task_reschedule") as batch_op:
        batch_op.create_foreign_key(
            "task_reschedule_ti_fkey",
            "task_instance",
            ["dag_id", "task_id", "run_id"],
            ["dag_id", "task_id", "run_id"],
            ondelete="CASCADE",
        )
        batch_op.create_index(
            "idx_task_reschedule_dag_task_run",
            ["dag_id", "task_id", "run_id"],
            unique=False,
        )
apache/airflow
airflow/migrations/versions/e655c0453f75_add_taskmap_and_map_id_on_taskinstance.py
Python
apache-2.0
4,986
<?php /* * Copyright 2014 Google Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ namespace Google\Service\Testing; class ApkDetail extends \Google\Model { protected $apkManifestType = ApkManifest::class; protected $apkManifestDataType = ''; /** * @param ApkManifest */ public function setApkManifest(ApkManifest $apkManifest) { $this->apkManifest = $apkManifest; } /** * @return ApkManifest */ public function getApkManifest() { return $this->apkManifest; } } // Adding a class alias for backwards compatibility with the previous class name. class_alias(ApkDetail::class, 'Google_Service_Testing_ApkDetail');
googleapis/google-api-php-client-services
src/Testing/ApkDetail.php
PHP
apache-2.0
1,169
# Copyright (c) 2013 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import falcon
import six

from marconi.openstack.common.gettextutils import _
import marconi.openstack.common.log as logging
from marconi.queues.storage import errors as storage_errors
from marconi.queues.transport import utils
from marconi.queues.transport import validation
from marconi.queues.transport.wsgi import errors as wsgi_errors
from marconi.queues.transport.wsgi import utils as wsgi_utils

LOG = logging.getLogger(__name__)

# (field, type) spec for message POST bodies; '*' accepts any body type.
MESSAGE_POST_SPEC = (('ttl', int), ('body', '*'))


class CollectionResource(object):
    # Falcon resource handling the message collection of a single queue
    # (POST new messages, GET a listing or specific IDs, bulk DELETE).

    __slots__ = ('message_controller', '_wsgi_conf', '_validate')

    def __init__(self, wsgi_conf, validate, message_controller):
        self._wsgi_conf = wsgi_conf
        self._validate = validate
        self.message_controller = message_controller

    #-----------------------------------------------------------------------
    # Helpers
    #-----------------------------------------------------------------------

    def _get_by_id(self, base_path, project_id, queue_name, ids):
        """Returns one or more messages from the queue by ID."""
        try:
            self._validate.message_listing(limit=len(ids))
            messages = self.message_controller.bulk_get(
                queue_name,
                message_ids=ids,
                project=project_id)

        except validation.ValidationFailed as ex:
            LOG.debug(ex)
            raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex))

        except Exception as ex:
            LOG.exception(ex)
            description = _(u'Message could not be retrieved.')
            raise wsgi_errors.HTTPServiceUnavailable(description)

        # Prepare response
        messages = list(messages)
        if not messages:
            return None

        # Replace each internal 'id' with a client-facing 'href'.
        base_path += '/'
        for each_message in messages:
            each_message['href'] = base_path + each_message['id']
            del each_message['id']

        return messages

    def _get(self, req, project_id, queue_name):
        """Return a paged message listing for the queue, or None if empty."""
        client_uuid = wsgi_utils.get_client_uuid(req)
        kwargs = {}

        # NOTE(kgriffs): This syntax ensures that
        # we don't clobber default values with None.
        req.get_param('marker', store=kwargs)
        req.get_param_as_int('limit', store=kwargs)
        req.get_param_as_bool('echo', store=kwargs)
        req.get_param_as_bool('include_claimed', store=kwargs)

        try:
            self._validate.message_listing(**kwargs)
            results = self.message_controller.list(
                queue_name,
                project=project_id,
                client_uuid=client_uuid,
                **kwargs)

            # Buffer messages
            cursor = next(results)
            messages = list(cursor)

        except validation.ValidationFailed as ex:
            LOG.debug(ex)
            raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex))

        except storage_errors.DoesNotExist as ex:
            LOG.debug(ex)
            raise falcon.HTTPNotFound()

        except Exception as ex:
            LOG.exception(ex)
            description = _(u'Messages could not be listed.')
            raise wsgi_errors.HTTPServiceUnavailable(description)

        if not messages:
            return None

        # Found some messages, so prepare the response
        # (the controller yields the next-page marker after the messages).
        kwargs['marker'] = next(results)
        for each_message in messages:
            each_message['href'] = req.path + '/' + each_message['id']
            del each_message['id']

        return {
            'messages': messages,
            'links': [
                {
                    'rel': 'next',
                    'href': req.path + falcon.to_query_str(kwargs)
                }
            ]
        }

    #-----------------------------------------------------------------------
    # Interface
    #-----------------------------------------------------------------------

    def on_post(self, req, resp, project_id, queue_name):
        """Enqueue one or more messages; responds 201 with their hrefs."""
        LOG.debug(u'Messages collection POST - queue:  %(queue)s, '
                  u'project: %(project)s',
                  {'queue': queue_name, 'project': project_id})

        client_uuid = wsgi_utils.get_client_uuid(req)

        try:
            # Place JSON size restriction before parsing
            self._validate.message_length(req.content_length)
        except validation.ValidationFailed as ex:
            LOG.debug(ex)
            raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex))

        # Pull out just the fields we care about
        messages = wsgi_utils.filter_stream(
            req.stream,
            req.content_length,
            MESSAGE_POST_SPEC,
            doctype=wsgi_utils.JSONArray)

        # Enqueue the messages
        partial = False

        try:
            self._validate.message_posting(messages)

            message_ids = self.message_controller.post(
                queue_name,
                messages=messages,
                project=project_id,
                client_uuid=client_uuid)

        except validation.ValidationFailed as ex:
            LOG.debug(ex)
            raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex))

        except storage_errors.DoesNotExist as ex:
            LOG.debug(ex)
            raise falcon.HTTPNotFound()

        except storage_errors.MessageConflict as ex:
            # Some (but possibly not all) messages were stored; report a
            # partial success rather than failing the whole request.
            LOG.exception(ex)
            partial = True
            message_ids = ex.succeeded_ids

            if not message_ids:
                # TODO(kgriffs): Include error code that is different
                # from the code used in the generic case, below.
                description = _(u'No messages could be enqueued.')
                raise wsgi_errors.HTTPServiceUnavailable(description)

        except Exception as ex:
            LOG.exception(ex)
            description = _(u'Messages could not be enqueued.')
            raise wsgi_errors.HTTPServiceUnavailable(description)

        # Prepare the response
        ids_value = ','.join(message_ids)
        resp.location = req.path + '?ids=' + ids_value

        hrefs = [req.path + '/' + id for id in message_ids]
        body = {'resources': hrefs, 'partial': partial}
        resp.body = utils.to_json(body)

        resp.status = falcon.HTTP_201

    def on_get(self, req, resp, project_id, queue_name):
        """List messages, either by explicit 'ids' or as a paged listing."""
        LOG.debug(u'Messages collection GET - queue: %(queue)s, '
                  u'project: %(project)s',
                  {'queue': queue_name, 'project': project_id})

        resp.content_location = req.relative_uri

        ids = req.get_param_as_list('ids')

        if ids is None:
            response = self._get(req, project_id, queue_name)
        else:
            response = self._get_by_id(req.path, project_id, queue_name, ids)

        if response is None:
            # Queue exists but has no messages matching the request.
            resp.status = falcon.HTTP_204
            return

        resp.body = utils.to_json(response)
        # status defaults to 200

    def on_delete(self, req, resp, project_id, queue_name):
        """Bulk-delete the messages named by the required 'ids' parameter."""
        # NOTE(zyuan): Attempt to delete the whole message collection
        # (without an "ids" parameter) is not allowed
        ids = req.get_param_as_list('ids', required=True)

        try:
            self._validate.message_listing(limit=len(ids))
            self.message_controller.bulk_delete(
                queue_name,
                message_ids=ids,
                project=project_id)

        except validation.ValidationFailed as ex:
            LOG.debug(ex)
            raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex))

        except Exception as ex:
            LOG.exception(ex)
            description = _(u'Messages could not be deleted.')
            raise wsgi_errors.HTTPServiceUnavailable(description)

        resp.status = falcon.HTTP_204


class ItemResource(object):
    # Falcon resource for a single message (GET and DELETE by message ID).

    # NOTE(review): parens without a trailing comma make this a plain
    # string, not a tuple. __slots__ does accept a single string as one
    # slot name, so behavior is correct, but ('message_controller',)
    # would be clearer.
    __slots__ = ('message_controller')

    def __init__(self, message_controller):
        self.message_controller = message_controller

    def on_get(self, req, resp, project_id, queue_name, message_id):
        """Return a single message by ID, or 404 if it does not exist."""
        LOG.debug(u'Messages item GET - message: %(message)s, '
                  u'queue: %(queue)s, project: %(project)s',
                  {'message': message_id,
                   'queue': queue_name,
                   'project': project_id})
        try:
            message = self.message_controller.get(
                queue_name,
                message_id,
                project=project_id)

        except storage_errors.DoesNotExist as ex:
            LOG.debug(ex)
            raise falcon.HTTPNotFound()

        except Exception as ex:
            LOG.exception(ex)
            description = _(u'Message could not be retrieved.')
            raise wsgi_errors.HTTPServiceUnavailable(description)

        # Prepare response
        message['href'] = req.path
        del message['id']

        resp.content_location = req.relative_uri
        resp.body = utils.to_json(message)
        # status defaults to 200

    def on_delete(self, req, resp, project_id, queue_name, message_id):
        """Delete a single message; 403 if it is claimed by someone else."""
        LOG.debug(u'Messages item DELETE - message: %(message)s, '
                  u'queue: %(queue)s, project: %(project)s',
                  {'message': message_id,
                   'queue': queue_name,
                   'project': project_id})
        try:
            self.message_controller.delete(
                queue_name,
                message_id=message_id,
                project=project_id,
                claim=req.get_param('claim_id'))

        except storage_errors.NotPermitted as ex:
            LOG.exception(ex)
            title = _(u'Unable to delete')
            description = _(u'This message is claimed; it cannot be '
                            u'deleted without a valid claim_id.')
            raise falcon.HTTPForbidden(title, description)

        except Exception as ex:
            LOG.exception(ex)
            description = _(u'Message could not be deleted.')
            raise wsgi_errors.HTTPServiceUnavailable(description)

        # All good
        resp.status = falcon.HTTP_204
rackerlabs/marconi
marconi/queues/transport/wsgi/v1_1/messages.py
Python
apache-2.0
10,604
package com.example.http4s.blaze.demo

import cats.effect.Sync
import fs2.Stream

/** Small collection of `Stream`-based console and environment helpers,
  * parameterized over any effect type `F[_]` with a `Sync` instance.
  */
trait StreamUtils[F[_]] {
  /** Suspend an arbitrary by-name thunk as a single-element stream. */
  def evalF[A](thunk: => A)(implicit F: Sync[F]): Stream[F, A] = Stream.eval(F.delay(thunk))

  /** Print a line to stdout, suspended in `F`. */
  def putStrLn(value: String)(implicit F: Sync[F]): Stream[F, Unit] = evalF(println(value))

  /** Print to stdout without a trailing newline, suspended in `F`. */
  def putStr(value: String)(implicit F: Sync[F]): Stream[F, Unit] = evalF(print(value))

  /** Read an environment variable; emits `None` when it is unset. */
  def env(name: String)(implicit F: Sync[F]): Stream[F, Option[String]] = evalF(sys.env.get(name))

  /** A stream that fails with `new Exception(msg)` when pulled. */
  def error(msg: String)(implicit F: Sync[F]): Stream[F, String] =
    Stream.raiseError(new Exception(msg)).covary[F]
}

object StreamUtils {
  /** Derive a `StreamUtils[F]` for any `F` that has a `Sync` instance. */
  implicit def syncInstance[F[_]: Sync]: StreamUtils[F] = new StreamUtils[F] {}
}
aeons/http4s
examples/blaze/src/main/scala/com/example/http4s/blaze/demo/StreamUtils.scala
Scala
apache-2.0
705
from __future__ import absolute_import, unicode_literals

from eventlet import spawn_n, monkey_patch, Timeout
from eventlet.queue import LightQueue
from eventlet.event import Event

# Patch the standard library for cooperative (green-thread) I/O before
# anything else in this module runs.
monkey_patch()


class Receipt(object):
    # Result of the task; None until `finished` has been called.
    result = None

    def __init__(self, callback=None):
        # Optional callable invoked with the result as soon as it arrives.
        self.callback = callback
        # One-shot event used to wake any green threads blocked in `wait`.
        self.ready = Event()

    def finished(self, result):
        # Record the result, fire the callback (if any), then wake waiters.
        self.result = result
        if self.callback:
            self.callback(result)
        self.ready.send()

    def wait(self, timeout=None):
        # Block until `finished` is called.  Raises eventlet.Timeout if the
        # result does not arrive within `timeout` seconds (None = forever).
        with Timeout(timeout):
            return self.ready.wait()


class ProducerPool(object):
    """Usage::

        >>> app = Celery(broker='amqp://')
        >>> ProducerPool(app)

    """
    # Receipt class used to hand results back; override to customize.
    Receipt = Receipt

    def __init__(self, app, size=20):
        self.app = app
        # Number of concurrent producer green threads to spawn.
        self.size = size
        # Shared queue feeding work items to the producer green threads.
        self.inqueue = LightQueue()
        self._running = None
        self._producers = None

    def apply_async(self, task, args, kwargs, callback=None, **options):
        # Lazily start the dispatcher on first use.
        # NOTE(review): this assumes spawn_n returns a non-None handle --
        # confirm against the eventlet version in use, otherwise `_run`
        # would be respawned on every call.
        if self._running is None:
            self._running = spawn_n(self._run)
        receipt = self.Receipt(callback)
        self.inqueue.put((task, args, kwargs, options, receipt))
        return receipt

    def _run(self):
        # Spawn the fixed-size pool of producer green threads.
        self._producers = [
            spawn_n(self._producer) for _ in range(self.size)
        ]

    def _producer(self):
        # Each producer holds one broker producer/connection and publishes
        # tasks pulled from the shared queue, forever.
        inqueue = self.inqueue

        with self.app.producer_or_acquire() as producer:
            while 1:
                task, args, kwargs, options, receipt = inqueue.get()
                result = task.apply_async(args, kwargs,
                                          producer=producer,
                                          **options)
                receipt.finished(result)
kawamon/hue
desktop/core/ext-py/celery-4.2.1/examples/eventlet/bulk_task_producer.py
Python
apache-2.0
1,746
from django.conf import settings
from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.contrib.staticfiles import views

from refreshtoken.views import delegate_jwt_token
from rest_framework_jwt.views import obtain_jwt_token, verify_jwt_token

from apiv2.views import generate_basic_auth_token
from bestiary.autocomplete import *
from data_log.autocomplete import SummonItemAutocomplete
from herders import views as herder_views
from herders.autocomplete import *
from herders.forms import CrispyAuthenticationForm, CrispyPasswordChangeForm, CrispyPasswordResetForm, \
    CrispySetPasswordForm

# Root URL configuration: autocomplete endpoints first (AJAX), then the API,
# then the app URLconfs, then Django auth/admin routes.
urlpatterns = [
    # AJAX-y stuff first
    url(r'^autocomplete/', include([
        url(r'^bestiary/$', BestiaryAutocomplete.as_view(), name='bestiary-monster-autocomplete'),
        url(r'^dungeon/$', DungeonAutocomplete.as_view(), name='bestiary-dungeon-autocomplete'),
        # NOTE(review): this route is named 'bestiary-game-item-autocomplete'
        # but is wired to DungeonAutocomplete -- confirm whether a dedicated
        # GameItemAutocomplete view was intended here.
        url(r'^item/$', DungeonAutocomplete.as_view(), name='bestiary-game-item-autocomplete'),
        # BUG FIX: pattern was r'#summon_item/$'.  A URL fragment ('#...') is
        # never sent to the server, so that pattern could never match; anchor
        # with '^' like every sibling route.
        url(r'^summon_item/$', SummonItemAutocomplete.as_view(), name='summon-game-item-autocomplete'),
        url(r'^quick-search/$', QuickSearchAutocomplete.as_view(), name='bestiary-quicksearch-autocomplete'),
        url(r'^monster-tag/$', MonsterTagAutocomplete.as_view(), name='monster-tag-autocomplete'),
        url(r'^monster-instance/$', MonsterInstanceAutocomplete.as_view(), name='monster-instance-autocomplete'),
        url(r'^monster-instance-follower/$', MonsterInstanceFollowerAutocomplete.as_view(), name='monster-instance-follower-autocomplete'),
    ])),

    # REST API (v2 preferred; legacy API and JWT auth endpoints kept)
    url(r'^api/v2/', include('apiv2.urls', namespace='v2')),
    url(r'^api/', include('api.urls')),
    url(r'^api(/v\d+)?/auth/', include('rest_framework.urls', namespace='rest_framework')),
    url(r'^api(/v\d+)?/auth/generate-basic-token/', generate_basic_auth_token),
    url(r'^api(/v\d+)?/auth/get-token/', obtain_jwt_token),
    url(r'^api(/v\d+)?/auth/delegate-token', delegate_jwt_token),
    url(r'^api(/v\d+)?/auth/verify-token/', verify_jwt_token),

    # Bestiary
    url(r'^bestiary/', include('bestiary.urls', namespace='bestiary')),

    # SWARFARM app
    url(r'^feedback/', include('feedback.urls', namespace='feedback')),
    url(r'^data/', include('data_log.urls', namespace='data_log')),
    url(r'^', include('herders.urls', namespace='herders')),
    url(r'^', include('news.urls', namespace='news')),

    # Django auth/admin stuff
    url(r'^admin/', admin.site.urls),
    url(r'^login/$', auth_views.LoginView.as_view(form_class=CrispyAuthenticationForm), name='login'),
    url(r'^logout/$', auth_views.LogoutView.as_view(), {'next_page': 'news:latest_news'}, name='logout'),
    url(r'^password_change/$', auth_views.PasswordChangeView.as_view(
        form_class=CrispyPasswordChangeForm,
        success_url='password_change_done'
    ), name='password_change'),
    url(r'^password_change/password_change_done', auth_views.PasswordChangeDoneView.as_view(), name='password_change_done'),
    url(r'^password_reset/$', auth_views.PasswordResetView.as_view(form_class=CrispyPasswordResetForm), name='password_reset'),
    url(r'^password_reset/done$', auth_views.PasswordResetDoneView.as_view(), name='password_reset_done'),
    url(
        r'^reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
        auth_views.PasswordResetConfirmView.as_view(form_class=CrispySetPasswordForm),
        name='password_reset_confirm',
    ),
    url(
        r'^reset/done/$',
        auth_views.PasswordResetCompleteView.as_view(extra_context={'form': CrispyAuthenticationForm}),
        name='password_reset_complete'
    ),
    url(r'^username_change/$', herder_views.profile.change_username, name="username_change"),
    url(r'^username_change/done/$', herder_views.profile.change_username_complete, name="username_change_complete"),
]

if settings.DEBUG:
    import debug_toolbar
    # Serve static files directly and enable the debug toolbar in DEBUG only.
    urlpatterns += [
        url(r'^static/(?P<path>.*)$', views.serve),
        url(r'^__debug__/', include(debug_toolbar.urls)),
    ]
PeteAndersen/swarfarm
swarfarm/urls.py
Python
apache-2.0
4,099
CC = gcc
CXX = g++

CFLAGS = -W -Wall -Wextra -ansi -pedantic -lm -O2 -Wno-unused-function
CXXFLAGS = -W -Wall -Wextra -ansi -pedantic -O2

ZOPFLILIB_SRC = src/zopfli/blocksplitter.c src/zopfli/cache.c\
                src/zopfli/deflate.c src/zopfli/gzip_container.c\
                src/zopfli/hash.c src/zopfli/katajainen.c\
                src/zopfli/lz77.c src/zopfli/squeeze.c\
                src/zopfli/tree.c src/zopfli/util.c\
                src/zopfli/zlib_container.c src/zopfli/zopfli_lib.c
ZOPFLILIB_OBJ := $(patsubst src/zopfli/%.c,%.o,$(ZOPFLILIB_SRC))
ZOPFLIBIN_SRC := src/zopfli/zopfli_bin.c
LODEPNG_SRC := src/zopflipng/lodepng/lodepng.cpp src/zopflipng/lodepng/lodepng_util.cpp
ZOPFLIPNGLIB_SRC := src/zopflipng/zopflipng_lib.cc
ZOPFLIPNGBIN_SRC := src/zopflipng/zopflipng_bin.cc

# Declare every command-style target phony so a stray file with the same
# name (e.g. a file called ./clean) can never shadow it.
# FIX: 'libzopfli', 'libzopflipng' and 'clean' were missing from .PHONY.
.PHONY: zopfli zopflipng libzopfli libzopflipng clean

# Zopfli binary
zopfli:
	$(CC) $(ZOPFLILIB_SRC) $(ZOPFLIBIN_SRC) $(CFLAGS) -o zopfli

# Zopfli shared library
libzopfli:
	$(CC) $(ZOPFLILIB_SRC) $(CFLAGS) -fPIC -c
	$(CC) $(ZOPFLILIB_OBJ) $(CFLAGS) -shared -Wl,-soname,libzopfli.so.1 -o libzopfli.so.1.0.2

# ZopfliPNG binary
zopflipng:
	$(CC) $(ZOPFLILIB_SRC) $(CFLAGS) -c
	$(CXX) $(ZOPFLILIB_OBJ) $(LODEPNG_SRC) $(ZOPFLIPNGLIB_SRC) $(ZOPFLIPNGBIN_SRC) $(CFLAGS) -o zopflipng

# ZopfliPNG shared library
libzopflipng:
	$(CC) $(ZOPFLILIB_SRC) $(CFLAGS) -fPIC -c
	$(CXX) $(ZOPFLILIB_OBJ) $(LODEPNG_SRC) $(ZOPFLIPNGLIB_SRC) $(CFLAGS) -fPIC --shared -Wl,-soname,libzopflipng.so.1 -o libzopflipng.so.1.0.2

# Remove all libraries and binaries
clean:
	rm -f zopflipng zopfli $(ZOPFLILIB_OBJ) libzopfli*
ImageOptim/zopfli
Makefile
Makefile
apache-2.0
1,575
using Esri.ArcGISRuntime.Controls;
using Esri.ArcGISRuntime.Layers;
using Esri.ArcGISRuntime.Tasks.Query;
using System;
using System.Windows;
using System.Windows.Controls;

namespace ArcGISRuntime.Samples.Desktop
{
    /// <summary>
    /// This sample demonstrates performing identify operations. To use the sample, click anywhere in the United States to identify features. The results will be shown in the combo box and list view on the right side of the application. View the data for different results by selecting them from the combo box. In the code-behind, an IdentifyTask is used to perform the identify operation. The tasks IdentifyParameters specify to query the geometry of the map click and to query all the layers in the target map service, which enables returning results from multiple layers.
    /// </summary>
    /// <title>Identify</title>
    /// <category>Tasks</category>
    /// <subcategory>Query</subcategory>
    public partial class IdentifySample : UserControl
    {
        /// <summary>Construct Identify sample control</summary>
        public IdentifySample()
        {
            InitializeComponent();
        }

        // Identify features at the click point.
        // Clears previous results, drops a marker graphic at the tapped
        // location, then runs an IdentifyTask against the census map service.
        private async void MyMapView_MapViewTapped(object sender, MapViewInputEventArgs e)
        {
            try
            {
                // Show the busy indicator and reset the previous results.
                progress.Visibility = Visibility.Visible;
                resultsGrid.DataContext = null;

                // Mark the tapped location on the overlay.
                GraphicsOverlay graphicsOverlay = MyMapView.GraphicsOverlays["graphicsOverlay"];
                graphicsOverlay.Graphics.Clear();
                graphicsOverlay.Graphics.Add(new Graphic(e.Location));

                // Get current viewpoints extent from the MapView
                var currentViewpoint = MyMapView.GetCurrentViewpoint(ViewpointType.BoundingGeometry);
                var viewpointExtent = currentViewpoint.TargetGeometry.Extent;

                // Tolerance of 2 pixels around the tap; the view size maps
                // screen pixels to map units for the service.
                IdentifyParameters identifyParams = new IdentifyParameters(e.Location, viewpointExtent, 2, (int)MyMapView.ActualHeight, (int)MyMapView.ActualWidth)
                {
                    LayerOption = LayerOption.Visible,
                    SpatialReference = MyMapView.SpatialReference,
                };

                IdentifyTask identifyTask = new IdentifyTask(
                    new Uri("http://sampleserver1.arcgisonline.com/ArcGIS/rest/services/Demographics/ESRI_Census_USA/MapServer"));

                var result = await identifyTask.ExecuteAsync(identifyParams);

                // NOTE(review): result is dereferenced here before the null
                // check below; a null result would throw and be handled by the
                // catch block -- confirm whether ExecuteAsync can return null.
                resultsGrid.DataContext = result.Results;
                if (result != null && result.Results != null && result.Results.Count > 0)
                    titleComboBox.SelectedIndex = 0;
            }
            catch (Exception ex)
            {
                MessageBox.Show(ex.Message, "Identify Sample");
            }
            finally
            {
                // Always hide the busy indicator, even on failure.
                progress.Visibility = Visibility.Collapsed;
            }
        }
    }
}
sharifulgeo/arcgis-runtime-samples-dotnet
src/Desktop/ArcGISRuntimeSamplesDesktop/Samples/QueryTasks/IdentifySample.xaml.cs
C#
apache-2.0
2,538
package cmd // k8s.io/helm/cmd/helm/install.go // import ( "bytes" "errors" "fmt" "io" "io/ioutil" "os" "path/filepath" "strings" "text/template" "github.com/Masterminds/sprig" "github.com/ghodss/yaml" "github.com/spf13/cobra" "k8s.io/helm/pkg/helm" "k8s.io/helm/pkg/proto/hapi/release" "k8s.io/helm/pkg/timeconv" ) const installDesc = ` This command installs a chart archive. The install argument must be either a relative path to a chart directory or the name of a chart in the current working directory. To override values in a chart, use either the '--values' flag and pass in a file or use the '--set' flag and pass configuration from the command line. $ helm install -f myvalues.yaml redis or $ helm install --set name=prod redis To check the generated manifests of a release without installing the chart, the '--debug' and '--dry-run' flags can be combined. This will still require a round-trip to the Tiller server. If --verify is set, the chart MUST have a provenance file, and the provenenace fall MUST pass all verification steps. 
` type installCmd struct { name string namespace string valuesFile string chartPath string dryRun bool disableHooks bool replace bool verify bool keyring string out io.Writer client helm.Interface values *values nameTemplate string } func newInstallCmd(c helm.Interface, out io.Writer) *cobra.Command { inst := &installCmd{ out: out, client: c, values: new(values), } cmd := &cobra.Command{ Use: "install [CHART]", Short: "install a chart archive", Long: installDesc, PersistentPreRunE: setupConnection, RunE: func(cmd *cobra.Command, args []string) error { if err := checkArgsLength(len(args), "chart name"); err != nil { return err } cp, err := locateChartPath(args[0], inst.verify, inst.keyring) if err != nil { return err } inst.chartPath = cp inst.client = ensureHelmClient(inst.client) return inst.run() }, } f := cmd.Flags() f.StringVarP(&inst.valuesFile, "values", "f", "", "specify values in a YAML file") f.StringVarP(&inst.name, "name", "n", "", "the release name. If unspecified, it will autogenerate one for you") // TODO use kubeconfig default f.StringVar(&inst.namespace, "namespace", "default", "the namespace to install the release into") f.BoolVar(&inst.dryRun, "dry-run", false, "simulate an install") f.BoolVar(&inst.disableHooks, "no-hooks", false, "prevent hooks from running during install") f.BoolVar(&inst.replace, "replace", false, "re-use the given name, even if that name is already used. This is unsafe in production") f.Var(inst.values, "set", "set values on the command line. 
Separate values with commas: key1=val1,key2=val2") f.StringVar(&inst.nameTemplate, "name-template", "", "specify template used to name the release") f.BoolVar(&inst.verify, "verify", false, "verify the package before installing it") f.StringVar(&inst.keyring, "keyring", defaultKeyring(), "location of public keys used for verification") return cmd } func (i *installCmd) run() error { if flagDebug { fmt.Fprintf(i.out, "Chart path: %s\n", i.chartPath) } rawVals, err := i.vals() if err != nil { return err } // If template is specified, try to run the template. if i.nameTemplate != "" { i.name, err = generateName(i.nameTemplate) if err != nil { return err } // Print the final name so the user knows what the final name of the release is. fmt.Printf("Final name: %s\n", i.name) } res, err := i.client.InstallRelease( i.chartPath, i.namespace, helm.ValueOverrides(rawVals), helm.ReleaseName(i.name), helm.InstallDryRun(i.dryRun), helm.InstallReuseName(i.replace), helm.InstallDisableHooks(i.disableHooks)) if err != nil { return prettyError(err) } rel := res.GetRelease() if rel == nil { return nil } i.printRelease(rel) // If this is a dry run, we can't display status. if i.dryRun { return nil } // Print the status like status command does status, err := i.client.ReleaseStatus(rel.Name) if err != nil { return prettyError(err) } PrintStatus(i.out, status) return nil } func (i *installCmd) vals() ([]byte, error) { var buffer bytes.Buffer // User specified a values file via -f/--values if i.valuesFile != "" { bytes, err := ioutil.ReadFile(i.valuesFile) if err != nil { return []byte{}, err } buffer.Write(bytes) } // User specified value pairs via --set // These override any values in the specified file if len(i.values.pairs) > 0 { bytes, err := i.values.yaml() if err != nil { return []byte{}, err } buffer.Write(bytes) } return buffer.Bytes(), nil } // printRelease prints info about a release if the flagDebug is true. 
// printRelease prints the release name, and in debug mode the full release
// metadata and manifest.
func (i *installCmd) printRelease(rel *release.Release) {
	if rel == nil {
		return
	}
	// TODO: Switch to text/template like everything else.
	if flagDebug {
		fmt.Fprintf(i.out, "NAME: %s\n", rel.Name)
		fmt.Fprintf(i.out, "NAMESPACE: %s\n", rel.Namespace)
		fmt.Fprintf(i.out, "INFO: %s %s\n", timeconv.String(rel.Info.LastDeployed), rel.Info.Status)
		fmt.Fprintf(i.out, "CHART: %s %s\n", rel.Chart.Metadata.Name, rel.Chart.Metadata.Version)
		fmt.Fprintf(i.out, "MANIFEST: %s\n", rel.Manifest)
	} else {
		fmt.Fprintln(i.out, rel.Name)
	}
}

// values represents the command-line value pairs
type values struct {
	// pairs holds the nested key/value structure built from --set flags.
	pairs map[string]interface{}
}

// yaml serializes the accumulated pairs to YAML.
func (v *values) yaml() ([]byte, error) {
	return yaml.Marshal(v.pairs)
}

// String implements pflag.Value; errors are deliberately ignored here.
func (v *values) String() string {
	out, _ := v.yaml()
	return string(out)
}

func (v *values) Type() string {
	// Added to pflags.Value interface, but not documented there.
	return "struct"
}

// Set implements pflag.Value.  Parses "a.b=1,c=2" into nested maps,
// replacing any previously parsed pairs.  A key without '=' is set to true.
func (v *values) Set(data string) error {
	v.pairs = map[string]interface{}{}

	items := strings.Split(data, ",")
	for _, item := range items {
		n, val := splitPair(item)
		names := strings.Split(n, ".")
		ln := len(names)
		current := &v.pairs
		// Walk/create the nested maps for each dotted path segment.
		for i := 0; i < ln; i++ {
			if i+1 == ln {
				// We're at the last element. Set it.
				(*current)[names[i]] = val
			} else {
				// NOTE(review): if an existing entry at this segment is a
				// non-map scalar, `current` is left unchanged and the next
				// write lands in the parent map -- confirm whether that
				// shadowing case is intended.
				if e, ok := (*current)[names[i]]; !ok {
					m := map[string]interface{}{}
					(*current)[names[i]] = m
					current = &m
				} else if m, ok := e.(map[string]interface{}); ok {
					current = &m
				}
			}
		}
	}
	return nil
}

// splitPair splits "key=value" into its parts; a bare "key" yields (key, true).
func splitPair(item string) (name string, value interface{}) {
	pair := strings.SplitN(item, "=", 2)
	if len(pair) == 1 {
		return pair[0], true
	}
	return pair[0], pair[1]
}

// locateChartPath looks for a chart directory in known places, and returns either the full path or an error.
//
// This does not ensure that the chart is well-formed; only that the requested filename exists.
//
// Order of resolution:
// - current working directory
// - if path is absolute or begins with '.', error out here
// - chart repos in $HELM_HOME
//
// If 'verify' is true, this will attempt to also verify the chart.
func locateChartPath(name string, verify bool, keyring string) (string, error) {
	// 1. A file or directory at the given path wins outright.
	if fi, err := os.Stat(name); err == nil {
		abs, err := filepath.Abs(name)
		if err != nil {
			return abs, err
		}
		if verify {
			// Provenance verification only makes sense for archives.
			if fi.IsDir() {
				return "", errors.New("cannot verify a directory")
			}
			if err := verifyChart(abs, keyring); err != nil {
				return "", err
			}
		}
		return abs, nil
	}
	// 2. Absolute or explicitly relative paths must exist; don't fall back.
	if filepath.IsAbs(name) || strings.HasPrefix(name, ".") {
		return name, fmt.Errorf("path %q not found", name)
	}

	// 3. Look in the local repository cache under $HELM_HOME.
	crepo := filepath.Join(repositoryDirectory(), name)
	if _, err := os.Stat(crepo); err == nil {
		return filepath.Abs(crepo)
	}

	// Try fetching the chart from a remote repo into a tmpdir
	origname := name
	if filepath.Ext(name) != ".tgz" {
		name += ".tgz"
	}
	if err := downloadChart(name, false, ".", verify, keyring); err == nil {
		lname, err := filepath.Abs(filepath.Base(name))
		if err != nil {
			return lname, err
		}
		fmt.Printf("Fetched %s to %s\n", origname, lname)
		return lname, nil
	}

	return name, fmt.Errorf("file %q not found", origname)
}

// generateName renders nameTemplate (with the sprig function map, no data)
// and returns the resulting release name.
func generateName(nameTemplate string) (string, error) {
	t, err := template.New("name-template").Funcs(sprig.TxtFuncMap()).Parse(nameTemplate)
	if err != nil {
		return "", err
	}
	var b bytes.Buffer
	err = t.Execute(&b, nil)
	if err != nil {
		return "", err
	}
	return b.String(), nil
}
tangfeixiong/go-for-kubernetes
pkg/cmd/helm_cmd_helm_install.go
GO
apache-2.0
8,314
/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ #include "zstd_ldm.h" #include "debug.h" #include "zstd_fast.h" /* ZSTD_fillHashTable() */ #include "zstd_double_fast.h" /* ZSTD_fillDoubleHashTable() */ #define LDM_BUCKET_SIZE_LOG 3 #define LDM_MIN_MATCH_LENGTH 64 #define LDM_HASH_RLOG 7 #define LDM_HASH_CHAR_OFFSET 10 void ZSTD_ldm_adjustParameters(ldmParams_t* params, ZSTD_compressionParameters const* cParams) { params->windowLog = cParams->windowLog; ZSTD_STATIC_ASSERT(LDM_BUCKET_SIZE_LOG <= ZSTD_LDM_BUCKETSIZELOG_MAX); DEBUGLOG(4, "ZSTD_ldm_adjustParameters"); if (!params->bucketSizeLog) params->bucketSizeLog = LDM_BUCKET_SIZE_LOG; if (!params->minMatchLength) params->minMatchLength = LDM_MIN_MATCH_LENGTH; if (cParams->strategy >= ZSTD_btopt) { /* Get out of the way of the optimal parser */ U32 const minMatch = MAX(cParams->targetLength, params->minMatchLength); assert(minMatch >= ZSTD_LDM_MINMATCH_MIN); assert(minMatch <= ZSTD_LDM_MINMATCH_MAX); params->minMatchLength = minMatch; } if (params->hashLog == 0) { params->hashLog = MAX(ZSTD_HASHLOG_MIN, params->windowLog - LDM_HASH_RLOG); assert(params->hashLog <= ZSTD_HASHLOG_MAX); } if (params->hashRateLog == 0) { params->hashRateLog = params->windowLog < params->hashLog ? 
0 : params->windowLog - params->hashLog; } params->bucketSizeLog = MIN(params->bucketSizeLog, params->hashLog); } size_t ZSTD_ldm_getTableSize(ldmParams_t params) { size_t const ldmHSize = ((size_t)1) << params.hashLog; size_t const ldmBucketSizeLog = MIN(params.bucketSizeLog, params.hashLog); size_t const ldmBucketSize = ((size_t)1) << (params.hashLog - ldmBucketSizeLog); size_t const totalSize = ZSTD_cwksp_alloc_size(ldmBucketSize) + ZSTD_cwksp_alloc_size(ldmHSize * sizeof(ldmEntry_t)); return params.enableLdm ? totalSize : 0; } size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize) { return params.enableLdm ? (maxChunkSize / params.minMatchLength) : 0; } /** ZSTD_ldm_getSmallHash() : * numBits should be <= 32 * If numBits==0, returns 0. * @return : the most significant numBits of value. */ static U32 ZSTD_ldm_getSmallHash(U64 value, U32 numBits) { assert(numBits <= 32); return numBits == 0 ? 0 : (U32)(value >> (64 - numBits)); } /** ZSTD_ldm_getChecksum() : * numBitsToDiscard should be <= 32 * @return : the next most significant 32 bits after numBitsToDiscard */ static U32 ZSTD_ldm_getChecksum(U64 hash, U32 numBitsToDiscard) { assert(numBitsToDiscard <= 32); return (hash >> (64 - 32 - numBitsToDiscard)) & 0xFFFFFFFF; } /** ZSTD_ldm_getTag() ; * Given the hash, returns the most significant numTagBits bits * after (32 + hbits) bits. * * If there are not enough bits remaining, return the last * numTagBits bits. */ static U32 ZSTD_ldm_getTag(U64 hash, U32 hbits, U32 numTagBits) { assert(numTagBits < 32 && hbits <= 32); if (32 - hbits < numTagBits) { return hash & (((U32)1 << numTagBits) - 1); } else { return (hash >> (32 - hbits - numTagBits)) & (((U32)1 << numTagBits) - 1); } } /** ZSTD_ldm_getBucket() : * Returns a pointer to the start of the bucket associated with hash. 
*/ static ldmEntry_t* ZSTD_ldm_getBucket( ldmState_t* ldmState, size_t hash, ldmParams_t const ldmParams) { return ldmState->hashTable + (hash << ldmParams.bucketSizeLog); } /** ZSTD_ldm_insertEntry() : * Insert the entry with corresponding hash into the hash table */ static void ZSTD_ldm_insertEntry(ldmState_t* ldmState, size_t const hash, const ldmEntry_t entry, ldmParams_t const ldmParams) { BYTE* const bucketOffsets = ldmState->bucketOffsets; *(ZSTD_ldm_getBucket(ldmState, hash, ldmParams) + bucketOffsets[hash]) = entry; bucketOffsets[hash]++; bucketOffsets[hash] &= ((U32)1 << ldmParams.bucketSizeLog) - 1; } /** ZSTD_ldm_makeEntryAndInsertByTag() : * * Gets the small hash, checksum, and tag from the rollingHash. * * If the tag matches (1 << ldmParams.hashRateLog)-1, then * creates an ldmEntry from the offset, and inserts it into the hash table. * * hBits is the length of the small hash, which is the most significant hBits * of rollingHash. The checksum is the next 32 most significant bits, followed * by ldmParams.hashRateLog bits that make up the tag. */ static void ZSTD_ldm_makeEntryAndInsertByTag(ldmState_t* ldmState, U64 const rollingHash, U32 const hBits, U32 const offset, ldmParams_t const ldmParams) { U32 const tag = ZSTD_ldm_getTag(rollingHash, hBits, ldmParams.hashRateLog); U32 const tagMask = ((U32)1 << ldmParams.hashRateLog) - 1; if (tag == tagMask) { U32 const hash = ZSTD_ldm_getSmallHash(rollingHash, hBits); U32 const checksum = ZSTD_ldm_getChecksum(rollingHash, hBits); ldmEntry_t entry; entry.offset = offset; entry.checksum = checksum; ZSTD_ldm_insertEntry(ldmState, hash, entry, ldmParams); } } /** ZSTD_ldm_countBackwardsMatch() : * Returns the number of bytes that match backwards before pIn and pMatch. * * We count only bytes where pMatch >= pBase and pIn >= pAnchor. 
*/ static size_t ZSTD_ldm_countBackwardsMatch( const BYTE* pIn, const BYTE* pAnchor, const BYTE* pMatch, const BYTE* pBase) { size_t matchLength = 0; while (pIn > pAnchor && pMatch > pBase && pIn[-1] == pMatch[-1]) { pIn--; pMatch--; matchLength++; } return matchLength; } /** ZSTD_ldm_fillFastTables() : * * Fills the relevant tables for the ZSTD_fast and ZSTD_dfast strategies. * This is similar to ZSTD_loadDictionaryContent. * * The tables for the other strategies are filled within their * block compressors. */ static size_t ZSTD_ldm_fillFastTables(ZSTD_matchState_t* ms, void const* end) { const BYTE* const iend = (const BYTE*)end; switch(ms->cParams.strategy) { case ZSTD_fast: ZSTD_fillHashTable(ms, iend, ZSTD_dtlm_fast); break; case ZSTD_dfast: ZSTD_fillDoubleHashTable(ms, iend, ZSTD_dtlm_fast); break; case ZSTD_greedy: case ZSTD_lazy: case ZSTD_lazy2: case ZSTD_btlazy2: case ZSTD_btopt: case ZSTD_btultra: case ZSTD_btultra2: break; default: assert(0); /* not possible : not a valid strategy id */ } return 0; } /** ZSTD_ldm_fillLdmHashTable() : * * Fills hashTable from (lastHashed + 1) to iend (non-inclusive). * lastHash is the rolling hash that corresponds to lastHashed. * * Returns the rolling hash corresponding to position iend-1. 
*/ static U64 ZSTD_ldm_fillLdmHashTable(ldmState_t* state, U64 lastHash, const BYTE* lastHashed, const BYTE* iend, const BYTE* base, U32 hBits, ldmParams_t const ldmParams) { U64 rollingHash = lastHash; const BYTE* cur = lastHashed + 1; while (cur < iend) { rollingHash = ZSTD_rollingHash_rotate(rollingHash, cur[-1], cur[ldmParams.minMatchLength-1], state->hashPower); ZSTD_ldm_makeEntryAndInsertByTag(state, rollingHash, hBits, (U32)(cur - base), ldmParams); ++cur; } return rollingHash; } void ZSTD_ldm_fillHashTable( ldmState_t* state, const BYTE* ip, const BYTE* iend, ldmParams_t const* params) { DEBUGLOG(5, "ZSTD_ldm_fillHashTable"); if ((size_t)(iend - ip) >= params->minMatchLength) { U64 startingHash = ZSTD_rollingHash_compute(ip, params->minMatchLength); ZSTD_ldm_fillLdmHashTable( state, startingHash, ip, iend - params->minMatchLength, state->window.base, params->hashLog - params->bucketSizeLog, *params); } } /** ZSTD_ldm_limitTableUpdate() : * * Sets cctx->nextToUpdate to a position corresponding closer to anchor * if it is far way * (after a long match, only update tables a limited amount). 
*/ static void ZSTD_ldm_limitTableUpdate(ZSTD_matchState_t* ms, const BYTE* anchor) { U32 const current = (U32)(anchor - ms->window.base); if (current > ms->nextToUpdate + 1024) { ms->nextToUpdate = current - MIN(512, current - ms->nextToUpdate - 1024); } } static size_t ZSTD_ldm_generateSequences_internal( ldmState_t* ldmState, rawSeqStore_t* rawSeqStore, ldmParams_t const* params, void const* src, size_t srcSize) { /* LDM parameters */ int const extDict = ZSTD_window_hasExtDict(ldmState->window); U32 const minMatchLength = params->minMatchLength; U64 const hashPower = ldmState->hashPower; U32 const hBits = params->hashLog - params->bucketSizeLog; U32 const ldmBucketSize = 1U << params->bucketSizeLog; U32 const hashRateLog = params->hashRateLog; U32 const ldmTagMask = (1U << params->hashRateLog) - 1; /* Prefix and extDict parameters */ U32 const dictLimit = ldmState->window.dictLimit; U32 const lowestIndex = extDict ? ldmState->window.lowLimit : dictLimit; BYTE const* const base = ldmState->window.base; BYTE const* const dictBase = extDict ? ldmState->window.dictBase : NULL; BYTE const* const dictStart = extDict ? dictBase + lowestIndex : NULL; BYTE const* const dictEnd = extDict ? 
dictBase + dictLimit : NULL; BYTE const* const lowPrefixPtr = base + dictLimit; /* Input bounds */ BYTE const* const istart = (BYTE const*)src; BYTE const* const iend = istart + srcSize; BYTE const* const ilimit = iend - MAX(minMatchLength, HASH_READ_SIZE); /* Input positions */ BYTE const* anchor = istart; BYTE const* ip = istart; /* Rolling hash */ BYTE const* lastHashed = NULL; U64 rollingHash = 0; while (ip <= ilimit) { size_t mLength; U32 const current = (U32)(ip - base); size_t forwardMatchLength = 0, backwardMatchLength = 0; ldmEntry_t* bestEntry = NULL; if (ip != istart) { rollingHash = ZSTD_rollingHash_rotate(rollingHash, lastHashed[0], lastHashed[minMatchLength], hashPower); } else { rollingHash = ZSTD_rollingHash_compute(ip, minMatchLength); } lastHashed = ip; /* Do not insert and do not look for a match */ if (ZSTD_ldm_getTag(rollingHash, hBits, hashRateLog) != ldmTagMask) { ip++; continue; } /* Get the best entry and compute the match lengths */ { ldmEntry_t* const bucket = ZSTD_ldm_getBucket(ldmState, ZSTD_ldm_getSmallHash(rollingHash, hBits), *params); ldmEntry_t* cur; size_t bestMatchLength = 0; U32 const checksum = ZSTD_ldm_getChecksum(rollingHash, hBits); for (cur = bucket; cur < bucket + ldmBucketSize; ++cur) { size_t curForwardMatchLength, curBackwardMatchLength, curTotalMatchLength; if (cur->checksum != checksum || cur->offset <= lowestIndex) { continue; } if (extDict) { BYTE const* const curMatchBase = cur->offset < dictLimit ? dictBase : base; BYTE const* const pMatch = curMatchBase + cur->offset; BYTE const* const matchEnd = cur->offset < dictLimit ? dictEnd : iend; BYTE const* const lowMatchPtr = cur->offset < dictLimit ? 
dictStart : lowPrefixPtr; curForwardMatchLength = ZSTD_count_2segments( ip, pMatch, iend, matchEnd, lowPrefixPtr); if (curForwardMatchLength < minMatchLength) { continue; } curBackwardMatchLength = ZSTD_ldm_countBackwardsMatch(ip, anchor, pMatch, lowMatchPtr); curTotalMatchLength = curForwardMatchLength + curBackwardMatchLength; } else { /* !extDict */ BYTE const* const pMatch = base + cur->offset; curForwardMatchLength = ZSTD_count(ip, pMatch, iend); if (curForwardMatchLength < minMatchLength) { continue; } curBackwardMatchLength = ZSTD_ldm_countBackwardsMatch(ip, anchor, pMatch, lowPrefixPtr); curTotalMatchLength = curForwardMatchLength + curBackwardMatchLength; } if (curTotalMatchLength > bestMatchLength) { bestMatchLength = curTotalMatchLength; forwardMatchLength = curForwardMatchLength; backwardMatchLength = curBackwardMatchLength; bestEntry = cur; } } } /* No match found -- continue searching */ if (bestEntry == NULL) { ZSTD_ldm_makeEntryAndInsertByTag(ldmState, rollingHash, hBits, current, *params); ip++; continue; } /* Match found */ mLength = forwardMatchLength + backwardMatchLength; ip -= backwardMatchLength; { /* Store the sequence: * ip = current - backwardMatchLength * The match is at (bestEntry->offset - backwardMatchLength) */ U32 const matchIndex = bestEntry->offset; U32 const offset = current - matchIndex; rawSeq* const seq = rawSeqStore->seq + rawSeqStore->size; /* Out of sequence storage */ if (rawSeqStore->size == rawSeqStore->capacity) return ERROR(dstSize_tooSmall); seq->litLength = (U32)(ip - anchor); seq->matchLength = (U32)mLength; seq->offset = offset; rawSeqStore->size++; } /* Insert the current entry into the hash table */ ZSTD_ldm_makeEntryAndInsertByTag(ldmState, rollingHash, hBits, (U32)(lastHashed - base), *params); assert(ip + backwardMatchLength == lastHashed); /* Fill the hash table from lastHashed+1 to ip+mLength*/ /* Heuristic: don't need to fill the entire table at end of block */ if (ip + mLength <= ilimit) { rollingHash = 
ZSTD_ldm_fillLdmHashTable( ldmState, rollingHash, lastHashed, ip + mLength, base, hBits, *params); lastHashed = ip + mLength - 1; } ip += mLength; anchor = ip; } return iend - anchor; } /*! ZSTD_ldm_reduceTable() : * reduce table indexes by `reducerValue` */ static void ZSTD_ldm_reduceTable(ldmEntry_t* const table, U32 const size, U32 const reducerValue) { U32 u; for (u = 0; u < size; u++) { if (table[u].offset < reducerValue) table[u].offset = 0; else table[u].offset -= reducerValue; } } size_t ZSTD_ldm_generateSequences( ldmState_t* ldmState, rawSeqStore_t* sequences, ldmParams_t const* params, void const* src, size_t srcSize) { U32 const maxDist = 1U << params->windowLog; BYTE const* const istart = (BYTE const*)src; BYTE const* const iend = istart + srcSize; size_t const kMaxChunkSize = 1 << 20; size_t const nbChunks = (srcSize / kMaxChunkSize) + ((srcSize % kMaxChunkSize) != 0); size_t chunk; size_t leftoverSize = 0; assert(ZSTD_CHUNKSIZE_MAX >= kMaxChunkSize); /* Check that ZSTD_window_update() has been called for this chunk prior * to passing it to this function. */ assert(ldmState->window.nextSrc >= (BYTE const*)src + srcSize); /* The input could be very large (in zstdmt), so it must be broken up into * chunks to enforce the maximum distance and handle overflow correction. */ assert(sequences->pos <= sequences->size); assert(sequences->size <= sequences->capacity); for (chunk = 0; chunk < nbChunks && sequences->size < sequences->capacity; ++chunk) { BYTE const* const chunkStart = istart + chunk * kMaxChunkSize; size_t const remaining = (size_t)(iend - chunkStart); BYTE const *const chunkEnd = (remaining < kMaxChunkSize) ? iend : chunkStart + kMaxChunkSize; size_t const chunkSize = chunkEnd - chunkStart; size_t newLeftoverSize; size_t const prevSize = sequences->size; assert(chunkStart < iend); /* 1. Perform overflow correction if necessary. 
*/ if (ZSTD_window_needOverflowCorrection(ldmState->window, chunkEnd)) { U32 const ldmHSize = 1U << params->hashLog; U32 const correction = ZSTD_window_correctOverflow( &ldmState->window, /* cycleLog */ 0, maxDist, chunkStart); ZSTD_ldm_reduceTable(ldmState->hashTable, ldmHSize, correction); /* invalidate dictionaries on overflow correction */ ldmState->loadedDictEnd = 0; } /* 2. We enforce the maximum offset allowed. * * kMaxChunkSize should be small enough that we don't lose too much of * the window through early invalidation. * TODO: * Test the chunk size. * * Try invalidation after the sequence generation and test the * the offset against maxDist directly. * * NOTE: Because of dictionaries + sequence splitting we MUST make sure * that any offset used is valid at the END of the sequence, since it may * be split into two sequences. This condition holds when using * ZSTD_window_enforceMaxDist(), but if we move to checking offsets * against maxDist directly, we'll have to carefully handle that case. */ ZSTD_window_enforceMaxDist(&ldmState->window, chunkEnd, maxDist, &ldmState->loadedDictEnd, NULL); /* 3. Generate the sequences for the chunk, and get newLeftoverSize. */ newLeftoverSize = ZSTD_ldm_generateSequences_internal( ldmState, sequences, params, chunkStart, chunkSize); if (ZSTD_isError(newLeftoverSize)) return newLeftoverSize; /* 4. We add the leftover literals from previous iterations to the first * newly generated sequence, or add the `newLeftoverSize` if none are * generated. 
*/ /* Prepend the leftover literals from the last call */ if (prevSize < sequences->size) { sequences->seq[prevSize].litLength += (U32)leftoverSize; leftoverSize = newLeftoverSize; } else { assert(newLeftoverSize == chunkSize); leftoverSize += chunkSize; } } return 0; } void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize, U32 const minMatch) { while (srcSize > 0 && rawSeqStore->pos < rawSeqStore->size) { rawSeq* seq = rawSeqStore->seq + rawSeqStore->pos; if (srcSize <= seq->litLength) { /* Skip past srcSize literals */ seq->litLength -= (U32)srcSize; return; } srcSize -= seq->litLength; seq->litLength = 0; if (srcSize < seq->matchLength) { /* Skip past the first srcSize of the match */ seq->matchLength -= (U32)srcSize; if (seq->matchLength < minMatch) { /* The match is too short, omit it */ if (rawSeqStore->pos + 1 < rawSeqStore->size) { seq[1].litLength += seq[0].matchLength; } rawSeqStore->pos++; } return; } srcSize -= seq->matchLength; seq->matchLength = 0; rawSeqStore->pos++; } } /** * If the sequence length is longer than remaining then the sequence is split * between this block and the next. * * Returns the current sequence to handle, or if the rest of the block should * be literals, it returns a sequence with offset == 0. */ static rawSeq maybeSplitSequence(rawSeqStore_t* rawSeqStore, U32 const remaining, U32 const minMatch) { rawSeq sequence = rawSeqStore->seq[rawSeqStore->pos]; assert(sequence.offset > 0); /* Likely: No partial sequence */ if (remaining >= sequence.litLength + sequence.matchLength) { rawSeqStore->pos++; return sequence; } /* Cut the sequence short (offset == 0 ==> rest is literals). */ if (remaining <= sequence.litLength) { sequence.offset = 0; } else if (remaining < sequence.litLength + sequence.matchLength) { sequence.matchLength = remaining - sequence.litLength; if (sequence.matchLength < minMatch) { sequence.offset = 0; } } /* Skip past `remaining` bytes for the future sequences. 
*/ ZSTD_ldm_skipSequences(rawSeqStore, remaining, minMatch); return sequence; } size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore, ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { const ZSTD_compressionParameters* const cParams = &ms->cParams; unsigned const minMatch = cParams->minMatch; ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(cParams->strategy, ZSTD_matchState_dictMode(ms)); /* Input bounds */ BYTE const* const istart = (BYTE const*)src; BYTE const* const iend = istart + srcSize; /* Input positions */ BYTE const* ip = istart; DEBUGLOG(5, "ZSTD_ldm_blockCompress: srcSize=%zu", srcSize); assert(rawSeqStore->pos <= rawSeqStore->size); assert(rawSeqStore->size <= rawSeqStore->capacity); /* Loop through each sequence and apply the block compressor to the lits */ while (rawSeqStore->pos < rawSeqStore->size && ip < iend) { /* maybeSplitSequence updates rawSeqStore->pos */ rawSeq const sequence = maybeSplitSequence(rawSeqStore, (U32)(iend - ip), minMatch); int i; /* End signal */ if (sequence.offset == 0) break; assert(ip + sequence.litLength + sequence.matchLength <= iend); /* Fill tables for block compressor */ ZSTD_ldm_limitTableUpdate(ms, ip); ZSTD_ldm_fillFastTables(ms, ip); /* Run the block compressor */ DEBUGLOG(5, "pos %u : calling block compressor on segment of size %u", (unsigned)(ip-istart), sequence.litLength); { size_t const newLitLength = blockCompressor(ms, seqStore, rep, ip, sequence.litLength); ip += sequence.litLength; /* Update the repcodes */ for (i = ZSTD_REP_NUM - 1; i > 0; i--) rep[i] = rep[i-1]; rep[0] = sequence.offset; /* Store the sequence */ ZSTD_storeSeq(seqStore, newLitLength, ip - newLitLength, iend, sequence.offset + ZSTD_REP_MOVE, sequence.matchLength - MINMATCH); ip += sequence.matchLength; } } /* Fill the tables for the block compressor */ ZSTD_ldm_limitTableUpdate(ms, ip); ZSTD_ldm_fillFastTables(ms, ip); /* Compress the last literals */ return 
blockCompressor(ms, seqStore, rep, ip, iend - ip); }
moisseev/rspamd
contrib/zstd/zstd_ldm.c
C
apache-2.0
24,942
<?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE helpset PUBLIC "-//Sun Microsystems Inc.//DTD JavaHelp HelpSet Version 2.0//EN" "http://java.sun.com/products/javahelp/helpset_2_0.dtd"> <helpset version="2.0" xml:lang="el-GR"> <title>TLS Debug | ZAP Extension</title> <maps> <homeID>top</homeID> <mapref location="map.jhm"/> </maps> <view> <name>TOC</name> <label>Contents</label> <type>org.zaproxy.zap.extension.help.ZapTocView</type> <data>toc.xml</data> </view> <view> <name>Index</name> <label>Index</label> <type>javax.help.IndexView</type> <data>index.xml</data> </view> <view> <name>Search</name> <label>Search</label> <type>javax.help.SearchView</type> <data engine="com.sun.java.help.search.DefaultSearchEngine"> JavaHelpSearch </data> </view> <view> <name>Favorites</name> <label>Favorites</label> <type>javax.help.FavoritesView</type> </view> </helpset>
veggiespam/zap-extensions
addOns/tlsdebug/src/main/javahelp/org/zaproxy/zap/extension/tlsdebug/resources/help_el_GR/helpset_el_GR.hs
Haskell
apache-2.0
971
"""The MusicCast integration.""" from __future__ import annotations from datetime import timedelta import logging from aiomusiccast import MusicCastConnectionException from aiomusiccast.capabilities import Capability from aiomusiccast.musiccast_device import MusicCastData, MusicCastDevice from homeassistant.components import ssdp from homeassistant.config_entries import ConfigEntry from homeassistant.const import CONF_HOST, Platform from homeassistant.core import HomeAssistant from homeassistant.helpers.aiohttp_client import async_get_clientsession from homeassistant.helpers.device_registry import CONNECTION_NETWORK_MAC, format_mac from homeassistant.helpers.entity import DeviceInfo from homeassistant.helpers.update_coordinator import ( CoordinatorEntity, DataUpdateCoordinator, UpdateFailed, ) from .const import ( BRAND, CONF_SERIAL, CONF_UPNP_DESC, DEFAULT_ZONE, DOMAIN, ENTITY_CATEGORY_MAPPING, ) PLATFORMS = [Platform.MEDIA_PLAYER, Platform.NUMBER, Platform.SELECT] _LOGGER = logging.getLogger(__name__) SCAN_INTERVAL = timedelta(seconds=60) async def get_upnp_desc(hass: HomeAssistant, host: str): """Get the upnp description URL for a given host, using the SSPD scanner.""" ssdp_entries = await ssdp.async_get_discovery_info_by_st(hass, "upnp:rootdevice") matches = [w for w in ssdp_entries if w.ssdp_headers.get("_host", "") == host] upnp_desc = None for match in matches: if upnp_desc := match.ssdp_location: break if not upnp_desc: _LOGGER.warning( "The upnp_description was not found automatically, setting a default one" ) upnp_desc = f"http://{host}:49154/MediaRenderer/desc.xml" return upnp_desc async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool: """Set up MusicCast from a config entry.""" if entry.data.get(CONF_UPNP_DESC) is None: hass.config_entries.async_update_entry( entry, data={ CONF_HOST: entry.data[CONF_HOST], CONF_SERIAL: entry.data["serial"], CONF_UPNP_DESC: await get_upnp_desc(hass, entry.data[CONF_HOST]), }, ) client = 
MusicCastDevice( entry.data[CONF_HOST], async_get_clientsession(hass), entry.data[CONF_UPNP_DESC], ) coordinator = MusicCastDataUpdateCoordinator(hass, client=client) await coordinator.async_config_entry_first_refresh() coordinator.musiccast.build_capabilities() hass.data.setdefault(DOMAIN, {}) hass.data[DOMAIN][entry.entry_id] = coordinator await coordinator.musiccast.device.enable_polling() hass.config_entries.async_setup_platforms(entry, PLATFORMS) entry.async_on_unload(entry.add_update_listener(async_reload_entry)) return True async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool: """Unload a config entry.""" unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS) if unload_ok: hass.data[DOMAIN][entry.entry_id].musiccast.device.disable_polling() hass.data[DOMAIN].pop(entry.entry_id) return unload_ok async def async_reload_entry(hass: HomeAssistant, entry: ConfigEntry) -> None: """Reload config entry.""" await hass.config_entries.async_reload(entry.entry_id) class MusicCastDataUpdateCoordinator(DataUpdateCoordinator[MusicCastData]): """Class to manage fetching data from the API.""" def __init__(self, hass: HomeAssistant, client: MusicCastDevice) -> None: """Initialize.""" self.musiccast = client super().__init__(hass, _LOGGER, name=DOMAIN, update_interval=SCAN_INTERVAL) self.entities: list[MusicCastDeviceEntity] = [] async def _async_update_data(self) -> MusicCastData: """Update data via library.""" try: await self.musiccast.fetch() except MusicCastConnectionException as exception: raise UpdateFailed() from exception return self.musiccast.data class MusicCastEntity(CoordinatorEntity): """Defines a base MusicCast entity.""" coordinator: MusicCastDataUpdateCoordinator def __init__( self, *, name: str, icon: str, coordinator: MusicCastDataUpdateCoordinator, enabled_default: bool = True, ) -> None: """Initialize the MusicCast entity.""" super().__init__(coordinator) self._enabled_default = enabled_default self._icon = 
icon self._name = name @property def name(self) -> str: """Return the name of the entity.""" return self._name @property def icon(self) -> str: """Return the mdi icon of the entity.""" return self._icon @property def entity_registry_enabled_default(self) -> bool: """Return if the entity should be enabled when first added to the entity registry.""" return self._enabled_default class MusicCastDeviceEntity(MusicCastEntity): """Defines a MusicCast device entity.""" _zone_id: str = DEFAULT_ZONE @property def device_id(self): """Return the ID of the current device.""" if self._zone_id == DEFAULT_ZONE: return self.coordinator.data.device_id return f"{self.coordinator.data.device_id}_{self._zone_id}" @property def device_name(self): """Return the name of the current device.""" return self.coordinator.data.zones[self._zone_id].name @property def device_info(self) -> DeviceInfo: """Return device information about this MusicCast device.""" device_info = DeviceInfo( name=self.device_name, identifiers={ ( DOMAIN, self.device_id, ) }, manufacturer=BRAND, model=self.coordinator.data.model_name, sw_version=self.coordinator.data.system_version, ) if self._zone_id == DEFAULT_ZONE: device_info["connections"] = { (CONNECTION_NETWORK_MAC, format_mac(mac)) for mac in self.coordinator.data.mac_addresses.values() } else: device_info["via_device"] = (DOMAIN, self.coordinator.data.device_id) return device_info class MusicCastCapabilityEntity(MusicCastDeviceEntity): """Base Entity type for all capabilities.""" def __init__( self, coordinator: MusicCastDataUpdateCoordinator, capability: Capability, zone_id: str = None, ) -> None: """Initialize a capability based entity.""" if zone_id is not None: self._zone_id = zone_id self.capability = capability super().__init__(name=capability.name, icon="", coordinator=coordinator) self._attr_entity_category = ENTITY_CATEGORY_MAPPING.get(capability.entity_type) async def async_added_to_hass(self): """Run when this Entity has been added to HA.""" await 
super().async_added_to_hass() # All capability based entities should register callbacks to update HA when their state changes self.coordinator.musiccast.register_callback(self.async_write_ha_state) async def async_will_remove_from_hass(self): """Entity being removed from hass.""" await super().async_added_to_hass() self.coordinator.musiccast.remove_callback(self.async_write_ha_state) @property def unique_id(self) -> str: """Return the unique ID for this entity.""" return f"{self.device_id}_{self.capability.id}"
mezz64/home-assistant
homeassistant/components/yamaha_musiccast/__init__.py
Python
apache-2.0
7,689
#!/bin/sh ## This script takes count of the instance and the name of the cloud. ## It only generates and writes /etc/weave.env, and doesn't run anything. count=$1 crypt=$2 shift 2 cat << ENVIRON | sudo tee /etc/weave.env WEAVE_PEERS="${@}" WEAVE_PASSWORD="${crypt}" WEAVEDNS_ADDR="10.10.2.1${count}/16" ENVIRON
errordeveloper/weave-demos
terraform-example/genenv-aws-only.sh
Shell
apache-2.0
312
/* * Copyright (c) 2007, 2016, Oracle and/or its affiliates. All rights reserved. * ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms. */ /* * Copyright 2001-2004 The Apache Software Foundation. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.sun.org.apache.xerces.internal.impl.xs.opti; import java.io.IOException; import java.util.Locale; import com.sun.org.apache.xerces.internal.impl.Constants; import com.sun.org.apache.xerces.internal.impl.XML11DTDScannerImpl; import com.sun.org.apache.xerces.internal.impl.XML11NSDocumentScannerImpl; import com.sun.org.apache.xerces.internal.impl.XMLDTDScannerImpl; import com.sun.org.apache.xerces.internal.impl.XMLEntityHandler; import com.sun.org.apache.xerces.internal.impl.XMLEntityManager; import com.sun.org.apache.xerces.internal.impl.XMLErrorReporter; import com.sun.org.apache.xerces.internal.impl.XMLNSDocumentScannerImpl; import com.sun.org.apache.xerces.internal.impl.XMLVersionDetector; import com.sun.org.apache.xerces.internal.impl.dv.DTDDVFactory; import com.sun.org.apache.xerces.internal.impl.msg.XMLMessageFormatter; import com.sun.org.apache.xerces.internal.impl.validation.ValidationManager; import com.sun.org.apache.xerces.internal.impl.xs.XSMessageFormatter; import com.sun.org.apache.xerces.internal.parsers.BasicParserConfiguration; import com.sun.org.apache.xerces.internal.util.FeatureState; import com.sun.org.apache.xerces.internal.util.PropertyState; import 
com.sun.org.apache.xerces.internal.util.Status; import com.sun.org.apache.xerces.internal.util.SymbolTable; import com.sun.org.apache.xerces.internal.xni.XMLLocator; import com.sun.org.apache.xerces.internal.xni.XNIException; import com.sun.org.apache.xerces.internal.xni.grammars.XMLGrammarPool; import com.sun.org.apache.xerces.internal.xni.parser.XMLComponent; import com.sun.org.apache.xerces.internal.xni.parser.XMLComponentManager; import com.sun.org.apache.xerces.internal.xni.parser.XMLConfigurationException; import com.sun.org.apache.xerces.internal.xni.parser.XMLDTDScanner; import com.sun.org.apache.xerces.internal.xni.parser.XMLDocumentScanner; import com.sun.org.apache.xerces.internal.xni.parser.XMLInputSource; import com.sun.org.apache.xerces.internal.xni.parser.XMLPullParserConfiguration; /** * @xerces.internal * * @author Rahul Srivastava, Sun Microsystems Inc. * * @version $Id: SchemaParsingConfig.java,v 1.8 2010-11-01 04:40:01 joehw Exp $ */ public class SchemaParsingConfig extends BasicParserConfiguration implements XMLPullParserConfiguration { // // Constants // protected final static String XML11_DATATYPE_VALIDATOR_FACTORY = "com.sun.org.apache.xerces.internal.impl.dv.dtd.XML11DTDDVFactoryImpl"; // feature identifiers /** Feature identifier: warn on duplicate attribute definition. */ protected static final String WARN_ON_DUPLICATE_ATTDEF = Constants.XERCES_FEATURE_PREFIX + Constants.WARN_ON_DUPLICATE_ATTDEF_FEATURE; /** Feature identifier: warn on duplicate entity definition. */ // protected static final String WARN_ON_DUPLICATE_ENTITYDEF = Constants.XERCES_FEATURE_PREFIX + Constants.WARN_ON_DUPLICATE_ENTITYDEF_FEATURE; /** Feature identifier: warn on undeclared element definition. */ protected static final String WARN_ON_UNDECLARED_ELEMDEF = Constants.XERCES_FEATURE_PREFIX + Constants.WARN_ON_UNDECLARED_ELEMDEF_FEATURE; /** Feature identifier: allow Java encodings. 
*/ protected static final String ALLOW_JAVA_ENCODINGS = Constants.XERCES_FEATURE_PREFIX + Constants.ALLOW_JAVA_ENCODINGS_FEATURE; /** Feature identifier: continue after fatal error. */ protected static final String CONTINUE_AFTER_FATAL_ERROR = Constants.XERCES_FEATURE_PREFIX + Constants.CONTINUE_AFTER_FATAL_ERROR_FEATURE; /** Feature identifier: load external DTD. */ protected static final String LOAD_EXTERNAL_DTD = Constants.XERCES_FEATURE_PREFIX + Constants.LOAD_EXTERNAL_DTD_FEATURE; /** Feature identifier: notify built-in refereces. */ protected static final String NOTIFY_BUILTIN_REFS = Constants.XERCES_FEATURE_PREFIX + Constants.NOTIFY_BUILTIN_REFS_FEATURE; /** Feature identifier: notify character refereces. */ protected static final String NOTIFY_CHAR_REFS = Constants.XERCES_FEATURE_PREFIX + Constants.NOTIFY_CHAR_REFS_FEATURE; /** Feature identifier: expose schema normalized value */ protected static final String NORMALIZE_DATA = Constants.XERCES_FEATURE_PREFIX + Constants.SCHEMA_NORMALIZED_VALUE; /** Feature identifier: send element default value via characters() */ protected static final String SCHEMA_ELEMENT_DEFAULT = Constants.XERCES_FEATURE_PREFIX + Constants.SCHEMA_ELEMENT_DEFAULT; /** Feature identifier: generate synthetic annotations. */ protected static final String GENERATE_SYNTHETIC_ANNOTATIONS = Constants.XERCES_FEATURE_PREFIX + Constants.GENERATE_SYNTHETIC_ANNOTATIONS_FEATURE; // property identifiers /** Property identifier: error reporter. */ protected static final String ERROR_REPORTER = Constants.XERCES_PROPERTY_PREFIX + Constants.ERROR_REPORTER_PROPERTY; /** Property identifier: entity manager. */ protected static final String ENTITY_MANAGER = Constants.XERCES_PROPERTY_PREFIX + Constants.ENTITY_MANAGER_PROPERTY; /** Property identifier document scanner: */ protected static final String DOCUMENT_SCANNER = Constants.XERCES_PROPERTY_PREFIX + Constants.DOCUMENT_SCANNER_PROPERTY; /** Property identifier: DTD scanner. 
*/ protected static final String DTD_SCANNER = Constants.XERCES_PROPERTY_PREFIX + Constants.DTD_SCANNER_PROPERTY; /** Property identifier: grammar pool. */ protected static final String XMLGRAMMAR_POOL = Constants.XERCES_PROPERTY_PREFIX + Constants.XMLGRAMMAR_POOL_PROPERTY; /** Property identifier: DTD validator. */ protected static final String DTD_VALIDATOR = Constants.XERCES_PROPERTY_PREFIX + Constants.DTD_VALIDATOR_PROPERTY; /** Property identifier: namespace binder. */ protected static final String NAMESPACE_BINDER = Constants.XERCES_PROPERTY_PREFIX + Constants.NAMESPACE_BINDER_PROPERTY; /** Property identifier: datatype validator factory. */ protected static final String DATATYPE_VALIDATOR_FACTORY = Constants.XERCES_PROPERTY_PREFIX + Constants.DATATYPE_VALIDATOR_FACTORY_PROPERTY; protected static final String VALIDATION_MANAGER = Constants.XERCES_PROPERTY_PREFIX + Constants.VALIDATION_MANAGER_PROPERTY; /** Property identifier: XML Schema validator. */ protected static final String SCHEMA_VALIDATOR = Constants.XERCES_PROPERTY_PREFIX + Constants.SCHEMA_VALIDATOR_PROPERTY; /** Property identifier: locale. */ protected static final String LOCALE = Constants.XERCES_PROPERTY_PREFIX + Constants.LOCALE_PROPERTY; // debugging /** Set to true and recompile to print exception stack trace. */ private static final boolean PRINT_EXCEPTION_STACK_TRACE = false; // // Data // // // XML 1.0 components // /** The XML 1.0 Datatype validator factory. */ protected final DTDDVFactory fDatatypeValidatorFactory; /** The XML 1.0 Document scanner. */ protected final XMLNSDocumentScannerImpl fNamespaceScanner; /** The XML 1.0 DTD scanner. */ protected final XMLDTDScannerImpl fDTDScanner; // // XML 1.1 components // /** The XML 1.1 Datatype validator factory. */ protected DTDDVFactory fXML11DatatypeFactory = null; /** The XML 1.1 Document scanner. */ protected XML11NSDocumentScannerImpl fXML11NSDocScanner = null; /** The XML 1.1 DTD scanner. 
**/ protected XML11DTDScannerImpl fXML11DTDScanner = null; // common components (non-configurable) /** Current Datatype validator factory. */ protected DTDDVFactory fCurrentDVFactory; /** Current scanner */ protected XMLDocumentScanner fCurrentScanner; /** Current DTD scanner. */ protected XMLDTDScanner fCurrentDTDScanner; /** Grammar pool. */ protected XMLGrammarPool fGrammarPool; /** XML version detector. */ protected final XMLVersionDetector fVersionDetector; // common components (configurable) /** Error reporter. */ protected final XMLErrorReporter fErrorReporter; /** Entity manager. */ protected final XMLEntityManager fEntityManager; /** Input Source */ protected XMLInputSource fInputSource; protected final ValidationManager fValidationManager; // state /** Locator */ protected XMLLocator fLocator; /** * True if a parse is in progress. This state is needed because * some features/properties cannot be set while parsing (e.g. * validation and namespaces). */ protected boolean fParseInProgress = false; /** * fConfigUpdated is set to true if there has been any change to the configuration settings, * i.e a feature or a property was changed. */ protected boolean fConfigUpdated = false; /** Flag indiciating whether XML11 components have been initialized. */ private boolean f11Initialized = false; // // Constructors // /** Default constructor. */ public SchemaParsingConfig() { this(null, null, null); } // <init>() /** * Constructs a parser configuration using the specified symbol table. * * @param symbolTable The symbol table to use. */ public SchemaParsingConfig(SymbolTable symbolTable) { this(symbolTable, null, null); } // <init>(SymbolTable) /** * Constructs a parser configuration using the specified symbol table and * grammar pool. * <p> * <strong>REVISIT:</strong> * Grammar pool will be updated when the new validation engine is * implemented. * * @param symbolTable The symbol table to use. * @param grammarPool The grammar pool to use. 
*/ public SchemaParsingConfig(SymbolTable symbolTable, XMLGrammarPool grammarPool) { this(symbolTable, grammarPool, null); } // <init>(SymbolTable,XMLGrammarPool) /** * Constructs a parser configuration using the specified symbol table, * grammar pool, and parent settings. * <p> * <strong>REVISIT:</strong> * Grammar pool will be updated when the new validation engine is * implemented. * * @param symbolTable The symbol table to use. * @param grammarPool The grammar pool to use. * @param parentSettings The parent settings. */ public SchemaParsingConfig(SymbolTable symbolTable, XMLGrammarPool grammarPool, XMLComponentManager parentSettings) { super(symbolTable, parentSettings); // add default recognized features final String[] recognizedFeatures = { PARSER_SETTINGS, WARN_ON_DUPLICATE_ATTDEF, WARN_ON_UNDECLARED_ELEMDEF, ALLOW_JAVA_ENCODINGS, CONTINUE_AFTER_FATAL_ERROR, LOAD_EXTERNAL_DTD, NOTIFY_BUILTIN_REFS, NOTIFY_CHAR_REFS, GENERATE_SYNTHETIC_ANNOTATIONS }; addRecognizedFeatures(recognizedFeatures); fFeatures.put(PARSER_SETTINGS, Boolean.TRUE); // set state for default features fFeatures.put(WARN_ON_DUPLICATE_ATTDEF, Boolean.FALSE); //setFeature(WARN_ON_DUPLICATE_ENTITYDEF, false); fFeatures.put(WARN_ON_UNDECLARED_ELEMDEF, Boolean.FALSE); fFeatures.put(ALLOW_JAVA_ENCODINGS, Boolean.FALSE); fFeatures.put(CONTINUE_AFTER_FATAL_ERROR, Boolean.FALSE); fFeatures.put(LOAD_EXTERNAL_DTD, Boolean.TRUE); fFeatures.put(NOTIFY_BUILTIN_REFS, Boolean.FALSE); fFeatures.put(NOTIFY_CHAR_REFS, Boolean.FALSE); fFeatures.put(GENERATE_SYNTHETIC_ANNOTATIONS, Boolean.FALSE); // add default recognized properties final String[] recognizedProperties = { ERROR_REPORTER, ENTITY_MANAGER, DOCUMENT_SCANNER, DTD_SCANNER, DTD_VALIDATOR, NAMESPACE_BINDER, XMLGRAMMAR_POOL, DATATYPE_VALIDATOR_FACTORY, VALIDATION_MANAGER, GENERATE_SYNTHETIC_ANNOTATIONS, LOCALE }; addRecognizedProperties(recognizedProperties); fGrammarPool = grammarPool; if (fGrammarPool != null) { setProperty(XMLGRAMMAR_POOL, 
fGrammarPool); } fEntityManager = new XMLEntityManager(); fProperties.put(ENTITY_MANAGER, fEntityManager); addComponent(fEntityManager); fErrorReporter = new XMLErrorReporter(); fErrorReporter.setDocumentLocator(fEntityManager.getEntityScanner()); fProperties.put(ERROR_REPORTER, fErrorReporter); addComponent(fErrorReporter); fNamespaceScanner = new XMLNSDocumentScannerImpl(); fProperties.put(DOCUMENT_SCANNER, fNamespaceScanner); addRecognizedParamsAndSetDefaults(fNamespaceScanner); fDTDScanner = new XMLDTDScannerImpl(); fProperties.put(DTD_SCANNER, fDTDScanner); addRecognizedParamsAndSetDefaults(fDTDScanner); fDatatypeValidatorFactory = DTDDVFactory.getInstance(); fProperties.put(DATATYPE_VALIDATOR_FACTORY, fDatatypeValidatorFactory); fValidationManager = new ValidationManager(); fProperties.put(VALIDATION_MANAGER, fValidationManager); fVersionDetector = new XMLVersionDetector(); // add message formatters if (fErrorReporter.getMessageFormatter(XMLMessageFormatter.XML_DOMAIN) == null) { XMLMessageFormatter xmft = new XMLMessageFormatter(); fErrorReporter.putMessageFormatter(XMLMessageFormatter.XML_DOMAIN, xmft); fErrorReporter.putMessageFormatter(XMLMessageFormatter.XMLNS_DOMAIN, xmft); } if (fErrorReporter.getMessageFormatter(XSMessageFormatter.SCHEMA_DOMAIN) == null) { XSMessageFormatter xmft = new XSMessageFormatter(); fErrorReporter.putMessageFormatter(XSMessageFormatter.SCHEMA_DOMAIN, xmft); } // set locale try { setLocale(Locale.getDefault()); } catch (XNIException e) { // do nothing // REVISIT: What is the right thing to do? -Ac } } // <init>(SymbolTable,XMLGrammarPool) // // Public methods // /** * Returns the state of a feature. * * @param featureId The feature identifier. * @return true if the feature is supported * * @throws XMLConfigurationException Thrown for configuration error. * In general, components should * only throw this exception if * it is <strong>really</strong> * a critical error. 
*/ public FeatureState getFeatureState(String featureId) throws XMLConfigurationException { // make this feature special if (featureId.equals(PARSER_SETTINGS)) { return FeatureState.is(fConfigUpdated); } return super.getFeatureState(featureId); } // getFeature(String):boolean /** * Set the state of a feature. * * Set the state of any feature in a SAX2 parser. The parser * might not recognize the feature, and if it does recognize * it, it might not be able to fulfill the request. * * @param featureId The unique identifier (URI) of the feature. * @param state The requested state of the feature (true or false). * * @exception XMLConfigurationException If the * requested feature is not known. */ public void setFeature(String featureId, boolean state) throws XMLConfigurationException { fConfigUpdated = true; // forward to every XML 1.0 component fNamespaceScanner.setFeature(featureId, state); fDTDScanner.setFeature(featureId, state); // forward to every XML 1.1 component if (f11Initialized) { try { fXML11DTDScanner.setFeature(featureId, state); } // ignore the exception. catch (Exception e) {} try { fXML11NSDocScanner.setFeature(featureId, state); } // ignore the exception catch (Exception e) {} } // save state if noone "objects" super.setFeature(featureId, state); } // setFeature(String,boolean) /** * Returns the value of a property. * * @param propertyId The property identifier. * @return the value of the property * * @throws XMLConfigurationException Thrown for configuration error. * In general, components should * only throw this exception if * it is <strong>really</strong> * a critical error. 
*/ public PropertyState getPropertyState(String propertyId) throws XMLConfigurationException { if (LOCALE.equals(propertyId)) { return PropertyState.is(getLocale()); } return super.getPropertyState(propertyId); } /** * setProperty * * @param propertyId * @param value */ public void setProperty(String propertyId, Object value) throws XMLConfigurationException { fConfigUpdated = true; if (LOCALE.equals(propertyId)) { setLocale((Locale) value); } // forward to every XML 1.0 component fNamespaceScanner.setProperty(propertyId, value); fDTDScanner.setProperty(propertyId, value); // forward to every XML 1.1 component if (f11Initialized) { try { fXML11DTDScanner.setProperty(propertyId, value); } // ignore the exception. catch (Exception e) {} try { fXML11NSDocScanner.setProperty(propertyId, value); } // ignore the exception catch (Exception e) {} } // store value if noone "objects" super.setProperty(propertyId, value); } // setProperty(String,Object) /** * Set the locale to use for messages. * * @param locale The locale object to use for localization of messages. * * @exception XNIException Thrown if the parser does not support the * specified locale. */ public void setLocale(Locale locale) throws XNIException { super.setLocale(locale); fErrorReporter.setLocale(locale); } // setLocale(Locale) // // XMLPullParserConfiguration methods // // parsing /** * Sets the input source for the document to parse. * * @param inputSource The document's input source. * * @exception XMLConfigurationException Thrown if there is a * configuration error when initializing the * parser. * @exception IOException Thrown on I/O error. * * @see #parse(boolean) */ public void setInputSource(XMLInputSource inputSource) throws XMLConfigurationException, IOException { // REVISIT: this method used to reset all the components and // construct the pipeline. Now reset() is called // in parse (boolean) just before we parse the document // Should this method still throw exceptions..? 
fInputSource = inputSource; } // setInputSource(XMLInputSource) /** * Parses the document in a pull parsing fashion. * * @param complete True if the pull parser should parse the * remaining document completely. * * @return True if there is more document to parse. * * @exception XNIException Any XNI exception, possibly wrapping * another exception. * @exception IOException An IO exception from the parser, possibly * from a byte stream or character stream * supplied by the parser. * * @see #setInputSource */ public boolean parse(boolean complete) throws XNIException, IOException { // // reset and configure pipeline and set InputSource. if (fInputSource != null) { try { fValidationManager.reset(); fVersionDetector.reset(this); reset(); short version = fVersionDetector.determineDocVersion(fInputSource); // XML 1.0 if (version == Constants.XML_VERSION_1_0) { configurePipeline(); resetXML10(); } // XML 1.1 else if (version == Constants.XML_VERSION_1_1) { initXML11Components(); configureXML11Pipeline(); resetXML11(); } // Unrecoverable error reported during version detection else { return false; } // mark configuration as fixed fConfigUpdated = false; // resets and sets the pipeline. 
fVersionDetector.startDocumentParsing((XMLEntityHandler) fCurrentScanner, version); fInputSource = null; } catch (XNIException ex) { if (PRINT_EXCEPTION_STACK_TRACE) ex.printStackTrace(); throw ex; } catch (IOException ex) { if (PRINT_EXCEPTION_STACK_TRACE) ex.printStackTrace(); throw ex; } catch (RuntimeException ex) { if (PRINT_EXCEPTION_STACK_TRACE) ex.printStackTrace(); throw ex; } catch (Exception ex) { if (PRINT_EXCEPTION_STACK_TRACE) ex.printStackTrace(); throw new XNIException(ex); } } try { return fCurrentScanner.scanDocument(complete); } catch (XNIException ex) { if (PRINT_EXCEPTION_STACK_TRACE) ex.printStackTrace(); throw ex; } catch (IOException ex) { if (PRINT_EXCEPTION_STACK_TRACE) ex.printStackTrace(); throw ex; } catch (RuntimeException ex) { if (PRINT_EXCEPTION_STACK_TRACE) ex.printStackTrace(); throw ex; } catch (Exception ex) { if (PRINT_EXCEPTION_STACK_TRACE) ex.printStackTrace(); throw new XNIException(ex); } } // parse(boolean):boolean /** * If the application decides to terminate parsing before the xml document * is fully parsed, the application should call this method to free any * resource allocated during parsing. For example, close all opened streams. */ public void cleanup() { fEntityManager.closeReaders(); } // // XMLParserConfiguration methods // /** * Parses the specified input source. * * @param source The input source. * * @exception XNIException Throws exception on XNI error. * @exception IOException Throws exception on i/o error. 
*/ public void parse(XMLInputSource source) throws XNIException, IOException { if (fParseInProgress) { // REVISIT - need to add new error message throw new XNIException("FWK005 parse may not be called while parsing."); } fParseInProgress = true; try { setInputSource(source); parse(true); } catch (XNIException ex) { if (PRINT_EXCEPTION_STACK_TRACE) ex.printStackTrace(); throw ex; } catch (IOException ex) { if (PRINT_EXCEPTION_STACK_TRACE) ex.printStackTrace(); throw ex; } catch (RuntimeException ex) { if (PRINT_EXCEPTION_STACK_TRACE) ex.printStackTrace(); throw ex; } catch (Exception ex) { if (PRINT_EXCEPTION_STACK_TRACE) ex.printStackTrace(); throw new XNIException(ex); } finally { fParseInProgress = false; // close all streams opened by xerces this.cleanup(); } } // parse(InputSource) // // Protected methods // /** * Reset all components before parsing. * * @throws XNIException Thrown if an error occurs during initialization. */ public void reset() throws XNIException { // initialize the common components super.reset(); } // reset() /** Configures the XML 1.0 pipeline. 
*/ protected void configurePipeline() { if (fCurrentDVFactory != fDatatypeValidatorFactory) { fCurrentDVFactory = fDatatypeValidatorFactory; // use XML 1.0 datatype library setProperty(DATATYPE_VALIDATOR_FACTORY, fCurrentDVFactory); } // setup document pipeline if (fCurrentScanner != fNamespaceScanner) { fCurrentScanner = fNamespaceScanner; setProperty(DOCUMENT_SCANNER, fCurrentScanner); } fNamespaceScanner.setDocumentHandler(fDocumentHandler); if (fDocumentHandler != null) { fDocumentHandler.setDocumentSource(fNamespaceScanner); } fLastComponent = fNamespaceScanner; // setup dtd pipeline if (fCurrentDTDScanner != fDTDScanner) { fCurrentDTDScanner = fDTDScanner; setProperty(DTD_SCANNER, fCurrentDTDScanner); } fDTDScanner.setDTDHandler(fDTDHandler); if (fDTDHandler != null) { fDTDHandler.setDTDSource(fDTDScanner); } fDTDScanner.setDTDContentModelHandler(fDTDContentModelHandler); if (fDTDContentModelHandler != null) { fDTDContentModelHandler.setDTDContentModelSource(fDTDScanner); } } // configurePipeline() /** Configures the XML 1.1 pipeline. 
*/ protected void configureXML11Pipeline() { if (fCurrentDVFactory != fXML11DatatypeFactory) { fCurrentDVFactory = fXML11DatatypeFactory; // use XML 1.1 datatype library setProperty(DATATYPE_VALIDATOR_FACTORY, fCurrentDVFactory); } // setup document pipeline if (fCurrentScanner != fXML11NSDocScanner) { fCurrentScanner = fXML11NSDocScanner; setProperty(DOCUMENT_SCANNER, fCurrentScanner); } fXML11NSDocScanner.setDocumentHandler(fDocumentHandler); if (fDocumentHandler != null) { fDocumentHandler.setDocumentSource(fXML11NSDocScanner); } fLastComponent = fXML11NSDocScanner; // setup dtd pipeline if (fCurrentDTDScanner != fXML11DTDScanner) { fCurrentDTDScanner = fXML11DTDScanner; setProperty(DTD_SCANNER, fCurrentDTDScanner); } fXML11DTDScanner.setDTDHandler(fDTDHandler); if (fDTDHandler != null) { fDTDHandler.setDTDSource(fXML11DTDScanner); } fXML11DTDScanner.setDTDContentModelHandler(fDTDContentModelHandler); if (fDTDContentModelHandler != null) { fDTDContentModelHandler.setDTDContentModelSource(fXML11DTDScanner); } } // configureXML11Pipeline() // features and properties /** * Check a feature. If feature is know and supported, this method simply * returns. Otherwise, the appropriate exception is thrown. * * @param featureId The unique identifier (URI) of the feature. * * @throws XMLConfigurationException Thrown for configuration error. * In general, components should * only throw this exception if * it is <strong>really</strong> * a critical error. */ protected FeatureState checkFeature(String featureId) throws XMLConfigurationException { // // Xerces Features // if (featureId.startsWith(Constants.XERCES_FEATURE_PREFIX)) { final int suffixLength = featureId.length() - Constants.XERCES_FEATURE_PREFIX.length(); // // http://apache.org/xml/features/validation/dynamic // Allows the parser to validate a document only when it // contains a grammar. Validation is turned on/off based // on each document instance, automatically. 
// if (suffixLength == Constants.DYNAMIC_VALIDATION_FEATURE.length() && featureId.endsWith(Constants.DYNAMIC_VALIDATION_FEATURE)) { return FeatureState.RECOGNIZED; } // // http://apache.org/xml/features/validation/default-attribute-values // if (suffixLength == Constants.DEFAULT_ATTRIBUTE_VALUES_FEATURE.length() && featureId.endsWith(Constants.DEFAULT_ATTRIBUTE_VALUES_FEATURE)) { // REVISIT return FeatureState.NOT_SUPPORTED; } // // http://apache.org/xml/features/validation/default-attribute-values // if (suffixLength == Constants.VALIDATE_CONTENT_MODELS_FEATURE.length() && featureId.endsWith(Constants.VALIDATE_CONTENT_MODELS_FEATURE)) { // REVISIT return FeatureState.NOT_SUPPORTED; } // // http://apache.org/xml/features/validation/nonvalidating/load-dtd-grammar // if (suffixLength == Constants.LOAD_DTD_GRAMMAR_FEATURE.length() && featureId.endsWith(Constants.LOAD_DTD_GRAMMAR_FEATURE)) { return FeatureState.RECOGNIZED; } // // http://apache.org/xml/features/validation/nonvalidating/load-external-dtd // if (suffixLength == Constants.LOAD_EXTERNAL_DTD_FEATURE.length() && featureId.endsWith(Constants.LOAD_EXTERNAL_DTD_FEATURE)) { return FeatureState.RECOGNIZED; } // // http://apache.org/xml/features/validation/default-attribute-values // if (suffixLength == Constants.VALIDATE_DATATYPES_FEATURE.length() && featureId.endsWith(Constants.VALIDATE_DATATYPES_FEATURE)) { return FeatureState.NOT_SUPPORTED; } } // // Not recognized // return super.checkFeature(featureId); } // checkFeature(String) /** * Check a property. If the property is know and supported, this method * simply returns. Otherwise, the appropriate exception is thrown. * * @param propertyId The unique identifier (URI) of the property * being set. * * @throws XMLConfigurationException Thrown for configuration error. * In general, components should * only throw this exception if * it is <strong>really</strong> * a critical error. 
*/ protected PropertyState checkProperty(String propertyId) throws XMLConfigurationException { // // Xerces Properties // if (propertyId.startsWith(Constants.XERCES_PROPERTY_PREFIX)) { final int suffixLength = propertyId.length() - Constants.XERCES_PROPERTY_PREFIX.length(); if (suffixLength == Constants.DTD_SCANNER_PROPERTY.length() && propertyId.endsWith(Constants.DTD_SCANNER_PROPERTY)) { return PropertyState.RECOGNIZED; } } if (propertyId.startsWith(Constants.JAXP_PROPERTY_PREFIX)) { final int suffixLength = propertyId.length() - Constants.JAXP_PROPERTY_PREFIX.length(); if (suffixLength == Constants.SCHEMA_SOURCE.length() && propertyId.endsWith(Constants.SCHEMA_SOURCE)) { return PropertyState.RECOGNIZED; } } // // Not recognized // return super.checkProperty(propertyId); } // checkProperty(String) /** * Adds all of the component's recognized features and properties * to the list of default recognized features and properties, and * sets default values on the configuration for features and * properties which were previously absent from the configuration. * * @param component The component whose recognized features * and properties will be added to the configuration */ private void addRecognizedParamsAndSetDefaults(XMLComponent component) { // register component's recognized features String[] recognizedFeatures = component.getRecognizedFeatures(); addRecognizedFeatures(recognizedFeatures); // register component's recognized properties String[] recognizedProperties = component.getRecognizedProperties(); addRecognizedProperties(recognizedProperties); // set default values if (recognizedFeatures != null) { for (int i = 0; i < recognizedFeatures.length; ++i) { String featureId = recognizedFeatures[i]; Boolean state = component.getFeatureDefault(featureId); if (state != null) { // Do not overwrite values already set on the configuration. 
if (!fFeatures.containsKey(featureId)) { fFeatures.put(featureId, state); // For newly added components who recognize this feature // but did not offer a default value, we need to make // sure these components will get an opportunity to read // the value before parsing begins. fConfigUpdated = true; } } } } if (recognizedProperties != null) { for (int i = 0; i < recognizedProperties.length; ++i) { String propertyId = recognizedProperties[i]; Object value = component.getPropertyDefault(propertyId); if (value != null) { // Do not overwrite values already set on the configuration. if (!fProperties.containsKey(propertyId)) { fProperties.put(propertyId, value); // For newly added components who recognize this property // but did not offer a default value, we need to make // sure these components will get an opportunity to read // the value before parsing begins. fConfigUpdated = true; } } } } } /** * Reset all XML 1.0 components before parsing */ protected final void resetXML10() throws XNIException { // Reset XML 1.0 components fNamespaceScanner.reset(this); fDTDScanner.reset(this); } // resetXML10() /** * Reset all XML 1.1 components before parsing */ protected final void resetXML11() throws XNIException { // Reset XML 1.1 components fXML11NSDocScanner.reset(this); fXML11DTDScanner.reset(this); } // resetXML11() // // other methods // /** */ public void resetNodePool() { // REVISIT: to implement: introduce a node pool to reuse DTM nodes. // reset this pool here. } private void initXML11Components() { if (!f11Initialized) { // create datatype factory fXML11DatatypeFactory = DTDDVFactory.getInstance(XML11_DATATYPE_VALIDATOR_FACTORY); // setup XML 1.1 DTD pipeline fXML11DTDScanner = new XML11DTDScannerImpl(); addRecognizedParamsAndSetDefaults(fXML11DTDScanner); // setup XML 1.1. document pipeline - namespace aware fXML11NSDocScanner = new XML11NSDocumentScannerImpl(); addRecognizedParamsAndSetDefaults(fXML11NSDocScanner); f11Initialized = true; } } }
wangsongpeng/jdk-src
src/main/java/com/sun/org/apache/xerces/internal/impl/xs/opti/SchemaParsingConfig.java
Java
apache-2.0
37,653
<!-- Copyright 2015 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> <kd-info-card> <kd-info-card-header>[[Details|Header in a detail view]]</kd-info-card-header> <kd-info-card-section> <kd-object-meta-info-card object-meta="::$ctrl.tprDetail.objectMeta"> </kd-object-meta-info-card> <kd-info-card-entry title="[[Labels|Third party resource info details section labels entry.]]"> <kd-labels labels="::$ctrl.tprDetail.objectMeta.labels"></kd-labels> </kd-info-card-entry> <kd-info-card-entry title="[[Description|Third party resource details description entry.]]"> {{::$ctrl.tprDetail.description}} </kd-info-card-entry> <kd-info-card-entry title="[[Versions|Third party resource details version entries.]]"> <span ng-repeat="version in ::$ctrl.tprDetail.versions"> {{version.name}}{{$last ? '' : ', '}} </span> </kd-info-card-entry> </kd-info-card-section> </kd-info-card>
IanLewis/dashboard
src/app/frontend/thirdpartyresource/detail/info.html
HTML
apache-2.0
1,457
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.connect.runtime.distributed; import org.apache.kafka.clients.CommonClientConfigs; import org.apache.kafka.common.config.ConfigDef; import org.apache.kafka.common.config.ConfigException; import org.apache.kafka.common.utils.Utils; import org.apache.kafka.connect.runtime.WorkerConfig; import javax.crypto.KeyGenerator; import javax.crypto.Mac; import java.security.InvalidParameterException; import java.security.NoSuchAlgorithmException; import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.concurrent.TimeUnit; import static org.apache.kafka.common.config.ConfigDef.Range.atLeast; import static org.apache.kafka.common.config.ConfigDef.Range.between; public class DistributedConfig extends WorkerConfig { /* * NOTE: DO NOT CHANGE EITHER CONFIG STRINGS OR THEIR JAVA VARIABLE NAMES AS * THESE ARE PART OF THE PUBLIC API AND CHANGE WILL BREAK USER CODE. 
*/ /** * <code>group.id</code> */ public static final String GROUP_ID_CONFIG = CommonClientConfigs.GROUP_ID_CONFIG; private static final String GROUP_ID_DOC = "A unique string that identifies the Connect cluster group this worker belongs to."; /** * <code>session.timeout.ms</code> */ public static final String SESSION_TIMEOUT_MS_CONFIG = CommonClientConfigs.SESSION_TIMEOUT_MS_CONFIG; private static final String SESSION_TIMEOUT_MS_DOC = "The timeout used to detect worker failures. " + "The worker sends periodic heartbeats to indicate its liveness to the broker. If no heartbeats are " + "received by the broker before the expiration of this session timeout, then the broker will remove the " + "worker from the group and initiate a rebalance. Note that the value must be in the allowable range as " + "configured in the broker configuration by <code>group.min.session.timeout.ms</code> " + "and <code>group.max.session.timeout.ms</code>."; /** * <code>heartbeat.interval.ms</code> */ public static final String HEARTBEAT_INTERVAL_MS_CONFIG = CommonClientConfigs.HEARTBEAT_INTERVAL_MS_CONFIG; private static final String HEARTBEAT_INTERVAL_MS_DOC = "The expected time between heartbeats to the group " + "coordinator when using Kafka's group management facilities. Heartbeats are used to ensure that the " + "worker's session stays active and to facilitate rebalancing when new members join or leave the group. " + "The value must be set lower than <code>session.timeout.ms</code>, but typically should be set no higher " + "than 1/3 of that value. 
It can be adjusted even lower to control the expected time for normal rebalances."; /** * <code>rebalance.timeout.ms</code> */ public static final String REBALANCE_TIMEOUT_MS_CONFIG = CommonClientConfigs.REBALANCE_TIMEOUT_MS_CONFIG; private static final String REBALANCE_TIMEOUT_MS_DOC = CommonClientConfigs.REBALANCE_TIMEOUT_MS_DOC; /** * <code>worker.sync.timeout.ms</code> */ public static final String WORKER_SYNC_TIMEOUT_MS_CONFIG = "worker.sync.timeout.ms"; private static final String WORKER_SYNC_TIMEOUT_MS_DOC = "When the worker is out of sync with other workers and needs" + " to resynchronize configurations, wait up to this amount of time before giving up, leaving the group, and" + " waiting a backoff period before rejoining."; /** * <code>group.unsync.timeout.ms</code> */ public static final String WORKER_UNSYNC_BACKOFF_MS_CONFIG = "worker.unsync.backoff.ms"; private static final String WORKER_UNSYNC_BACKOFF_MS_DOC = "When the worker is out of sync with other workers and " + " fails to catch up within worker.sync.timeout.ms, leave the Connect cluster for this long before rejoining."; public static final int WORKER_UNSYNC_BACKOFF_MS_DEFAULT = 5 * 60 * 1000; /** * <code>offset.storage.topic</code> */ public static final String OFFSET_STORAGE_TOPIC_CONFIG = "offset.storage.topic"; private static final String OFFSET_STORAGE_TOPIC_CONFIG_DOC = "The name of the Kafka topic where connector offsets are stored"; /** * <code>offset.storage.partitions</code> */ public static final String OFFSET_STORAGE_PARTITIONS_CONFIG = "offset.storage.partitions"; private static final String OFFSET_STORAGE_PARTITIONS_CONFIG_DOC = "The number of partitions used when creating the offset storage topic"; /** * <code>offset.storage.replication.factor</code> */ public static final String OFFSET_STORAGE_REPLICATION_FACTOR_CONFIG = "offset.storage.replication.factor"; private static final String OFFSET_STORAGE_REPLICATION_FACTOR_CONFIG_DOC = "Replication factor used when creating the offset 
storage topic"; /** * <code>config.storage.topic</code> */ public static final String CONFIG_TOPIC_CONFIG = "config.storage.topic"; private static final String CONFIG_TOPIC_CONFIG_DOC = "The name of the Kafka topic where connector configurations are stored"; /** * <code>config.storage.replication.factor</code> */ public static final String CONFIG_STORAGE_REPLICATION_FACTOR_CONFIG = "config.storage.replication.factor"; private static final String CONFIG_STORAGE_REPLICATION_FACTOR_CONFIG_DOC = "Replication factor used when creating the configuration storage topic"; /** * <code>status.storage.topic</code> */ public static final String STATUS_STORAGE_TOPIC_CONFIG = "status.storage.topic"; public static final String STATUS_STORAGE_TOPIC_CONFIG_DOC = "The name of the Kafka topic where connector and task status are stored"; /** * <code>status.storage.partitions</code> */ public static final String STATUS_STORAGE_PARTITIONS_CONFIG = "status.storage.partitions"; private static final String STATUS_STORAGE_PARTITIONS_CONFIG_DOC = "The number of partitions used when creating the status storage topic"; /** * <code>status.storage.replication.factor</code> */ public static final String STATUS_STORAGE_REPLICATION_FACTOR_CONFIG = "status.storage.replication.factor"; private static final String STATUS_STORAGE_REPLICATION_FACTOR_CONFIG_DOC = "Replication factor used when creating the status storage topic"; /** * <code>connect.protocol</code> */ public static final String CONNECT_PROTOCOL_CONFIG = "connect.protocol"; public static final String CONNECT_PROTOCOL_DOC = "Compatibility mode for Kafka Connect Protocol"; public static final String CONNECT_PROTOCOL_DEFAULT = ConnectProtocolCompatibility.SESSIONED.toString(); /** * <code>connect.protocol</code> */ public static final String SCHEDULED_REBALANCE_MAX_DELAY_MS_CONFIG = "scheduled.rebalance.max.delay.ms"; public static final String SCHEDULED_REBALANCE_MAX_DELAY_MS_DOC = "The maximum delay that is " + "scheduled in order to wait for 
the return of one or more departed workers before " + "rebalancing and reassigning their connectors and tasks to the group. During this " + "period the connectors and tasks of the departed workers remain unassigned"; public static final int SCHEDULED_REBALANCE_MAX_DELAY_MS_DEFAULT = Math.toIntExact(TimeUnit.SECONDS.toMillis(300)); public static final String INTER_WORKER_KEY_GENERATION_ALGORITHM_CONFIG = "inter.worker.key.generation.algorithm"; public static final String INTER_WORKER_KEY_GENERATION_ALGORITHM_DOC = "The algorithm to use for generating internal request keys"; public static final String INTER_WORKER_KEY_GENERATION_ALGORITHM_DEFAULT = "HmacSHA256"; public static final String INTER_WORKER_KEY_SIZE_CONFIG = "inter.worker.key.size"; public static final String INTER_WORKER_KEY_SIZE_DOC = "The size of the key to use for signing internal requests, in bits. " + "If null, the default key size for the key generation algorithm will be used."; public static final Long INTER_WORKER_KEY_SIZE_DEFAULT = null; public static final String INTER_WORKER_KEY_TTL_MS_CONFIG = "inter.worker.key.ttl.ms"; public static final String INTER_WORKER_KEY_TTL_MS_MS_DOC = "The TTL of generated session keys used for " + "internal request validation (in milliseconds)"; public static final int INTER_WORKER_KEY_TTL_MS_MS_DEFAULT = Math.toIntExact(TimeUnit.HOURS.toMillis(1)); public static final String INTER_WORKER_SIGNATURE_ALGORITHM_CONFIG = "inter.worker.signature.algorithm"; public static final String INTER_WORKER_SIGNATURE_ALGORITHM_DOC = "The algorithm used to sign internal requests"; public static final String INTER_WORKER_SIGNATURE_ALGORITHM_DEFAULT = "HmacSHA256"; public static final String INTER_WORKER_VERIFICATION_ALGORITHMS_CONFIG = "inter.worker.verification.algorithms"; public static final String INTER_WORKER_VERIFICATION_ALGORITHMS_DOC = "A list of permitted algorithms for verifying internal requests"; public static final List<String> 
INTER_WORKER_VERIFICATION_ALGORITHMS_DEFAULT = Collections.singletonList(INTER_WORKER_SIGNATURE_ALGORITHM_DEFAULT); @SuppressWarnings("unchecked") private static final ConfigDef CONFIG = baseConfigDef() .define(GROUP_ID_CONFIG, ConfigDef.Type.STRING, ConfigDef.Importance.HIGH, GROUP_ID_DOC) .define(SESSION_TIMEOUT_MS_CONFIG, ConfigDef.Type.INT, Math.toIntExact(TimeUnit.SECONDS.toMillis(10)), ConfigDef.Importance.HIGH, SESSION_TIMEOUT_MS_DOC) .define(REBALANCE_TIMEOUT_MS_CONFIG, ConfigDef.Type.INT, Math.toIntExact(TimeUnit.MINUTES.toMillis(1)), ConfigDef.Importance.HIGH, REBALANCE_TIMEOUT_MS_DOC) .define(HEARTBEAT_INTERVAL_MS_CONFIG, ConfigDef.Type.INT, Math.toIntExact(TimeUnit.SECONDS.toMillis(3)), ConfigDef.Importance.HIGH, HEARTBEAT_INTERVAL_MS_DOC) .define(CommonClientConfigs.METADATA_MAX_AGE_CONFIG, ConfigDef.Type.LONG, TimeUnit.MINUTES.toMillis(5), atLeast(0), ConfigDef.Importance.LOW, CommonClientConfigs.METADATA_MAX_AGE_DOC) .define(CommonClientConfigs.CLIENT_ID_CONFIG, ConfigDef.Type.STRING, "", ConfigDef.Importance.LOW, CommonClientConfigs.CLIENT_ID_DOC) .define(CommonClientConfigs.SEND_BUFFER_CONFIG, ConfigDef.Type.INT, 128 * 1024, atLeast(0), ConfigDef.Importance.MEDIUM, CommonClientConfigs.SEND_BUFFER_DOC) .define(CommonClientConfigs.RECEIVE_BUFFER_CONFIG, ConfigDef.Type.INT, 32 * 1024, atLeast(0), ConfigDef.Importance.MEDIUM, CommonClientConfigs.RECEIVE_BUFFER_DOC) .define(CommonClientConfigs.RECONNECT_BACKOFF_MS_CONFIG, ConfigDef.Type.LONG, 50L, atLeast(0L), ConfigDef.Importance.LOW, CommonClientConfigs.RECONNECT_BACKOFF_MS_DOC) .define(CommonClientConfigs.RECONNECT_BACKOFF_MAX_MS_CONFIG, ConfigDef.Type.LONG, TimeUnit.SECONDS.toMillis(1), atLeast(0L), ConfigDef.Importance.LOW, CommonClientConfigs.RECONNECT_BACKOFF_MAX_MS_DOC) .define(CommonClientConfigs.RETRY_BACKOFF_MS_CONFIG, ConfigDef.Type.LONG, 100L, atLeast(0L), ConfigDef.Importance.LOW, CommonClientConfigs.RETRY_BACKOFF_MS_DOC) .define(CommonClientConfigs.REQUEST_TIMEOUT_MS_CONFIG, 
ConfigDef.Type.INT, Math.toIntExact(TimeUnit.SECONDS.toMillis(40)), atLeast(0), ConfigDef.Importance.MEDIUM, CommonClientConfigs.REQUEST_TIMEOUT_MS_DOC) /* default is set to be a bit lower than the server default (10 min), to avoid both client and server closing connection at same time */ .define(CommonClientConfigs.CONNECTIONS_MAX_IDLE_MS_CONFIG, ConfigDef.Type.LONG, TimeUnit.MINUTES.toMillis(9), ConfigDef.Importance.MEDIUM, CommonClientConfigs.CONNECTIONS_MAX_IDLE_MS_DOC) // security support .define(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, ConfigDef.Type.STRING, CommonClientConfigs.DEFAULT_SECURITY_PROTOCOL, ConfigDef.Importance.MEDIUM, CommonClientConfigs.SECURITY_PROTOCOL_DOC) .withClientSslSupport() .withClientSaslSupport() .define(WORKER_SYNC_TIMEOUT_MS_CONFIG, ConfigDef.Type.INT, 3000, ConfigDef.Importance.MEDIUM, WORKER_SYNC_TIMEOUT_MS_DOC) .define(WORKER_UNSYNC_BACKOFF_MS_CONFIG, ConfigDef.Type.INT, WORKER_UNSYNC_BACKOFF_MS_DEFAULT, ConfigDef.Importance.MEDIUM, WORKER_UNSYNC_BACKOFF_MS_DOC) .define(OFFSET_STORAGE_TOPIC_CONFIG, ConfigDef.Type.STRING, ConfigDef.Importance.HIGH, OFFSET_STORAGE_TOPIC_CONFIG_DOC) .define(OFFSET_STORAGE_PARTITIONS_CONFIG, ConfigDef.Type.INT, 25, atLeast(1), ConfigDef.Importance.LOW, OFFSET_STORAGE_PARTITIONS_CONFIG_DOC) .define(OFFSET_STORAGE_REPLICATION_FACTOR_CONFIG, ConfigDef.Type.SHORT, (short) 3, atLeast(1), ConfigDef.Importance.LOW, OFFSET_STORAGE_REPLICATION_FACTOR_CONFIG_DOC) .define(CONFIG_TOPIC_CONFIG, ConfigDef.Type.STRING, ConfigDef.Importance.HIGH, CONFIG_TOPIC_CONFIG_DOC) .define(CONFIG_STORAGE_REPLICATION_FACTOR_CONFIG, ConfigDef.Type.SHORT, (short) 3, atLeast(1), ConfigDef.Importance.LOW, CONFIG_STORAGE_REPLICATION_FACTOR_CONFIG_DOC) .define(STATUS_STORAGE_TOPIC_CONFIG, ConfigDef.Type.STRING, ConfigDef.Importance.HIGH, STATUS_STORAGE_TOPIC_CONFIG_DOC) .define(STATUS_STORAGE_PARTITIONS_CONFIG, ConfigDef.Type.INT, 5, atLeast(1), ConfigDef.Importance.LOW, STATUS_STORAGE_PARTITIONS_CONFIG_DOC) 
.define(STATUS_STORAGE_REPLICATION_FACTOR_CONFIG, ConfigDef.Type.SHORT, (short) 3, atLeast(1), ConfigDef.Importance.LOW, STATUS_STORAGE_REPLICATION_FACTOR_CONFIG_DOC) .define(CONNECT_PROTOCOL_CONFIG, ConfigDef.Type.STRING, CONNECT_PROTOCOL_DEFAULT, ConfigDef.LambdaValidator.with( (name, value) -> { try { ConnectProtocolCompatibility.compatibility((String) value); } catch (Throwable t) { throw new ConfigException(name, value, "Invalid Connect protocol " + "compatibility"); } }, () -> "[" + Utils.join(ConnectProtocolCompatibility.values(), ", ") + "]"), ConfigDef.Importance.LOW, CONNECT_PROTOCOL_DOC) .define(SCHEDULED_REBALANCE_MAX_DELAY_MS_CONFIG, ConfigDef.Type.INT, SCHEDULED_REBALANCE_MAX_DELAY_MS_DEFAULT, between(0, Integer.MAX_VALUE), ConfigDef.Importance.LOW, SCHEDULED_REBALANCE_MAX_DELAY_MS_DOC) .define(INTER_WORKER_KEY_TTL_MS_CONFIG, ConfigDef.Type.INT, INTER_WORKER_KEY_TTL_MS_MS_DEFAULT, between(0, Integer.MAX_VALUE), ConfigDef.Importance.LOW, INTER_WORKER_KEY_TTL_MS_MS_DOC) .define(INTER_WORKER_KEY_GENERATION_ALGORITHM_CONFIG, ConfigDef.Type.STRING, INTER_WORKER_KEY_GENERATION_ALGORITHM_DEFAULT, ConfigDef.LambdaValidator.with( (name, value) -> validateKeyAlgorithm(name, (String) value), () -> "Any KeyGenerator algorithm supported by the worker JVM" ), ConfigDef.Importance.LOW, INTER_WORKER_KEY_GENERATION_ALGORITHM_DOC) .define(INTER_WORKER_KEY_SIZE_CONFIG, ConfigDef.Type.INT, INTER_WORKER_KEY_SIZE_DEFAULT, ConfigDef.Importance.LOW, INTER_WORKER_KEY_SIZE_DOC) .define(INTER_WORKER_SIGNATURE_ALGORITHM_CONFIG, ConfigDef.Type.STRING, INTER_WORKER_SIGNATURE_ALGORITHM_DEFAULT, ConfigDef.LambdaValidator.with( (name, value) -> validateSignatureAlgorithm(name, (String) value), () -> "Any MAC algorithm supported by the worker JVM"), ConfigDef.Importance.LOW, INTER_WORKER_SIGNATURE_ALGORITHM_DOC) .define(INTER_WORKER_VERIFICATION_ALGORITHMS_CONFIG, ConfigDef.Type.LIST, INTER_WORKER_VERIFICATION_ALGORITHMS_DEFAULT, ConfigDef.LambdaValidator.with( (name, value) -> 
validateSignatureAlgorithms(name, (List<String>) value), () -> "A list of one or more MAC algorithms, each supported by the worker JVM" ), ConfigDef.Importance.LOW, INTER_WORKER_VERIFICATION_ALGORITHMS_DOC); @Override public Integer getRebalanceTimeout() { return getInt(DistributedConfig.REBALANCE_TIMEOUT_MS_CONFIG); } public DistributedConfig(Map<String, String> props) { super(CONFIG, props); getInternalRequestKeyGenerator(); // Check here for a valid key size + key algorithm to fail fast if either are invalid validateKeyAlgorithmAndVerificationAlgorithms(); } public static void main(String[] args) { System.out.println(CONFIG.toHtml()); } public KeyGenerator getInternalRequestKeyGenerator() { try { KeyGenerator result = KeyGenerator.getInstance(getString(INTER_WORKER_KEY_GENERATION_ALGORITHM_CONFIG)); Optional.ofNullable(getInt(INTER_WORKER_KEY_SIZE_CONFIG)).ifPresent(result::init); return result; } catch (NoSuchAlgorithmException | InvalidParameterException e) { throw new ConfigException(String.format( "Unable to create key generator with algorithm %s and key size %d: %s", getString(INTER_WORKER_KEY_GENERATION_ALGORITHM_CONFIG), getInt(INTER_WORKER_KEY_SIZE_CONFIG), e.getMessage() )); } } private void validateKeyAlgorithmAndVerificationAlgorithms() { String keyAlgorithm = getString(INTER_WORKER_KEY_GENERATION_ALGORITHM_CONFIG); List<String> verificationAlgorithms = getList(INTER_WORKER_VERIFICATION_ALGORITHMS_CONFIG); if (!verificationAlgorithms.contains(keyAlgorithm)) { throw new ConfigException( INTER_WORKER_KEY_GENERATION_ALGORITHM_CONFIG, keyAlgorithm, String.format("Key generation algorithm must be present in %s list", INTER_WORKER_VERIFICATION_ALGORITHMS_CONFIG) ); } } private static void validateSignatureAlgorithms(String configName, List<String> algorithms) { if (algorithms.isEmpty()) { throw new ConfigException( configName, algorithms, "At least one signature verification algorithm must be provided" ); } algorithms.forEach(algorithm -> 
validateSignatureAlgorithm(configName, algorithm)); } private static void validateSignatureAlgorithm(String configName, String algorithm) { try { Mac.getInstance(algorithm); } catch (NoSuchAlgorithmException e) { throw new ConfigException(configName, algorithm, e.getMessage()); } } private static void validateKeyAlgorithm(String configName, String algorithm) { try { KeyGenerator.getInstance(algorithm); } catch (NoSuchAlgorithmException e) { throw new ConfigException(configName, algorithm, e.getMessage()); } } }
sslavic/kafka
connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/DistributedConfig.java
Java
apache-2.0
22,574
// Doxygen-generated search index fragment for the DynamixelSDK C++ docs.
// Maps the lowercased search key 'factoryreset' to the display name
// 'factoryReset' plus one entry per declaring class
// (dynamixel::PacketHandler, Protocol1PacketHandler, Protocol2PacketHandler).
// Each inner entry is [member-anchor-href, flag, qualified-name]; the flag is
// presumably a doxygen display/visibility marker — TODO confirm against the
// doxygen search-engine script. Generated file: do not edit by hand,
// regenerate with doxygen instead.
var searchData=
[
  ['factoryreset',['factoryReset',['../classdynamixel_1_1PacketHandler.html#abb37963e8da27d32cae5abb0226f37d8',1,'dynamixel::PacketHandler::factoryReset()'],['../classdynamixel_1_1Protocol1PacketHandler.html#a38c23dc0fffdad1444c1bba81cd40552',1,'dynamixel::Protocol1PacketHandler::factoryReset()'],['../classdynamixel_1_1Protocol2PacketHandler.html#a291f10ad0f09de007caf24b8067f4dde',1,'dynamixel::Protocol2PacketHandler::factoryReset()']]]
];
ROBOTIS-GIT/DynamixelSDK
documents/cpp/html/search/functions_3.js
JavaScript
apache-2.0
462
/* * Copyright 2004-2010 the Seasar Foundation and the Others. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, * either express or implied. See the License for the specific language * governing permissions and limitations under the License. */ package org.seasar.doma.internal.jdbc.dialect; import org.seasar.doma.internal.jdbc.sql.node.AnonymousNode; import org.seasar.doma.internal.jdbc.sql.node.FragmentNode; import org.seasar.doma.internal.jdbc.sql.node.SelectClauseNode; import org.seasar.doma.internal.jdbc.sql.node.SelectStatementNode; import org.seasar.doma.jdbc.SqlNode; /** * @author taedium * */ public class Mssql2008PagingTransformer extends StandardPagingTransformer { public Mssql2008PagingTransformer(long offset, long limit) { super(offset, limit); } @Override public SqlNode transform(SqlNode sqlNode) { AnonymousNode result = new AnonymousNode(); for (SqlNode child : sqlNode.getChildren()) { result.appendNode(child.accept(this, null)); } return result; } @Override public SqlNode visitSelectStatementNode(SelectStatementNode node, Void p) { if (processed) { return node; } if (offset > 0) { return super.visitSelectStatementNode(node, p); } processed = true; return appendTopNode(node); } protected SqlNode appendTopNode(SelectStatementNode node) { SelectClauseNode select = new SelectClauseNode(node .getSelectClauseNode().getWordNode()); select.appendNode(new FragmentNode(" top (" + limit + ")")); for (SqlNode child : node.getSelectClauseNode().getChildren()) { select.appendNode(child); } SelectStatementNode result = new SelectStatementNode(); result.setSelectClauseNode(select); 
result.setFromClauseNode(node.getFromClauseNode()); result.setWhereClauseNode(node.getWhereClauseNode()); result.setGroupByClauseNode(node.getGroupByClauseNode()); result.setHavingClauseNode(node.getHavingClauseNode()); result.setOrderByClauseNode(node.getOrderByClauseNode()); result.setForUpdateClauseNode(node.getForUpdateClauseNode()); result.setOptionClauseNode(node.getOptionClauseNode()); return result; } }
backpaper0/doma2
src/main/java/org/seasar/doma/internal/jdbc/dialect/Mssql2008PagingTransformer.java
Java
apache-2.0
2,719
// Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Copyright 2007 Google Inc. All Rights Reserved. /** * @fileoverview Emoji Palette implementation. This provides a UI widget for * choosing an emoji from a palette of possible choices. EmojiPalettes are * contained within EmojiPickers. * * See ../demos/popupemojipicker.html for an example of how to instantiate * an emoji picker. * * Based on goog.ui.ColorPicker (colorpicker.js). * */ goog.provide('goog.ui.emoji.EmojiPalette'); goog.require('goog.events.Event'); goog.require('goog.events.EventType'); goog.require('goog.net.ImageLoader'); goog.require('goog.ui.Palette'); goog.require('goog.ui.emoji.Emoji'); goog.require('goog.ui.emoji.EmojiPaletteRenderer'); /** * A page of emoji to be displayed in an EmojiPicker. * * @param {Array.<Array>} emoji List of emoji for this page. * @param {?string=} opt_urlPrefix Prefix that should be prepended to all URL. * @param {goog.ui.PaletteRenderer=} opt_renderer Renderer used to render or * decorate the palette; defaults to {@link goog.ui.PaletteRenderer}. * @param {goog.dom.DomHelper=} opt_domHelper Optional DOM helper. * @extends {goog.ui.Palette} * @constructor */ goog.ui.emoji.EmojiPalette = function(emoji, opt_urlPrefix, opt_renderer, opt_domHelper) { goog.ui.Palette.call(this, null, opt_renderer || new goog.ui.emoji.EmojiPaletteRenderer(null), opt_domHelper); /** * All the different emoji that this palette can display. 
Maps emoji ids * (string) to the goog.ui.emoji.Emoji for that id. * * @type {Object} * @private */ this.emojiCells_ = {}; /** * Map of emoji id to index into this.emojiCells_. * * @type {Object} * @private */ this.emojiMap_ = {}; /** * List of the animated emoji in this palette. Each internal array is of type * [HTMLDivElement, goog.ui.emoji.Emoji], and represents the palette item * for that animated emoji, and the Emoji object. * * @type {Array.<Array.<HTMLDivElement, goog.ui.emoji.Emoji>>} * @private */ this.animatedEmoji_ = []; this.urlPrefix_ = opt_urlPrefix || ''; /** * Palette items that are displayed on this page of the emoji picker. Each * item is a div wrapped around a div or an img. * * @type {Array.<HTMLDivElement>} * @private */ this.emoji_ = this.getEmojiArrayFromProperties_(emoji); /** * The renderer for this emoji palette. * * @type {goog.ui.ControlRenderer|undefined} * @private */ this.renderer_ = this.getRenderer(); this.setContent(this.emoji_); }; goog.inherits(goog.ui.emoji.EmojiPalette, goog.ui.Palette); /** * Indicates a prefix that should be prepended to all URLs of images in this * emojipalette. This provides an optimization if the URLs are long, so that * the client does not have to send a long string for each emoji. * * @type {string} * @private */ goog.ui.emoji.EmojiPalette.prototype.urlPrefix_ = ''; /** * Whether the emoji images have been loaded. * * @type {boolean} * @private */ goog.ui.emoji.EmojiPalette.prototype.imagesLoaded_ = false; /** * Image loader for loading animated emoji. * * @type {goog.net.ImageLoader} * @private */ goog.ui.emoji.EmojiPalette.prototype.imageLoader_; /** * Helps create an array of emoji palette items from an array of emoji * properties. Each element will be either a div with background-image set to * a sprite, or an img element pointing directly to an emoji, and all elements * are wrapped with an outer div for alignment issues (i.e., this allows * centering the inner div). 
* * @param {Object} emojiGroup The group of emoji for this page. * @return {Array.<HTMLDivElement>} The emoji items. * @private */ goog.ui.emoji.EmojiPalette.prototype.getEmojiArrayFromProperties_ = function(emojiGroup) { var emojiItems = []; for (var i = 0; i < emojiGroup.length; i++) { var url = emojiGroup[i][0]; var id = emojiGroup[i][1]; var spriteInfo = emojiGroup[i][2]; var displayUrl = spriteInfo ? spriteInfo.getUrl() : this.urlPrefix_ + url; var item = this.renderer_.createPaletteItem(this.getDomHelper(), id, spriteInfo, displayUrl); emojiItems.push(item); var emoji = new goog.ui.emoji.Emoji(url, id); this.emojiCells_[id] = emoji; this.emojiMap_[id] = i; // Keep track of sprited emoji that are animated, for later loading. if (spriteInfo && spriteInfo.isAnimated()) { this.animatedEmoji_.push([item, emoji]); } } // Create the image loader now so that tests can access it before it has // started loading images. if (this.animatedEmoji_.length > 0) { this.imageLoader_ = new goog.net.ImageLoader(); } this.imagesLoaded_ = true; return emojiItems; }; /** * Sends off requests for all the animated emoji and replaces their static * sprites when the images are done downloading. */ goog.ui.emoji.EmojiPalette.prototype.loadAnimatedEmoji = function() { if (this.animatedEmoji_.length > 0) { for (var i = 0; i < this.animatedEmoji_.length; i++) { var paletteItem = this.animatedEmoji_[i][0]; var emoji = this.animatedEmoji_[i][1]; var url = this.urlPrefix_ + emoji.getUrl(); this.imageLoader_.addImage(emoji.getId(), url); } this.getHandler().listen(this.imageLoader_, goog.events.EventType.LOAD, this.handleImageLoad_); this.imageLoader_.start(); } }; /** * Handles image load events from the ImageLoader. * * @param {goog.events.Event} e The event object. 
* @private */ goog.ui.emoji.EmojiPalette.prototype.handleImageLoad_ = function(e) { var id = e.target.id; var url = e.target.src; // Just to be safe, we check to make sure we have an id and src url from // the event target, which the ImageLoader sets to an Image object. if (id && url) { var item = this.emoji_[this.emojiMap_[id]]; if (item) { this.getRenderer().updateAnimatedPaletteItem(item, e.target); } } }; /** * Returns the image loader that this palette uses. Used for testing. * * @return {goog.net.ImageLoader} the image loader. */ goog.ui.emoji.EmojiPalette.prototype.getImageLoader = function() { return this.imageLoader_; }; /** @inheritDoc */ goog.ui.emoji.EmojiPalette.prototype.disposeInternal = function() { goog.ui.emoji.EmojiPalette.superClass_.disposeInternal.call(this); if (this.imageLoader_) { this.imageLoader_.dispose(); this.imageLoader_ = null; } this.animatedEmoji_ = null; this.emojiCells_ = null; this.emojiMap_ = null; this.emoji_ = null; }; /** * Returns a goomoji id from an img or the containing td, or null if none * exists for that element. * * @param {Element} el The element to get the Goomoji id from. * @return {?string} A goomoji id from an img or the containing td, or null if * none exists for that element. * @private */ goog.ui.emoji.EmojiPalette.prototype.getGoomojiIdFromElement_ = function(el) { if (!el) { return null; } var item = this.getRenderer().getContainingItem(this, el); return item ? item.getAttribute('goomoji') : null; }; /** * @return {goog.ui.emoji.Emoji} The currently selected emoji from this palette. */ goog.ui.emoji.EmojiPalette.prototype.getSelectedEmoji = function() { var elem = /** @type {Element} */ (this.getSelectedItem()); var goomojiId = this.getGoomojiIdFromElement_(elem); return this.emojiCells_[goomojiId]; }; /** * @return {number} The number of emoji managed by this palette. 
*/ goog.ui.emoji.EmojiPalette.prototype.getNumberOfEmoji = function() { return this.emojiCells_.length; }; /** * Returns the index of the specified emoji within this palette. * * @param {string} id Id of the emoji to look up. * @return {number} The index of the specified emoji within this palette. */ goog.ui.emoji.EmojiPalette.prototype.getEmojiIndex = function(id) { return this.emojiMap_[id]; };
Sage-Bionetworks/SynapseWebClient
src/main/webapp/js/goog/ui/emoji/emojipalette.js
JavaScript
apache-2.0
8,757
/* * Copyright (c) 2013-2017 Cinchapi Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.cinchapi.concourse.lang; import com.cinchapi.concourse.Timestamp; /** * A {@link Symbol} that represents a {@link Timestamp} in a {@link Criteria}. * * @author Jeff Nelson */ public class TimestampSymbol extends AbstractSymbol { /** * Return the {@link TimestampSymbol} for the specified {@code timestamp}. * * @param timestamp * @return the Symbol */ public static TimestampSymbol create(Timestamp timestamp) { return new TimestampSymbol(timestamp); } /** * Return the {@link TimestampSymbol} for the specified {@code timestamp}. * * @param timestamp * @return the Symbol */ public static TimestampSymbol create(long timestamp) { return new TimestampSymbol(timestamp); } /** * Return the {@link TimestampSymbol} that is parsed from {@code string}. * * @param string * @return the Symbol */ public static TimestampSymbol parse(String string) { return new TimestampSymbol(Long.parseLong(string.replace("at ", ""))); } /** * The associated timestamp. */ private final long timestamp; /** * Construct a new instance. * * @param timestamp */ private TimestampSymbol(long timestamp) { this.timestamp = timestamp; } /** * Construct a new instance. * * @param timestamp */ private TimestampSymbol(Timestamp timestamp) { this(timestamp.getMicros()); } /** * Return the timestamp (in microseconds) associated with this Symbol. 
* * @return the Timestamp */ public long getTimestamp() { return timestamp; } @Override public String toString() { return "at " + Long.toString(timestamp); } }
dubex/concourse
concourse-driver-java/src/main/java/com/cinchapi/concourse/lang/TimestampSymbol.java
Java
apache-2.0
2,410
<?php /* * Copyright 2014 Google Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ namespace Google\Service\AIPlatformNotebooks; class Runtime extends \Google\Model { protected $accessConfigType = RuntimeAccessConfig::class; protected $accessConfigDataType = ''; /** * @var string */ public $createTime; /** * @var string */ public $healthState; protected $metricsType = RuntimeMetrics::class; protected $metricsDataType = ''; /** * @var string */ public $name; protected $softwareConfigType = RuntimeSoftwareConfig::class; protected $softwareConfigDataType = ''; /** * @var string */ public $state; /** * @var string */ public $updateTime; protected $virtualMachineType = VirtualMachine::class; protected $virtualMachineDataType = ''; /** * @param RuntimeAccessConfig */ public function setAccessConfig(RuntimeAccessConfig $accessConfig) { $this->accessConfig = $accessConfig; } /** * @return RuntimeAccessConfig */ public function getAccessConfig() { return $this->accessConfig; } /** * @param string */ public function setCreateTime($createTime) { $this->createTime = $createTime; } /** * @return string */ public function getCreateTime() { return $this->createTime; } /** * @param string */ public function setHealthState($healthState) { $this->healthState = $healthState; } /** * @return string */ public function getHealthState() { return $this->healthState; } /** * @param RuntimeMetrics */ public function setMetrics(RuntimeMetrics $metrics) { $this->metrics = $metrics; } /** * @return 
RuntimeMetrics */ public function getMetrics() { return $this->metrics; } /** * @param string */ public function setName($name) { $this->name = $name; } /** * @return string */ public function getName() { return $this->name; } /** * @param RuntimeSoftwareConfig */ public function setSoftwareConfig(RuntimeSoftwareConfig $softwareConfig) { $this->softwareConfig = $softwareConfig; } /** * @return RuntimeSoftwareConfig */ public function getSoftwareConfig() { return $this->softwareConfig; } /** * @param string */ public function setState($state) { $this->state = $state; } /** * @return string */ public function getState() { return $this->state; } /** * @param string */ public function setUpdateTime($updateTime) { $this->updateTime = $updateTime; } /** * @return string */ public function getUpdateTime() { return $this->updateTime; } /** * @param VirtualMachine */ public function setVirtualMachine(VirtualMachine $virtualMachine) { $this->virtualMachine = $virtualMachine; } /** * @return VirtualMachine */ public function getVirtualMachine() { return $this->virtualMachine; } } // Adding a class alias for backwards compatibility with the previous class name. class_alias(Runtime::class, 'Google_Service_AIPlatformNotebooks_Runtime');
googleapis/google-api-php-client-services
src/AIPlatformNotebooks/Runtime.php
PHP
apache-2.0
3,646
package com.aqua.sanity.fixures; import jsystem.framework.fixture.Fixture; public class Fixture5 extends Fixture { public static int VALUE = -1; public Fixture5(){ setParentFixture(Fixture4.class); } public void setUp() throws Exception{ VALUE = 1; } public void tearDown() throws Exception{ VALUE=0; } }
liorgins/old-regression-maven-compat
tests/com/aqua/sanity/fixures/Fixture5.java
Java
apache-2.0
331
// Copyright (C) 2012 The Android Open Source Project // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package com.google.gerrit.extensions.registration; import static java.util.Objects.requireNonNull; import com.google.gerrit.extensions.annotations.Export; import com.google.inject.Key; import com.google.inject.Provider; /** <b>DO NOT USE</b> */ public class PrivateInternals_DynamicMapImpl<T> extends DynamicMap<T> { PrivateInternals_DynamicMapImpl() {} /** * Store one new element into the map. * * @param pluginName unique name of the plugin providing the export. * @param exportName name the plugin has exported the item as. * @param item the item to add to the collection. Must not be null. * @return handle to remove the item at a later point in time. */ public RegistrationHandle put(String pluginName, String exportName, Provider<T> item) { requireNonNull(item); final NamePair key = new NamePair(pluginName, exportName); items.put(key, item); return () -> items.remove(key, item); } /** * Store one new element that may be hot-replaceable in the future. * * @param pluginName unique name of the plugin providing the export. * @param key unique description from the item's Guice binding. This can be later obtained from * the registration handle to facilitate matching with the new equivalent instance during a * hot reload. The key must use an {@link Export} annotation. * @param item the item to add to the collection right now. Must not be null. 
* @return a handle that can remove this item later, or hot-swap the item without it ever leaving * the collection. */ public ReloadableRegistrationHandle<T> put(String pluginName, Key<T> key, Provider<T> item) { requireNonNull(item); String exportName = ((Export) key.getAnnotation()).value(); NamePair np = new NamePair(pluginName, exportName); items.put(np, item); return new ReloadableHandle(np, key, item); } private class ReloadableHandle implements ReloadableRegistrationHandle<T> { private final NamePair np; private final Key<T> key; private final Provider<T> item; ReloadableHandle(NamePair np, Key<T> key, Provider<T> item) { this.np = np; this.key = key; this.item = item; } @Override public void remove() { items.remove(np, item); } @Override public Key<T> getKey() { return key; } @Override public ReloadableHandle replace(Key<T> newKey, Provider<T> newItem) { if (items.replace(np, item, newItem)) { return new ReloadableHandle(np, newKey, newItem); } return null; } } }
qtproject/qtqa-gerrit
java/com/google/gerrit/extensions/registration/PrivateInternals_DynamicMapImpl.java
Java
apache-2.0
3,189
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> <!-- NewPage --> <html lang="en"> <head> <!-- Generated by javadoc (1.8.0_60) on Mon Mar 28 17:12:03 AEST 2016 --> <title>org.apache.river.norm.lookup (River-Internet vtrunk API Documentation (internals))</title> <meta name="date" content="2016-03-28"> <link rel="stylesheet" type="text/css" href="../../../../../stylesheet.css" title="Style"> <script type="text/javascript" src="../../../../../script.js"></script> </head> <body> <h1 class="bar"><a href="../../../../../org/apache/river/norm/lookup/package-summary.html" target="classFrame">org.apache.river.norm.lookup</a></h1> <div class="indexContainer"> <h2 title="Interfaces">Interfaces</h2> <ul title="Interfaces"> <li><a href="SubStore.html" title="interface in org.apache.river.norm.lookup" target="classFrame"><span class="interfaceName">SubStore</span></a></li> </ul> <h2 title="Classes">Classes</h2> <ul title="Classes"> <li><a href="JoinState.html" title="class in org.apache.river.norm.lookup" target="classFrame">JoinState</a></li> </ul> </div> </body> </html>
pfirmstone/JGDMS
JGDMS/src/site/resources/old-static-site/doc/internals/org/apache/river/norm/lookup/package-frame.html
HTML
apache-2.0
1,129
/* * Copyright (c) 2010, WSO2 Inc. (http://www.wso2.org) All Rights Reserved. * * WSO2 Inc. licenses this file to you under the Apache License, * Version 2.0 (the "License"); you may not use this file except * in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.wso2.carbon.security.util; import org.apache.axiom.om.OMAttribute; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.wso2.carbon.core.Resources; import org.wso2.carbon.core.persistence.PersistenceDataNotFoundException; import org.wso2.carbon.core.persistence.PersistenceUtils; import org.wso2.carbon.core.persistence.file.ServiceGroupFilePersistenceManager; import org.wso2.carbon.user.core.UserRealm; import org.wso2.carbon.user.core.UserStoreException; import java.util.ArrayList; import java.util.List; /** * @deprecated Not for public use, has been replaced. */ @Deprecated public class SecurityPersistenceUtils { private static final Log log = LogFactory.getLog(SecurityPersistenceUtils.class); /** * @param serviceGroupId serviceGroupId * @param serviceId serviceId * @param realm realm * @param tenantAwareUserName tenantAwareUserName * @param permissionType Probably UserCoreConstants.INVOKE_SERVICE_PERMISSION is all you need for this * @param serviceGroupFilePM serviceGroupFilePM * @return false if any of the roles of user does not have permission to access it or no roles assigned for the service. 
* @throws UserStoreException * @deprecated do not use this method */ public static boolean isUserAuthorized( String serviceGroupId, String serviceId, UserRealm realm, String tenantAwareUserName, String permissionType, ServiceGroupFilePersistenceManager serviceGroupFilePM) throws UserStoreException { try { String[] rolesList = realm.getUserStoreManager().getRoleListOfUser(tenantAwareUserName); String serviceXPath = Resources.ServiceProperties.ROOT_XPATH + PersistenceUtils. getXPathAttrPredicate(Resources.NAME, serviceId); String rolesPath = serviceXPath + "/" + Resources.SecurityManagement.ROLE_XML_TAG + PersistenceUtils.getXPathAttrPredicate( Resources.Associations.TYPE, permissionType) + "/@" + Resources.SecurityManagement.ROLENAME_XML_ATTR; List tmpAllowedRolesAttr = serviceGroupFilePM.getAll(serviceGroupId, rolesPath); List<String> allowedRoles = new ArrayList<>(tmpAllowedRolesAttr.size()); for (Object attr : tmpAllowedRolesAttr) { allowedRoles.add(((OMAttribute) attr).getAttributeValue()); } for (String role : rolesList) { if (allowedRoles.contains(role)) { return true; } } return false; } catch (PersistenceDataNotFoundException e) { log.error("Error occurred while reading allowed roles element. Returning false.", e); return false; } } }
wso2/carbon-identity-framework
components/security-mgt/org.wso2.carbon.security.mgt/src/main/java/org/wso2/carbon/security/util/SecurityPersistenceUtils.java
Java
apache-2.0
3,588
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> <!--NewPage--> <HTML> <HEAD> <!-- Generated by javadoc (build 1.6.0_20) on Sun Dec 12 16:57:24 GMT 2010 --> <META http-equiv="Content-Type" content="text/html; charset=UTF-8"> <TITLE> ModelMaker (Jena Framework) </TITLE> <META NAME="date" CONTENT="2010-12-12"> <LINK REL ="stylesheet" TYPE="text/css" HREF="../../../../../../stylesheet.css" TITLE="Style"> <SCRIPT type="text/javascript"> function windowTitle() { if (location.href.indexOf('is-external=true') == -1) { parent.document.title="ModelMaker (Jena Framework)"; } } </SCRIPT> <NOSCRIPT> </NOSCRIPT> </HEAD> <BODY BGCOLOR="white" onload="windowTitle();"> <HR> <!-- ========= START OF TOP NAVBAR ======= --> <A NAME="navbar_top"><!-- --></A> <A HREF="#skip-navbar_top" title="Skip navigation links"></A> <TABLE BORDER="0" WIDTH="100%" CELLPADDING="1" CELLSPACING="0" SUMMARY=""> <TR> <TD COLSPAN=2 BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A NAME="navbar_top_firstrow"><!-- --></A> <TABLE BORDER="0" CELLPADDING="0" CELLSPACING="3" SUMMARY=""> <TR ALIGN="center" VALIGN="top"> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../overview-summary.html"><FONT CLASS="NavBarFont1"><B>Overview</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="package-summary.html"><FONT CLASS="NavBarFont1"><B>Package</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#FFFFFF" CLASS="NavBarCell1Rev"> &nbsp;<FONT CLASS="NavBarFont1Rev"><B>Class</B></FONT>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="class-use/ModelMaker.html"><FONT CLASS="NavBarFont1"><B>Use</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="package-tree.html"><FONT CLASS="NavBarFont1"><B>Tree</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../deprecated-list.html"><FONT CLASS="NavBarFont1"><B>Deprecated</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" 
CLASS="NavBarCell1"> <A HREF="../../../../../../index-all.html"><FONT CLASS="NavBarFont1"><B>Index</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../help-doc.html"><FONT CLASS="NavBarFont1"><B>Help</B></FONT></A>&nbsp;</TD> </TR> </TABLE> </TD> <TD ALIGN="right" VALIGN="top" ROWSPAN=3><EM> </EM> </TD> </TR> <TR> <TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2"> &nbsp;<A HREF="../../../../../../com/hp/hpl/jena/rdf/model/ModelGraphInterface.html" title="interface in com.hp.hpl.jena.rdf.model"><B>PREV CLASS</B></A>&nbsp; &nbsp;<A HREF="../../../../../../com/hp/hpl/jena/rdf/model/ModelReader.html" title="interface in com.hp.hpl.jena.rdf.model"><B>NEXT CLASS</B></A></FONT></TD> <TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2"> <A HREF="../../../../../../index.html?com/hp/hpl/jena/rdf/model/ModelMaker.html" target="_top"><B>FRAMES</B></A> &nbsp; &nbsp;<A HREF="ModelMaker.html" target="_top"><B>NO FRAMES</B></A> &nbsp; &nbsp;<SCRIPT type="text/javascript"> <!-- if(window==top) { document.writeln('<A HREF="../../../../../../allclasses-noframe.html"><B>All Classes</B></A>'); } //--> </SCRIPT> <NOSCRIPT> <A HREF="../../../../../../allclasses-noframe.html"><B>All Classes</B></A> </NOSCRIPT> </FONT></TD> </TR> <TR> <TD VALIGN="top" CLASS="NavBarCell3"><FONT SIZE="-2"> SUMMARY:&nbsp;NESTED&nbsp;|&nbsp;FIELD&nbsp;|&nbsp;CONSTR&nbsp;|&nbsp;<A HREF="#method_summary">METHOD</A></FONT></TD> <TD VALIGN="top" CLASS="NavBarCell3"><FONT SIZE="-2"> DETAIL:&nbsp;FIELD&nbsp;|&nbsp;CONSTR&nbsp;|&nbsp;<A HREF="#method_detail">METHOD</A></FONT></TD> </TR> </TABLE> <A NAME="skip-navbar_top"></A> <!-- ========= END OF TOP NAVBAR ========= --> <HR> <!-- ======== START OF CLASS DATA ======== --> <H2> <FONT SIZE="-1"> com.hp.hpl.jena.rdf.model</FONT> <BR> Interface ModelMaker</H2> <DL> <DT><B>All Superinterfaces:</B> <DD><A HREF="../../../../../../com/hp/hpl/jena/rdf/model/ModelGetter.html" title="interface in 
com.hp.hpl.jena.rdf.model">ModelGetter</A>, <A HREF="../../../../../../com/hp/hpl/jena/rdf/model/ModelSource.html" title="interface in com.hp.hpl.jena.rdf.model">ModelSource</A></DD> </DL> <HR> <DL> <DT><PRE>public interface <B>ModelMaker</B><DT>extends <A HREF="../../../../../../com/hp/hpl/jena/rdf/model/ModelSource.html" title="interface in com.hp.hpl.jena.rdf.model">ModelSource</A></DL> </PRE> <P> A ModelMaker contains a collection of named models, methods for creating new models [both named and anonymous] and opening previously-named models, removing models, and accessing a single "default" Model for this Maker. <p>Additional constraints are placed on a ModelMaker as compared to its ancestor <code>ModelSource</code>. ModelMakers do not arbitrarily forget their contents - once they contain a named model, that model stays inside the ModelMaker until that ModelMaker goes away, and maybe for longer (eg if the ModelMaker fronted a database or directory). And new models can be added to a ModelMaker. 
<P> <P> <DL> <DT><B>Author:</B></DT> <DD>kers</DD> </DL> <HR> <P> <!-- ========== METHOD SUMMARY =========== --> <A NAME="method_summary"><!-- --></A> <TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY=""> <TR BGCOLOR="#CCCCFF" CLASS="TableHeadingColor"> <TH ALIGN="left" COLSPAN="2"><FONT SIZE="+2"> <B>Method Summary</B></FONT></TH> </TR> <TR BGCOLOR="white" CLASS="TableRowColor"> <TD ALIGN="right" VALIGN="top" WIDTH="1%"><FONT SIZE="-1"> <CODE>&nbsp;void</CODE></FONT></TD> <TD><CODE><B><A HREF="../../../../../../com/hp/hpl/jena/rdf/model/ModelMaker.html#close()">close</A></B>()</CODE> <BR> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Close the factory - no more requests need be honoured, and any clean-up can be done.</TD> </TR> <TR BGCOLOR="white" CLASS="TableRowColor"> <TD ALIGN="right" VALIGN="top" WIDTH="1%"><FONT SIZE="-1"> <CODE>&nbsp;<A HREF="../../../../../../com/hp/hpl/jena/rdf/model/Model.html" title="interface in com.hp.hpl.jena.rdf.model">Model</A></CODE></FONT></TD> <TD><CODE><B><A HREF="../../../../../../com/hp/hpl/jena/rdf/model/ModelMaker.html#createModel(java.lang.String)">createModel</A></B>(<A HREF="http://java.sun.com/j2se/1.5.0/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</A>&nbsp;name)</CODE> <BR> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Create a Model with the given name if no such model exists.</TD> </TR> <TR BGCOLOR="white" CLASS="TableRowColor"> <TD ALIGN="right" VALIGN="top" WIDTH="1%"><FONT SIZE="-1"> <CODE>&nbsp;<A HREF="../../../../../../com/hp/hpl/jena/rdf/model/Model.html" title="interface in com.hp.hpl.jena.rdf.model">Model</A></CODE></FONT></TD> <TD><CODE><B><A HREF="../../../../../../com/hp/hpl/jena/rdf/model/ModelMaker.html#createModel(java.lang.String, boolean)">createModel</A></B>(<A HREF="http://java.sun.com/j2se/1.5.0/docs/api/java/lang/String.html?is-external=true" title="class or interface in 
java.lang">String</A>&nbsp;name, boolean&nbsp;strict)</CODE> <BR> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Create a new Model associated with the given name.</TD> </TR> <TR BGCOLOR="white" CLASS="TableRowColor"> <TD ALIGN="right" VALIGN="top" WIDTH="1%"><FONT SIZE="-1"> <CODE>&nbsp;<A HREF="../../../../../../com/hp/hpl/jena/graph/GraphMaker.html" title="interface in com.hp.hpl.jena.graph">GraphMaker</A></CODE></FONT></TD> <TD><CODE><B><A HREF="../../../../../../com/hp/hpl/jena/rdf/model/ModelMaker.html#getGraphMaker()">getGraphMaker</A></B>()</CODE> <BR> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Answer a GraphMaker that makes graphs the same way this ModelMaker makes models.</TD> </TR> <TR BGCOLOR="white" CLASS="TableRowColor"> <TD ALIGN="right" VALIGN="top" WIDTH="1%"><FONT SIZE="-1"> <CODE>&nbsp;boolean</CODE></FONT></TD> <TD><CODE><B><A HREF="../../../../../../com/hp/hpl/jena/rdf/model/ModelMaker.html#hasModel(java.lang.String)">hasModel</A></B>(<A HREF="http://java.sun.com/j2se/1.5.0/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</A>&nbsp;name)</CODE> <BR> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;return true iff the factory has a Model with the given name</TD> </TR> <TR BGCOLOR="white" CLASS="TableRowColor"> <TD ALIGN="right" VALIGN="top" WIDTH="1%"><FONT SIZE="-1"> <CODE>&nbsp;com.hp.hpl.jena.util.iterator.ExtendedIterator&lt;<A HREF="http://java.sun.com/j2se/1.5.0/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</A>&gt;</CODE></FONT></TD> <TD><CODE><B><A HREF="../../../../../../com/hp/hpl/jena/rdf/model/ModelMaker.html#listModels()">listModels</A></B>()</CODE> <BR> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Answer an [extended] iterator where each element is the name of a model in the maker, and the complete sequence exhausts the set of names.</TD> </TR> <TR BGCOLOR="white" 
CLASS="TableRowColor"> <TD ALIGN="right" VALIGN="top" WIDTH="1%"><FONT SIZE="-1"> <CODE>&nbsp;<A HREF="../../../../../../com/hp/hpl/jena/rdf/model/Model.html" title="interface in com.hp.hpl.jena.rdf.model">Model</A></CODE></FONT></TD> <TD><CODE><B><A HREF="../../../../../../com/hp/hpl/jena/rdf/model/ModelMaker.html#openModel(java.lang.String, boolean)">openModel</A></B>(<A HREF="http://java.sun.com/j2se/1.5.0/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</A>&nbsp;name, boolean&nbsp;strict)</CODE> <BR> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Find an existing Model that this factory knows about under the given name.</TD> </TR> <TR BGCOLOR="white" CLASS="TableRowColor"> <TD ALIGN="right" VALIGN="top" WIDTH="1%"><FONT SIZE="-1"> <CODE>&nbsp;void</CODE></FONT></TD> <TD><CODE><B><A HREF="../../../../../../com/hp/hpl/jena/rdf/model/ModelMaker.html#removeModel(java.lang.String)">removeModel</A></B>(<A HREF="http://java.sun.com/j2se/1.5.0/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</A>&nbsp;name)</CODE> <BR> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Remove the association between the name and the Model.</TD> </TR> </TABLE> &nbsp;<A NAME="methods_inherited_from_class_com.hp.hpl.jena.rdf.model.ModelSource"><!-- --></A> <TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY=""> <TR BGCOLOR="#EEEEFF" CLASS="TableSubHeadingColor"> <TH ALIGN="left"><B>Methods inherited from interface com.hp.hpl.jena.rdf.model.<A HREF="../../../../../../com/hp/hpl/jena/rdf/model/ModelSource.html" title="interface in com.hp.hpl.jena.rdf.model">ModelSource</A></B></TH> </TR> <TR BGCOLOR="white" CLASS="TableRowColor"> <TD><CODE><A HREF="../../../../../../com/hp/hpl/jena/rdf/model/ModelSource.html#createDefaultModel()">createDefaultModel</A>, <A 
HREF="../../../../../../com/hp/hpl/jena/rdf/model/ModelSource.html#createFreshModel()">createFreshModel</A>, <A HREF="../../../../../../com/hp/hpl/jena/rdf/model/ModelSource.html#openModel(java.lang.String)">openModel</A>, <A HREF="../../../../../../com/hp/hpl/jena/rdf/model/ModelSource.html#openModelIfPresent(java.lang.String)">openModelIfPresent</A></CODE></TD> </TR> </TABLE> &nbsp;<A NAME="methods_inherited_from_class_com.hp.hpl.jena.rdf.model.ModelGetter"><!-- --></A> <TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY=""> <TR BGCOLOR="#EEEEFF" CLASS="TableSubHeadingColor"> <TH ALIGN="left"><B>Methods inherited from interface com.hp.hpl.jena.rdf.model.<A HREF="../../../../../../com/hp/hpl/jena/rdf/model/ModelGetter.html" title="interface in com.hp.hpl.jena.rdf.model">ModelGetter</A></B></TH> </TR> <TR BGCOLOR="white" CLASS="TableRowColor"> <TD><CODE><A HREF="../../../../../../com/hp/hpl/jena/rdf/model/ModelGetter.html#getModel(java.lang.String)">getModel</A>, <A HREF="../../../../../../com/hp/hpl/jena/rdf/model/ModelGetter.html#getModel(java.lang.String, com.hp.hpl.jena.rdf.model.ModelReader)">getModel</A></CODE></TD> </TR> </TABLE> &nbsp; <P> <!-- ============ METHOD DETAIL ========== --> <A NAME="method_detail"><!-- --></A> <TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY=""> <TR BGCOLOR="#CCCCFF" CLASS="TableHeadingColor"> <TH ALIGN="left" COLSPAN="1"><FONT SIZE="+2"> <B>Method Detail</B></FONT></TH> </TR> </TABLE> <A NAME="createModel(java.lang.String, boolean)"><!-- --></A><H3> createModel</H3> <PRE> <A HREF="../../../../../../com/hp/hpl/jena/rdf/model/Model.html" title="interface in com.hp.hpl.jena.rdf.model">Model</A> <B>createModel</B>(<A HREF="http://java.sun.com/j2se/1.5.0/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</A>&nbsp;name, boolean&nbsp;strict)</PRE> <DL> <DD>Create a new Model associated with the given name. 
If there is no such association, create one and return it. If one exists but <code>strict</code> is false, return the associated Model. Otherwise throw an AlreadyExistsException. <P> <DD><DL> </DL> </DD> <DD><DL> <DT><B>Parameters:</B><DD><CODE>name</CODE> - the name to give to the new Model<DD><CODE>strict</CODE> - true to cause existing bindings to throw an exception <DT><B>Throws:</B> <DD><CODE>AlreadyExistsException</CODE> - if that name is already bound.</DL> </DD> </DL> <HR> <A NAME="createModel(java.lang.String)"><!-- --></A><H3> createModel</H3> <PRE> <A HREF="../../../../../../com/hp/hpl/jena/rdf/model/Model.html" title="interface in com.hp.hpl.jena.rdf.model">Model</A> <B>createModel</B>(<A HREF="http://java.sun.com/j2se/1.5.0/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</A>&nbsp;name)</PRE> <DL> <DD>Create a Model with the given name if no such model exists. Otherwise, answer the existing model. Equivalent to <br><code>createModel( name, false )</code>. <P> <DD><DL> </DL> </DD> <DD><DL> </DL> </DD> </DL> <HR> <A NAME="openModel(java.lang.String, boolean)"><!-- --></A><H3> openModel</H3> <PRE> <A HREF="../../../../../../com/hp/hpl/jena/rdf/model/Model.html" title="interface in com.hp.hpl.jena.rdf.model">Model</A> <B>openModel</B>(<A HREF="http://java.sun.com/j2se/1.5.0/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</A>&nbsp;name, boolean&nbsp;strict)</PRE> <DL> <DD>Find an existing Model that this factory knows about under the given name. If such a Model exists, return it. Otherwise, if <code>strict</code> is false, create a new Model, associate it with the name, and return it. Otherwise throw a DoesNotExistException. <p>When called with <code>strict=false</code>, is equivalent to the ancestor <code>openModel(String)</code> method. 
<P> <DD><DL> </DL> </DD> <DD><DL> <DT><B>Parameters:</B><DD><CODE>name</CODE> - the name of the Model to find and return<DD><CODE>strict</CODE> - false to create a new one if one doesn't already exist <DT><B>Throws:</B> <DD><CODE>DoesNotExistException</CODE> - if there's no such named Model</DL> </DD> </DL> <HR> <A NAME="removeModel(java.lang.String)"><!-- --></A><H3> removeModel</H3> <PRE> void <B>removeModel</B>(<A HREF="http://java.sun.com/j2se/1.5.0/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</A>&nbsp;name)</PRE> <DL> <DD>Remove the association between the name and the Model. create will now be able to create a Model with that name, and open will no longer be able to find it. Throws an exception if there's no such Model. The Model itself is not touched. <P> <DD><DL> </DL> </DD> <DD><DL> <DT><B>Parameters:</B><DD><CODE>name</CODE> - the name to disassociate <DT><B>Throws:</B> <DD><CODE>DoesNotExistException</CODE> - if the name is unbound</DL> </DD> </DL> <HR> <A NAME="hasModel(java.lang.String)"><!-- --></A><H3> hasModel</H3> <PRE> boolean <B>hasModel</B>(<A HREF="http://java.sun.com/j2se/1.5.0/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</A>&nbsp;name)</PRE> <DL> <DD>return true iff the factory has a Model with the given name <P> <DD><DL> </DL> </DD> <DD><DL> <DT><B>Parameters:</B><DD><CODE>name</CODE> - the name of the Model to look for <DT><B>Returns:</B><DD>true iff there's a Model with that name</DL> </DD> </DL> <HR> <A NAME="close()"><!-- --></A><H3> close</H3> <PRE> void <B>close</B>()</PRE> <DL> <DD>Close the factory - no more requests need be honoured, and any clean-up can be done. 
<P> <DD><DL> </DL> </DD> <DD><DL> </DL> </DD> </DL> <HR> <A NAME="getGraphMaker()"><!-- --></A><H3> getGraphMaker</H3> <PRE> <A HREF="../../../../../../com/hp/hpl/jena/graph/GraphMaker.html" title="interface in com.hp.hpl.jena.graph">GraphMaker</A> <B>getGraphMaker</B>()</PRE> <DL> <DD>Answer a GraphMaker that makes graphs the same way this ModelMaker makes models. In general this will be an underlying GraphMaker. <P> <DD><DL> </DL> </DD> <DD><DL> </DL> </DD> </DL> <HR> <A NAME="listModels()"><!-- --></A><H3> listModels</H3> <PRE> com.hp.hpl.jena.util.iterator.ExtendedIterator&lt;<A HREF="http://java.sun.com/j2se/1.5.0/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</A>&gt; <B>listModels</B>()</PRE> <DL> <DD>Answer an [extended] iterator where each element is the name of a model in the maker, and the complete sequence exhausts the set of names. No particular order is expected from the list. <P> <DD><DL> </DL> </DD> <DD><DL> <DT><B>Returns:</B><DD>an extended iterator over the names of models known to this Maker.</DL> </DD> </DL> <!-- ========= END OF CLASS DATA ========= --> <HR> <!-- ======= START OF BOTTOM NAVBAR ====== --> <A NAME="navbar_bottom"><!-- --></A> <A HREF="#skip-navbar_bottom" title="Skip navigation links"></A> <TABLE BORDER="0" WIDTH="100%" CELLPADDING="1" CELLSPACING="0" SUMMARY=""> <TR> <TD COLSPAN=2 BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A NAME="navbar_bottom_firstrow"><!-- --></A> <TABLE BORDER="0" CELLPADDING="0" CELLSPACING="3" SUMMARY=""> <TR ALIGN="center" VALIGN="top"> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../overview-summary.html"><FONT CLASS="NavBarFont1"><B>Overview</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="package-summary.html"><FONT CLASS="NavBarFont1"><B>Package</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#FFFFFF" CLASS="NavBarCell1Rev"> &nbsp;<FONT CLASS="NavBarFont1Rev"><B>Class</B></FONT>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" 
CLASS="NavBarCell1"> <A HREF="class-use/ModelMaker.html"><FONT CLASS="NavBarFont1"><B>Use</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="package-tree.html"><FONT CLASS="NavBarFont1"><B>Tree</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../deprecated-list.html"><FONT CLASS="NavBarFont1"><B>Deprecated</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../index-all.html"><FONT CLASS="NavBarFont1"><B>Index</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../help-doc.html"><FONT CLASS="NavBarFont1"><B>Help</B></FONT></A>&nbsp;</TD> </TR> </TABLE> </TD> <TD ALIGN="right" VALIGN="top" ROWSPAN=3><EM> </EM> </TD> </TR> <TR> <TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2"> &nbsp;<A HREF="../../../../../../com/hp/hpl/jena/rdf/model/ModelGraphInterface.html" title="interface in com.hp.hpl.jena.rdf.model"><B>PREV CLASS</B></A>&nbsp; &nbsp;<A HREF="../../../../../../com/hp/hpl/jena/rdf/model/ModelReader.html" title="interface in com.hp.hpl.jena.rdf.model"><B>NEXT CLASS</B></A></FONT></TD> <TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2"> <A HREF="../../../../../../index.html?com/hp/hpl/jena/rdf/model/ModelMaker.html" target="_top"><B>FRAMES</B></A> &nbsp; &nbsp;<A HREF="ModelMaker.html" target="_top"><B>NO FRAMES</B></A> &nbsp; &nbsp;<SCRIPT type="text/javascript"> <!-- if(window==top) { document.writeln('<A HREF="../../../../../../allclasses-noframe.html"><B>All Classes</B></A>'); } //--> </SCRIPT> <NOSCRIPT> <A HREF="../../../../../../allclasses-noframe.html"><B>All Classes</B></A> </NOSCRIPT> </FONT></TD> </TR> <TR> <TD VALIGN="top" CLASS="NavBarCell3"><FONT SIZE="-2"> SUMMARY:&nbsp;NESTED&nbsp;|&nbsp;FIELD&nbsp;|&nbsp;CONSTR&nbsp;|&nbsp;<A HREF="#method_summary">METHOD</A></FONT></TD> <TD VALIGN="top" CLASS="NavBarCell3"><FONT SIZE="-2"> DETAIL:&nbsp;FIELD&nbsp;|&nbsp;CONSTR&nbsp;|&nbsp;<A 
HREF="#method_detail">METHOD</A></FONT></TD> </TR> </TABLE> <A NAME="skip-navbar_bottom"></A> <!-- ======== END OF BOTTOM NAVBAR ======= --> <HR> Copyright &copy; 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 Hewlett-Packard Development Company, LP </BODY> </HTML>
BiGCAT-UM/wp2lod
lib/Jena-2.6.4/doc/javadoc/com/hp/hpl/jena/rdf/model/ModelMaker.html
HTML
apache-2.0
21,068
/* JavaScript content from worklight/plugins/org.apache.cordova.media-capture/www/CaptureImageOptions.js in JS Resources */
cordova.define("org.apache.cordova.media-capture.CaptureImageOptions", function(require, exports, module) {/*
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 *
*/

/**
 * Encapsulates all image capture operation configuration options.
 *
 * Constructor takes no arguments; the only configurable option is
 * `limit`, which callers may overwrite after construction before
 * passing the object to a capture API call.
 *
 * @constructor
 */
var CaptureImageOptions = function(){
	// Upper limit of images user can take. Value must be equal or greater than 1.
	this.limit = 1;
};

module.exports = CaptureImageOptions;

});
Zodia/poc-tijari-mobile
attijari-poc-mobile-app/apps/attijari_poc_mobile_app/ipad/native/www/default/worklight/plugins/org.apache.cordova.media-capture/www/CaptureImageOptions.js
JavaScript
apache-2.0
1,431
package org.apache.lucene.index;

/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.IOException;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;

import org.apache.lucene.document.Document;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TestUtil;

/**
 * Unit tests for {@link SegmentReader} built over a single-document segment
 * (written by {@code DocHelper}): exercises stored-document retrieval,
 * field-name enumeration via {@code FieldInfos}, terms/postings iteration,
 * norms, term vectors, and out-of-bounds doc-id access.
 */
public class TestSegmentReader extends LuceneTestCase {
  private Directory dir;
  private Document testDoc = new Document();
  private SegmentReader reader = null;

  //TODO: Setup the reader w/ multiple documents

  /** Writes the single DocHelper test document to a fresh directory and opens a SegmentReader on it. */
  @Override
  public void setUp() throws Exception {
    super.setUp();
    dir = newDirectory();
    DocHelper.setupDoc(testDoc);
    SegmentCommitInfo info = DocHelper.writeDoc(random(), dir, testDoc);
    reader = new SegmentReader(info, IOContext.READ);
  }

  /** Closes the reader before the directory — directory close checks for open files. */
  @Override
  public void tearDown() throws Exception {
    reader.close();
    dir.close();
    super.tearDown();
  }

  /** Sanity checks on the fixture: reader/dir exist and DocHelper's field bookkeeping is consistent. */
  public void test() {
    assertTrue(dir != null);
    assertTrue(reader != null);
    assertTrue(DocHelper.nameValues.size() > 0);
    assertTrue(DocHelper.numFields(testDoc) == DocHelper.all.size());
  }

  /** Retrieves doc 0 and verifies that only stored fields survive the round-trip. */
  public void testDocument() throws IOException {
    assertTrue(reader.numDocs() == 1);
    assertTrue(reader.maxDoc() >= 1);
    Document result = reader.document(0);
    assertTrue(result != null);
    //There are 2 unstored fields on the document that are not preserved across writing
    assertTrue(DocHelper.numFields(result) == DocHelper.numFields(testDoc) - DocHelper.unstored.size());
    List<IndexableField> fields = result.getFields();
    for (final IndexableField field : fields ) {
      assertTrue(field != null);
      assertTrue(DocHelper.nameValues.containsKey(field.name()));
    }
  }

  /**
   * Partitions the reader's FieldInfos into indexed/not-indexed and
   * with/without term vectors, and checks each partition's size against
   * the corresponding DocHelper collection.
   */
  public void testGetFieldNameVariations() {
    Collection<String> allFieldNames = new HashSet<>();
    Collection<String> indexedFieldNames = new HashSet<>();
    Collection<String> notIndexedFieldNames = new HashSet<>();
    Collection<String> tvFieldNames = new HashSet<>();
    Collection<String> noTVFieldNames = new HashSet<>();

    for(FieldInfo fieldInfo : reader.getFieldInfos()) {
      final String name = fieldInfo.name;
      allFieldNames.add(name);
      if (fieldInfo.getIndexOptions() != IndexOptions.NONE) {
        indexedFieldNames.add(name);
      } else {
        notIndexedFieldNames.add(name);
      }
      if (fieldInfo.hasVectors()) {
        tvFieldNames.add(name);
      } else if (fieldInfo.getIndexOptions() != IndexOptions.NONE) {
        // no-term-vector bucket only counts indexed fields
        noTVFieldNames.add(name);
      }
    }

    assertTrue(allFieldNames.size() == DocHelper.all.size());
    for (String s : allFieldNames) {
      assertTrue(DocHelper.nameValues.containsKey(s) == true || s.equals(""));
    }

    assertTrue(indexedFieldNames.size() == DocHelper.indexed.size());
    for (String s : indexedFieldNames) {
      assertTrue(DocHelper.indexed.containsKey(s) == true || s.equals(""));
    }

    assertTrue(notIndexedFieldNames.size() == DocHelper.unindexed.size());
    //Get all indexed fields that are storing term vectors
    assertTrue(tvFieldNames.size() == DocHelper.termvector.size());

    assertTrue(noTVFieldNames.size() == DocHelper.notermvector.size());
  }

  /**
   * Iterates every term of every field and checks each term's text occurs in
   * the source field value; then spot-checks postings and positions for
   * specific known terms.
   */
  public void testTerms() throws IOException {
    Fields fields = MultiFields.getFields(reader);
    for (String field : fields) {
      Terms terms = fields.terms(field);
      assertNotNull(terms);
      TermsEnum termsEnum = terms.iterator();
      while(termsEnum.next() != null) {
        BytesRef term = termsEnum.term();
        assertTrue(term != null);
        String fieldValue = (String) DocHelper.nameValues.get(field);
        assertTrue(fieldValue.indexOf(term.utf8ToString()) != -1);
      }
    }

    PostingsEnum termDocs = TestUtil.docs(random(), reader,
                                          DocHelper.TEXT_FIELD_1_KEY,
                                          new BytesRef("field"),
                                          null,
                                          0);
    assertTrue(termDocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);

    termDocs = TestUtil.docs(random(), reader,
                             DocHelper.NO_NORMS_KEY,
                             new BytesRef(DocHelper.NO_NORMS_TEXT),
                             null,
                             0);

    assertTrue(termDocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);

    PostingsEnum positions = MultiFields.getTermPositionsEnum(reader,
                                                              DocHelper.TEXT_FIELD_1_KEY,
                                                              new BytesRef("field"));
    // NOTE: prior rev of this test was failing to first
    // call next here:
    assertTrue(positions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
    assertTrue(positions.docID() == 0);
    assertTrue(positions.nextPosition() >= 0);
  }

  /** Delegates norms verification to {@link #checkNorms(LeafReader)}. */
  public void testNorms() throws IOException {
    //TODO: Not sure how these work/should be tested
/*
    try {
      byte [] norms = reader.norms(DocHelper.TEXT_FIELD_1_KEY);
      System.out.println("Norms: " + norms);
      assertTrue(norms != null);
    } catch (IOException e) {
      e.printStackTrace();
      assertTrue(false);
    }
*/
    checkNorms(reader);
  }

  /**
   * For every indexed DocHelper field, asserts that the reader has norm values
   * exactly when the field does not omit norms (and agrees with DocHelper.noNorms).
   */
  public static void checkNorms(LeafReader reader) throws IOException {
    // test omit norms
    for (int i=0; i<DocHelper.fields.length; i++) {
      IndexableField f = DocHelper.fields[i];
      if (f.fieldType().indexOptions() != IndexOptions.NONE) {
        assertEquals(reader.getNormValues(f.name()) != null, !f.fieldType().omitNorms());
        assertEquals(reader.getNormValues(f.name()) != null, !DocHelper.noNorms.containsKey(f.name()));
        if (reader.getNormValues(f.name()) == null) {
          // test for norms of null
          NumericDocValues norms = MultiDocValues.getNormValues(reader, f.name());
          assertNull(norms);
        }
      }
    }
  }

  /** Checks the term vector for TEXT_FIELD_2: 3 terms, each present in FIELD_2_TEXT with positive freq. */
  public void testTermVectors() throws IOException {
    Terms result = reader.getTermVectors(0).terms(DocHelper.TEXT_FIELD_2_KEY);
    assertNotNull(result);
    assertEquals(3, result.size());
    TermsEnum termsEnum = result.iterator();
    while(termsEnum.next() != null) {
      String term = termsEnum.term().utf8ToString();
      int freq = (int) termsEnum.totalTermFreq();
      assertTrue(DocHelper.FIELD_2_TEXT.indexOf(term) != -1);
      assertTrue(freq > 0);
    }

    Fields results = reader.getTermVectors(0);
    assertTrue(results != null);
    assertEquals("We do not have 3 term freq vectors", 3, results.size());
  }

  /** document()/getTermVectors() must throw IndexOutOfBoundsException for doc ids -1 and maxDoc(). */
  public void testOutOfBoundsAccess() throws IOException {
    int numDocs = reader.maxDoc();
    try {
      reader.document(-1);
      fail();
    } catch (IndexOutOfBoundsException expected) {}

    try {
      reader.getTermVectors(-1);
      fail();
    } catch (IndexOutOfBoundsException expected) {}

    try {
      reader.document(numDocs);
      fail();
    } catch (IndexOutOfBoundsException expected) {}

    try {
      reader.getTermVectors(numDocs);
      fail();
    } catch (IndexOutOfBoundsException expected) {}
  }
}
PATRIC3/p3_solr
lucene/core/src/test/org/apache/lucene/index/TestSegmentReader.java
Java
apache-2.0
7,972
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kuberuntime

import (
	"errors"
	"fmt"
	"io"
	"os"
	"time"

	"github.com/coreos/go-semver/semver"
	"github.com/golang/glog"
	cadvisorapi "github.com/google/cadvisor/info/v1"
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/record"
	"k8s.io/kubernetes/pkg/credentialprovider"
	internalApi "k8s.io/kubernetes/pkg/kubelet/api"
	runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
	"k8s.io/kubernetes/pkg/kubelet/dockershim"
	"k8s.io/kubernetes/pkg/kubelet/events"
	"k8s.io/kubernetes/pkg/kubelet/images"
	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
	"k8s.io/kubernetes/pkg/kubelet/network"
	proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results"
	"k8s.io/kubernetes/pkg/kubelet/types"
	"k8s.io/kubernetes/pkg/kubelet/util/cache"
	"k8s.io/kubernetes/pkg/kubelet/util/format"
	kubetypes "k8s.io/kubernetes/pkg/types"
	"k8s.io/kubernetes/pkg/util/flowcontrol"
	utilruntime "k8s.io/kubernetes/pkg/util/runtime"
)

const (
	// The api version of kubelet runtime api
	kubeRuntimeAPIVersion = "0.1.0"
	// The root directory for pod logs
	podLogsRootDirectory = "/var/log/pods"
	// A minimal shutdown window for avoiding unnecessary SIGKILLs
	minimumGracePeriodInSeconds = 2
	// The expiration time of version cache.
	versionCacheTTL = 60 * time.Second
)

var (
	// ErrVersionNotSupported is returned when the api version of runtime interface is not supported
	ErrVersionNotSupported = errors.New("Runtime api version is not supported")
)

// A subset of the pod.Manager interface extracted for garbage collection purposes.
type podGetter interface {
	GetPodByUID(kubetypes.UID) (*api.Pod, bool)
}

// kubeGenericRuntimeManager implements kubecontainer.Runtime on top of the
// gRPC RuntimeService/ImageManagerService clients (CRI).
type kubeGenericRuntimeManager struct {
	// runtimeName is reported by the remote runtime's Version() call.
	runtimeName string
	recorder    record.EventRecorder
	osInterface kubecontainer.OSInterface
	containerRefManager *kubecontainer.RefManager

	// machineInfo contains the machine information.
	machineInfo *cadvisorapi.MachineInfo

	// Container GC manager
	containerGC *containerGC

	// Keyring for pulling images
	keyring credentialprovider.DockerKeyring

	// Runner of lifecycle events.
	runner kubecontainer.HandlerRunner

	// RuntimeHelper that wraps kubelet to generate runtime container options.
	runtimeHelper kubecontainer.RuntimeHelper

	// Health check results.
	livenessManager proberesults.Manager

	// If true, enforce container cpu limits with CFS quota support
	cpuCFSQuota bool

	// Network plugin.
	networkPlugin network.NetworkPlugin

	// wrapped image puller.
	imagePuller images.ImageManager

	// gRPC service clients
	runtimeService internalApi.RuntimeService
	imageService   internalApi.ImageManagerService

	// The version cache of runtime daemon.
	versionCache *cache.ObjectCache
}

// NewKubeGenericRuntimeManager creates a new kubeGenericRuntimeManager.
// It contacts the remote runtime once to verify the runtime API version
// (only kubeRuntimeAPIVersion is accepted — returns ErrVersionNotSupported
// otherwise), ensures the pod-logs directory exists, and wires up the image
// puller, lifecycle handler runner, container GC, and version cache.
func NewKubeGenericRuntimeManager(
	recorder record.EventRecorder,
	livenessManager proberesults.Manager,
	containerRefManager *kubecontainer.RefManager,
	machineInfo *cadvisorapi.MachineInfo,
	podGetter podGetter,
	osInterface kubecontainer.OSInterface,
	networkPlugin network.NetworkPlugin,
	runtimeHelper kubecontainer.RuntimeHelper,
	httpClient types.HttpGetter,
	imageBackOff *flowcontrol.Backoff,
	serializeImagePulls bool,
	imagePullQPS float32,
	imagePullBurst int,
	cpuCFSQuota bool,
	runtimeService internalApi.RuntimeService,
	imageService internalApi.ImageManagerService,
) (kubecontainer.Runtime, error) {
	kubeRuntimeManager := &kubeGenericRuntimeManager{
		recorder:            recorder,
		cpuCFSQuota:         cpuCFSQuota,
		livenessManager:     livenessManager,
		containerRefManager: containerRefManager,
		machineInfo:         machineInfo,
		osInterface:         osInterface,
		networkPlugin:       networkPlugin,
		runtimeHelper:       runtimeHelper,
		runtimeService:      runtimeService,
		imageService:        imageService,
		keyring:             credentialprovider.NewDockerKeyring(),
	}

	typedVersion, err := kubeRuntimeManager.runtimeService.Version(kubeRuntimeAPIVersion)
	if err != nil {
		glog.Errorf("Get runtime version failed: %v", err)
		return nil, err
	}

	// Only matching kubeRuntimeAPIVersion is supported now
	// TODO: Runtime API machinery is under discussion at https://github.com/kubernetes/kubernetes/issues/28642
	if typedVersion.GetVersion() != kubeRuntimeAPIVersion {
		glog.Errorf("Runtime api version %s is not supported, only %s is supported now",
			typedVersion.GetVersion(),
			kubeRuntimeAPIVersion)
		return nil, ErrVersionNotSupported
	}

	kubeRuntimeManager.runtimeName = typedVersion.GetRuntimeName()
	glog.Infof("Container runtime %s initialized, version: %s, apiVersion: %s",
		typedVersion.GetRuntimeName(),
		typedVersion.GetRuntimeVersion(),
		typedVersion.GetRuntimeApiVersion())

	// If the container logs directory does not exist, create it.
	// TODO: create podLogsRootDirectory at kubelet.go when kubelet is refactored to
	// new runtime interface
	// NOTE: creation failure is only logged, not returned — best-effort by design.
	if _, err := osInterface.Stat(podLogsRootDirectory); os.IsNotExist(err) {
		if err := osInterface.MkdirAll(podLogsRootDirectory, 0755); err != nil {
			glog.Errorf("Failed to create directory %q: %v", podLogsRootDirectory, err)
		}
	}

	kubeRuntimeManager.imagePuller = images.NewImageManager(
		kubecontainer.FilterEventRecorder(recorder),
		kubeRuntimeManager,
		imageBackOff,
		serializeImagePulls,
		imagePullQPS,
		imagePullBurst)
	kubeRuntimeManager.runner = lifecycle.NewHandlerRunner(httpClient, kubeRuntimeManager, kubeRuntimeManager)
	kubeRuntimeManager.containerGC = NewContainerGC(runtimeService, podGetter, kubeRuntimeManager)

	// The version cache refreshes itself via getTypedVersion every versionCacheTTL.
	kubeRuntimeManager.versionCache = cache.NewObjectCache(
		func() (interface{}, error) {
			return kubeRuntimeManager.getTypedVersion()
		},
		versionCacheTTL,
	)

	return kubeRuntimeManager, nil
}

// Type returns the type of the container runtime.
func (m *kubeGenericRuntimeManager) Type() string {
	return m.runtimeName
}

// runtimeVersion implements kubecontainer.Version interface by implementing
// Compare() and String()
type runtimeVersion struct {
	*semver.Version
}

// newRuntimeVersion parses a semver string into a runtimeVersion; returns the
// parse error (with a zero runtimeVersion) if the string is not valid semver.
func newRuntimeVersion(version string) (runtimeVersion, error) {
	sem, err := semver.NewVersion(version)
	if err != nil {
		return runtimeVersion{}, err
	}
	return runtimeVersion{sem}, nil
}

// Compare semver-compares r against other: -1 if r < other, 1 if r > other,
// 0 if equal. Note a parse failure of `other` also returns -1, alongside the error.
func (r runtimeVersion) Compare(other string) (int, error) {
	v, err := semver.NewVersion(other)
	if err != nil {
		return -1, err
	}
	if r.LessThan(*v) {
		return -1, nil
	}
	if v.LessThan(*r.Version) {
		return 1, nil
	}
	return 0, nil
}

// getTypedVersion fetches the full VersionResponse from the remote runtime;
// used directly and as the refresh function of versionCache.
func (m *kubeGenericRuntimeManager) getTypedVersion() (*runtimeApi.VersionResponse, error) {
	typedVersion, err := m.runtimeService.Version(kubeRuntimeAPIVersion)
	if err != nil {
		glog.Errorf("Get remote runtime typed version failed: %v", err)
		return nil, err
	}

	return typedVersion, nil
}

// Version returns the version information of the container runtime.
func (m *kubeGenericRuntimeManager) Version() (kubecontainer.Version, error) {
	// Queries the remote runtime directly (not the cache) for its engine version.
	typedVersion, err := m.runtimeService.Version(kubeRuntimeAPIVersion)
	if err != nil {
		glog.Errorf("Get remote runtime version failed: %v", err)
		return nil, err
	}

	return newRuntimeVersion(typedVersion.GetVersion())
}

// APIVersion returns the cached API version information of the container
// runtime. Implementation is expected to update this cache periodically.
// This may be different from the runtime engine's version.
func (m *kubeGenericRuntimeManager) APIVersion() (kubecontainer.Version, error) {
	// MachineID keys the version cache entry for this node.
	versionObject, err := m.versionCache.Get(m.machineInfo.MachineID)
	if err != nil {
		return nil, err
	}
	typedVersion := versionObject.(*runtimeApi.VersionResponse)

	return newRuntimeVersion(typedVersion.GetRuntimeApiVersion())
}

// Status returns error if the runtime is unhealthy; nil otherwise.
// Health is probed by issuing a Version call to the remote runtime.
func (m *kubeGenericRuntimeManager) Status() error {
	_, err := m.runtimeService.Version(kubeRuntimeAPIVersion)
	if err != nil {
		glog.Errorf("Checkout remote runtime status failed: %v", err)
		return err
	}

	return nil
}

// GetPods returns a list of containers grouped by pods. The boolean parameter
// specifies whether the runtime returns all containers including those already
// exited and dead containers (used for garbage collection).
func (m *kubeGenericRuntimeManager) GetPods(all bool) ([]*kubecontainer.Pod, error) {
	pods := make(map[kubetypes.UID]*kubecontainer.Pod)

	// First pass: group sandboxes by pod UID from the sandbox metadata.
	sandboxes, err := m.getKubeletSandboxes(all)
	if err != nil {
		return nil, err
	}
	for i := range sandboxes {
		s := sandboxes[i]
		if s.Metadata == nil {
			// Sandboxes without metadata cannot be attributed to a pod; skip.
			glog.V(4).Infof("Sandbox does not have metadata: %+v", s)
			continue
		}
		podUID := kubetypes.UID(s.Metadata.GetUid())
		if _, ok := pods[podUID]; !ok {
			pods[podUID] = &kubecontainer.Pod{
				ID:        podUID,
				Name:      s.Metadata.GetName(),
				Namespace: s.Metadata.GetNamespace(),
			}
		}
		p := pods[podUID]
		converted, err := m.sandboxToKubeContainer(s)
		if err != nil {
			glog.V(4).Infof("Convert %q sandbox %v of pod %q failed: %v", m.runtimeName, s, podUID, err)
			continue
		}
		p.Sandboxes = append(p.Sandboxes, converted)
	}

	// Second pass: attach containers to their pods, keyed by the labelled pod UID.
	containers, err := m.getKubeletContainers(all)
	if err != nil {
		return nil, err
	}
	for i := range containers {
		c := containers[i]
		if c.Metadata == nil {
			glog.V(4).Infof("Container does not have metadata: %+v", c)
			continue
		}

		labelledInfo := getContainerInfoFromLabels(c.Labels)
		pod, found := pods[labelledInfo.PodUID]
		if !found {
			// A container may belong to a pod whose sandbox was not listed; create the entry.
			pod = &kubecontainer.Pod{
				ID:        labelledInfo.PodUID,
				Name:      labelledInfo.PodName,
				Namespace: labelledInfo.PodNamespace,
			}
			pods[labelledInfo.PodUID] = pod
		}

		converted, err := m.toKubeContainer(c)
		if err != nil {
			glog.V(4).Infof("Convert %s container %v of pod %q failed: %v", m.runtimeName, c, labelledInfo.PodUID, err)
			continue
		}

		pod.Containers = append(pod.Containers, converted)
	}

	// Convert map to list.
	var result []*kubecontainer.Pod
	for _, pod := range pods {
		result = append(result, pod)
	}

	return result, nil
}

// containerToKillInfo contains necessary information to kill a container.
type containerToKillInfo struct {
	// The spec of the container.
	container *api.Container
	// The name of the container.
	name string
	// The message indicates why the container will be killed.
	message string
}

// podContainerSpecChanges keeps information on changes that need to happen for a pod.
type podContainerSpecChanges struct {
	// Whether need to create a new sandbox.
	CreateSandbox bool
	// The id of existing sandbox. It is used for starting containers in ContainersToStart.
	SandboxID string
	// The attempt number of creating sandboxes for the pod.
	Attempt uint32

	// ContainersToStart keeps a map of containers that need to be started, note that
	// the key is index of the container inside pod.Spec.Containers, while
	// the value is a message indicates why the container needs to start.
	ContainersToStart map[int]string
	// ContainersToKeep keeps a map of containers that need to be kept as is, note that
	// the key is the container ID of the container, while
	// the value is index of the container inside pod.Spec.Containers.
	ContainersToKeep map[kubecontainer.ContainerID]int
	// ContainersToKill keeps a map of containers that need to be killed, note that
	// the key is the container ID of the container, while
	// the value contains necessary information to kill a container.
	ContainersToKill map[kubecontainer.ContainerID]containerToKillInfo

	// InitFailed indicates whether init containers are failed.
	InitFailed bool
	// InitContainersToKeep keeps a map of init containers that need to be kept as
	// is, note that the key is the container ID of the container, while
	// the value is index of the container inside pod.Spec.InitContainers.
	InitContainersToKeep map[kubecontainer.ContainerID]int
}

// podSandboxChanged checks whether the spec of the pod is changed and returns
// (changed, new attempt, original sandboxID if exist).
func (m *kubeGenericRuntimeManager) podSandboxChanged(pod *api.Pod, podStatus *kubecontainer.PodStatus) (changed bool, attempt uint32, sandboxID string) {
	if len(podStatus.SandboxStatuses) == 0 {
		glog.V(2).Infof("No sandbox for pod %q can be found. Need to start a new one", format.Pod(pod))
		return true, 0, ""
	}

	readySandboxCount := 0
	for _, s := range podStatus.SandboxStatuses {
		if s.GetState() == runtimeApi.PodSandBoxState_READY {
			readySandboxCount++
		}
	}

	// Needs to create a new sandbox when readySandboxCount > 1 or the ready sandbox is not the latest one.
	// NOTE: SandboxStatuses[0] is treated as the latest sandbox here — presumably the list is
	// ordered newest-first by the status provider; confirm against the caller.
	sandboxStatus := podStatus.SandboxStatuses[0]
	if readySandboxCount > 1 || sandboxStatus.GetState() != runtimeApi.PodSandBoxState_READY {
		glog.V(2).Infof("No ready sandbox for pod %q can be found. Need to start a new one", format.Pod(pod))
		return true, sandboxStatus.Metadata.GetAttempt() + 1, sandboxStatus.GetId()
	}

	// Needs to create a new sandbox when network namespace changed.
	if sandboxStatus.Linux != nil && sandboxStatus.Linux.Namespaces.Options != nil &&
		sandboxStatus.Linux.Namespaces.Options.GetHostNetwork() != kubecontainer.IsHostNetworkPod(pod) {
		glog.V(2).Infof("Sandbox for pod %q has changed. Need to start a new one", format.Pod(pod))
		return true, sandboxStatus.Metadata.GetAttempt() + 1, ""
	}

	return false, sandboxStatus.Metadata.GetAttempt(), sandboxStatus.GetId()
}

// checkAndKeepInitContainers keeps all successfully completed init containers. If there
// are failing containers, only keep the first failing one.
func checkAndKeepInitContainers(pod *api.Pod, podStatus *kubecontainer.PodStatus, initContainersToKeep map[kubecontainer.ContainerID]int) bool {
	initFailed := false

	for i, container := range pod.Spec.InitContainers {
		containerStatus := podStatus.FindContainerStatusByName(container.Name)
		if containerStatus == nil {
			// No status yet for this init container; nothing to keep.
			continue
		}

		if containerStatus.State == kubecontainer.ContainerStateRunning {
			initContainersToKeep[containerStatus.ID] = i
			continue
		}

		if containerStatus.State == kubecontainer.ContainerStateExited {
			initContainersToKeep[containerStatus.ID] = i
		}

		// Stop at the first failed init container; later ones never ran.
		if isContainerFailed(containerStatus) {
			initFailed = true
			break
		}
	}

	return initFailed
}

// computePodContainerChanges checks whether the pod spec has changed and returns the changes if true.
func (m *kubeGenericRuntimeManager) computePodContainerChanges(pod *api.Pod, podStatus *kubecontainer.PodStatus) podContainerSpecChanges {
	glog.V(5).Infof("Syncing Pod %q: %+v", format.Pod(pod), pod)

	sandboxChanged, attempt, sandboxID := m.podSandboxChanged(pod, podStatus)
	changes := podContainerSpecChanges{
		CreateSandbox:        sandboxChanged,
		SandboxID:            sandboxID,
		Attempt:              attempt,
		ContainersToStart:    make(map[int]string),
		ContainersToKeep:     make(map[kubecontainer.ContainerID]int),
		InitContainersToKeep: make(map[kubecontainer.ContainerID]int),
		ContainersToKill:     make(map[kubecontainer.ContainerID]containerToKillInfo),
	}

	// check the status of init containers.
	initFailed := false
	// always reset the init containers if the sandbox is changed.
	if !sandboxChanged {
		// Keep all successfully completed containers. If there are failing containers,
		// only keep the first failing one.
		initFailed = checkAndKeepInitContainers(pod, podStatus, changes.InitContainersToKeep)
	}
	changes.InitFailed = initFailed

	// check the status of containers.
	for index, container := range pod.Spec.Containers {
		containerStatus := podStatus.FindContainerStatusByName(container.Name)
		if containerStatus == nil || containerStatus.State != kubecontainer.ContainerStateRunning {
			// Container is absent or not running: start it only if the restart
			// policy says so.
			if kubecontainer.ShouldContainerBeRestarted(&container, pod, podStatus) {
				message := fmt.Sprintf("Container %+v is dead, but RestartPolicy says that we should restart it.", container)
				glog.Info(message)
				changes.ContainersToStart[index] = message
			}
			continue
		}
		if sandboxChanged {
			// A new sandbox means every running container must be recreated
			// inside it (unless RestartPolicy forbids restarts).
			if pod.Spec.RestartPolicy != api.RestartPolicyNever {
				message := fmt.Sprintf("Container %+v's pod sandbox is dead, the container will be recreated.", container)
				glog.Info(message)
				changes.ContainersToStart[index] = message
			}
			continue
		}
		if initFailed {
			// Initialization failed and Container exists.
			// If we have an initialization failure everything will be killed anyway.
			// If RestartPolicy is Always or OnFailure we restart containers that were running before.
			if pod.Spec.RestartPolicy != api.RestartPolicyNever {
				message := fmt.Sprintf("Failed to initialize pod. %q will be restarted.", container.Name)
				glog.V(1).Info(message)
				changes.ContainersToStart[index] = message
			}
			continue
		}

		// A differing spec hash means the container definition changed; kill and
		// recreate with the new spec.
		expectedHash := kubecontainer.HashContainer(&container)
		containerChanged := containerStatus.Hash != expectedHash
		if containerChanged {
			message := fmt.Sprintf("Pod %q container %q hash changed (%d vs %d), it will be killed and re-created.",
				pod.Name, container.Name, containerStatus.Hash, expectedHash)
			glog.Info(message)
			changes.ContainersToStart[index] = message
			continue
		}

		liveness, found := m.livenessManager.Get(containerStatus.ID)
		if !found || liveness == proberesults.Success {
			// Healthy (or not probed): keep as is.
			changes.ContainersToKeep[containerStatus.ID] = index
			continue
		}
		if pod.Spec.RestartPolicy != api.RestartPolicyNever {
			message := fmt.Sprintf("pod %q container %q is unhealthy, it will be killed and re-created.", format.Pod(pod), container.Name)
			glog.Info(message)
			changes.ContainersToStart[index] = message
		}
	}

	// Don't keep init containers if they are the only containers to keep.
	if !sandboxChanged && len(changes.ContainersToStart) == 0 && len(changes.ContainersToKeep) == 0 {
		changes.InitContainersToKeep = make(map[kubecontainer.ContainerID]int)
	}

	// compute containers to be killed
	runningContainerStatuses := podStatus.GetRunningContainerStatuses()
	for _, containerStatus := range runningContainerStatuses {
		_, keep := changes.ContainersToKeep[containerStatus.ID]
		_, keepInit := changes.InitContainersToKeep[containerStatus.ID]
		if !keep && !keepInit {
			// Attach the matching spec and the "why" message (if we scheduled a
			// restart above) so the kill path can report a useful reason.
			var podContainer *api.Container
			var killMessage string
			for i, c := range pod.Spec.Containers {
				if c.Name == containerStatus.Name {
					podContainer = &pod.Spec.Containers[i]
					killMessage = changes.ContainersToStart[i]
					break
				}
			}

			changes.ContainersToKill[containerStatus.ID] = containerToKillInfo{
				name:      containerStatus.Name,
				container: podContainer,
				message:   killMessage,
			}
		}
	}

	return changes
}

// SyncPod syncs the running pod into the desired pod by executing following steps:
//
// 1. Compute sandbox and container changes.
// 2.
// Kill pod sandbox if necessary.
// 3. Kill any containers that should not be running.
// 4. Create sandbox if necessary.
// 5. Create init containers.
// 6. Create normal containers.
func (m *kubeGenericRuntimeManager) SyncPod(pod *api.Pod, _ api.PodStatus, podStatus *kubecontainer.PodStatus, pullSecrets []api.Secret, backOff *flowcontrol.Backoff) (result kubecontainer.PodSyncResult) {
	// Step 1: Compute sandbox and container changes.
	podContainerChanges := m.computePodContainerChanges(pod, podStatus)
	glog.V(3).Infof("computePodContainerChanges got %+v for pod %q", podContainerChanges, format.Pod(pod))
	if podContainerChanges.CreateSandbox {
		ref, err := api.GetReference(pod)
		if err != nil {
			// Event recording is best-effort; a nil ref is tolerated below.
			glog.Errorf("Couldn't make a ref to pod %q: '%v'", format.Pod(pod), err)
		}
		if podContainerChanges.SandboxID != "" {
			m.recorder.Eventf(ref, api.EventTypeNormal, "SandboxChanged", "Pod sandbox changed, it will be killed and re-created.")
		} else {
			m.recorder.Eventf(ref, api.EventTypeNormal, "SandboxReceived", "Pod sandbox received, it will be created.")
		}
	}

	// Step 2: Kill the pod if the sandbox has changed.
	if podContainerChanges.CreateSandbox || (len(podContainerChanges.ContainersToKeep) == 0 && len(podContainerChanges.ContainersToStart) == 0) {
		if len(podContainerChanges.ContainersToKeep) == 0 && len(podContainerChanges.ContainersToStart) == 0 {
			glog.V(4).Infof("Stopping PodSandbox for %q because all other containers are dead.", format.Pod(pod))
		} else {
			glog.V(4).Infof("Stopping PodSandbox for %q, will start new one", format.Pod(pod))
		}
		killResult := m.killPodWithSyncResult(pod, kubecontainer.ConvertPodStatusToRunningPod(m.runtimeName, podStatus), nil)
		result.AddPodSyncResult(killResult)
		if killResult.Error() != nil {
			glog.Errorf("killPodWithSyncResult failed: %v", killResult.Error())
			return
		}
	} else {
		// Step 3: kill any running containers in this pod which are not to keep.
		for containerID, containerInfo := range podContainerChanges.ContainersToKill {
			glog.V(3).Infof("Killing unwanted container %q(id=%q) for pod %q", containerInfo.name, containerID, format.Pod(pod))
			killContainerResult := kubecontainer.NewSyncResult(kubecontainer.KillContainer, containerInfo.name)
			result.AddSyncResult(killContainerResult)
			if err := m.killContainer(pod, containerID, containerInfo.name, containerInfo.message, nil); err != nil {
				killContainerResult.Fail(kubecontainer.ErrKillContainer, err.Error())
				glog.Errorf("killContainer %q(id=%q) for pod %q failed: %v", containerInfo.name, containerID, format.Pod(pod), err)
				return
			}
		}
	}

	// Keep terminated init containers fairly aggressively controlled
	m.pruneInitContainersBeforeStart(pod, podStatus, podContainerChanges.InitContainersToKeep)

	// We pass the value of the podIP down to generatePodSandboxConfig and
	// generateContainerConfig, which in turn passes it to various other
	// functions, in order to facilitate functionality that requires this
	// value (hosts file and downward API) and avoid races determining
	// the pod IP in cases where a container requires restart but the
	// podIP isn't in the status manager yet.
	//
	// We default to the IP in the passed-in pod status, and overwrite it if the
	// sandbox needs to be (re)started.
	podIP := ""
	if podStatus != nil {
		podIP = podStatus.IP
	}

	// Step 4: Create a sandbox for the pod if necessary.
	podSandboxID := podContainerChanges.SandboxID
	if podContainerChanges.CreateSandbox && len(podContainerChanges.ContainersToStart) > 0 {
		var msg string
		var err error

		glog.V(4).Infof("Creating sandbox for pod %q", format.Pod(pod))
		createSandboxResult := kubecontainer.NewSyncResult(kubecontainer.CreatePodSandbox, format.Pod(pod))
		result.AddSyncResult(createSandboxResult)
		podSandboxID, msg, err = m.createPodSandbox(pod, podContainerChanges.Attempt)
		if err != nil {
			createSandboxResult.Fail(kubecontainer.ErrCreatePodSandbox, msg)
			glog.Errorf("createPodSandbox for pod %q failed: %v", format.Pod(pod), err)
			return
		}

		setupNetworkResult := kubecontainer.NewSyncResult(kubecontainer.SetupNetwork, podSandboxID)
		result.AddSyncResult(setupNetworkResult)
		if !kubecontainer.IsHostNetworkPod(pod) {
			glog.V(3).Infof("Calling network plugin %s to setup pod for %s", m.networkPlugin.Name(), format.Pod(pod))
			// Setup pod network plugin with sandbox id
			// TODO: rename the last param to sandboxID
			err = m.networkPlugin.SetUpPod(pod.Namespace, pod.Name, kubecontainer.ContainerID{
				Type: m.runtimeName,
				ID:   podSandboxID,
			})
			if err != nil {
				message := fmt.Sprintf("Failed to setup network for pod %q using network plugins %q: %v", format.Pod(pod), m.networkPlugin.Name(), err)
				setupNetworkResult.Fail(kubecontainer.ErrSetupNetwork, message)
				glog.Error(message)

				// Network setup failed: stop the freshly-created sandbox so it
				// does not linger half-configured.
				killPodSandboxResult := kubecontainer.NewSyncResult(kubecontainer.KillPodSandbox, format.Pod(pod))
				result.AddSyncResult(killPodSandboxResult)
				if err := m.runtimeService.StopPodSandbox(podSandboxID); err != nil {
					killPodSandboxResult.Fail(kubecontainer.ErrKillPodSandbox, err.Error())
					glog.Errorf("Kill sandbox %q failed for pod %q: %v", podSandboxID, format.Pod(pod), err)
				}
				return
			}

			podSandboxStatus, err := m.runtimeService.PodSandboxStatus(podSandboxID)
			if err != nil {
				glog.Errorf("Failed to get pod sandbox status: %v; Skipping pod %q", err, format.Pod(pod))
				result.Fail(err)
				return
			}

			// Overwrite the podIP passed in the pod status, since we just
			// started the infra container.
			podIP = m.determinePodSandboxIP(pod.Namespace, pod.Name, podSandboxStatus)
			glog.V(4).Infof("Determined the ip %q for pod %q after sandbox changed", podIP, format.Pod(pod))
		}
	}

	// Get podSandboxConfig for containers to start.
	configPodSandboxResult := kubecontainer.NewSyncResult(kubecontainer.ConfigPodSandbox, podSandboxID)
	result.AddSyncResult(configPodSandboxResult)
	podSandboxConfig, err := m.generatePodSandboxConfig(pod, podContainerChanges.Attempt)
	if err != nil {
		message := fmt.Sprintf("GeneratePodSandboxConfig for pod %q failed: %v", format.Pod(pod), err)
		glog.Error(message)
		configPodSandboxResult.Fail(kubecontainer.ErrConfigPodSandbox, message)
		return
	}

	// Step 5: start init containers.
	status, next, done := findNextInitContainerToRun(pod, podStatus)
	if status != nil && status.ExitCode != 0 {
		// container initialization has failed, flag the pod as failed
		initContainerResult := kubecontainer.NewSyncResult(kubecontainer.InitContainer, status.Name)
		initContainerResult.Fail(kubecontainer.ErrRunInitContainer, fmt.Sprintf("init container %q exited with %d", status.Name, status.ExitCode))
		result.AddSyncResult(initContainerResult)
		if pod.Spec.RestartPolicy == api.RestartPolicyNever {
			utilruntime.HandleError(fmt.Errorf("error running pod %q init container %q, restart=Never: %#v", format.Pod(pod), status.Name, status))
			return
		}
		utilruntime.HandleError(fmt.Errorf("Error running pod %q init container %q, restarting: %#v", format.Pod(pod), status.Name, status))
	}
	if next != nil {
		if len(podContainerChanges.ContainersToStart) == 0 {
			glog.V(4).Infof("No containers to start, stopping at init container %+v in pod %v", next.Name, format.Pod(pod))
			return
		}

		// If we need to start the next container, do so now then exit
		container := next
		startContainerResult := kubecontainer.NewSyncResult(kubecontainer.StartContainer, container.Name)
		result.AddSyncResult(startContainerResult)

		isInBackOff, msg, err := m.doBackOff(pod, container, podStatus, backOff)
		if isInBackOff {
			startContainerResult.Fail(err, msg)
			glog.V(4).Infof("Backing Off restarting init container %+v in pod %v", container, format.Pod(pod))
			return
		}

		glog.V(4).Infof("Creating init container %+v in pod %v", container, format.Pod(pod))
		if msg, err := m.startContainer(podSandboxID, podSandboxConfig, container, pod, podStatus, pullSecrets, podIP); err != nil {
			startContainerResult.Fail(err, msg)
			utilruntime.HandleError(fmt.Errorf("init container start failed: %v: %s", err, msg))
			return
		}

		// Successfully started the container; clear the entry in the failure
		glog.V(4).Infof("Completed init container %q for pod %q", container.Name, format.Pod(pod))
		return
	}
	if !done {
		// init container still running
		glog.V(4).Infof("An init container is still running in pod %v", format.Pod(pod))
		return
	}
	if podContainerChanges.InitFailed {
		glog.V(4).Infof("Not all init containers have succeeded for pod %v", format.Pod(pod))
		return
	}

	// Step 6: start containers in podContainerChanges.ContainersToStart.
	for idx := range podContainerChanges.ContainersToStart {
		container := &pod.Spec.Containers[idx]
		startContainerResult := kubecontainer.NewSyncResult(kubecontainer.StartContainer, container.Name)
		result.AddSyncResult(startContainerResult)

		isInBackOff, msg, err := m.doBackOff(pod, container, podStatus, backOff)
		if isInBackOff {
			startContainerResult.Fail(err, msg)
			glog.V(4).Infof("Backing Off restarting container %+v in pod %v", container, format.Pod(pod))
			continue
		}

		glog.V(4).Infof("Creating container %+v in pod %v", container, format.Pod(pod))
		if msg, err := m.startContainer(podSandboxID, podSandboxConfig, container, pod, podStatus, pullSecrets, podIP); err != nil {
			startContainerResult.Fail(err, msg)
			utilruntime.HandleError(fmt.Errorf("container start failed: %v: %s", err, msg))
			continue
		}
	}

	return
}

// If a container is still in backoff, the function will return a brief backoff error and
// a detailed error message.
func (m *kubeGenericRuntimeManager) doBackOff(pod *api.Pod, container *api.Container, podStatus *kubecontainer.PodStatus, backOff *flowcontrol.Backoff) (bool, string, error) {
	// Find the most recent exited status for this container; if it never
	// exited, there is nothing to back off from.
	var cStatus *kubecontainer.ContainerStatus
	for _, c := range podStatus.ContainerStatuses {
		if c.Name == container.Name && c.State == kubecontainer.ContainerStateExited {
			cStatus = c
			break
		}
	}
	if cStatus == nil {
		return false, "", nil
	}

	glog.Infof("checking backoff for container %q in pod %q", container.Name, format.Pod(pod))
	// Use the finished time of the latest exited container as the start point to calculate whether to do back-off.
	ts := cStatus.FinishedAt
	// backOff requires a unique key to identify the container.
	key := getStableKey(pod, container)
	if backOff.IsInBackOffSince(key, ts) {
		if ref, err := kubecontainer.GenerateContainerRef(pod, container); err == nil {
			m.recorder.Eventf(ref, api.EventTypeWarning, events.BackOffStartContainer, "Back-off restarting failed container")
		}
		err := fmt.Errorf("Back-off %s restarting failed container=%s pod=%s", backOff.Get(key), container.Name, format.Pod(pod))
		glog.Infof("%s", err.Error())
		return true, err.Error(), kubecontainer.ErrCrashLoopBackOff
	}

	// Not in back-off: advance the back-off window for the next failure.
	backOff.Next(key, ts)
	return false, "", nil
}

// KillPod kills all the containers of a pod. Pod may be nil, running pod must not be.
// gracePeriodOverride if specified allows the caller to override the pod default grace period.
// only hard kill paths are allowed to specify a gracePeriodOverride in the kubelet in order to not corrupt user data.
// it is useful when doing SIGKILL for hard eviction scenarios, or max grace period during soft eviction scenarios.
func (m *kubeGenericRuntimeManager) KillPod(pod *api.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) error {
	err := m.killPodWithSyncResult(pod, runningPod, gracePeriodOverride)
	// Collapse the per-step sync results into a single error for the caller.
	return err.Error()
}

// killPodWithSyncResult kills a runningPod and returns SyncResult.
// Note: The pod passed in could be *nil* when kubelet restarted.
func (m *kubeGenericRuntimeManager) killPodWithSyncResult(pod *api.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) (result kubecontainer.PodSyncResult) {
	killContainerResults := m.killContainersWithSyncResult(pod, runningPod, gracePeriodOverride)
	for _, containerResult := range killContainerResults {
		result.AddSyncResult(containerResult)
	}

	// Teardown network plugin
	if len(runningPod.Sandboxes) == 0 {
		glog.V(4).Infof("Can not find pod sandbox by UID %q, assuming already removed.", runningPod.ID)
		return
	}

	// Index 0 is treated as the latest sandbox elsewhere in this file; tear
	// down networking against it.
	sandboxID := runningPod.Sandboxes[0].ID.ID
	isHostNetwork, err := m.isHostNetwork(sandboxID, pod)
	if err != nil {
		result.Fail(err)
		return
	}
	if !isHostNetwork {
		teardownNetworkResult := kubecontainer.NewSyncResult(kubecontainer.TeardownNetwork, runningPod.ID)
		result.AddSyncResult(teardownNetworkResult)
		// Tear down network plugin with sandbox id
		if err := m.networkPlugin.TearDownPod(runningPod.Namespace, runningPod.Name, kubecontainer.ContainerID{
			Type: m.runtimeName,
			ID:   sandboxID,
		}); err != nil {
			// Teardown failure is recorded but does not abort sandbox stop below.
			message := fmt.Sprintf("Failed to teardown network for pod %s_%s(%s) using network plugins %q: %v",
				runningPod.Name, runningPod.Namespace, runningPod.ID, m.networkPlugin.Name(), err)
			teardownNetworkResult.Fail(kubecontainer.ErrTeardownNetwork, message)
			glog.Error(message)
		}
	}

	// stop sandbox, the sandbox will be removed in GarbageCollect
	killSandboxResult := kubecontainer.NewSyncResult(kubecontainer.KillPodSandbox, runningPod.ID)
	result.AddSyncResult(killSandboxResult)
	// Stop all sandboxes belongs to same pod
	for _, podSandbox := range runningPod.Sandboxes {
		if err := m.runtimeService.StopPodSandbox(podSandbox.ID.ID); err != nil {
			killSandboxResult.Fail(kubecontainer.ErrKillPodSandbox, err.Error())
			glog.Errorf("Failed to stop sandbox %q", podSandbox.ID)
		}
	}

	return
}

// isHostNetwork checks whether the pod is running in host-network mode.
func (m *kubeGenericRuntimeManager) isHostNetwork(podSandBoxID string, pod *api.Pod) (bool, error) {
	// Prefer the pod spec when available; fall back to the runtime-reported
	// sandbox status when the pod is nil (e.g. after a kubelet restart).
	if pod != nil {
		return kubecontainer.IsHostNetworkPod(pod), nil
	}

	podStatus, err := m.runtimeService.PodSandboxStatus(podSandBoxID)
	if err != nil {
		return false, err
	}

	if podStatus.Linux != nil && podStatus.Linux.Namespaces != nil && podStatus.Linux.Namespaces.Options != nil {
		if podStatus.Linux.Namespaces.Options.HostNetwork != nil {
			return podStatus.Linux.Namespaces.Options.GetHostNetwork(), nil
		}
	}

	// Namespace information absent: assume non-host networking.
	return false, nil
}

// GetPodStatus retrieves the status of the pod, including the
// information of all containers in the pod that are visible in Runtime.
func (m *kubeGenericRuntimeManager) GetPodStatus(uid kubetypes.UID, name, namespace string) (*kubecontainer.PodStatus, error) {
	// Now we retain restart count of container as a container label. Each time a container
	// restarts, pod will read the restart count from the registered dead container, increment
	// it to get the new restart count, and then add a label with the new restart count on
	// the newly started container.
	// However, there are some limitations of this method:
	//	1. When all dead containers were garbage collected, the container status could
	//	not get the historical value and would be *inaccurate*. Fortunately, the chance
	//	is really slim.
	//	2. When working with old version containers which have no restart count label,
	//	we can only assume their restart count is 0.
	// Anyhow, we only promised "best-effort" restart count reporting, we can just ignore
	// these limitations now.
	// TODO: move this comment to SyncPod.
	podSandboxIDs, err := m.getSandboxIDByPodUID(string(uid), nil)
	if err != nil {
		return nil, err
	}

	// Build a synthetic pod only for consistent log formatting.
	podFullName := format.Pod(&api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name:      name,
			Namespace: namespace,
			UID:       uid,
		},
	})
	glog.V(4).Infof("getSandboxIDByPodUID got sandbox IDs %q for pod %q", podSandboxIDs, podFullName)

	sandboxStatuses := make([]*runtimeApi.PodSandboxStatus, len(podSandboxIDs))
	podIP := ""
	for idx, podSandboxID := range podSandboxIDs {
		podSandboxStatus, err := m.runtimeService.PodSandboxStatus(podSandboxID)
		if err != nil {
			glog.Errorf("PodSandboxStatus of sandbox %q for pod %q error: %v", podSandboxID, podFullName, err)
			return nil, err
		}
		sandboxStatuses[idx] = podSandboxStatus

		// Only get pod IP from latest sandbox
		if idx == 0 && podSandboxStatus.GetState() == runtimeApi.PodSandBoxState_READY {
			podIP = m.determinePodSandboxIP(namespace, name, podSandboxStatus)
		}
	}

	// Get statuses of all containers visible in the pod.
	containerStatuses, err := m.getPodContainerStatuses(uid, name, namespace)
	if err != nil {
		glog.Errorf("getPodContainerStatuses for pod %q failed: %v", podFullName, err)
		return nil, err
	}

	return &kubecontainer.PodStatus{
		ID:                uid,
		Name:              name,
		Namespace:         namespace,
		IP:                podIP,
		SandboxStatuses:   sandboxStatuses,
		ContainerStatuses: containerStatuses,
	}, nil
}

// Returns the filesystem path of the pod's network namespace; if the
// runtime does not handle namespace creation itself, or cannot return
// the network namespace path, it returns an 'not supported' error.
// TODO: Rename param name to sandboxID in kubecontainer.Runtime.GetNetNS().
// TODO: Remove GetNetNS after networking is delegated to the container runtime.
func (m *kubeGenericRuntimeManager) GetNetNS(sandboxID kubecontainer.ContainerID) (string, error) { filter := &runtimeApi.PodSandboxFilter{ Id: &sandboxID.ID, LabelSelector: map[string]string{kubernetesManagedLabel: "true"}, } sandboxes, err := m.runtimeService.ListPodSandbox(filter) if err != nil { glog.Errorf("ListPodSandbox with filter %q failed: %v", filter, err) return "", err } if len(sandboxes) == 0 { glog.Errorf("No sandbox is found with filter %q", filter) return "", fmt.Errorf("Sandbox %q is not found", sandboxID) } sandboxStatus, err := m.runtimeService.PodSandboxStatus(sandboxes[0].GetId()) if err != nil { glog.Errorf("PodSandboxStatus with id %q failed: %v", sandboxes[0].GetId(), err) return "", err } if sandboxStatus.Linux != nil && sandboxStatus.Linux.Namespaces != nil { return sandboxStatus.Linux.Namespaces.GetNetwork(), nil } return "", fmt.Errorf("not supported") } // GarbageCollect removes dead containers using the specified container gc policy. func (m *kubeGenericRuntimeManager) GarbageCollect(gcPolicy kubecontainer.ContainerGCPolicy, allSourcesReady bool) error { return m.containerGC.GarbageCollect(gcPolicy, allSourcesReady) } // GetPodContainerID gets pod sandbox ID func (m *kubeGenericRuntimeManager) GetPodContainerID(pod *kubecontainer.Pod) (kubecontainer.ContainerID, error) { formattedPod := kubecontainer.FormatPod(pod) if len(pod.Sandboxes) == 0 { glog.Errorf("No sandboxes are found for pod %q", formattedPod) return kubecontainer.ContainerID{}, fmt.Errorf("sandboxes for pod %q not found", formattedPod) } // return sandboxID of the first sandbox since it is the latest one return pod.Sandboxes[0].ID, nil } // Forward the specified port from the specified pod to the stream. 
func (m *kubeGenericRuntimeManager) PortForward(pod *kubecontainer.Pod, port uint16, stream io.ReadWriteCloser) error { formattedPod := kubecontainer.FormatPod(pod) if len(pod.Sandboxes) == 0 { glog.Errorf("No sandboxes are found for pod %q", formattedPod) return fmt.Errorf("sandbox for pod %q not found", formattedPod) } // Use docker portforward directly for in-process docker integration // now to unblock other tests. // TODO: remove this hack after portforward is defined in CRI. if ds, ok := m.runtimeService.(dockershim.DockerLegacyService); ok { return ds.LegacyPortForward(pod.Sandboxes[0].ID.ID, port, stream) } return fmt.Errorf("not implemented") }
kshafiee/kubernetes
pkg/kubelet/kuberuntime/kuberuntime_manager.go
GO
apache-2.0
38,780
/* $NetBSD: limits.h,v 1.25 2014/04/21 10:53:47 matt Exp $ */ /* * Copyright (c) 1988 The Regents of the University of California. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
 *
 *	@(#)limits.h	7.2 (Berkeley) 6/28/90
 */

/*
 * Machine-dependent integral and floating-point limits for i386
 * (ILP32: int, long and pointers are all 32 bits wide).
 */

#ifndef	_I386_LIMITS_H_
#define	_I386_LIMITS_H_

#include <sys/featuretest.h>

#define	CHAR_BIT	8		/* number of bits in a char */

#define	UCHAR_MAX	0xff		/* max value for an unsigned char */
#define	SCHAR_MAX	0x7f		/* max value for a signed char */
#define	SCHAR_MIN	(-0x7f-1)	/* min value for a signed char */

#define	USHRT_MAX	0xffff		/* max value for an unsigned short */
#define	SHRT_MAX	0x7fff		/* max value for a short */
#define	SHRT_MIN	(-0x7fff-1)	/* min value for a short */

#define	UINT_MAX	0xffffffffU	/* max value for an unsigned int */
#define	INT_MAX		0x7fffffff	/* max value for an int */
#define	INT_MIN		(-0x7fffffff-1)	/* min value for an int */

#define	ULONG_MAX	0xffffffffUL	/* max value for an unsigned long */
#define	LONG_MAX	0x7fffffffL	/* max value for a long */
#define	LONG_MIN	(-0x7fffffffL-1) /* min value for a long */

#if defined(_POSIX_C_SOURCE) || defined(_XOPEN_SOURCE) || \
    defined(_NETBSD_SOURCE)
#define	SSIZE_MAX	INT_MAX		/* max value for a ssize_t */

/* long long limits: C99 (or NetBSD-native) compilation environments only */
#if defined(_ISOC99_SOURCE) || (__STDC_VERSION__ - 0) >= 199901L || \
    defined(_NETBSD_SOURCE)
#define	ULLONG_MAX	0xffffffffffffffffULL	/* max unsigned long long */
#define	LLONG_MAX	0x7fffffffffffffffLL	/* max signed long long */
#define	LLONG_MIN	(-0x7fffffffffffffffLL-1) /* min signed long long */
#endif

#if defined(_NETBSD_SOURCE)
#define	SSIZE_MIN	INT_MIN		/* min value for a ssize_t */
#define	SIZE_T_MAX	UINT_MAX	/* max value for a size_t */

/* "quad" is the traditional BSD name for the 64-bit integer type */
#define	UQUAD_MAX	0xffffffffffffffffULL	/* max unsigned quad */
#define	QUAD_MAX	0x7fffffffffffffffLL	/* max signed quad */
#define	QUAD_MIN	(-0x7fffffffffffffffLL-1) /* min signed quad */
#endif /* _NETBSD_SOURCE */
#endif /* _POSIX_C_SOURCE || _XOPEN_SOURCE || _NETBSD_SOURCE */

#if defined(_XOPEN_SOURCE) || defined(_NETBSD_SOURCE)
#define	LONG_BIT	32		/* number of bits in a long */
#define	WORD_BIT	32		/* number of bits in an int */

/* floating-point characteristics, taken from the compiler's builtins */
#define	DBL_DIG		__DBL_DIG__
#define	DBL_MAX		__DBL_MAX__
#define	DBL_MIN		__DBL_MIN__

#define	FLT_DIG		__FLT_DIG__
#define	FLT_MAX		__FLT_MAX__
#define	FLT_MIN		__FLT_MIN__
#endif

#endif /* _I386_LIMITS_H_ */
execunix/vinos
sys/arch/i386/include/limits.h
C
apache-2.0
3,753
# gRPC Release Schedule

Below is the release schedule for gRPC [Java](https://github.com/grpc/grpc-java/releases), [Go](https://github.com/grpc/grpc-go/releases) and [Core](https://github.com/grpc/grpc/releases) and its dependent languages C++, C#, Objective-C, PHP, Python and Ruby.

Releases are scheduled every six weeks on Tuesdays on a best-effort basis. In some unavoidable situations a release may be delayed, or a language may skip a release altogether and do the next release to catch up with other languages. See the past releases in the links above. A six-week cycle gives us a good balance between delivering new features/fixes quickly and keeping the release overhead low.

Releases are cut from release branches. For the Core and Java repos, the release branch is cut two weeks before the scheduled release date. For Go, the branch is cut just before the release. An RC (release candidate) is published for Core and its dependent languages just after the branch cut. This RC is later promoted to the release version if no further changes are made to the release branch.

We do our best to keep the head of the master branch stable at all times, regardless of the release schedule. Daily build packages from the master branch for C#, PHP, Python, Ruby and Protoc plugins are published on [packages.grpc.io](https://packages.grpc.io/). If you depend on gRPC in production, we recommend setting up your CI system to test the RCs and, if possible, the daily builds.

Names of gRPC releases are [here](https://github.com/grpc/grpc/blob/master/doc/g_stands_for.md).
Release |Scheduled Branch Cut|Scheduled Release Date --------|--------------------|------------- v1.17.0 |Nov 19, 2018 |Dec 4, 2018 v1.18.0 |Jan 2, 2019 |Jan 15, 2019 v1.19.0 |Feb 12, 2019 |Feb 26, 2019 v1.20.0 |Mar 26, 2019 |Apr 9, 2019 v1.21.0 |May 7, 2019 |May 21, 2019 v1.22.0 |Jun 18, 2019 |Jul 2, 2019 v1.23.0 |Jul 30, 2019 |Aug 13, 2019 v1.24.0 |Sept 10, 2019 |Sept 24, 2019 v1.25.0 |Oct 22, 2019 |Nov 5, 2019 v1.26.0 |Dec 3, 2019 |Dec 17, 2019 v1.27.0 |Jan 14, 2020 |Jan 28, 2020 v1.28.0 |Feb 25, 2020 |Mar 10, 2020 v1.29.0 |Apr 7, 2020 |Apr 21, 2020 v1.30.0 |May 19, 2020 |Jun 2, 2020 v1.31.0 |Jun 30, 2020 |Jul 14, 2020 v1.32.0 |Aug 11, 2020 |Aug 25, 2020 v1.33.0 |Sept 22, 2020 |Oct 6, 2020 v1.34.0 |Nov 3, 2020 |Nov 17, 2020 v1.35.0 |Dec 15, 2020 |Dec 29, 2020 v1.36.0 |Jan 26, 2021 |Feb 9, 2021
firebase/grpc-SwiftPM
doc/grpc_release_schedule.md
Markdown
apache-2.0
2,376
/*
 * ====================================================================
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 * ====================================================================
 *
 * This software consists of voluntary contributions made by many
 * individuals on behalf of the Apache Software Foundation.  For more
 * information on the Apache Software Foundation, please see
 * <http://www.apache.org/>.
 *
 */

package tech.gusavila92.apache.http.entity;

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.UnsupportedEncodingException;
import java.nio.charset.Charset;
import java.nio.charset.UnsupportedCharsetException;

import tech.gusavila92.apache.http.protocol.HTTP;
import tech.gusavila92.apache.http.util.Args;

/**
 * A self contained, repeatable entity that obtains its content from
 * a {@link String}. The string is encoded to bytes once at construction
 * time, so the entity can be written or read any number of times.
 *
 * @since 4.0
 */
public class StringEntity extends AbstractHttpEntity implements Cloneable {

    /** The encoded representation of the source string; never modified after construction. */
    protected final byte[] content;

    /**
     * Creates a StringEntity with the specified content and content type.
     *
     * @param string content to be used. Not {@code null}.
     * @param contentType content type to be used. May be {@code null}, in which case the default
     *   MIME type {@link ContentType#TEXT_PLAIN} is assumed.
     *
     * @throws IllegalArgumentException if the string parameter is null
     * @throws UnsupportedCharsetException Thrown when the named charset is not available in
     * this instance of the Java virtual machine
     * @since 4.2
     */
    public StringEntity(final String string, final ContentType contentType) throws UnsupportedCharsetException {
        super();
        Args.notNull(string, "Source string");
        // Resolve the charset: content type first, then the protocol default.
        Charset effectiveCharset = contentType == null ? null : contentType.getCharset();
        if (effectiveCharset == null) {
            effectiveCharset = HTTP.DEF_CONTENT_CHARSET;
        }
        this.content = string.getBytes(effectiveCharset);
        if (contentType != null) {
            setContentType(contentType.toString());
        }
    }

    /**
     * Creates a StringEntity with the specified content, MIME type and charset
     *
     * @param string content to be used. Not {@code null}.
     * @param mimeType MIME type to be used. May be {@code null}, in which case the default
     *   is {@link HTTP#PLAIN_TEXT_TYPE} i.e. "text/plain"
     * @param charset character set to be used. May be {@code null}, in which case the default
     *   is {@link HTTP#DEF_CONTENT_CHARSET} i.e. "ISO-8859-1"
     * @throws UnsupportedEncodingException If the named charset is not supported.
     *
     * @since 4.1
     * @throws IllegalArgumentException if the string parameter is null
     *
     * @deprecated (4.1.3) use {@link #StringEntity(String, ContentType)}
     */
    @Deprecated
    public StringEntity(
            final String string, final String mimeType, final String charset) throws UnsupportedEncodingException {
        super();
        Args.notNull(string, "Source string");
        // Substitute the protocol defaults for any argument left null.
        String effectiveMime = HTTP.PLAIN_TEXT_TYPE;
        if (mimeType != null) {
            effectiveMime = mimeType;
        }
        String effectiveCharset = HTTP.DEFAULT_CONTENT_CHARSET;
        if (charset != null) {
            effectiveCharset = charset;
        }
        this.content = string.getBytes(effectiveCharset);
        setContentType(effectiveMime + HTTP.CHARSET_PARAM + effectiveCharset);
    }

    /**
     * Creates a StringEntity with the specified content and charset. The MIME type defaults
     * to "text/plain".
     *
     * @param string content to be used. Not {@code null}.
     * @param charset character set to be used. May be {@code null}, in which case the default
     *   is {@link HTTP#DEF_CONTENT_CHARSET} is assumed
     *
     * @throws IllegalArgumentException if the string parameter is null
     * @throws UnsupportedCharsetException Thrown when the named charset is not available in
     * this instance of the Java virtual machine
     */
    public StringEntity(final String string, final String charset) throws UnsupportedCharsetException {
        this(string, ContentType.create(ContentType.TEXT_PLAIN.getMimeType(), charset));
    }

    /**
     * Creates a StringEntity with the specified content and charset. The MIME type defaults
     * to "text/plain".
     *
     * @param string content to be used. Not {@code null}.
     * @param charset character set to be used. May be {@code null}, in which case the default
     *   is {@link HTTP#DEF_CONTENT_CHARSET} is assumed
     *
     * @throws IllegalArgumentException if the string parameter is null
     *
     * @since 4.2
     */
    public StringEntity(final String string, final Charset charset) {
        this(string, ContentType.create(ContentType.TEXT_PLAIN.getMimeType(), charset));
    }

    /**
     * Creates a StringEntity with the specified content. The content type defaults to
     * {@link ContentType#TEXT_PLAIN}.
     *
     * @param string content to be used. Not {@code null}.
     *
     * @throws IllegalArgumentException if the string parameter is null
     * @throws UnsupportedEncodingException if the default HTTP charset is not supported.
     */
    public StringEntity(final String string) throws UnsupportedEncodingException {
        this(string, ContentType.DEFAULT_TEXT);
    }

    /**
     * Always repeatable: the content lives in an immutable byte array.
     */
    @Override
    public boolean isRepeatable() {
        return true;
    }

    @Override
    public long getContentLength() {
        return this.content.length;
    }

    @Override
    public InputStream getContent() throws IOException {
        // A fresh stream over the shared buffer on every call.
        return new ByteArrayInputStream(this.content);
    }

    @Override
    public void writeTo(final OutputStream outstream) throws IOException {
        Args.notNull(outstream, "Output stream");
        outstream.write(this.content);
        outstream.flush();
    }

    /**
     * Tells that this entity is not streaming.
     *
     * @return {@code false}
     */
    @Override
    public boolean isStreaming() {
        return false;
    }

    @Override
    public Object clone() throws CloneNotSupportedException {
        return super.clone();
    }

} // class StringEntity
gusavila92/java-android-websocket-client
src/main/java/tech/gusavila92/apache/http/entity/StringEntity.java
Java
apache-2.0
6,848
// Copyright 2015 ACENSI http://www.acensi.fr/
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

using System.Collections.Generic;
using VarProcess.Data;

namespace VarProcess.Providers
{
    /// <summary>
    /// Abstraction over a source of <see cref="ProductParameters"/> instances.
    /// Implementations decide where the parameters come from (file, database,
    /// service, ...); consumers only enumerate the result.
    /// </summary>
    public interface IProductParametersProvider
    {
        /// <summary>
        /// Gets the parameters of the available products.
        /// NOTE(review): presumably one entry per product — confirm against
        /// concrete implementations, which are not visible here.
        /// </summary>
        IEnumerable<ProductParameters> ProductsParameters { get; }
    }
}
julien-lebot/techdays-2015
VarProcess.Providers/IProductParametersProvider.cs
C#
apache-2.0
830
<html dir="LTR"> <head> <meta http-equiv="Content-Type" content="text/html; charset=Windows-1252" /> <meta name="vs_targetSchema" content="http://schemas.microsoft.com/intellisense/ie5" /> <title>LockVerifyServer Class</title> <xml> </xml> <link rel="stylesheet" type="text/css" href="MSDN.css" /> </head> <body id="bodyID" class="dtBODY"> <div id="nsbanner"> <div id="bannerrow1"> <table class="bannerparthead" cellspacing="0"> <tr id="hdr"> <td class="runninghead">Apache Lucene.Net 2.4.0 Class Library API</td> <td class="product"> </td> </tr> </table> </div> <div id="TitleRow"> <h1 class="dtH1">LockVerifyServer Class</h1> </div> </div> <div id="nstext"> <p> Simple standalone server that must be running when you use {@link VerifyingLockFactory}. This server simply verifies at most one process holds the lock at a time. Run without any args to see usage. </p> <p>For a list of all members of this type, see <a href="Lucene.Net.Store.LockVerifyServerMembers.html">LockVerifyServer Members</a>.</p> <p> <a href="ms-help://MS.NETFrameworkSDKv1.1/cpref/html/frlrfSystemObjectClassTopic.htm">System.Object</a> <br />   <b>Lucene.Net.Store.LockVerifyServer</b></p> <div class="syntax"> <div>public class LockVerifyServer</div> </div> <H4 class="dtH4">Thread Safety</H4> <P>Public static (<b>Shared</b> in Visual Basic) members of this type are safe for multithreaded operations. 
Instance members are <b>not</b> guaranteed to be thread-safe.</P>
		<h4 class="dtH4">Requirements</h4>
		<p>
			<b>Namespace: </b>
			<a href="Lucene.Net.Store.html">Lucene.Net.Store</a>
		</p>
		<p>
			<b>Assembly: </b>Lucene.Net (in Lucene.Net.dll)
		</p>
		<h4 class="dtH4">See Also</h4>
		<p>
			<a href="Lucene.Net.Store.LockVerifyServerMembers.html">LockVerifyServer Members</a> |
			<a href="Lucene.Net.Store.html">Lucene.Net.Store Namespace</a> |
			<a href="Lucene.Net.Store.VerifyingLockFactory.html">VerifyingLockFactory Class</a> |
			<a href="Lucene.Net.Store.LockStressTest.html">LockStressTest Class</a></p>
		<object type="application/x-oleobject" classid="clsid:1e2a7bd0-dab9-11d0-b93a-00c04fc99f9e" viewastext="true" style="display: none;">
			<param name="Keyword" value="LockVerifyServer class, about LockVerifyServer class">
			</param>
		</object>
		<hr />
		<div id="footer">
			<p>
			</p>
			<p>Generated from assembly Lucene.Net [2.4.0.2]</p>
		</div>
	</div>
</body>
</html>
Mpdreamz/lucene.net
doc/core/Lucene.Net.Store.LockVerifyServer.html
HTML
apache-2.0
2,733
<!doctype html>
<html>
	<head>
		<title>md5 - Raspberry Pi</title>
		<link rel="stylesheet" href="assets/css/style.css">
		<link rel="icon" href="assets/images/favicon.ico">
		<meta name="viewport" content="width=device-width, user-scalable=no">
	</head>
	<body>
		<div class="container">
			<?php
				// Single source of truth for the hash-input form; previously this
				// markup was duplicated verbatim in both branches below, so the two
				// copies could silently drift apart.
				$form = '
					<div class="block">
						<h2>Create Hash</h2>
						<form method="post" action="">
							<input type="password" class="fullwidth" name="p" placeholder="Text to hash">
						</form>
					</div>';

				if (isset($_POST["p"])) {
					// POST: show the md5 digest of the submitted text next to a fresh
					// form. md5() returns a 32-char lowercase hex string, so it is
					// safe to embed in HTML without escaping.
					echo '
					<div class="row">
						<div class="col-50">
							<div class="block">
								<h2>Result</h2>
								' . md5($_POST["p"]) . '
							</div>
						</div>
						<div class="col-50">' . $form . '
						</div>
					</div>';
				} else {
					// GET (or no input): just the form.
					echo $form;
				}
			?>
		</div>
	</body>
</html>
michaelneu/pidash
md5.php
PHP
apache-2.0
1,322
/**
 *  Licensed to the Apache Software Foundation (ASF) under one
 *  or more contributor license agreements.  See the NOTICE file
 *  distributed with this work for additional information
 *  regarding copyright ownership.  The ASF licenses this file
 *  to you under the Apache License, Version 2.0 (the
 *  "License"); you may not use this file except in compliance
 *  with the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing,
 *  software distributed under the License is distributed on an
 *  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 *  KIND, either express or implied.  See the License for the
 *  specific language governing permissions and limitations
 *  under the License.
 */
package org.codehaus.groovy.tools;

import java.io.File;

import org.codehaus.groovy.control.CompilationFailedException;
import org.codehaus.groovy.control.CompilationUnit;
import org.codehaus.groovy.control.CompilerConfiguration;
import org.codehaus.groovy.control.SourceUnit;

/**
 * A convenience front end for getting standard compilations done.
 * All compile() routines generate classes to the filesystem.
 *
 * @author <a href="mailto:[email protected]">Chris Poirier</a>
 */
public class Compiler {

    // TODO: delete this constant?
    public static final Compiler DEFAULT = new Compiler();

    // Optional configuration data; null means "use the CompilationUnit defaults".
    private CompilerConfiguration configuration = null;

    /**
     * Initializes the Compiler with default configuration.
     */
    public Compiler() {
        // configuration stays null (field default): CompilationUnit will then
        // fall back to its own defaults.
    }

    /**
     * Initializes the Compiler with the specified configuration.
     */
    public Compiler(CompilerConfiguration configuration) {
        this.configuration = configuration;
    }

    /**
     * Creates a fresh CompilationUnit bound to this Compiler's configuration.
     * Extracted because every compile() overload previously duplicated this
     * construction inline.
     */
    private CompilationUnit newCompilationUnit() {
        return new CompilationUnit(configuration);
    }

    /**
     * Compiles a single File.
     *
     * @throws CompilationFailedException if the source does not compile
     */
    public void compile(File file) throws CompilationFailedException {
        CompilationUnit unit = newCompilationUnit();
        unit.addSource(file);
        unit.compile();
    }

    /**
     * Compiles a series of Files.
     *
     * @throws CompilationFailedException if any source does not compile
     */
    public void compile(File[] files) throws CompilationFailedException {
        CompilationUnit unit = newCompilationUnit();
        unit.addSources(files);
        unit.compile();
    }

    /**
     * Compiles a series of Files from file names.
     *
     * @throws CompilationFailedException if any source does not compile
     */
    public void compile(String[] files) throws CompilationFailedException {
        CompilationUnit unit = newCompilationUnit();
        unit.addSources(files);
        unit.compile();
    }

    /**
     * Compiles a string of code under the given source name.
     *
     * @throws CompilationFailedException if the code does not compile
     */
    public void compile(String name, String code) throws CompilationFailedException {
        CompilationUnit unit = newCompilationUnit();
        unit.addSource(new SourceUnit(name, code, configuration, unit.getClassLoader(), unit.getErrorCollector()));
        unit.compile();
    }
}
OpenBEL/bel-nav
tools/groovy/src/src/main/org/codehaus/groovy/tools/Compiler.java
Java
apache-2.0
3,108
package com.ftfl.icare.fragment;

import java.util.ArrayList;
import java.util.List;

import android.content.res.Resources;
import android.os.Bundle;
import android.support.v4.app.Fragment;
import android.support.v4.app.FragmentManager;
import android.support.v4.app.FragmentTransaction;
import android.view.KeyEvent;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.AdapterView;
import android.widget.ImageButton;
import android.widget.ListView;

import com.ftfl.icare.HomeActivity;
import com.ftfl.icare.R;
import com.ftfl.icare.adapter.DoctorCustomAdapter;
import com.ftfl.icare.database.DoctorProfileDataSource;
import com.ftfl.icare.util.DoctorProfile;
import com.ftfl.icare.util.ICareConstants;

/**
 * Fragment that lists stored doctor profiles and lets the user open one
 * (switches to a profile-view fragment) or create a new one (delegates to
 * {@link HomeActivity#SelectItem}).
 */
public class FragmentDoctor extends Fragment {

	// "Create doctor" toolbar button; bound in onCreateView.
	ImageButton mibCreateDoctor = null;
	// Profiles loaded from the local database; backs the list adapter.
	List<DoctorProfile> profileList = new ArrayList<DoctorProfile>();
	// Parallel lists derived from profileList (same index order).
	List<String> doctorNameList = new ArrayList<String>();
	List<String> doctorIdList = new ArrayList<String>();
	ListView mListView = null;
	// Reused to pass the selected doctor's id to the detail fragment.
	Bundle bundle = new Bundle();

	// Required empty public constructor for Fragment re-instantiation.
	public FragmentDoctor() {

	}

	/**
	 * Loads all doctor profiles from the database and fills the parallel
	 * name/id lists. Appends to the lists; callers clear them first.
	 */
	public void setData() {
		DoctorProfileDataSource doctorDS = new DoctorProfileDataSource(
				getActivity());
		profileList = doctorDS.doctorProfileList();
		for (int i = 0; i < profileList.size(); i++) {
			doctorNameList.add(profileList.get(i).getName());
			doctorIdList.add(profileList.get(i).getId());
		}
	}

	/**
	 * Inflates the doctor-list layout, wires the create button, populates the
	 * list, and installs item-click and back-key handlers.
	 */
	@Override
	public View onCreateView(LayoutInflater inflater, ViewGroup container,
			Bundle savedInstanceState) {
		View view = inflater.inflate(R.layout.fragment_layout_doctor,
				container, false);

		mibCreateDoctor = (ImageButton) view
				.findViewById(R.id.button_create_doctor);

		mibCreateDoctor.setOnClickListener(new View.OnClickListener() {
			public void onClick(View v) {
				// Navigation is delegated to the host activity; 31 is the
				// "new doctor" screen id (see HomeActivity.SelectItem).
				// ((HomeActivity)getActivity()).SelectItem(33);
				((HomeActivity) getActivity()).SelectItem(31);
				/*
				 * FragmentNewDoctor fragmentNewNote = new FragmentNewDoctor();
				 *
				 *
				 * FragmentManager fragmentManager = getFragmentManager();
				 *
				 * FragmentTransaction fragmentTransaction =
				 * fragmentManager.beginTransaction();
				 * fragmentTransaction.replace(R.id.content_frame,
				 * fragmentNewNote); fragmentTransaction.addToBackStack(null);
				 * fragmentTransaction.commit();
				 */
			}
		});

		mListView = (ListView) view.findViewById(R.id.list_doctor);
		// Clear before reload so re-entering the fragment does not duplicate
		// entries (setData appends). NOTE(review): doctorIdList is not
		// cleared here — ids may accumulate across view recreations; verify.
		profileList.clear();
		doctorNameList.clear();
		setData();
		Resources res = getResources();
		DoctorCustomAdapter adapter = new DoctorCustomAdapter(getActivity(),
				profileList, res);
		mListView.setAdapter(adapter);

		mListView.setOnItemClickListener(new AdapterView.OnItemClickListener() {
			@Override
			public void onItemClick(AdapterView<?> parent, View view,
					int position, long id) {
				// Remember which profile to edit, then swap in the detail
				// fragment, passing the doctor id both via the activity and
				// via fragment arguments.
				((HomeActivity) getActivity()).mUpdatePageName = ICareConstants.UPDATE_DOCTOR_PROFILE;
				((HomeActivity) getActivity()).mId = doctorIdList.get(position);
				FragmentViewDoctorProfile fragmentViewDoctor = new FragmentViewDoctorProfile();
				bundle.putString("doctorid", doctorIdList.get(position));
				fragmentViewDoctor.setArguments(bundle);
				FragmentManager fragmentManager = getFragmentManager();
				FragmentTransaction fragmentTransaction = fragmentManager
						.beginTransaction();
				fragmentTransaction.replace(R.id.content_frame,
						fragmentViewDoctor);
				fragmentTransaction.addToBackStack(null);
				fragmentTransaction.commit();
			}
		});

		// Intercept BACK so it returns to screen 5 (home/overview) instead of
		// popping the fragment back stack.
		view.setFocusableInTouchMode(true);
		view.requestFocus();
		view.setOnKeyListener(new View.OnKeyListener() {
			@Override
			public boolean onKey(View v, int keyCode, KeyEvent event) {
				if (keyCode == KeyEvent.KEYCODE_BACK) {
					((HomeActivity) getActivity()).SelectItem(5);
					return true;
				} else {
					return false;
				}
			}
		});

		return view;
	}
}
nasser-munshi/Android
ICare/src/com/ftfl/icare/fragment/FragmentDoctor.java
Java
apache-2.0
3,878
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.flink.runtime.checkpoint;

import org.apache.curator.framework.CuratorFramework;
import org.apache.flink.runtime.concurrent.Executors;
import org.apache.flink.runtime.jobgraph.JobStatus;
import org.apache.flink.runtime.state.RetrievableStateHandle;
import org.apache.flink.runtime.zookeeper.RetrievableStateStorageHelper;
import org.apache.flink.runtime.zookeeper.ZooKeeperTestEnvironment;

import org.junit.AfterClass;
import org.junit.Before;
import org.junit.Test;

import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;

/**
 * Tests for basic {@link CompletedCheckpointStore} contract and ZooKeeper state handling.
 */
public class ZooKeeperCompletedCheckpointStoreITCase extends CompletedCheckpointStoreTest {

	// One embedded ZooKeeper server shared by all tests in this class.
	private final static ZooKeeperTestEnvironment ZooKeeper = new ZooKeeperTestEnvironment(1);

	// ZNode under which the store persists completed checkpoints.
	private final static String CheckpointsPath = "/checkpoints";

	@AfterClass
	public static void tearDown() throws Exception {
		if (ZooKeeper != null) {
			ZooKeeper.shutdown();
		}
	}

	@Before
	public void cleanUp() throws Exception {
		// Each test starts with an empty ZooKeeper tree.
		ZooKeeper.deleteAll();
	}

	/**
	 * Factory used by the base-class contract tests: a ZooKeeper-backed store
	 * whose state handles keep checkpoints on the heap (no real persistence).
	 */
	@Override
	protected AbstractCompletedCheckpointStore createCompletedCheckpoints(
			int maxNumberOfCheckpointsToRetain) throws Exception {

		return new ZooKeeperCompletedCheckpointStore(maxNumberOfCheckpointsToRetain,
			ZooKeeper.createClient(), CheckpointsPath, new RetrievableStateStorageHelper<CompletedCheckpoint>() {
			@Override
			public RetrievableStateHandle<CompletedCheckpoint> store(CompletedCheckpoint state) throws Exception {
				return new HeapRetrievableStateHandle<>(state);
			}
		}, Executors.directExecutor());
	}

	// ---------------------------------------------------------------------------------------------

	/**
	 * Tests that older checkpoints are not cleaned up right away when recovering. Only after
	 * another checkpointed has been completed the old checkpoints exceeding the number of
	 * checkpoints to retain will be removed.
	 */
	@Test
	public void testRecover() throws Exception {
		AbstractCompletedCheckpointStore checkpoints = createCompletedCheckpoints(3);

		TestCompletedCheckpoint[] expected = new TestCompletedCheckpoint[] {
			createCheckpoint(0), createCheckpoint(1), createCheckpoint(2)
		};

		// Add multiple checkpoints
		checkpoints.addCheckpoint(expected[0]);
		checkpoints.addCheckpoint(expected[1]);
		checkpoints.addCheckpoint(expected[2]);

		verifyCheckpointRegistered(expected[0].getTaskStates().values(), checkpoints.sharedStateRegistry);
		verifyCheckpointRegistered(expected[1].getTaskStates().values(), checkpoints.sharedStateRegistry);
		verifyCheckpointRegistered(expected[2].getTaskStates().values(), checkpoints.sharedStateRegistry);

		// All three should be in ZK
		assertEquals(3, ZooKeeper.getClient().getChildren().forPath(CheckpointsPath).size());
		assertEquals(3, checkpoints.getNumberOfRetainedCheckpoints());

		resetCheckpoint(expected[0].getTaskStates().values());
		resetCheckpoint(expected[1].getTaskStates().values());
		resetCheckpoint(expected[2].getTaskStates().values());

		// Recover. TODO: clear registry before recovering.
		checkpoints.recover();

		// Recovery must keep all retained checkpoints in ZK, not prune them.
		assertEquals(3, ZooKeeper.getClient().getChildren().forPath(CheckpointsPath).size());
		assertEquals(3, checkpoints.getNumberOfRetainedCheckpoints());
		assertEquals(expected[2], checkpoints.getLatestCheckpoint());

		// Adding a 4th checkpoint (retain = 3) should evict the oldest one.
		List<CompletedCheckpoint> expectedCheckpoints = new ArrayList<>(3);
		expectedCheckpoints.add(expected[1]);
		expectedCheckpoints.add(expected[2]);
		expectedCheckpoints.add(createCheckpoint(3));

		checkpoints.addCheckpoint(expectedCheckpoints.get(2));

		List<CompletedCheckpoint> actualCheckpoints = checkpoints.getAllCheckpoints();

		assertEquals(expectedCheckpoints, actualCheckpoints);

		for (CompletedCheckpoint actualCheckpoint : actualCheckpoints) {
			verifyCheckpointRegistered(actualCheckpoint.getTaskStates().values(), checkpoints.sharedStateRegistry);
		}
	}

	/**
	 * Tests that shutdown discards all checkpoints.
	 */
	@Test
	public void testShutdownDiscardsCheckpoints() throws Exception {
		CuratorFramework client = ZooKeeper.getClient();

		CompletedCheckpointStore store = createCompletedCheckpoints(1);
		TestCompletedCheckpoint checkpoint = createCheckpoint(0);

		store.addCheckpoint(checkpoint);
		assertEquals(1, store.getNumberOfRetainedCheckpoints());
		assertNotNull(client.checkExists().forPath(CheckpointsPath + "/" + checkpoint.getCheckpointID()));

		// FINISHED shutdown removes the checkpoint both locally and from ZK.
		store.shutdown(JobStatus.FINISHED);

		assertEquals(0, store.getNumberOfRetainedCheckpoints());
		assertNull(client.checkExists().forPath(CheckpointsPath + "/" + checkpoint.getCheckpointID()));

		store.recover();

		assertEquals(0, store.getNumberOfRetainedCheckpoints());
	}

	/**
	 * Tests that suspends keeps all checkpoints (as they can be recovered
	 * later by the ZooKeeper store).
	 */
	@Test
	public void testSuspendKeepsCheckpoints() throws Exception {
		CuratorFramework client = ZooKeeper.getClient();

		CompletedCheckpointStore store = createCompletedCheckpoints(1);
		TestCompletedCheckpoint checkpoint = createCheckpoint(0);

		store.addCheckpoint(checkpoint);
		assertEquals(1, store.getNumberOfRetainedCheckpoints());
		assertNotNull(client.checkExists().forPath(CheckpointsPath + "/" + checkpoint.getCheckpointID()));

		// SUSPENDED drops the local view but keeps the ZK node for later recovery.
		store.shutdown(JobStatus.SUSPENDED);

		assertEquals(0, store.getNumberOfRetainedCheckpoints());
		assertNotNull(client.checkExists().forPath(CheckpointsPath + "/" + checkpoint.getCheckpointID()));

		// Recover again
		store.recover();

		CompletedCheckpoint recovered = store.getLatestCheckpoint();
		assertEquals(checkpoint, recovered);
	}

	/**
	 * Trivial in-memory state handle: "retrieving" just returns the stored
	 * object; discarding nulls the reference. Serializable so it can live in
	 * the ZooKeeper-backed store during tests.
	 */
	static class HeapRetrievableStateHandle<T extends Serializable> implements RetrievableStateHandle<T> {

		private static final long serialVersionUID = -268548467968932L;

		public HeapRetrievableStateHandle(T state) {
			this.state = state;
		}

		private T state;

		@Override
		public T retrieveState() throws Exception {
			return state;
		}

		@Override
		public void discardState() throws Exception {
			state = null;
		}

		@Override
		public long getStateSize() {
			return 0;
		}
	}
}
hwstreaming/flink
flink-runtime/src/test/java/org/apache/flink/runtime/checkpoint/ZooKeeperCompletedCheckpointStoreITCase.java
Java
apache-2.0
7,075
define("jquery-plugin/zTree/3.5.21/js/jquery.ztree.exhide", [], function(require, exports, module){ /* * JQuery zTree exHideNodes v3.5.21 * http://zTree.me/ * * Copyright (c) 2010 Hunter.z * * Licensed same as jquery - MIT License * http://www.opensource.org/licenses/mit-license.php * * email: [email protected] * Date: 2016-02-17 */ (function($){ //default init node of exLib var _initNode = function(setting, level, n, parentNode, isFirstNode, isLastNode, openFlag) { if (typeof n.isHidden == "string") n.isHidden = tools.eqs(n.isHidden, "true"); n.isHidden = !!n.isHidden; data.initHideForExCheck(setting, n); }, //add dom for check _beforeA = function(setting, node, html) {}, //update zTreeObj, add method of exLib _zTreeTools = function(setting, zTreeTools) { zTreeTools.showNodes = function(nodes, options) { view.showNodes(setting, nodes, options); } zTreeTools.showNode = function(node, options) { if (!node) { return; } view.showNodes(setting, [node], options); } zTreeTools.hideNodes = function(nodes, options) { view.hideNodes(setting, nodes, options); } zTreeTools.hideNode = function(node, options) { if (!node) { return; } view.hideNodes(setting, [node], options); } var _checkNode = zTreeTools.checkNode; if (_checkNode) { zTreeTools.checkNode = function(node, checked, checkTypeFlag, callbackFlag) { if (!!node && !!node.isHidden) { return; } _checkNode.apply(zTreeTools, arguments); } } }, //method of operate data _data = { initHideForExCheck: function(setting, n) { if (n.isHidden && setting.check && setting.check.enable) { if(typeof n._nocheck == "undefined") { n._nocheck = !!n.nocheck n.nocheck = true; } n.check_Child_State = -1; if (view.repairParentChkClassWithSelf) { view.repairParentChkClassWithSelf(setting, n); } } }, initShowForExCheck: function(setting, n) { if (!n.isHidden && setting.check && setting.check.enable) { if(typeof n._nocheck != "undefined") { n.nocheck = n._nocheck; delete n._nocheck; } if (view.setChkClass) { var checkObj = $$(n, consts.id.CHECK, 
setting); view.setChkClass(setting, checkObj, n); } if (view.repairParentChkClassWithSelf) { view.repairParentChkClassWithSelf(setting, n); } } } }, //method of operate ztree dom _view = { clearOldFirstNode: function(setting, node) { var n = node.getNextNode(); while(!!n){ if (n.isFirstNode) { n.isFirstNode = false; view.setNodeLineIcos(setting, n); break; } if (n.isLastNode) { break; } n = n.getNextNode(); } }, clearOldLastNode: function(setting, node, openFlag) { var n = node.getPreNode(); while(!!n){ if (n.isLastNode) { n.isLastNode = false; if (openFlag) { view.setNodeLineIcos(setting, n); } break; } if (n.isFirstNode) { break; } n = n.getPreNode(); } }, makeDOMNodeMainBefore: function(html, setting, node) { html.push("<li ", (node.isHidden ? "style='display:none;' " : ""), "id='", node.tId, "' class='", consts.className.LEVEL, node.level,"' tabindex='0' hidefocus='true' treenode>"); }, showNode: function(setting, node, options) { node.isHidden = false; data.initShowForExCheck(setting, node); $$(node, setting).show(); }, showNodes: function(setting, nodes, options) { if (!nodes || nodes.length == 0) { return; } var pList = {}, i, j; for (i=0, j=nodes.length; i<j; i++) { var n = nodes[i]; if (!pList[n.parentTId]) { var pn = n.getParentNode(); pList[n.parentTId] = (pn === null) ? 
data.getRoot(setting) : n.getParentNode(); } view.showNode(setting, n, options); } for (var tId in pList) { var children = pList[tId][setting.data.key.children]; view.setFirstNodeForShow(setting, children); view.setLastNodeForShow(setting, children); } }, hideNode: function(setting, node, options) { node.isHidden = true; node.isFirstNode = false; node.isLastNode = false; data.initHideForExCheck(setting, node); view.cancelPreSelectedNode(setting, node); $$(node, setting).hide(); }, hideNodes: function(setting, nodes, options) { if (!nodes || nodes.length == 0) { return; } var pList = {}, i, j; for (i=0, j=nodes.length; i<j; i++) { var n = nodes[i]; if ((n.isFirstNode || n.isLastNode) && !pList[n.parentTId]) { var pn = n.getParentNode(); pList[n.parentTId] = (pn === null) ? data.getRoot(setting) : n.getParentNode(); } view.hideNode(setting, n, options); } for (var tId in pList) { var children = pList[tId][setting.data.key.children]; view.setFirstNodeForHide(setting, children); view.setLastNodeForHide(setting, children); } }, setFirstNode: function(setting, parentNode) { var childKey = setting.data.key.children, childLength = parentNode[childKey].length; if (childLength > 0 && !parentNode[childKey][0].isHidden) { parentNode[childKey][0].isFirstNode = true; } else if (childLength > 0) { view.setFirstNodeForHide(setting, parentNode[childKey]); } }, setLastNode: function(setting, parentNode) { var childKey = setting.data.key.children, childLength = parentNode[childKey].length; if (childLength > 0 && !parentNode[childKey][0].isHidden) { parentNode[childKey][childLength - 1].isLastNode = true; } else if (childLength > 0) { view.setLastNodeForHide(setting, parentNode[childKey]); } }, setFirstNodeForHide: function(setting, nodes) { var n,i,j; for (i=0, j=nodes.length; i<j; i++) { n = nodes[i]; if (n.isFirstNode) { break; } if (!n.isHidden && !n.isFirstNode) { n.isFirstNode = true; view.setNodeLineIcos(setting, n); break; } else { n = null; } } return n; }, 
setFirstNodeForShow: function(setting, nodes) { var n,i,j, first, old; for(i=0, j=nodes.length; i<j; i++) { n = nodes[i]; if (!first && !n.isHidden && n.isFirstNode) { first = n; break; } else if (!first && !n.isHidden && !n.isFirstNode) { n.isFirstNode = true; first = n; view.setNodeLineIcos(setting, n); } else if (first && n.isFirstNode) { n.isFirstNode = false; old = n; view.setNodeLineIcos(setting, n); break; } else { n = null; } } return {"new":first, "old":old}; }, setLastNodeForHide: function(setting, nodes) { var n,i; for (i=nodes.length-1; i>=0; i--) { n = nodes[i]; if (n.isLastNode) { break; } if (!n.isHidden && !n.isLastNode) { n.isLastNode = true; view.setNodeLineIcos(setting, n); break; } else { n = null; } } return n; }, setLastNodeForShow: function(setting, nodes) { var n,i,j, last, old; for (i=nodes.length-1; i>=0; i--) { n = nodes[i]; if (!last && !n.isHidden && n.isLastNode) { last = n; break; } else if (!last && !n.isHidden && !n.isLastNode) { n.isLastNode = true; last = n; view.setNodeLineIcos(setting, n); } else if (last && n.isLastNode) { n.isLastNode = false; old = n; view.setNodeLineIcos(setting, n); break; } else { n = null; } } return {"new":last, "old":old}; } }, _z = { view: _view, data: _data }; $.extend(true, $.fn.zTree._z, _z); var zt = $.fn.zTree, tools = zt._z.tools, consts = zt.consts, view = zt._z.view, data = zt._z.data, event = zt._z.event, $$ = tools.$; data.addInitNode(_initNode); data.addBeforeA(_beforeA); data.addZTreeTools(_zTreeTools); // Override method in core var _dInitNode = data.initNode; data.initNode = function(setting, level, node, parentNode, isFirstNode, isLastNode, openFlag) { var tmpPNode = (parentNode) ? 
parentNode: data.getRoot(setting), children = tmpPNode[setting.data.key.children]; data.tmpHideFirstNode = view.setFirstNodeForHide(setting, children); data.tmpHideLastNode = view.setLastNodeForHide(setting, children); if (openFlag) { view.setNodeLineIcos(setting, data.tmpHideFirstNode); view.setNodeLineIcos(setting, data.tmpHideLastNode); } isFirstNode = (data.tmpHideFirstNode === node); isLastNode = (data.tmpHideLastNode === node); if (_dInitNode) _dInitNode.apply(data, arguments); if (openFlag && isLastNode) { view.clearOldLastNode(setting, node, openFlag); } }; var _makeChkFlag = data.makeChkFlag; if (!!_makeChkFlag) { data.makeChkFlag = function(setting, node) { if (!!node && !!node.isHidden) { return; } _makeChkFlag.apply(data, arguments); } } var _getTreeCheckedNodes = data.getTreeCheckedNodes; if (!!_getTreeCheckedNodes) { data.getTreeCheckedNodes = function(setting, nodes, checked, results) { if (!!nodes && nodes.length > 0) { var p = nodes[0].getParentNode(); if (!!p && !!p.isHidden) { return []; } } return _getTreeCheckedNodes.apply(data, arguments); } } var _getTreeChangeCheckedNodes = data.getTreeChangeCheckedNodes; if (!!_getTreeChangeCheckedNodes) { data.getTreeChangeCheckedNodes = function(setting, nodes, results) { if (!!nodes && nodes.length > 0) { var p = nodes[0].getParentNode(); if (!!p && !!p.isHidden) { return []; } } return _getTreeChangeCheckedNodes.apply(data, arguments); } } var _expandCollapseSonNode = view.expandCollapseSonNode; if (!!_expandCollapseSonNode) { view.expandCollapseSonNode = function(setting, node, expandFlag, animateFlag, callback) { if (!!node && !!node.isHidden) { return; } _expandCollapseSonNode.apply(view, arguments); } } var _setSonNodeCheckBox = view.setSonNodeCheckBox; if (!!_setSonNodeCheckBox) { view.setSonNodeCheckBox = function(setting, node, value, srcNode) { if (!!node && !!node.isHidden) { return; } _setSonNodeCheckBox.apply(view, arguments); } } var _repairParentChkClassWithSelf = 
view.repairParentChkClassWithSelf; if (!!_repairParentChkClassWithSelf) { view.repairParentChkClassWithSelf = function(setting, node) { if (!!node && !!node.isHidden) { return; } _repairParentChkClassWithSelf.apply(view, arguments); } } })(jQuery); });
18826252059/im
web/assets/libs/jquery-plugin/zTree/3.5.21/js/jquery.ztree.exhide.js
JavaScript
apache-2.0
10,870
/*
 * File:   LimeNET_micro.cpp  (header comment previously said LimeSDR_mini.cpp)
 * Author: Ignas J
 *
 * Created on September 18, 2016
 *
 * Device-specific glue for the LimeNET-Micro board; mostly overrides of the
 * LimeSDR-mini behaviour (different RF switch wiring, 30.72 MHz reference).
 */
#include "LimeNET_micro.h"
#include "Logger.h"
#include "FPGA_Mini.h"
#include "device_constants.h"

namespace lime
{

// Delegates to the LimeSDR-mini base; if no SXR reference clock is configured
// yet (negative value), defaults it to the board's 30.72 MHz reference.
LMS7_LimeNET_micro::LMS7_LimeNET_micro(lime::IConnection* conn, LMS7_Device *obj):
    LMS7_LimeSDR_mini(conn, obj)
{
    if (lms_list[0]->GetReferenceClk_SX(false) < 0)
    {
        lime::info("Reference clock set to 30.72 MHz");
        lms_list[0]->SetReferenceClk_SX(false, 30.72e6);
    }
}

// Resets the LMS7002M and loads the board-specific register defaults, then
// calibrates TX gain, re-tunes both SX PLLs and sets a 1 MS/s default rate.
// Returns 0 on success, -1 on any failure.
int LMS7_LimeNET_micro::Init()
{
    struct regVal
    {
        uint16_t adr;
        uint16_t val;
    };

    // NOTE(review): board-specific LMS7002M register defaults — assumed to
    // come from the vendor register map; values not verifiable from here.
    const std::vector<regVal> initVals = {
        {0x0022, 0x0FFF}, {0x0023, 0x5550}, {0x002B, 0x0038}, {0x002C, 0x0000},
        {0x002D, 0x0641}, {0x0086, 0x4101}, {0x0087, 0x5555}, {0x0088, 0x03F0},
        {0x0089, 0x1078}, {0x008B, 0x2100}, {0x008C, 0x267B}, {0x00A1, 0x656A},
        {0x00A6, 0x0009}, {0x00A7, 0x8A8A}, {0x00A9, 0x8000}, {0x00AC, 0x2000},
        {0x0105, 0x0011}, {0x0108, 0x218C}, {0x0109, 0x6100}, {0x010A, 0x1F4C},
        {0x010B, 0x0001}, {0x010C, 0x8865}, {0x010E, 0x0000}, {0x010F, 0x3142},
        {0x0110, 0x2B14}, {0x0111, 0x0000}, {0x0112, 0x942E}, {0x0113, 0x03C2},
        {0x0114, 0x00D0}, {0x0117, 0x1230}, {0x0119, 0x18D2}, {0x011C, 0x8941},
        {0x011D, 0x0000}, {0x011E, 0x0740}, {0x0120, 0xE6C0}, {0x0121, 0x3650},
        {0x0123, 0x000F}, {0x0200, 0x00E1}, {0x0208, 0x017B}, {0x020B, 0x4000},
        {0x020C, 0x8000}, {0x0400, 0x8081}, {0x0404, 0x0006}, {0x040B, 0x1020},
        {0x040C, 0x00FB}
    };

    lime::LMS7002M* lms = lms_list[0];
    if (lms->ResetChip() != 0)
        return -1;

    // Write defaults to channel A (MAC = 1).
    lms->Modify_SPI_Reg_bits(LMS7param(MAC), 1);
    for (auto i : initVals)
        lms->SPI_write(i.adr, i.val, true);

    if(lms->CalibrateTxGain(0,nullptr) != 0)
        return -1;

    lms->EnableChannel(true, false);

    // Channel B (MAC = 2): only the SXT-related registers differ.
    lms->Modify_SPI_Reg_bits(LMS7param(MAC), 2);
    lms->SPI_write(0x0123, 0x000F);  //SXT
    lms->SPI_write(0x0120, 0xE6C0);  //SXT
    lms->SPI_write(0x011C, 0x8941);  //SXT
    lms->EnableChannel(false, false);
    lms->EnableChannel(true, false);

    lms->Modify_SPI_Reg_bits(LMS7param(MAC), 1);

    // Re-apply current frequencies with automatic path selection temporarily
    // disabled, so the existing path choice is preserved.
    bool auto_path[2] = {auto_tx_path,auto_rx_path};
    auto_tx_path = false;
    auto_rx_path = false;

    if(SetFrequency(true, 0, GetFrequency(true, 0))!=0)
        return -1;
    if(SetFrequency(false, 0, GetFrequency(false, 0))!=0)
        return -1;

    auto_tx_path = auto_path[0];
    auto_rx_path = auto_path[1];

    if (SetRate(1e6, 16)!=0)
        return -1;

    return 0;
}

// Names of selectable antenna paths; index matches the path id used by
// SetPath/GetPath. LNAW is listed but not routed on this board (see below).
std::vector<std::string> LMS7_LimeNET_micro::GetPathNames(bool dir_tx, unsigned chan) const
{
    if (dir_tx)
        return {"NONE", "BAND1", "BAND2", "Auto"};
    else
        return {"NONE", "LNAH", "LNAL", "LNAW_NC", "Auto"};
}

// Programs the FPGA-controlled RF switch for the requested path. The register
// layout depends on hardware revision (reg 3: high nibble = BOM version, low
// nibble = HW version). Always returns 0; LNAW only logs a warning because it
// has no RF-port connection on this board.
int LMS7_LimeNET_micro::SetRFSwitch(bool isTx, unsigned path)
{
    int reg3 = fpga->ReadRegister(3);
    int bom_ver = reg3>>4;
    int hw_ver = reg3 & 0xF;
    if (isTx==false)
    {
        if (path==LMS_PATH_LNAW)
        {
            lime::warning("LNAW has no connection to RF ports");
        }
        else if (path==LMS_PATH_LNAL)
        {
            uint16_t value = fpga->ReadRegister(0x17);
            if (hw_ver >= 3)
            {
                value &= ~(0x0702);
                fpga->WriteRegister(0x17, value | 0x0502);
            }
            else
            {
                value &= ~(3<<8);
                fpga->WriteRegister(0x17, value | (1<<8));
            }
        }
        else if (path==LMS_PATH_LNAH)
        {
            uint16_t value = fpga->ReadRegister(0x17);
            if (hw_ver >= 3)
            {
                value &= ~(0x0702);
                fpga->WriteRegister(0x17, value | 0x0602);
            }
            else
            {
                value &= ~(3<<8);
                // Early BOM revision uses the LNAL switch setting for LNAH too.
                if (bom_ver == 0)
                    fpga->WriteRegister(0x17, value | (1<<8));
                else
                    fpga->WriteRegister(0x17, value | (2<<8));
            }
        }
    }
    else
    {
        if (path==LMS_PATH_TX1)
        {
            uint16_t value = fpga->ReadRegister(0x17);
            if (hw_ver >= 3)
            {
                value &= ~(0x7001);
                fpga->WriteRegister(0x17, value | 0x5000);
            }
            else
            {
                value &= ~(3<<12);
                fpga->WriteRegister(0x17, value | (1<<12));
            }
        }
        else if (path==LMS_PATH_TX2)
        {
            uint16_t value = fpga->ReadRegister(0x17);
            if (hw_ver >= 3)
            {
                value &= ~(0x7001);
                fpga->WriteRegister(0x17, value | 0x6000);
            }
            else
            {
                value &= ~(3<<12);
                if (bom_ver == 0)
                    fpga->WriteRegister(0x17, value | (1<<12));
                else
                    fpga->WriteRegister(0x17, value | (2<<12));
            }
        }
    }
    return 0;
}

// Automatic path selection: on revisions that support it, RX below 1.7 GHz is
// routed to LNAL; everything else defers to the LimeSDR-mini logic.
int LMS7_LimeNET_micro::AutoRFPath(bool isTx, double f_Hz)
{
    int reg3 = fpga->ReadRegister(3);
    int bom_ver = reg3>>4;
    int hw_ver = reg3 & 0xF;
    // Earliest revision has no switchable paths; nothing to do.
    if (hw_ver < 3 && bom_ver == 0)
        return 0;
    if ((!isTx) && (f_Hz < 1.7e9))
    {
        int ret = 0;
        if (GetPath(false, 0)!= LMS_PATH_LNAL)
        {
            lime::info("Selected RX path: LNAL");
            ret = SetPath(false, 0, LMS_PATH_LNAL);
        }
        auto_rx_path = true;
        return ret;
    }
    return LMS7_LimeSDR_mini::AutoRFPath(isTx, f_Hz);
}

// Bypasses the LimeSDR-mini override and uses the generic LMS7_Device clock
// handling for this board.
int LMS7_LimeNET_micro::SetClockFreq(unsigned clk_id, double freq, int channel)
{
    return LMS7_Device::SetClockFreq(clk_id, freq, channel);
}

}//namespace lime
limemicro/lms7suite
src/API/LimeNET_micro.cpp
C++
apache-2.0
5,737
/*-------------------------------------------------------------------------
 *
 * proc.c
 *	  routines to manage per-process shared memory data structure
 *
 * Portions Copyright (c) 2006-2008, Greenplum inc
 * Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  $PostgreSQL: pgsql/src/backend/storage/lmgr/proc.c,v 1.199 2008/01/26 19:55:08 tgl Exp $
 *
 *-------------------------------------------------------------------------
 */
/*
 * Interface (a):
 *		ProcSleep(), ProcWakeup(),
 *		ProcQueueAlloc() -- create a shm queue for sleeping processes
 *		ProcQueueInit() -- create a queue without allocing memory
 *
 * Waiting for a lock causes the backend to be put to sleep.  Whoever releases
 * the lock wakes the process up again (and gives it an error code so it knows
 * whether it was awoken on an error condition).
 *
 * Interface (b):
 *
 * ProcReleaseLocks -- frees the locks associated with current transaction
 *
 * ProcKill -- destroys the shared memory state (and locks)
 *		associated with the process.
 */
#include "postgres.h"

#include <signal.h>
#include <unistd.h>
#include <sys/time.h>

#include "access/transam.h"
#include "access/xact.h"
#include "catalog/namespace.h" /* TempNamespaceOidIsValid */
#include "commands/async.h"
#include "miscadmin.h"
#include "postmaster/autovacuum.h"
#include "replication/syncrep.h"
#include "storage/ipc.h"
#include "storage/spin.h"
#include "storage/sinval.h"
#include "storage/lmgr.h"
#include "storage/proc.h"
#include "storage/procarray.h"
#include "storage/pmsignal.h"
#include "executor/execdesc.h"
#include "utils/resscheduler.h"
#include "utils/timestamp.h"
#include "utils/portal.h"
#include "utils/sharedsnapshot.h" /*SharedLocalSnapshotSlot*/

#include "cdb/cdblocaldistribxact.h"
#include "cdb/cdbgang.h"
#include "cdb/cdbvars.h" /*Gp_is_writer*/
#include "utils/gp_atomic.h"
#include "utils/session_state.h"

/* GUC variables (milliseconds unless noted) */
int			DeadlockTimeout = 1000;
int			StatementTimeout = 0;
int			IdleSessionGangTimeout = 18000;
bool		log_lock_waits = false;

/* Pointer to this process's PGPROC struct, if any */
PGPROC	   *MyProc = NULL;

/* Special for MPP reader gangs */
PGPROC	   *lockHolderProcPtr = NULL;

/*
 * This spinlock protects the freelist of recycled PGPROC structures.
 * We cannot use an LWLock because the LWLock manager depends on already
 * having a PGPROC and a wait semaphore!  But these structures are touched
 * relatively infrequently (only at backend startup or shutdown) and not for
 * very long, so a spinlock is okay.
 */
NON_EXEC_STATIC slock_t *ProcStructLock = NULL;

/* Pointers to shared-memory structures */
NON_EXEC_STATIC PROC_HDR *ProcGlobal = NULL;
NON_EXEC_STATIC PGPROC *AuxiliaryProcs = NULL;

/* If we are waiting for a lock, this points to the associated LOCALLOCK */
static LOCALLOCK *lockAwaited = NULL;

/* Mark these volatile because they can be changed by signal handler */
static volatile bool statement_timeout_active = false;
static volatile bool deadlock_timeout_active = false;
static volatile DeadLockState deadlock_state = DS_NOT_YET_CHECKED;
static volatile sig_atomic_t clientWaitTimeoutInterruptEnabled = 0;
static volatile sig_atomic_t clientWaitTimeoutInterruptOccurred = 0;
volatile bool cancel_from_timeout = false;

/* timeout_start_time is set when log_lock_waits is true */
static TimestampTz timeout_start_time;

/* statement_fin_time is valid only if statement_timeout_active is true */
static TimestampTz statement_fin_time;


static void RemoveProcFromArray(int code, Datum arg);
static void ProcKill(int code, Datum arg);
static void AuxiliaryProcKill(int code, Datum arg);
static bool CheckStatementTimeout(void);
static void ClientWaitTimeoutInterruptHandler(void);
static void ProcessClientWaitTimeout(void);


/*
 * Report shared-memory space needed by InitProcGlobal.
 */
Size
ProcGlobalShmemSize(void)
{
	Size		size = 0;

	/* ProcGlobal */
	size = add_size(size, sizeof(PROC_HDR));
	/* AuxiliaryProcs */
	size = add_size(size, mul_size(NUM_AUXILIARY_PROCS, sizeof(PGPROC)));
	/* MyProcs, including autovacuum */
	size = add_size(size, mul_size(MaxBackends, sizeof(PGPROC)));
	/* ProcStructLock */
	size = add_size(size, sizeof(slock_t));

	return size;
}

/*
 * Report number of semaphores needed by InitProcGlobal.
 */
int
ProcGlobalSemas(void)
{
	/*
	 * We need a sema per backend (including autovacuum), plus one for each
	 * auxiliary process.
	 */
	return MaxBackends + NUM_AUXILIARY_PROCS;
}

/*
 * InitProcGlobal -
 *	  Initialize the global process table during postmaster or standalone
 *	  backend startup.
 *
 *	  We also create all the per-process semaphores we will need to support
 *	  the requested number of backends.  We used to allocate semaphores
 *	  only when backends were actually started up, but that is bad because
 *	  it lets Postgres fail under load --- a lot of Unix systems are
 *	  (mis)configured with small limits on the number of semaphores, and
 *	  running out when trying to start another backend is a common failure.
 *	  So, now we grab enough semaphores to support the desired max number
 *	  of backends immediately at initialization --- if the sysadmin has set
 *	  MaxConnections or autovacuum_max_workers higher than his kernel will
 *	  support, he'll find out sooner rather than later.
 *
 *	  Another reason for creating semaphores here is that the semaphore
 *	  implementation typically requires us to create semaphores in the
 *	  postmaster, not in backends.
 *
 * Note: this is NOT called by individual backends under a postmaster,
 * not even in the EXEC_BACKEND case.  The ProcGlobal and AuxiliaryProcs
 * pointers must be propagated specially for EXEC_BACKEND operation.
 */
void
InitProcGlobal(int mppLocalProcessCounter)
{
	PGPROC	   *procs;
	int			i;
	bool		found;

	/* Create the ProcGlobal shared structure */
	ProcGlobal = (PROC_HDR *)
		ShmemInitStruct("Proc Header", sizeof(PROC_HDR), &found);
	Assert(!found);

	/*
	 * Create the PGPROC structures for auxiliary (bgwriter) processes, too.
	 * These do not get linked into the freeProcs list.
	 */
	AuxiliaryProcs = (PGPROC *)
		ShmemInitStruct("AuxiliaryProcs", NUM_AUXILIARY_PROCS * sizeof(PGPROC),
						&found);
	Assert(!found);

	/*
	 * Initialize the data structures.
	 */
	ProcGlobal->freeProcs = INVALID_OFFSET;
	ProcGlobal->autovacFreeProcs = INVALID_OFFSET;

	ProcGlobal->spins_per_delay = DEFAULT_SPINS_PER_DELAY;

	ProcGlobal->mppLocalProcessCounter = mppLocalProcessCounter;

	/*
	 * Pre-create the PGPROC structures and create a semaphore for each.
	 * Each one is pushed onto the freeProcs freelist as it is created.
	 */
	procs = (PGPROC *) ShmemAlloc((MaxConnections) * sizeof(PGPROC));
	if (!procs)
		ereport(FATAL,
				(errcode(ERRCODE_OUT_OF_MEMORY),
				 errmsg("out of shared memory")));
	MemSet(procs, 0, MaxConnections * sizeof(PGPROC));
	for (i = 0; i < MaxConnections; i++)
	{
		PGSemaphoreCreate(&(procs[i].sem));
		InitSharedLatch(&(procs[i].procLatch));
		procs[i].links.next = ProcGlobal->freeProcs;
		ProcGlobal->freeProcs = MAKE_OFFSET(&procs[i]);
	}
	ProcGlobal->procs = procs;
	ProcGlobal->numFreeProcs = MaxConnections;

	/* Separate freelist for autovacuum workers. */
	procs = (PGPROC *) ShmemAlloc((autovacuum_max_workers) * sizeof(PGPROC));
	if (!procs)
		ereport(FATAL,
				(errcode(ERRCODE_OUT_OF_MEMORY),
				 errmsg("out of shared memory")));
	MemSet(procs, 0, autovacuum_max_workers * sizeof(PGPROC));
	for (i = 0; i < autovacuum_max_workers; i++)
	{
		PGSemaphoreCreate(&(procs[i].sem));
		InitSharedLatch(&(procs[i].procLatch));
		procs[i].links.next = ProcGlobal->autovacFreeProcs;
		ProcGlobal->autovacFreeProcs = MAKE_OFFSET(&procs[i]);
	}

	MemSet(AuxiliaryProcs, 0, NUM_AUXILIARY_PROCS * sizeof(PGPROC));
	for (i = 0; i < NUM_AUXILIARY_PROCS; i++)
	{
		AuxiliaryProcs[i].pid = 0;	/* marks auxiliary proc as not in use */
		PGSemaphoreCreate(&(AuxiliaryProcs[i].sem));
		InitSharedLatch(&(AuxiliaryProcs[i].procLatch));
	}

	/* Create ProcStructLock spinlock, too */
	ProcStructLock = (slock_t *) ShmemAlloc(sizeof(slock_t));
	SpinLockInit(ProcStructLock);
}

/*
 * InitProcess -- initialize a per-process data structure for this backend
 */
void
InitProcess(void)
{
	/* use volatile pointer to prevent code rearrangement */
	volatile PROC_HDR *procglobal = ProcGlobal;
	SHMEM_OFFSET myOffset;
	int			i;

	/*
	 * ProcGlobal should be set up already (if we are a backend, we inherit
	 * this by fork() or EXEC_BACKEND mechanism from the postmaster).
	 */
	if (procglobal == NULL)
		elog(PANIC, "proc header uninitialized");

	if (MyProc != NULL)
		elog(ERROR, "you already exist");

	/*
	 * Initialize process-local latch support.  This could fail if the kernel
	 * is low on resources, and if so we want to exit cleanly before acquiring
	 * any shared-memory resources.
	 */
	InitializeLatchSupport();

	/*
	 * Try to get a proc struct from the free list.  If this fails, we must be
	 * out of PGPROC structures (not to mention semaphores).
	 *
	 * While we are holding the ProcStructLock, also copy the current shared
	 * estimate of spins_per_delay to local storage.
	 */
	SpinLockAcquire(ProcStructLock);

	set_spins_per_delay(procglobal->spins_per_delay);

	/* Autovacuum workers draw from their own dedicated freelist. */
	if (IsAutoVacuumWorkerProcess())
		myOffset = procglobal->autovacFreeProcs;
	else
		myOffset = procglobal->freeProcs;

	if (myOffset != INVALID_OFFSET)
	{
		MyProc = (PGPROC *) MAKE_PTR(myOffset);
		if (IsAutoVacuumWorkerProcess())
			procglobal->autovacFreeProcs = MyProc->links.next;
		else
			procglobal->freeProcs = MyProc->links.next;

		procglobal->numFreeProcs--;		/* we removed an entry from the list. */
		Assert(procglobal->numFreeProcs >= 0);

		SpinLockRelease(ProcStructLock);
	}
	else
	{
		/*
		 * If we reach here, all the PGPROCs are in use.  This is one of the
		 * possible places to detect "too many backends", so give the standard
		 * error message.  XXX do we need to give a different failure message
		 * in the autovacuum case?
		 */
		SpinLockRelease(ProcStructLock);
		ereport(FATAL,
				(errcode(ERRCODE_TOO_MANY_CONNECTIONS),
				 errmsg("sorry, too many clients already")));
	}

	if (gp_debug_pgproc)
	{
		elog(LOG, "allocating PGPROC entry for pid %d, freeProcs (prev offset, new offset): (%ld, %ld)",
			 MyProcPid, MAKE_OFFSET(MyProc), MyProc->links.next);
	}

	/* Atomically draw the next process serial number from shared memory. */
	int mppLocalProcessSerial = pg_atomic_add_fetch_u32((pg_atomic_uint32 *)&procglobal->mppLocalProcessCounter, 1);

	lockHolderProcPtr = MyProc;

	/* Set the next pointer to INVALID_OFFSET */
	MyProc->links.next = INVALID_OFFSET;

	/*
	 * Now that we have a PGPROC, mark ourselves as an active postmaster
	 * child; this is so that the postmaster can detect it if we exit without
	 * cleaning up.  (XXX autovac launcher currently doesn't participate in
	 * this; it probably should.)
	 *
	 * Ideally, we should create functions similar to IsAutoVacuumWorkerProcess()
	 * for ftsProber, SeqServer etc who call InitProcess().
	 * But MyPMChildSlot helps to get away with it.
	 */
	if (IsUnderPostmaster && !IsAutoVacuumWorkerProcess() && MyPMChildSlot > 0)
		MarkPostmasterChildActive();

	/*
	 * Initialize all fields of MyProc, except for the semaphore which was
	 * prepared for us by InitProcGlobal.
	 */
	SHMQueueElemInit(&(MyProc->links));
	MyProc->waitStatus = STATUS_OK;
	MyProc->lxid = InvalidLocalTransactionId;
	MyProc->xid = InvalidTransactionId;
	MyProc->localDistribXactData.state = LOCALDISTRIBXACT_STATE_NONE;
	MyProc->xmin = InvalidTransactionId;
	MyProc->serializableIsoLevel = false;
	MyProc->inDropTransaction = false;
	MyProc->pid = MyProcPid;
	/* backendId, databaseId and roleId will be filled in later */
	MyProc->backendId = InvalidBackendId;
	MyProc->databaseId = InvalidOid;
	MyProc->roleId = InvalidOid;
	MyProc->inCommit = false;
	MyProc->vacuumFlags = 0;
	if (IsAutoVacuumWorkerProcess())
		MyProc->vacuumFlags |= PROC_IS_AUTOVACUUM;
	MyProc->lwWaiting = false;
	MyProc->lwExclusive = false;
	MyProc->lwWaitLink = NULL;
	MyProc->waitLock = NULL;
	MyProc->waitProcLock = NULL;
	for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
		SHMQueueInit(&(MyProc->myProcLocks[i]));

	/*
	 * mppLocalProcessSerial uniquely identifies this backend process among
	 * all those that our parent postmaster process creates over its lifetime.
	 *
	 * Since we use the process serial number to decide if we should
	 * deliver a response from a server under this spin, we need to
	 * assign it under the spin lock.
	 */
	MyProc->mppLocalProcessSerial = mppLocalProcessSerial;

	/*
	 * A nonzero gp_session_id uniquely identifies an MPP client session
	 * over the lifetime of the entry postmaster process.  A qDisp passes
	 * its gp_session_id down to all of its qExecs.  If this is a qExec,
	 * we have already received the gp_session_id from the qDisp.
	 */
	if (Gp_role == GP_ROLE_DISPATCH && gp_session_id == -1)
		gp_session_id = mppLocalProcessSerial;
	MyProc->mppSessionId = gp_session_id;

	elog(DEBUG1,"InitProcess(): gp_session_id %d, Gp_role %d",gp_session_id, Gp_role);

	MyProc->mppIsWriter = Gp_is_writer;

	/* The dispatcher is always considered a writer. */
	if (Gp_role == GP_ROLE_DISPATCH)
	{
		MyProc->mppIsWriter = true;
	}

	/* Initialize fields for sync rep */
	MyProc->waitLSN.xlogid = 0;
	MyProc->waitLSN.xrecoff = 0;
	MyProc->syncRepState = SYNC_REP_NOT_WAITING;
	SHMQueueElemInit(&(MyProc->syncRepLinks));

	/*
	 * Acquire ownership of the PGPROC's latch, so that we can use WaitLatch.
	 * Note that there's no particular need to do ResetLatch here.
	 */
	OwnLatch(&MyProc->procLatch);

	/*
	 * We might be reusing a semaphore that belonged to a failed process. So
	 * be careful and reinitialize its value here.  (This is not strictly
	 * necessary anymore, but seems like a good idea for cleanliness.)
	 */
	PGSemaphoreReset(&MyProc->sem);

	/* Set wait portal (do not check if resource scheduling is enabled) */
	MyProc->waitPortalId = INVALID_PORTALID;

	MyProc->queryCommandId = -1;

	/*
	 * Arrange to clean up at backend exit.
	 */
	on_shmem_exit(ProcKill, 0);

	/*
	 * Now that we have a PGPROC, we could try to acquire locks, so initialize
	 * the deadlock checker.
	 */
	InitDeadLockChecking();
}

/*
 * InitProcessPhase2 -- make MyProc visible in the shared ProcArray.
 *
 * This is separate from InitProcess because we can't acquire LWLocks until
 * we've created a PGPROC, but in the EXEC_BACKEND case there is a good deal
 * of stuff to be done before this step that will require LWLock access.
 */
void
InitProcessPhase2(void)
{
	Assert(MyProc != NULL);

	/*
	 * Add our PGPROC to the PGPROC array in shared memory.
	 */
	ProcArrayAdd(MyProc);

	/*
	 * Arrange to clean that up at backend exit.
	 */
	on_shmem_exit(RemoveProcFromArray, 0);
}

/*
 * InitAuxiliaryProcess -- create a per-auxiliary-process data structure
 *
 * This is called by bgwriter and similar processes so that they will have a
 * MyProc value that's real enough to let them wait for LWLocks.  The PGPROC
 * and sema that are assigned are one of the extra ones created during
 * InitProcGlobal.
 *
 * Auxiliary processes are presently not expected to wait for real (lockmgr)
 * locks, so we need not set up the deadlock checker.  They are never added
 * to the ProcArray or the sinval messaging mechanism, either.  They also
 * don't get a VXID assigned, since this is only useful when we actually
 * hold lockmgr locks.
 */
void
InitAuxiliaryProcess(void)
{
	PGPROC	   *auxproc;
	int			proctype;
	int			i;

	/*
	 * ProcGlobal should be set up already (if we are a backend, we inherit
	 * this by fork() or EXEC_BACKEND mechanism from the postmaster).
	 */
	if (ProcGlobal == NULL || AuxiliaryProcs == NULL)
		elog(PANIC, "proc header uninitialized");

	if (MyProc != NULL)
		elog(ERROR, "you already exist");

	/*
	 * Initialize process-local latch support.  This could fail if the kernel
	 * is low on resources, and if so we want to exit cleanly before acquiring
	 * any shared-memory resources.
	 */
	InitializeLatchSupport();

	/*
	 * We use the ProcStructLock to protect assignment and releasing of
	 * AuxiliaryProcs entries.
	 *
	 * While we are holding the ProcStructLock, also copy the current shared
	 * estimate of spins_per_delay to local storage.
	 */
	SpinLockAcquire(ProcStructLock);

	set_spins_per_delay(ProcGlobal->spins_per_delay);

	/*
	 * Find a free auxproc ... *big* trouble if there isn't one ...
	 */
	for (proctype = 0; proctype < NUM_AUXILIARY_PROCS; proctype++)
	{
		auxproc = &AuxiliaryProcs[proctype];
		if (auxproc->pid == 0)
			break;
	}
	if (proctype >= NUM_AUXILIARY_PROCS)
	{
		SpinLockRelease(ProcStructLock);
		elog(FATAL, "all AuxiliaryProcs are in use");
	}

	/* Mark auxiliary proc as in use by me */
	/* use volatile pointer to prevent code rearrangement */
	((volatile PGPROC *) auxproc)->pid = MyProcPid;

	MyProc = auxproc;
	lockHolderProcPtr = auxproc;

	SpinLockRelease(ProcStructLock);

	/*
	 * Initialize all fields of MyProc, except for the semaphore which was
	 * prepared for us by InitProcGlobal.
	 */
	SHMQueueElemInit(&(MyProc->links));
	MyProc->waitStatus = STATUS_OK;
	MyProc->lxid = InvalidLocalTransactionId;
	MyProc->xid = InvalidTransactionId;
	MyProc->localDistribXactData.state = LOCALDISTRIBXACT_STATE_NONE;
	MyProc->xmin = InvalidTransactionId;
	MyProc->serializableIsoLevel = false;
	MyProc->inDropTransaction = false;
	MyProc->databaseId = InvalidOid;
	MyProc->roleId = InvalidOid;
	MyProc->mppLocalProcessSerial = 0;
	MyProc->mppSessionId = 0;
	MyProc->mppIsWriter = false;
	MyProc->inCommit = false;
	MyProc->vacuumFlags = 0;
	MyProc->lwWaiting = false;
	MyProc->lwExclusive = false;
	MyProc->lwWaitLink = NULL;
	MyProc->waitLock = NULL;
	MyProc->waitProcLock = NULL;
	for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
		SHMQueueInit(&(MyProc->myProcLocks[i]));

	/*
	 * Auxiliary process doesn't bother with sync rep.  Though it was
	 * originally supposed to not do transaction work, but it does in GPDB,
	 * we mark it and avoid sync rep work.
	 */
	MyProc->syncRepState = SYNC_REP_DISABLED;

	/*
	 * Acquire ownership of the PGPROC's latch, so that we can use WaitLatch.
	 * Note that there's no particular need to do ResetLatch here.
	 */
	OwnLatch(&MyProc->procLatch);

	/*
	 * We might be reusing a semaphore that belonged to a failed process. So
	 * be careful and reinitialize its value here.  (This is not strictly
	 * necessary anymore, but seems like a good idea for cleanliness.)
	 */
	PGSemaphoreReset(&MyProc->sem);

	MyProc->queryCommandId = -1;

	/*
	 * Arrange to clean up at process exit.
	 */
	on_shmem_exit(AuxiliaryProcKill, Int32GetDatum(proctype));
}

/*
 * Check whether there are at least N free PGPROC objects.
 */
bool
HaveNFreeProcs(int n)
{
	Assert(n >= 0);

	return (ProcGlobal->numFreeProcs >= n);
}

/*
 * Cancel any pending wait for lock, when aborting a transaction.
 *
 * (Normally, this would only happen if we accept a cancel/die
 * interrupt while waiting; but an ereport(ERROR) while waiting is
 * within the realm of possibility, too.)
 */
void
LockWaitCancel(void)
{
	LWLockId	partitionLock;

	/* Nothing to do if we weren't waiting for a lock */
	if (lockAwaited == NULL)
		return;

	/* Don't try to cancel resource locks.*/
	if (Gp_role == GP_ROLE_DISPATCH && ResourceScheduler &&
		LOCALLOCK_LOCKMETHOD(*lockAwaited) == RESOURCE_LOCKMETHOD)
		return;

	/* Turn off the deadlock timer, if it's still running (see ProcSleep) */
	disable_sig_alarm(false);

	/* Unlink myself from the wait queue, if on it (might not be anymore!) */
	partitionLock = LockHashPartitionLock(lockAwaited->hashcode);
	LWLockAcquire(partitionLock, LW_EXCLUSIVE);

	if (MyProc->links.next != INVALID_OFFSET)
	{
		/* We could not have been granted the lock yet */
		RemoveFromWaitQueue(MyProc, lockAwaited->hashcode);
	}
	else
	{
		/*
		 * Somebody kicked us off the lock queue already.  Perhaps they
		 * granted us the lock, or perhaps they detected a deadlock. If they
		 * did grant us the lock, we'd better remember it in our local lock
		 * table.
		 */
		if (MyProc->waitStatus == STATUS_OK)
			GrantAwaitedLock();
	}

	lockAwaited = NULL;

	LWLockRelease(partitionLock);

	/*
	 * We used to do PGSemaphoreReset() here to ensure that our proc's wait
	 * semaphore is reset to zero.  This prevented a leftover wakeup signal
	 * from remaining in the semaphore if someone else had granted us the lock
	 * we wanted before we were able to remove ourselves from the wait-list.
	 * However, now that ProcSleep loops until waitStatus changes, a leftover
	 * wakeup signal isn't harmful, and it seems not worth expending cycles to
	 * get rid of a signal that most likely isn't there.
	 */
}

/*
 * ProcReleaseLocks() -- release locks associated with current transaction
 *			at main transaction commit or abort
 *
 * At main transaction commit, we release all locks except session locks.
 * At main transaction abort, we release all locks including session locks;
 * this lets us clean up after a VACUUM FULL failure.
 *
 * At subtransaction commit, we don't release any locks (so this func is not
 * needed at all); we will defer the releasing to the parent transaction.
 * At subtransaction abort, we release all locks held by the subtransaction;
 * this is implemented by retail releasing of the locks under control of
 * the ResourceOwner mechanism.
 *
 * Note that user locks are not released in any case.
 */
void
ProcReleaseLocks(bool isCommit)
{
	if (!MyProc)
		return;
	/* If waiting, get off wait queue (should only be needed after error) */
	LockWaitCancel();
	/* Release locks */
	LockReleaseAll(DEFAULT_LOCKMETHOD, !isCommit);
}


/*
 * RemoveProcFromArray() -- Remove this process from the shared ProcArray.
 */
static void
RemoveProcFromArray(int code, Datum arg)
{
	Assert(MyProc != NULL);
	ProcArrayRemove(MyProc, InvalidTransactionId);
}

/*
 * update_spins_per_delay
 *	 Update spins_per_delay value in ProcGlobal.
 *
 * Uses a compare-and-swap retry loop so concurrent updaters cannot lose
 * each other's recomputation of the shared estimate.
 */
static void
update_spins_per_delay()
{
	volatile PROC_HDR *procglobal = ProcGlobal;
	bool casResult = false;

	while (!casResult)
	{
		int old_spins_per_delay = procglobal->spins_per_delay;
		int new_spins_per_delay = recompute_spins_per_delay(old_spins_per_delay);
		casResult = pg_atomic_compare_exchange_u32((pg_atomic_uint32 *)&procglobal->spins_per_delay,
												   (uint32 *)&old_spins_per_delay,
												   new_spins_per_delay);
	}
}

/*
 * ProcKill() -- Destroy the per-proc data structure for
 *		this process.  Release any of its held LW locks.
*/
static void
ProcKill(int code, Datum arg)
{
	/* use volatile pointer to prevent code rearrangement */
	volatile PROC_HDR *procglobal = ProcGlobal;
	PGPROC	   *proc;

	Assert(MyProc != NULL);

	/* Make sure we're out of the sync rep lists */
	SyncRepCleanupAtProcExit();

	/*
	 * Cleanup for any resource locks on portals - from holdable cursors or
	 * unclean process abort (assertion failures).
	 */
	if (Gp_role == GP_ROLE_DISPATCH && ResourceScheduler)
		AtExitCleanup_ResPortals();

	/*
	 * Remove the shared snapshot slot.
	 */
	if (SharedLocalSnapshotSlot != NULL)
	{
		if (Gp_role == GP_ROLE_DISPATCH)
		{
			SharedSnapshotRemove(SharedLocalSnapshotSlot, "Query Dispatcher");
		}
		else if (Gp_segment == -1 && Gp_role == GP_ROLE_EXECUTE && !Gp_is_writer)
		{
			/*
			 * Entry db singleton QE is a user of the shared snapshot -- not a creator.
			 */
		}
		else if (Gp_role == GP_ROLE_EXECUTE && Gp_is_writer)
		{
			SharedSnapshotRemove(SharedLocalSnapshotSlot, "Writer qExec");
		}
		SharedLocalSnapshotSlot = NULL;
	}

	/*
	 * Release any LW locks I am holding.  There really shouldn't be any, but
	 * it's cheap to check again before we cut the knees off the LWLock
	 * facility by releasing our PGPROC ...
	 */
	LWLockReleaseAll();

	/* Reset MPP-specific identity fields before recycling the PGPROC. */
	MyProc->localDistribXactData.state = LOCALDISTRIBXACT_STATE_NONE;
	MyProc->mppLocalProcessSerial = 0;
	MyProc->mppSessionId = 0;
	MyProc->mppIsWriter = false;
	MyProc->pid = 0;

	/*
	 * Clear MyProc first; then disown the process latch.  This is so that
	 * signal handlers won't try to clear the process latch after it's no
	 * longer ours.
	 *
	 * NOTE(review): unlike AuxiliaryProcKill below, lockHolderProcPtr is not
	 * reset here -- confirm that is intended.
	 */
	proc = MyProc;
	MyProc = NULL;
	DisownLatch(&proc->procLatch);

	SpinLockAcquire(ProcStructLock);

	/* Return PGPROC structure (and semaphore) to freelist */
	if (IsAutoVacuumWorkerProcess())
	{
		proc->links.next = procglobal->autovacFreeProcs;
		procglobal->autovacFreeProcs = MAKE_OFFSET(proc);
	}
	else
	{
		proc->links.next = procglobal->freeProcs;
		procglobal->freeProcs = MAKE_OFFSET(proc);
	}

	procglobal->numFreeProcs++;		/* we added an entry */

	/* Update shared estimate of spins_per_delay */
	update_spins_per_delay();

	SpinLockRelease(ProcStructLock);

	/*
	 * This process is no longer present in shared memory in any meaningful
	 * way, so tell the postmaster we've cleaned up acceptably well.
	 */
	if (IsUnderPostmaster && !IsAutoVacuumWorkerProcess() && MyPMChildSlot > 0)
		MarkPostmasterChildInactive();

	/* wake autovac launcher if needed -- see comments in FreeWorkerInfo */
	if (AutovacuumLauncherPid != 0)
		kill(AutovacuumLauncherPid, SIGUSR1);
}

/*
 * AuxiliaryProcKill() -- Cut-down version of ProcKill for auxiliary
 *		processes (bgwriter, etc).  The PGPROC and sema are not released, only
 *		marked as not-in-use.
 */
static void
AuxiliaryProcKill(int code, Datum arg)
{
	int			proctype = DatumGetInt32(arg);
	PGPROC	   *auxproc;

	Assert(proctype >= 0 && proctype < NUM_AUXILIARY_PROCS);

	auxproc = &AuxiliaryProcs[proctype];

	Assert(MyProc == auxproc);

	/* Release any LW locks I am holding (see notes above) */
	LWLockReleaseAll();

	/* Release ownership of the process's latch, too */
	DisownLatch(&MyProc->procLatch);

	SpinLockAcquire(ProcStructLock);

	/* Mark auxiliary proc no longer in use */
	MyProc->pid = 0;

	/* Update shared estimate of spins_per_delay */
	update_spins_per_delay();

	SpinLockRelease(ProcStructLock);

	/*
	 * If the parent process of this auxiliary process does not exist, we
	 * have trouble.  Besides the obvious case that the postmaster is gone,
	 * this could happen to filerep subprocesses when the filerep main
	 * process dies unexpectedly.  The postmaster will receive the SIGCHLD
	 * signal when we exit in that case.  Make sure we exit with non-zero (and
	 * not 1 either) exit status, to force the postmaster to reset the system
	 * if that happens.
	 */
	if (!ParentProcIsAlive())
		proc_exit(10);

	/* PGPROC struct isn't mine anymore */
	MyProc = NULL;
	lockHolderProcPtr = NULL;
}


/*
 * ProcQueue package: routines for putting processes to sleep
 *		and waking them up
 */

/*
 * ProcQueueAlloc -- alloc/attach to a shared memory process queue
 *
 * Returns: a pointer to the queue or NULL
 * Side Effects: Initializes the queue if we allocated one
 */
#ifdef NOT_USED
PROC_QUEUE *
ProcQueueAlloc(char *name)
{
	bool		found;
	PROC_QUEUE *queue = (PROC_QUEUE *)
		ShmemInitStruct(name, sizeof(PROC_QUEUE), &found);

	if (!queue)
		return NULL;
	if (!found)
		ProcQueueInit(queue);
	return queue;
}
#endif

/*
 * ProcQueueInit -- initialize a shared memory process queue
 */
void
ProcQueueInit(PROC_QUEUE *queue)
{
	SHMQueueInit(&(queue->links));
	queue->size = 0;
}


/*
 * ProcSleep -- put a process to sleep on the specified lock
 *
 * Caller must have set MyProc->heldLocks to reflect locks already held
 * on the lockable object by this process (under all XIDs).
 *
 * The lock table's partition lock must be held at entry, and will be held
 * at exit.
 *
 * Result: STATUS_OK if we acquired the lock, STATUS_ERROR if not (deadlock).
 *
 * ASSUME: that no one will fiddle with the queue until after
 *		we release the partition lock.
 *
 * NOTES: The process queue is now a priority queue for locking.
 *
 * P() on the semaphore should put us to sleep.  The process
 * semaphore is normally zero, so when we try to acquire it, we sleep.
*/ int ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable) { LOCKMODE lockmode = locallock->tag.mode; LOCK *lock = locallock->lock; PROCLOCK *proclock = locallock->proclock; uint32 hashcode = locallock->hashcode; LWLockId partitionLock = LockHashPartitionLock(hashcode); PROC_QUEUE *waitQueue = &(lock->waitProcs); LOCKMASK myHeldLocks = MyProc->heldLocks; bool early_deadlock = false; bool allow_autovacuum_cancel = true; int myWaitStatus; PGPROC *proc; int i; /* * Determine where to add myself in the wait queue. * * Normally I should go at the end of the queue. However, if I already * hold locks that conflict with the request of any previous waiter, put * myself in the queue just in front of the first such waiter. This is not * a necessary step, since deadlock detection would move me to before that * waiter anyway; but it's relatively cheap to detect such a conflict * immediately, and avoid delaying till deadlock timeout. * * Special case: if I find I should go in front of some waiter, check to * see if I conflict with already-held locks or the requests before that * waiter. If not, then just grant myself the requested lock immediately. * This is the same as the test for immediate grant in LockAcquire, except * we are only considering the part of the wait queue before my insertion * point. */ if (myHeldLocks != 0) { LOCKMASK aheadRequests = 0; proc = (PGPROC *) MAKE_PTR(waitQueue->links.next); for (i = 0; i < waitQueue->size; i++) { /* Must he wait for me? */ if (lockMethodTable->conflictTab[proc->waitLockMode] & myHeldLocks) { /* Must I wait for him ? */ if (lockMethodTable->conflictTab[lockmode] & proc->heldLocks) { /* * Yes, so we have a deadlock. Easiest way to clean up * correctly is to call RemoveFromWaitQueue(), but we * can't do that until we are *on* the wait queue. So, set * a flag to check below, and break out of loop. Also, * record deadlock info for later message. 
*/ RememberSimpleDeadLock(MyProc, lockmode, lock, proc); early_deadlock = true; break; } /* I must go before this waiter. Check special case. */ if ((lockMethodTable->conflictTab[lockmode] & aheadRequests) == 0 && LockCheckConflicts(lockMethodTable, lockmode, lock, proclock, MyProc) == STATUS_OK) { /* Skip the wait and just grant myself the lock. */ GrantLock(lock, proclock, lockmode); GrantAwaitedLock(); return STATUS_OK; } /* Break out of loop to put myself before him */ break; } /* Nope, so advance to next waiter */ aheadRequests |= LOCKBIT_ON(proc->waitLockMode); proc = (PGPROC *) MAKE_PTR(proc->links.next); } /* * If we fall out of loop normally, proc points to waitQueue head, so * we will insert at tail of queue as desired. */ } else { /* I hold no locks, so I can't push in front of anyone. */ proc = (PGPROC *) &(waitQueue->links); } /* * Insert self into queue, ahead of the given proc (or at tail of queue). */ SHMQueueInsertBefore(&(proc->links), &(MyProc->links)); waitQueue->size++; lock->waitMask |= LOCKBIT_ON(lockmode); /* Set up wait information in PGPROC object, too */ MyProc->waitLock = lock; MyProc->waitProcLock = proclock; MyProc->waitLockMode = lockmode; MyProc->waitStatus = STATUS_WAITING; /* * If we detected deadlock, give up without waiting. This must agree with * CheckDeadLock's recovery code, except that we shouldn't release the * semaphore since we haven't tried to lock it yet. */ if (early_deadlock) { RemoveFromWaitQueue(MyProc, hashcode); return STATUS_ERROR; } /* mark that we are waiting for a lock */ lockAwaited = locallock; /* * Release the lock table's partition lock. * * NOTE: this may also cause us to exit critical-section state, possibly * allowing a cancel/die interrupt to be accepted. This is OK because we * have recorded the fact that we are waiting for a lock, and so * LockWaitCancel will clean up if cancel/die happens. 
*/ LWLockRelease(partitionLock); /* Reset deadlock_state before enabling the signal handler */ deadlock_state = DS_NOT_YET_CHECKED; /* * Set timer so we can wake up after awhile and check for a deadlock. If a * deadlock is detected, the handler releases the process's semaphore and * sets MyProc->waitStatus = STATUS_ERROR, allowing us to know that we * must report failure rather than success. * * By delaying the check until we've waited for a bit, we can avoid * running the rather expensive deadlock-check code in most cases. */ if (!enable_sig_alarm(DeadlockTimeout, false)) elog(FATAL, "could not set timer for process wakeup"); /* * If someone wakes us between LWLockRelease and PGSemaphoreLock, * PGSemaphoreLock will not block. The wakeup is "saved" by the semaphore * implementation. While this is normally good, there are cases where a * saved wakeup might be leftover from a previous operation (for example, * we aborted ProcWaitForSignal just before someone did ProcSendSignal). * So, loop to wait again if the waitStatus shows we haven't been granted * nor denied the lock yet. * * We pass interruptOK = true, which eliminates a window in which * cancel/die interrupts would be held off undesirably. This is a promise * that we don't mind losing control to a cancel/die interrupt here. We * don't, because we have no shared-state-change work to do after being * granted the lock (the grantor did it all). We do have to worry about * updating the locallock table, but if we lose control to an error, * LockWaitCancel will fix that up. */ do { PGSemaphoreLock(&MyProc->sem, true); /* * waitStatus could change from STATUS_WAITING to something else * asynchronously. Read it just once per loop to prevent surprising * behavior (such as missing log messages). */ myWaitStatus = MyProc->waitStatus; /* * If we are not deadlocked, but are waiting on an autovacuum-induced * task, send a signal to interrupt it. 
*/ if (deadlock_state == DS_BLOCKED_BY_AUTOVACUUM && allow_autovacuum_cancel) { PGPROC *autovac = GetBlockingAutoVacuumPgproc(); LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE); /* * Only do it if the worker is not working to protect against Xid * wraparound. */ if ((autovac != NULL) && (autovac->vacuumFlags & PROC_IS_AUTOVACUUM) && !(autovac->vacuumFlags & PROC_VACUUM_FOR_WRAPAROUND)) { int pid = autovac->pid; StringInfoData locktagbuf; StringInfoData logbuf; /* errdetail for server log */ initStringInfo(&locktagbuf); initStringInfo(&logbuf); DescribeLockTag(&locktagbuf, &lock->tag); appendStringInfo(&logbuf, _("Process %d waits for %s on %s."), MyProcPid, GetLockmodeName(lock->tag.locktag_lockmethodid, lockmode), locktagbuf.data); /* release lock as quickly as possible */ LWLockRelease(ProcArrayLock); ereport(LOG, (errmsg("sending cancel to blocking autovacuum PID %d", pid), errdetail("%s", logbuf.data))); pfree(logbuf.data); pfree(locktagbuf.data); /* send the autovacuum worker Back to Old Kent Road */ if (kill(pid, SIGINT) < 0) { /* Just a warning to allow multiple callers */ ereport(WARNING, (errmsg("could not send signal to process %d: %m", pid))); } } else LWLockRelease(ProcArrayLock); /* prevent signal from being resent more than once */ allow_autovacuum_cancel = false; } /* * If awoken after the deadlock check interrupt has run, and * log_lock_waits is on, then report about the wait. 
*/ if (log_lock_waits && deadlock_state != DS_NOT_YET_CHECKED) { StringInfoData buf; const char *modename; long secs; int usecs; long msecs; initStringInfo(&buf); DescribeLockTag(&buf, &locallock->tag.lock); modename = GetLockmodeName(locallock->tag.lock.locktag_lockmethodid, lockmode); TimestampDifference(timeout_start_time, GetCurrentTimestamp(), &secs, &usecs); msecs = secs * 1000 + usecs / 1000; usecs = usecs % 1000; if (deadlock_state == DS_SOFT_DEADLOCK) ereport(LOG, (errmsg("process %d avoided deadlock for %s on %s by rearranging queue order after %ld.%03d ms", MyProcPid, modename, buf.data, msecs, usecs))); else if (deadlock_state == DS_HARD_DEADLOCK) { /* * This message is a bit redundant with the error that will be * reported subsequently, but in some cases the error report * might not make it to the log (eg, if it's caught by an * exception handler), and we want to ensure all long-wait * events get logged. */ ereport(LOG, (errmsg("process %d detected deadlock while waiting for %s on %s after %ld.%03d ms", MyProcPid, modename, buf.data, msecs, usecs))); } if (myWaitStatus == STATUS_WAITING) ereport(LOG, (errmsg("process %d still waiting for %s on %s after %ld.%03d ms", MyProcPid, modename, buf.data, msecs, usecs))); else if (myWaitStatus == STATUS_OK) ereport(LOG, (errmsg("process %d acquired %s on %s after %ld.%03d ms", MyProcPid, modename, buf.data, msecs, usecs))); else { Assert(myWaitStatus == STATUS_ERROR); /* * Currently, the deadlock checker always kicks its own * process, which means that we'll only see STATUS_ERROR when * deadlock_state == DS_HARD_DEADLOCK, and there's no need to * print redundant messages. But for completeness and * future-proofing, print a message if it looks like someone * else kicked us off the lock. 
*/ if (deadlock_state != DS_HARD_DEADLOCK) ereport(LOG, (errmsg("process %d failed to acquire %s on %s after %ld.%03d ms", MyProcPid, modename, buf.data, msecs, usecs))); } /* * At this point we might still need to wait for the lock. Reset * state so we don't print the above messages again. */ deadlock_state = DS_NO_DEADLOCK; pfree(buf.data); } } while (myWaitStatus == STATUS_WAITING); /* * Disable the timer, if it's still running */ if (!disable_sig_alarm(false)) elog(FATAL, "could not disable timer for process wakeup"); /* * Re-acquire the lock table's partition lock. We have to do this to hold * off cancel/die interrupts before we can mess with lockAwaited (else we * might have a missed or duplicated locallock update). */ LWLockAcquire(partitionLock, LW_EXCLUSIVE); /* * We no longer want LockWaitCancel to do anything. */ lockAwaited = NULL; /* * If we got the lock, be sure to remember it in the locallock table. */ if (MyProc->waitStatus == STATUS_OK) GrantAwaitedLock(); /* * We don't have to do anything else, because the awaker did all the * necessary update of the lock table and MyProc. */ return MyProc->waitStatus; } /* * ProcWakeup -- wake up a process by releasing its private semaphore. * * Also remove the process from the wait queue and set its links invalid. * RETURN: the next process in the wait queue. * * The appropriate lock partition lock must be held by caller. * * XXX: presently, this code is only used for the "success" case, and only * works correctly for that case. To clean up in failure case, would need * to twiddle the lock's request counts too --- see RemoveFromWaitQueue. * Hence, in practice the waitStatus parameter must be STATUS_OK. */ PGPROC * ProcWakeup(PGPROC *proc, int waitStatus) { PGPROC *retProc; /* Proc should be sleeping ... 
*/ if (proc->links.prev == INVALID_OFFSET || proc->links.next == INVALID_OFFSET) return NULL; Assert(proc->waitStatus == STATUS_WAITING); /* Save next process before we zap the list link */ retProc = (PGPROC *) MAKE_PTR(proc->links.next); /* Remove process from wait queue */ SHMQueueDelete(&(proc->links)); (proc->waitLock->waitProcs.size)--; /* Clean up process' state and pass it the ok/fail signal */ proc->waitLock = NULL; proc->waitProcLock = NULL; proc->waitStatus = waitStatus; /* And awaken it */ PGSemaphoreUnlock(&proc->sem); return retProc; } /* * ProcLockWakeup -- routine for waking up processes when a lock is * released (or a prior waiter is aborted). Scan all waiters * for lock, waken any that are no longer blocked. * * The appropriate lock partition lock must be held by caller. */ void ProcLockWakeup(LockMethod lockMethodTable, LOCK *lock) { PROC_QUEUE *waitQueue = &(lock->waitProcs); int queue_size = waitQueue->size; PGPROC *proc; LOCKMASK aheadRequests = 0; Assert(queue_size >= 0); if (queue_size == 0) return; proc = (PGPROC *) MAKE_PTR(waitQueue->links.next); while (queue_size-- > 0) { LOCKMODE lockmode = proc->waitLockMode; /* * Waken if (a) doesn't conflict with requests of earlier waiters, and * (b) doesn't conflict with already-held locks. */ if ((lockMethodTable->conflictTab[lockmode] & aheadRequests) == 0 && LockCheckConflicts(lockMethodTable, lockmode, lock, proc->waitProcLock, proc) == STATUS_OK) { /* OK to waken */ GrantLock(lock, proc->waitProcLock, lockmode); proc = ProcWakeup(proc, STATUS_OK); /* * ProcWakeup removes proc from the lock's waiting process queue * and returns the next proc in chain; don't use proc's next-link, * because it's been cleared. */ } else { /* * Cannot wake this guy. Remember his request for later checks. 
*/ aheadRequests |= LOCKBIT_ON(lockmode); proc = (PGPROC *) MAKE_PTR(proc->links.next); } } Assert(waitQueue->size >= 0); } /* * CheckDeadLock * * We only get to this routine if we got SIGALRM after DeadlockTimeout * while waiting for a lock to be released by some other process. Look * to see if there's a deadlock; if not, just return and continue waiting. * (But signal ProcSleep to log a message, if log_lock_waits is true.) * If we have a real deadlock, remove ourselves from the lock's wait queue * and signal an error to ProcSleep. * * NB: this is run inside a signal handler, so be very wary about what is done * here or in called routines. */ static void CheckDeadLock(void) { int i; /* * Acquire exclusive lock on the entire shared lock data structures. Must * grab LWLocks in partition-number order to avoid LWLock deadlock. * * Note that the deadlock check interrupt had better not be enabled * anywhere that this process itself holds lock partition locks, else this * will wait forever. Also note that LWLockAcquire creates a critical * section, so that this routine cannot be interrupted by cancel/die * interrupts. */ for (i = 0; i < NUM_LOCK_PARTITIONS; i++) LWLockAcquire(FirstLockMgrLock + i, LW_EXCLUSIVE); /* * Check to see if we've been awoken by anyone in the interim. * * If we have, we can return and resume our transaction -- happy day. * Before we are awoken the process releasing the lock grants it to us * so we know that we don't have to wait anymore. * * We check by looking to see if we've been unlinked from the wait queue. * This is quicker than checking our semaphore's state, since no kernel * call is needed, and it is safe because we hold the lock partition lock. 
*/ if (MyProc->links.prev == INVALID_OFFSET || MyProc->links.next == INVALID_OFFSET) goto check_done; #ifdef LOCK_DEBUG if (Debug_deadlocks) DumpAllLocks(); #endif /* Run the deadlock check, and set deadlock_state for use by ProcSleep */ deadlock_state = DeadLockCheck(MyProc); if (deadlock_state == DS_HARD_DEADLOCK) { /* * Oops. We have a deadlock. * * Get this process out of wait state. (Note: we could do this more * efficiently by relying on lockAwaited, but use this coding to * preserve the flexibility to kill some other transaction than the * one detecting the deadlock.) * * RemoveFromWaitQueue sets MyProc->waitStatus to STATUS_ERROR, so * ProcSleep will report an error after we return from the signal * handler. */ Assert(MyProc->waitLock != NULL); if (Gp_role == GP_ROLE_DISPATCH && ResourceScheduler && LOCK_LOCKMETHOD(*(MyProc->waitLock)) == RESOURCE_LOCKMETHOD) { ResRemoveFromWaitQueue(MyProc, LockTagHashCode(&(MyProc->waitLock->tag))); } else { RemoveFromWaitQueue(MyProc, LockTagHashCode(&(MyProc->waitLock->tag))); } /* * Unlock my semaphore so that the interrupted ProcSleep() call can * finish. */ PGSemaphoreUnlock(&MyProc->sem); /* * We're done here. Transaction abort caused by the error that * ProcSleep will raise will cause any other locks we hold to be * released, thus allowing other processes to wake up; we don't need * to do that here. NOTE: an exception is that releasing locks we * hold doesn't consider the possibility of waiters that were blocked * behind us on the lock we just failed to get, and might now be * wakable because we're not in front of them anymore. However, * RemoveFromWaitQueue took care of waking up any such processes. */ } else if (log_lock_waits || deadlock_state == DS_BLOCKED_BY_AUTOVACUUM) { /* * Unlock my semaphore so that the interrupted ProcSleep() call can * print the log message (we daren't do it here because we are inside * a signal handler). It will then sleep again until someone releases * the lock. 
* * If blocked by autovacuum, this wakeup will enable ProcSleep to send * the cancelling signal to the autovacuum worker. */ PGSemaphoreUnlock(&MyProc->sem); } /* * And release locks. We do this in reverse order for two reasons: (1) * Anyone else who needs more than one of the locks will be trying to lock * them in increasing order; we don't want to release the other process * until it can get all the locks it needs. (2) This avoids O(N^2) * behavior inside LWLockRelease. */ check_done: for (i = NUM_LOCK_PARTITIONS; --i >= 0;) LWLockRelease(FirstLockMgrLock + i); } /* * ProcWaitForSignal - wait for a signal from another backend. * * This can share the semaphore normally used for waiting for locks, * since a backend could never be waiting for a lock and a signal at * the same time. As with locks, it's OK if the signal arrives just * before we actually reach the waiting state. Also as with locks, * it's necessary that the caller be robust against bogus wakeups: * always check that the desired state has occurred, and wait again * if not. This copes with possible "leftover" wakeups. */ void ProcWaitForSignal(void) { PGSemaphoreLock(&MyProc->sem, true); } /* * ProcSendSignal - send a signal to a backend identified by PID */ void ProcSendSignal(int pid) { PGPROC *proc = BackendPidGetProc(pid); if (proc != NULL) PGSemaphoreUnlock(&proc->sem); } /***************************************************************************** * SIGALRM interrupt support * * Maybe these should be in pqsignal.c? *****************************************************************************/ /* * Enable the SIGALRM interrupt to fire after the specified delay * * Delay is given in milliseconds. Caller should be sure a SIGALRM * signal handler is installed before this is called. * * This code properly handles nesting of deadlock timeout alarms within * statement timeout alarms. * * Returns TRUE if okay, FALSE on failure. 
*/ bool enable_sig_alarm(int delayms, bool is_statement_timeout) { TimestampTz fin_time; struct itimerval timeval; if (is_statement_timeout) { /* * Begin statement-level timeout * * Note that we compute statement_fin_time with reference to the * statement_timestamp, but apply the specified delay without any * correction; that is, we ignore whatever time has elapsed since * statement_timestamp was set. In the normal case only a small * interval will have elapsed and so this doesn't matter, but there * are corner cases (involving multi-statement query strings with * embedded COMMIT or ROLLBACK) where we might re-initialize the * statement timeout long after initial receipt of the message. In * such cases the enforcement of the statement timeout will be a bit * inconsistent. This annoyance is judged not worth the cost of * performing an additional gettimeofday() here. */ Assert(!deadlock_timeout_active); fin_time = GetCurrentStatementStartTimestamp(); fin_time = TimestampTzPlusMilliseconds(fin_time, delayms); statement_fin_time = fin_time; cancel_from_timeout = false; statement_timeout_active = true; } else if (statement_timeout_active) { /* * Begin deadlock timeout with statement-level timeout active * * Here, we want to interrupt at the closer of the two timeout times. * If fin_time >= statement_fin_time then we need not touch the * existing timer setting; else set up to interrupt at the deadlock * timeout time. * * NOTE: in this case it is possible that this routine will be * interrupted by the previously-set timer alarm. This is okay * because the signal handler will do only what it should do according * to the state variables. The deadlock checker may get run earlier * than normal, but that does no harm. 
*/ timeout_start_time = GetCurrentTimestamp(); fin_time = TimestampTzPlusMilliseconds(timeout_start_time, delayms); deadlock_timeout_active = true; if (fin_time >= statement_fin_time) return true; } else { /* Begin deadlock timeout with no statement-level timeout */ deadlock_timeout_active = true; /* GetCurrentTimestamp can be expensive, so only do it if we must */ if (log_lock_waits) timeout_start_time = GetCurrentTimestamp(); } /* If we reach here, okay to set the timer interrupt */ MemSet(&timeval, 0, sizeof(struct itimerval)); timeval.it_value.tv_sec = delayms / 1000; timeval.it_value.tv_usec = (delayms % 1000) * 1000; if (setitimer(ITIMER_REAL, &timeval, NULL)) return false; return true; } /* * Cancel the SIGALRM timer, either for a deadlock timeout or a statement * timeout. If a deadlock timeout is canceled, any active statement timeout * remains in force. * * Returns TRUE if okay, FALSE on failure. */ bool disable_sig_alarm(bool is_statement_timeout) { /* * Always disable the interrupt if it is active; this avoids being * interrupted by the signal handler and thereby possibly getting * confused. * * We will re-enable the interrupt if necessary in CheckStatementTimeout. */ if (statement_timeout_active || deadlock_timeout_active) { struct itimerval timeval; MemSet(&timeval, 0, sizeof(struct itimerval)); if (setitimer(ITIMER_REAL, &timeval, NULL)) { statement_timeout_active = false; cancel_from_timeout = false; deadlock_timeout_active = false; return false; } } /* Always cancel deadlock timeout, in case this is error cleanup */ deadlock_timeout_active = false; /* Cancel or reschedule statement timeout */ if (is_statement_timeout) { statement_timeout_active = false; cancel_from_timeout = false; } else if (statement_timeout_active) { if (!CheckStatementTimeout()) return false; } return true; } /* * We get here when a session has been idle for a while (waiting for the * client to send us SQL to execute). 
The idea is to consume less resources while sitting idle, * so we can support more sessions being logged on. * * The expectation is that if the session is logged on, but nobody is sending us work to do, * we want to free up whatever resources we can. Usually it means there is a human being at the * other end of the connection, and that person has walked away from their terminal, or just hasn't * decided what to do next. We could be idle for a very long time (many hours). * * Of course, freeing gangs means that the next time the user does send in an SQL statement, * we need to allocate gangs (at least the writer gang) to do anything. This entails extra work, * so we don't want to do this if we don't think the session has gone idle. * * P.s: Is there anything we can free up on the master (QD) side? I can't think of anything. * */ static void HandleClientWaitTimeout(void) { elog(DEBUG2,"HandleClientWaitTimeout"); /* * cancel the timer, as there is no reason we need it to go off again. */ disable_sig_alarm(false); /* * Free gangs to free up resources on the segDBs. */ if (GangsExist()) { if (IsTransactionOrTransactionBlock() || TempNamespaceOidIsValid()) { /* * If we are in a transaction, we can't release the writer gang, * as this will abort the transaction. * * If we have a TempNameSpace, we can't release the writer gang, as this * would drop any temp tables we own. * * Since we are idle, any reader gangs will be available but not allocated. */ disconnectAndDestroyIdleReaderGangs(); } else { /* * Get rid of ALL gangs... Readers and primary writer. * After this, we have no resources being consumed on the segDBs at all. * * Our session wasn't destroyed due to an fatal error or FTS action, so * we don't need to do anything special. Specifically, we DON'T want * to act like we are now in a new session, since that would be confusing * in the log. * */ DisconnectAndDestroyAllGangs(false); } } } /* * Check for statement timeout. 
If the timeout time has come, * trigger a query-cancel interrupt; if not, reschedule the SIGALRM * interrupt to occur at the right time. * * Returns true if okay, false if failed to set the interrupt. */ static bool CheckStatementTimeout(void) { TimestampTz now; if (!statement_timeout_active) return true; /* do nothing if not active */ /* QD takes care of timeouts for QE. */ if (Gp_role == GP_ROLE_EXECUTE) return true; now = GetCurrentTimestamp(); if (now >= statement_fin_time) { /* Time to die */ statement_timeout_active = false; cancel_from_timeout = true; #ifdef HAVE_SETSID /* try to signal whole process group */ kill(-MyProcPid, SIGINT); #endif kill(MyProcPid, SIGINT); } else { /* Not time yet, so (re)schedule the interrupt */ long secs; int usecs; struct itimerval timeval; TimestampDifference(now, statement_fin_time, &secs, &usecs); /* * It's possible that the difference is less than a microsecond; * ensure we don't cancel, rather than set, the interrupt. */ if (secs == 0 && usecs == 0) usecs = 1; MemSet(&timeval, 0, sizeof(struct itimerval)); timeval.it_value.tv_sec = secs; timeval.it_value.tv_usec = usecs; if (setitimer(ITIMER_REAL, &timeval, NULL)) return false; } return true; } /* * need DoingCommandRead to be extern so we can test it here. * Or would it be better to have some routine to call to get the * value of the bool? This is simpler. */ extern bool DoingCommandRead; /* * Signal handler for SIGALRM * * Process deadlock check and/or statement timeout check, as needed. * To avoid various edge cases, we must be careful to do nothing * when there is nothing to be done. We also need to be able to * reschedule the timer interrupt if called before end of statement. 
*/ void handle_sig_alarm(SIGNAL_ARGS) { int save_errno = errno; /* SIGALRM is cause for waking anything waiting on the process latch */ if (MyProc) SetLatch(&MyProc->procLatch); /* don't joggle the elbow of proc_exit */ if (!proc_exit_inprogress) { /* * Idle session timeout shares with the deadlock timeout. * If DoingCommandRead is true, we are deciding the session is idle * In that case, we can't possibly be in a deadlock, so no point * in running the deadlock detection. */ if (deadlock_timeout_active && !DoingCommandRead) { deadlock_timeout_active = false; CheckDeadLock(); } if (statement_timeout_active) (void) CheckStatementTimeout(); /* * If we are DoingCommandRead, it means we are sitting idle waiting for * the user to send us some SQL. */ if (DoingCommandRead) { (void) ClientWaitTimeoutInterruptHandler(); deadlock_timeout_active = false; } } errno = save_errno; } static void ClientWaitTimeoutInterruptHandler(void) { int save_errno = errno; /* Don't joggle the elbow of proc_exit */ if (proc_exit_inprogress) return; if (clientWaitTimeoutInterruptEnabled) { bool save_ImmediateInterruptOK = ImmediateInterruptOK; /* * We may be called while ImmediateInterruptOK is true; turn it off * while messing with the client wait timeout state. */ ImmediateInterruptOK = false; /* * I'm not sure whether some flavors of Unix might allow another * SIGALRM occurrence to recursively interrupt this routine. To cope * with the possibility, we do the same sort of dance that * EnableNotifyInterrupt must do -- see that routine for comments. */ clientWaitTimeoutInterruptEnabled = 0; /* disable any recursive signal */ clientWaitTimeoutInterruptOccurred = 1; /* do at least one iteration */ for (;;) { clientWaitTimeoutInterruptEnabled = 1; if (!clientWaitTimeoutInterruptOccurred) break; clientWaitTimeoutInterruptEnabled = 0; if (clientWaitTimeoutInterruptOccurred) { ProcessClientWaitTimeout(); } } /* * Restore ImmediateInterruptOK, and check for interrupts if needed. 
*/ ImmediateInterruptOK = save_ImmediateInterruptOK; if (save_ImmediateInterruptOK) CHECK_FOR_INTERRUPTS(); } else { /* * In this path it is NOT SAFE to do much of anything, except this: */ clientWaitTimeoutInterruptOccurred = 1; } errno = save_errno; } void EnableClientWaitTimeoutInterrupt(void) { for (;;) { clientWaitTimeoutInterruptEnabled = 1; if (!clientWaitTimeoutInterruptOccurred) break; clientWaitTimeoutInterruptEnabled = 0; if (clientWaitTimeoutInterruptOccurred) { ProcessClientWaitTimeout(); } } } bool DisableClientWaitTimeoutInterrupt(void) { bool result = (clientWaitTimeoutInterruptEnabled != 0); clientWaitTimeoutInterruptEnabled = 0; return result; } static void ProcessClientWaitTimeout(void) { bool notify_enabled; bool catchup_enabled; /* Must prevent SIGUSR1 and SIGUSR2 interrupt while I am running */ notify_enabled = DisableNotifyInterrupt(); catchup_enabled = DisableCatchupInterrupt(); clientWaitTimeoutInterruptOccurred = 0; HandleClientWaitTimeout(); if (notify_enabled) EnableNotifyInterrupt(); if (catchup_enabled) EnableCatchupInterrupt(); } /* * ResProcSleep -- put a process to sleep (that is waiting for a resource lock). * * Notes: * Locktable's masterLock must be held at entry, and will be held * at exit. * * This is merely a version of ProcSleep modified for resource locks. * The logic here could have been merged into ProcSleep, however it was * requested to keep as much as possible of this resource lock code * seperate from its standard lock relatives - in the interest of not * introducing new bugs or performance regressions into the lock code. */ int ResProcSleep(LOCKMODE lockmode, LOCALLOCK *locallock, void *incrementSet) { LOCK *lock = locallock->lock; PROCLOCK *proclock = locallock->proclock; PROC_QUEUE *waitQueue = &(lock->waitProcs); PGPROC *proc; uint32 hashcode = locallock->hashcode; LWLockId partitionLock = LockHashPartitionLock(hashcode); bool selflock = true; /* initialize result for error. 
*/ /* * Don't check my held locks, as we just add at the end of the queue. */ proc = (PGPROC *) &(waitQueue->links); SHMQueueInsertBefore(&(proc->links), &(MyProc->links)); waitQueue->size++; lock->waitMask |= LOCKBIT_ON(lockmode); /* * reflect this in PGPROC object, too. */ MyProc->waitLock = lock; MyProc->waitProcLock = (PROCLOCK *) proclock; MyProc->waitLockMode = lockmode; MyProc->waitStatus = STATUS_ERROR; /* initialize result for error */ /* Now check the status of the self lock footgun. */ selflock = ResCheckSelfDeadLock(lock, proclock, incrementSet); if (selflock) { LWLockRelease(partitionLock); ereport(ERROR, (errcode(ERRCODE_T_R_DEADLOCK_DETECTED), errmsg("deadlock detected, locking against self"))); } /* Mark that we are waiting for a lock */ lockAwaited = locallock; /* Ok to wait.*/ LWLockRelease(partitionLock); if (!enable_sig_alarm(DeadlockTimeout, false)) elog(FATAL, "could not set timer for (resource lock) process wakeup"); /* * Sleep on the semaphore. */ PGSemaphoreLock(&MyProc->sem, true); if (!disable_sig_alarm(false)) elog(FATAL, "could not disable timer for (resource lock) process wakeup"); /* * Have been awakened, so continue. */ LWLockAcquire(partitionLock, LW_EXCLUSIVE); /* * We no longer want (Res)LockWaitCancel to do anything. */ lockAwaited = NULL; return MyProc->waitStatus; } /* * ResLockWaitCancel -- Cancel any pending wait for a resource lock, when * aborting a transaction. */ void ResLockWaitCancel(void) { LWLockId partitionLock; if (lockAwaited != NULL) { /* Unlink myself from the wait queue, if on it */ partitionLock = LockHashPartitionLock(lockAwaited->hashcode); LWLockAcquire(partitionLock, LW_EXCLUSIVE); if (MyProc->links.next != INVALID_OFFSET) { /* We could not have been granted the lock yet */ Assert(MyProc->waitStatus == STATUS_ERROR); /* We should only be trying to cancel resource locks. 
*/ Assert(LOCALLOCK_LOCKMETHOD(*lockAwaited) == RESOURCE_LOCKMETHOD); ResRemoveFromWaitQueue(MyProc, lockAwaited->hashcode); } lockAwaited = NULL; LWLockRelease(partitionLock); } /* * Reset the proc wait semaphore to zero. This is necessary in the * scenario where someone else granted us the lock we wanted before we * were able to remove ourselves from the wait-list. */ PGSemaphoreReset(&MyProc->sem); return; } bool ProcGetMppLocalProcessCounter(int *mppLocalProcessCounter) { Assert(mppLocalProcessCounter != NULL); if (ProcGlobal == NULL) return false; *mppLocalProcessCounter = ProcGlobal->mppLocalProcessCounter; return true; } bool ProcCanSetMppSessionId(void) { if (ProcGlobal == NULL || MyProc == NULL) return false; return true; } void ProcNewMppSessionId(int *newSessionId) { Assert(newSessionId != NULL); *newSessionId = MyProc->mppSessionId = pg_atomic_add_fetch_u32((pg_atomic_uint32 *)&ProcGlobal->mppLocalProcessCounter, 1); /* * Make sure that our SessionState entry correctly records our * new session id. */ if (NULL != MySessionState) { /* This should not happen outside of dispatcher on the master */ Assert(GpIdentity.segindex == MASTER_CONTENT_ID && Gp_role == GP_ROLE_DISPATCH); ereport(gp_sessionstate_loglevel, (errmsg("ProcNewMppSessionId: changing session id (old: %d, new: %d), pinCount: %d, activeProcessCount: %d", MySessionState->sessionId, *newSessionId, MySessionState->pinCount, MySessionState->activeProcessCount), errprintstack(true))); #ifdef USE_ASSERT_CHECKING MySessionState->isModifiedSessionId = true; #endif MySessionState->sessionId = *newSessionId; } }
lintzc/gpdb
src/backend/storage/lmgr/proc.c
C
apache-2.0
62,979
/*============================================================================= Library: XNAT/Core Copyright (c) University College London, Centre for Medical Image Computing Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =============================================================================*/ #ifndef ctkXnatScanFolder_h #define ctkXnatScanFolder_h #include "ctkXNATCoreExport.h" #include "ctkXnatObject.h" class ctkXnatConnection; class ctkXnatExperiment; class ctkXnatScanFolderPrivate; /** * @ingroup XNAT_Core */ class CTK_XNAT_CORE_EXPORT ctkXnatScanFolder : public ctkXnatObject { public: ctkXnatScanFolder(ctkXnatObject* parent = NULL); virtual ~ctkXnatScanFolder(); virtual QString resourceUri() const; void reset(); private: friend class qRestResult; virtual void fetchImpl(); virtual void downloadImpl(const QString&); Q_DECLARE_PRIVATE(ctkXnatScanFolder) }; #endif
laurennlam/CTK
Libs/XNAT/Core/ctkXnatScanFolder.h
C
apache-2.0
1,422
<script src="tree.jquery.js"></script>
ms123s/simpl4-src
surface2/domelements/src/simpl-jqtree/import-js.html
HTML
apache-2.0
39
set SEARCH_PATH to CARABI_CHAT; drop table CHAT_MESSAGE; drop table FILE; drop view DUAL; drop sequence message_id_gen; drop sequence file_id_gen; drop schema CARABI_CHAT restrict; --commit;
Carabi/carabiserver
src/main/sql/chat_drop.sql
SQL
apache-2.0
194
from a10sdk.common.A10BaseClass import A10BaseClass class Port(A10BaseClass): """Class Description:: Port template. Class port supports CRUD Operations and inherits from `common/A10BaseClass`. This class is the `"PARENT"` class for this module.` :param health_check_disable: {"description": "Disable configured health check configuration", "format": "flag", "default": 0, "optional": true, "not": "health-check", "type": "number"} :param stats_data_action: {"description": "'stats-data-enable': Enable statistical data collection for real server port; 'stats-data-disable': Disable statistical data collection for real server port; ", "format": "enum", "default": "stats-data-enable", "type": "string", "enum": ["stats-data-enable", "stats-data-disable"], "optional": true} :param resel_on_reset: {"default": 0, "optional": true, "type": "number", "description": "When receiving reset from server, do the server/port reselection (default is 0, don't do reselection)", "format": "flag"} :param dest_nat: {"default": 0, "optional": true, "type": "number", "description": "Destination NAT", "format": "flag"} :param request_rate_limit: {"description": "Request rate limit", "format": "number", "type": "number", "maximum": 1048575, "minimum": 1, "optional": true} :param dynamic_member_priority: {"description": "Set dynamic member's priority (Initial priority (default is 16))", "format": "number", "default": 16, "optional": true, "maximum": 16, "minimum": 1, "type": "number"} :param till: {"description": "Slow start ends when slow start connection limit reaches a number (default 4096) (Slow start ends when connection limit reaches this number)", "format": "number", "default": 4096, "optional": true, "maximum": 65535, "minimum": 1, "type": "number"} :param slow_start: {"default": 0, "optional": true, "type": "number", "description": "Slowly ramp up the connection number after port is up", "format": "flag"} :param decrement: {"description": "Decrease after every round of DNS query (default 
is 0)", "format": "number", "default": 0, "optional": true, "maximum": 7, "minimum": 0, "type": "number"} :param conn_limit: {"description": "Connection limit", "format": "number", "type": "number", "maximum": 8000000, "minimum": 1, "optional": true} :param retry: {"description": "Maximum retry times before reassign this connection to another server/port (default is 2) (The maximum retry number)", "format": "number", "default": 2, "optional": true, "maximum": 7, "minimum": 0, "type": "number"} :param weight: {"description": "Weight (port weight)", "format": "number", "default": 1, "optional": true, "maximum": 100, "minimum": 1, "type": "number"} :param inband_health_check: {"default": 0, "optional": true, "type": "number", "description": "Use inband traffic to detect port's health status", "format": "flag"} :param initial_slow_start: {"description": "Initial slow start connection limit (default 128)", "format": "number", "default": 128, "optional": true, "maximum": 4095, "minimum": 1, "type": "number"} :param rate_interval: {"description": "'100ms': Use 100 ms as sampling interval; 'second': Use 1 second as sampling interval; ", "format": "enum", "default": "second", "type": "string", "enum": ["100ms", "second"], "optional": true} :param no_ssl: {"default": 0, "optional": true, "type": "number", "description": "No SSL", "format": "flag"} :param request_rate_interval: {"description": "'100ms': Use 100 ms as sampling interval; 'second': Use 1 second as sampling interval; ", "format": "enum", "default": "second", "type": "string", "enum": ["100ms", "second"], "optional": true} :param add: {"description": "Slow start connection limit add by a number every interval (Add by this number every interval)", "format": "number", "optional": true, "maximum": 4095, "minimum": 1, "not": "times", "type": "number"} :param down_grace_period: {"description": "Port down grace period", "format": "number", "type": "number", "maximum": 86400, "minimum": 1, "optional": true} :param 
resume: {"description": "Resume accepting new connection after connection number drops below threshold (Connection resume threshold)", "format": "number", "type": "number", "maximum": 1048575, "minimum": 1, "optional": true} :param dscp: {"description": "Differentiated Services Code Point (DSCP to Real Server IP Mapping Value)", "format": "number", "type": "number", "maximum": 63, "minimum": 1, "optional": true} :param every: {"description": "Slow start connection limit increment interval (default 10)", "format": "number", "default": 10, "optional": true, "maximum": 60, "minimum": 1, "type": "number"} :param conn_limit_no_logging: {"default": 0, "optional": true, "type": "number", "description": "Do not log connection over limit event", "format": "flag"} :param extended_stats: {"default": 0, "optional": true, "type": "number", "description": "Enable extended statistics on real server port", "format": "flag"} :param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"} :param reset: {"default": 0, "optional": true, "type": "number", "description": "Send client reset when connection rate over limit", "format": "flag"} :param conn_rate_limit_no_logging: {"default": 0, "optional": true, "type": "number", "description": "Do not log connection over limit event", "format": "flag"} :param name: {"description": "Port template name", "format": "string-rlx", "default": "default", "minLength": 1, "optional": false, "maxLength": 63, "type": "string"} :param times: {"description": "Slow start connection limit multiply by a number every interval (default 2) (Multiply by this number every interval)", "format": "number", "default": 2, "optional": true, "maximum": 10, "minimum": 2, "not": "add", "type": "number"} :param request_rate_no_logging: {"default": 0, "optional": true, "type": "number", "description": "Do not log connection over limit event", "format": "flag"} :param 
down_timer: {"description": "The timer to bring the marked down server/port to up (default is 0, never bring up) (The timer to bring up server (in second, default is 0))", "format": "number", "default": 0, "optional": true, "maximum": 255, "minimum": 0, "type": "number"} :param conn_rate_limit: {"description": "Connection rate limit", "format": "number", "type": "number", "maximum": 1048575, "minimum": 1, "optional": true} :param source_nat: {"description": "Source NAT (IP NAT Pool or pool group name)", "format": "string", "minLength": 1, "optional": true, "maxLength": 128, "type": "string"} :param reassign: {"description": "Maximum reassign times before declear the server/port down (default is 25) (The maximum reassign number)", "format": "number", "default": 25, "optional": true, "maximum": 255, "minimum": 0, "type": "number"} :param health_check: {"description": "Health Check Monitor (Health monitor name)", "format": "string", "minLength": 1, "optional": true, "maxLength": 31, "not": "health-check-disable", "type": "string", "$ref": "/axapi/v3/health/monitor"} :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py` URL for this object:: `https://<Hostname|Ip address>//axapi/v3/slb/template/port/{name}`. 
""" def __init__(self, **kwargs): self.ERROR_MSG = "" self.required = [ "name"] self.b_key = "port" self.a10_url="/axapi/v3/slb/template/port/{name}" self.DeviceProxy = "" self.health_check_disable = "" self.stats_data_action = "" self.resel_on_reset = "" self.dest_nat = "" self.request_rate_limit = "" self.dynamic_member_priority = "" self.till = "" self.slow_start = "" self.decrement = "" self.conn_limit = "" self.retry = "" self.weight = "" self.inband_health_check = "" self.initial_slow_start = "" self.rate_interval = "" self.no_ssl = "" self.request_rate_interval = "" self.add = "" self.down_grace_period = "" self.resume = "" self.dscp = "" self.every = "" self.conn_limit_no_logging = "" self.extended_stats = "" self.uuid = "" self.reset = "" self.conn_rate_limit_no_logging = "" self.name = "" self.times = "" self.request_rate_no_logging = "" self.down_timer = "" self.conn_rate_limit = "" self.source_nat = "" self.reassign = "" self.health_check = "" for keys, value in kwargs.items(): setattr(self,keys, value)
amwelch/a10sdk-python
a10sdk/core/slb/slb_template_port.py
Python
apache-2.0
8,848
package de.stephanlindauer.criticalmaps; import android.app.Application; import android.test.ApplicationTestCase; /** * <a href="http://d.android.com/tools/testing/testing_android.html">Testing Fundamentals</a> */ public class ApplicationTest extends ApplicationTestCase<Application> { public ApplicationTest() { super(Application.class); } }
stephanlindauer/criticalmaps-android
app/src/androidTest/java/de/stephanlindauer/criticalmaps/ApplicationTest.java
Java
apache-2.0
362
package eu.qualimaster.data.imp; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import javax.annotation.Generated; import com.esotericsoftware.kryo.io.Output; import com.esotericsoftware.kryo.io.Input; import com.esotericsoftware.kryo.Serializer; import com.esotericsoftware.kryo.Kryo; import eu.qualimaster.dataManagement.serialization.ISerializer; import eu.qualimaster.dataManagement.serialization.SerializerRegistry; import eu.qualimaster.dataManagement.serialization.IDataInput; import eu.qualimaster.dataManagement.serialization.IDataOutput; import eu.qualimaster.data.imp.SimulatedFinancialData; import eu.qualimaster.data.inf.*; import eu.qualimaster.protos.SimulatedFinancialDataProtos.*; /** * The implementation of the serializers for the data element "SimulatedFinancialData" (GEN). */ @Generated(value="EASy-Producer") public class SimulatedFinancialDataSerializers { /** * Provides a serializer (supports both Kyro and Protobuf) for the default implementation of the data output for the {@link SimulatedFinancialData.ISimulatedFinancialDataSymbolListOutput} algorithm. 
*/ public static class SimulatedFinancialDataSymbolListOutputSerializer extends Serializer<SimulatedFinancialData.SimulatedFinancialDataSymbolListOutput> implements ISerializer<SimulatedFinancialData.ISimulatedFinancialDataSymbolListOutput> { @Override public void serializeTo(SimulatedFinancialData.ISimulatedFinancialDataSymbolListOutput object, OutputStream out) throws IOException { SerializerRegistry.getListSerializerSafe("STRINGLIST", String.class).serializeTo(object.getAllSymbols(), out); } @Override public void serializeTo(SimulatedFinancialData.ISimulatedFinancialDataSymbolListOutput object, IDataOutput out) throws IOException { SerializerRegistry.getListSerializerSafe("STRINGLIST", String.class).serializeTo(object.getAllSymbols(), out); } @Override public SimulatedFinancialData.ISimulatedFinancialDataSymbolListOutput deserializeFrom(InputStream in) throws IOException { SimulatedFinancialData.SimulatedFinancialDataSymbolListOutput result = new SimulatedFinancialData.SimulatedFinancialDataSymbolListOutput(); result.setAllSymbols(SerializerRegistry.getListSerializerSafe("STRINGLIST", String.class).deserializeFrom(in)); return result; } @Override public SimulatedFinancialData.ISimulatedFinancialDataSymbolListOutput deserializeFrom(IDataInput in) throws IOException { SimulatedFinancialData.SimulatedFinancialDataSymbolListOutput result = new SimulatedFinancialData.SimulatedFinancialDataSymbolListOutput(); result.setAllSymbols(SerializerRegistry.getListSerializerSafe("STRINGLIST", String.class).deserializeFrom(in)); return result; } @Override public void write(Kryo kryo, Output output, SimulatedFinancialData.SimulatedFinancialDataSymbolListOutput object) { kryo.writeObject(output, object.getAllSymbols()); } @Override public SimulatedFinancialData.SimulatedFinancialDataSymbolListOutput read(Kryo kryo, Input input, Class<SimulatedFinancialData.SimulatedFinancialDataSymbolListOutput> type) { SimulatedFinancialData.SimulatedFinancialDataSymbolListOutput result = new 
SimulatedFinancialData.SimulatedFinancialDataSymbolListOutput(); result.setAllSymbols(kryo.readObject(input, java.util.ArrayList.class)); return result; } } /** * Provides a serializer (supports both Kyro and Protobuf) for the default implementation of the data output for the {@link SimulatedFinancialData.ISimulatedFinancialDataSpringStreamOutput} algorithm. */ public static class SimulatedFinancialDataSpringStreamOutputSerializer extends Serializer<SimulatedFinancialData.SimulatedFinancialDataSpringStreamOutput> implements ISerializer<SimulatedFinancialData.ISimulatedFinancialDataSpringStreamOutput> { @Override public void serializeTo(SimulatedFinancialData.ISimulatedFinancialDataSpringStreamOutput object, OutputStream out) throws IOException { SSimulatedFinancialDataSpringStreamOutput tmp = SSimulatedFinancialDataSpringStreamOutput.newBuilder() .setSymbolTuple(object.getSymbolTuple()) .build(); tmp.writeDelimitedTo(out); } @Override public void serializeTo(SimulatedFinancialData.ISimulatedFinancialDataSpringStreamOutput object, IDataOutput out) throws IOException { out.writeString(object.getSymbolTuple()); } @Override public SimulatedFinancialData.ISimulatedFinancialDataSpringStreamOutput deserializeFrom(InputStream in) throws IOException { SimulatedFinancialData.SimulatedFinancialDataSpringStreamOutput result = new SimulatedFinancialData.SimulatedFinancialDataSpringStreamOutput(); SSimulatedFinancialDataSpringStreamOutput tmp = SSimulatedFinancialDataSpringStreamOutput.parseDelimitedFrom(in); result.setSymbolTuple(tmp.getSymbolTuple()); return result; } @Override public SimulatedFinancialData.ISimulatedFinancialDataSpringStreamOutput deserializeFrom(IDataInput in) throws IOException { SimulatedFinancialData.SimulatedFinancialDataSpringStreamOutput result = new SimulatedFinancialData.SimulatedFinancialDataSpringStreamOutput(); result.setSymbolTuple(in.nextString()); return result; } @Override public void write(Kryo kryo, Output output, 
SimulatedFinancialData.SimulatedFinancialDataSpringStreamOutput object) { output.writeString(object.getSymbolTuple()); } @Override public SimulatedFinancialData.SimulatedFinancialDataSpringStreamOutput read(Kryo kryo, Input input, Class<SimulatedFinancialData.SimulatedFinancialDataSpringStreamOutput> type) { SimulatedFinancialData.SimulatedFinancialDataSpringStreamOutput result = new SimulatedFinancialData.SimulatedFinancialDataSpringStreamOutput(); result.setSymbolTuple(input.readString()); return result; } } }
SSEHUB/EASyProducer
Plugins/EASy-Producer/ScenariosTest/testdata/real/QualiMaster/sep17/expected/if-gen/eu/qualimaster/data/imp/SimulatedFinancialDataSerializers.java
Java
apache-2.0
6,087
package Google::Ads::AdWords::v201409::BidLandscape::LandscapePoint; use strict; use warnings; __PACKAGE__->_set_element_form_qualified(1); sub get_xmlns { 'https://adwords.google.com/api/adwords/cm/v201409' }; our $XML_ATTRIBUTE_CLASS; undef $XML_ATTRIBUTE_CLASS; sub __get_attr_class { return $XML_ATTRIBUTE_CLASS; } use Class::Std::Fast::Storable constructor => 'none'; use base qw(Google::Ads::SOAP::Typelib::ComplexType); { # BLOCK to scope variables my %bid_of :ATTR(:get<bid>); my %clicks_of :ATTR(:get<clicks>); my %cost_of :ATTR(:get<cost>); my %marginalCpc_of :ATTR(:get<marginalCpc>); my %impressions_of :ATTR(:get<impressions>); my %promotedImpressions_of :ATTR(:get<promotedImpressions>); __PACKAGE__->_factory( [ qw( bid clicks cost marginalCpc impressions promotedImpressions ) ], { 'bid' => \%bid_of, 'clicks' => \%clicks_of, 'cost' => \%cost_of, 'marginalCpc' => \%marginalCpc_of, 'impressions' => \%impressions_of, 'promotedImpressions' => \%promotedImpressions_of, }, { 'bid' => 'Google::Ads::AdWords::v201409::Money', 'clicks' => 'SOAP::WSDL::XSD::Typelib::Builtin::long', 'cost' => 'Google::Ads::AdWords::v201409::Money', 'marginalCpc' => 'Google::Ads::AdWords::v201409::Money', 'impressions' => 'SOAP::WSDL::XSD::Typelib::Builtin::long', 'promotedImpressions' => 'SOAP::WSDL::XSD::Typelib::Builtin::long', }, { 'bid' => 'bid', 'clicks' => 'clicks', 'cost' => 'cost', 'marginalCpc' => 'marginalCpc', 'impressions' => 'impressions', 'promotedImpressions' => 'promotedImpressions', } ); } # end BLOCK 1; =pod =head1 NAME Google::Ads::AdWords::v201409::BidLandscape::LandscapePoint =head1 DESCRIPTION Perl data type class for the XML Schema defined complexType BidLandscape.LandscapePoint from the namespace https://adwords.google.com/api/adwords/cm/v201409. A set of estimates for a criterion's performance for a specific bid amount. 
=head2 PROPERTIES The following properties may be accessed using get_PROPERTY / set_PROPERTY methods: =over =item * bid =item * clicks =item * cost =item * marginalCpc =item * impressions =item * promotedImpressions =back =head1 METHODS =head2 new Constructor. The following data structure may be passed to new(): =head1 AUTHOR Generated by SOAP::WSDL =cut
gitpan/GOOGLE-ADWORDS-PERL-CLIENT
lib/Google/Ads/AdWords/v201409/BidLandscape/LandscapePoint.pm
Perl
apache-2.0
2,498
# Copyright 2016 Hewlett Packard Enterprise Development Company, LP # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import subnet as subnet_def from neutron_lib.db import resource_extend @resource_extend.has_resource_extenders class SubnetServiceTypeMixin(object): """Mixin class to extend subnet with service type attribute""" @staticmethod @resource_extend.extends([subnet_def.COLLECTION_NAME]) def _extend_subnet_service_types(subnet_res, subnet_db): subnet_res['service_types'] = [service_type['service_type'] for service_type in subnet_db.service_types]
mahak/neutron
neutron/db/subnet_service_type_mixin.py
Python
apache-2.0
1,247
using System; using System.Collections.Generic; using System.Collections.Specialized; using System.Diagnostics; using System.Globalization; using System.IO; using System.Linq; using System.Management.Automation; using System.Runtime.Serialization.Json; using System.Threading.Tasks; using NuGet.VisualStudio; namespace NuGet.PowerShell.Commands { public abstract class JsonApiCommandBase<T> : NuGetBaseCommand where T : class { private readonly IVsPackageSourceProvider _packageSourceProvider; private readonly IPackageRepositoryFactory _repositoryFactory; protected JsonApiCommandBase() : this( ServiceLocator.GetInstance<ISolutionManager>(), ServiceLocator.GetInstance<IVsPackageManagerFactory>(), ServiceLocator.GetInstance<IHttpClientEvents>(), ServiceLocator.GetInstance<IPackageRepositoryFactory>(), ServiceLocator.GetInstance<IVsPackageSourceProvider>()) { } protected JsonApiCommandBase( ISolutionManager solutionManager, IVsPackageManagerFactory packageManagerFactory, IHttpClientEvents httpClientEvents, IPackageRepositoryFactory repositoryFactory, IVsPackageSourceProvider packageSourceProvider) : base(solutionManager, packageManagerFactory, httpClientEvents) { _repositoryFactory = repositoryFactory; _packageSourceProvider = packageSourceProvider; } public abstract string ApiEndpointPath { get; } [Parameter] [ValidateNotNullOrEmpty] public string Source { get; set; } [Parameter] [Alias("Prerelease")] public SwitchParameter IncludePrerelease { get; set; } protected abstract Dictionary<string, string> BuildApiEndpointQueryParameters(); [System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Design", "CA1024:UsePropertiesWhereAppropriate", Justification = "Too much logic for getter.")] protected virtual IPackageRepository GetPackageRepository() { if (!String.IsNullOrEmpty(Source)) { return CreateRepositoryFromSource(_repositoryFactory, _packageSourceProvider, Source); } if (_packageSourceProvider.ActivePackageSource != null) { return 
_repositoryFactory.CreateRepository(_packageSourceProvider.ActivePackageSource.Source); } throw new InvalidOperationException(Resources.Cmdlet_NoActivePackageSource); } protected abstract IEnumerable<T> GetResultsFromPackageRepository(IPackageRepository packageRepository); protected virtual IEnumerable<T> GetResults(Uri apiEndpointUri) { var jsonSerializer = new DataContractJsonSerializer(typeof(T[])); var httpClient = new HttpClient(apiEndpointUri); using (var stream = new MemoryStream(httpClient.DownloadData())) { return jsonSerializer.ReadObject(stream) as T[]; } } [System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Usage", "CA2234:PassSystemUriObjectsInsteadOfStrings", Justification="Wrapping the string in a new URI doesn't improve anything.")] protected override void ProcessRecordCore() { var packageRepository = GetPackageRepository(); var aggregatePackageRepository = packageRepository as AggregateRepository; if (aggregatePackageRepository != null) { WriteResults(AggregateResults(aggregatePackageRepository)); } else { WriteResults(GetResults(packageRepository)); } } private IEnumerable<T> AggregateResults(AggregateRepository aggregatePackageRepository) { var tasks = aggregatePackageRepository.Repositories .Select(r => Task.Factory.StartNew(() => GetResults(r))) .ToArray(); Task.WaitAll(tasks); return tasks .SelectMany(t => t.Result) .Distinct() .Take(30); } private static string BuildQueryString(Dictionary<string, string> queryParameters) { if (queryParameters.Count == 0) { return string.Empty; } return String.Join("&", queryParameters.Select(param => string.Format(CultureInfo.InvariantCulture, "{0}={1}", param.Key, Uri.EscapeDataString(param.Value)))); } private IEnumerable<T> GetResults(IPackageRepository packageRepository) { Debug.Assert(!(packageRepository is AggregateRepository), "This should never be called for an aggregate package repository."); if (!UriHelper.IsHttpSource(packageRepository.Source)) { return 
GetResultsFromPackageRepository(packageRepository); } else { var queryParameters = BuildApiEndpointQueryParameters() ?? new Dictionary<string, string>(); if (IncludePrerelease) { queryParameters.Add("includePrerelease", "true"); } var uriBuilder = new UriBuilder(packageRepository.Source) { Path = ApiEndpointPath, Query = BuildQueryString(queryParameters) }; return GetResults(uriBuilder.Uri); } } private void WriteResults(IEnumerable<T> results) { foreach (var result in results) { WriteObject(result); } } } }
atheken/nuget
src/VsConsole/PowerShellCmdlets/JsonApiCommandBase.cs
C#
apache-2.0
5,780
<!-- Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> <html> <head> <title>org.codehaus.groovy.runtime.powerassert (groovy 2.4.4 API)</title> <meta name="keywords" content="org.codehaus.groovy.runtime.powerassert package"> <meta http-equiv="Content-Type" content="text/html; charset=UTF-8"> <link rel ="stylesheet" TYPE="text/css" HREF="../../../../../stylesheet.css" title="Style"> <link href="../../../../../groovy.ico" type="image/x-icon" rel="shortcut icon"> <link href="../../../../../groovy.ico" type="image/x-icon" rel="icon"> <script type="text/javascript"> function windowTitle() { parent.document.title="org.codehaus.groovy.runtime.powerassert (groovy 2.4.4 API)"; } </script> <noscript> </noscript> </head> <body class="center" onload="windowTitle();"> <!-- ========= START OF TOP NAVBAR ======= --> <div class="topNav"><a name="navbar_top"> <!-- --> </a><a href="#skip-navbar_top" title="Skip navigation links"></a><a name="navbar_top_firstrow"> <!-- --> </a> <a name="navbar_top_firstrow"><!-- --></a> <ul class="navList" title="Navigation"> <li><a href="../../../../../overview-summary.html">Overview</a></li> <li class="navBarCell1Rev">Package</li> <li>Class</li> <li><a 
href="../../../../../deprecated-list.html">Deprecated</a></li> <li><a href="../../../../../index-all.html">Index</a></li> <li><a href="../../../../../help-doc.html">Help</a></li> </ul> </div> <div class="subNav"> <ul class="navList"> <li><a href="../../../../../index.html?org/codehaus/groovy/runtime/powerassert/package-summary.html" target="_top">Frames</a></li> <li><a href="package-summary.html" target="_top">No Frames</a></li> </ul> </div> <!-- ========= END OF TOP NAVBAR ========= --> <div class="header"> <h1 class="title">Package org.codehaus.groovy.runtime.powerassert</h1> </div> <div class="contentContainer"> <div class="summary"> <ul class="blockList"> <li class="blockList"> <table border="0" cellpadding="3" cellspacing="0" summary="Class Summary"> <caption><span>Class Summary</span><span class="tabEnd">&nbsp;</span></caption> <tbody> <tr> <th class="colFirst" scope="col">Class</th> <th class="colLast" scope="col">Description</th> </tr> <tr class="altColor"> <td class="colOne"> <strong><a href="AssertionRenderer.html" title="class in org/codehaus/groovy/runtime/powerassert"> AssertionRenderer </a></strong> </td> <td>Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. </td> </tr> <tr class="altColor"> <td class="colOne"> <strong><a href="SourceText.html" title="class in org/codehaus/groovy/runtime/powerassert"> SourceText </a></strong> </td> <td>Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. </td> </tr> <tr class="altColor"> <td class="colOne"> <strong><a href="Value.html" title="class in org/codehaus/groovy/runtime/powerassert"> Value </a></strong> </td> <td>Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. 
</td> </tr> <tr class="altColor"> <td class="colOne"> <strong><a href="ValueRecorder.html" title="class in org/codehaus/groovy/runtime/powerassert"> ValueRecorder </a></strong> </td> <td>Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. </td> </tr> </tbody> </table> </li> </ul> </div> <div class="summary"> <ul class="blockList"> <li class="blockList"> <table border="0" cellpadding="3" cellspacing="0" summary="Exception Summary"> <caption><span>Exception Summary</span><span class="tabEnd">&nbsp;</span></caption> <tbody> <tr> <th class="colFirst" scope="col">Exception</th> <th class="colLast" scope="col">Description</th> </tr> <tr class="altColor"> <td class="colOne"> <strong><a href="SourceTextNotAvailableException.html" title="class in org/codehaus/groovy/runtime/powerassert"> SourceTextNotAvailableException </a></strong> </td> <td>Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. </td> </tr> </tbody> </table> </li> </ul> </div> <div class="summary"> <ul class="blockList"> <li class="blockList"> <table border="0" cellpadding="3" cellspacing="0" summary="Error Summary"> <caption><span>Error Summary</span><span class="tabEnd">&nbsp;</span></caption> <tbody> <tr> <th class="colFirst" scope="col">Error</th> <th class="colLast" scope="col">Description</th> </tr> <tr class="altColor"> <td class="colOne"> <strong><a href="PowerAssertionError.html" title="class in org/codehaus/groovy/runtime/powerassert"> PowerAssertionError </a></strong> </td> <td>Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. 
</td> </tr> </tbody> </table> </li> </ul> </div> </div> <!-- ======= START OF BOTTOM NAVBAR ====== --> <div class="bottomNav"><a name="navbar_bottom"> <!-- --> </a><a href="#skip-navbar_bottom" title="Skip navigation links"></a><a name="navbar_bottom_firstrow"> <!-- --> </a> <ul class="navList" title="Navigation"> <li><a href="overview-summary.html">Overview</a></li> <li class="navBarCell1Rev">Package</li> <li>Class</li> <li><a href="deprecated-list.html">Deprecated</a></li> <li><a href="index-all.html">Index</a></li> <li><a href="help-doc.html">Help</a></li> </ul> </div> <div class="subNav"> <div> <ul class="navList"> <li><a href="index.html?deprecated-list.html" target="_top">Frames</a></li> <li><a href="deprecated-list.html" target="_top">No Frames</a></li> </ul> </div> <div class="aboutLanguage"><em>Copyright &copy; 2003-2015 The Apache Software Foundation. All rights reserved.</em></div> <a name="skip-navbar_bottom"> <!-- --> </a></div> <!-- ======== END OF BOTTOM NAVBAR ======= --> </body> </html>
OpenBEL/kam-nav
tools/groovy/doc/html/gapi/org/codehaus/groovy/runtime/powerassert/package-summary.html
HTML
apache-2.0
8,765
# # Copyright 2017 Centreon (http://www.centreon.com/) # # Centreon is a full-fledged industry-strength solution that meets # the needs in IT infrastructure and application monitoring for # service performance. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # package network::dlink::standard::snmp::mode::components::temperature; use strict; use warnings; # In MIB 'env_mib.mib' my $mapping = { swTemperatureCurrent => { oid => '.1.3.6.1.4.1.171.12.11.1.8.1.2' }, }; my $oid_swTemperatureEntry = '.1.3.6.1.4.1.171.12.11.1.8.1'; sub load { my ($self) = @_; push @{$self->{request}}, { oid => $oid_swTemperatureEntry }; } sub check { my ($self) = @_; $self->{output}->output_add(long_msg => "Checking temperatures"); $self->{components}->{temperature} = {name => 'temperatures', total => 0, skip => 0}; return if ($self->check_filter(section => 'temperature')); foreach my $oid ($self->{snmp}->oid_lex_sort(keys %{$self->{results}->{$oid_swTemperatureEntry}})) { next if ($oid !~ /^$mapping->{swTemperatureCurrent}->{oid}\.(.*)$/); my $instance = $1; my $result = $self->{snmp}->map_instance(mapping => $mapping, results => $self->{results}->{$oid_swTemperatureEntry}, instance => $instance); next if ($self->check_filter(section => 'temperature', instance => $instance)); $self->{components}->{temperature}->{total}++; $self->{output}->output_add(long_msg => sprintf("Temperature '%s' is %dC.", $instance, $result->{swTemperatureCurrent})); my ($exit, $warn, $crit, $checked) = 
$self->get_severity_numeric(section => 'temperature', instance => $instance, value => $result->{swTemperatureCurrent}); if (!$self->{output}->is_status(value => $exit, compare => 'ok', litteral => 1)) { $self->{output}->output_add(severity => $exit, short_msg => sprintf("Temperature '%s' is %s degree centigrade", $instance, $result->{swTemperatureCurrent})); } $self->{output}->perfdata_add(label => "temp_" . $instance, unit => 'C', value => $result->{swTemperatureCurrent}, warning => $warn, critical => $crit); } } 1;
nichols-356/centreon-plugins
network/dlink/standard/snmp/mode/components/temperature.pm
Perl
apache-2.0
2,858
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using BattleChess.Interfaces;
using BattleChess.Utilities;
using Microsoft.VisualStudio.TestTools.UnitTesting;

namespace BattleChessUnitTests.PositionTestCases
{
    /// <summary>
    /// Shared test suite for <see cref="IPositionFactory"/> implementations.
    /// A concrete subclass only supplies the factory under test through
    /// <see cref="GetFactory"/>; every test defined here then runs against it.
    /// </summary>
    public abstract class AbstractPositionFactoryTestCase
    {
        [ClassInitialize]
        public static void BaseInit(TestContext context)
        {
        }

        private const string InvalidColumnErrorMessage = ErrorMessages.InvalidColumn;
        private const string InvalidRowErrorMessage = ErrorMessages.InvalidRow;

        // Fixture state, rebuilt before every test by SetUp().
        private IPositionFactory factoryUnderTest;
        private IPosition createdPosition;
        private char columnOnBoard;
        private char columnOffBoard;
        private int rowOnBoard;
        private int rowOffBoard;

        /// <summary>Supplies the concrete factory implementation under test.</summary>
        protected abstract IPositionFactory GetFactory();

        [TestInitialize]
        public void SetUp()
        {
            factoryUnderTest = GetFactory();
            columnOnBoard = 'A';
            columnOffBoard = 'Z';
            rowOnBoard = 1;
            rowOffBoard = -1;
        }

        [TestMethod]
        public void TestCreatePositionWithValidParametersDoesNotThrowException()
        {
            try
            {
                createdPosition = factoryUnderTest.Create(columnOnBoard, rowOnBoard);
            }
            catch (Exception e)
            {
                FailWithException(e);
            }
        }

        [TestMethod]
        public void TestCreatePositionWithValidParametersDoesNotReturnNull()
        {
            createdPosition = factoryUnderTest.Create(columnOnBoard, rowOnBoard);

            Assert.IsNotNull(createdPosition);
        }

        [TestMethod]
        [ExpectedException(typeof(ArgumentOutOfRangeException), InvalidColumnErrorMessage)]
        public void TestCreatePositionWithInvalidColumnAndValidRowThrowsColumnException()
        {
            createdPosition = factoryUnderTest.Create(columnOffBoard, rowOnBoard);
        }

        [TestMethod]
        [ExpectedException(typeof(ArgumentOutOfRangeException), InvalidRowErrorMessage)]
        public void TestCreatePositionWithInvalidRowAndValidColumnThrowsRowException()
        {
            createdPosition = factoryUnderTest.Create(columnOnBoard, rowOffBoard);
        }

        [TestMethod]
        [ExpectedException(typeof(ArgumentOutOfRangeException), InvalidColumnErrorMessage)]
        public void TestCreatePositionWithInvalidColumnAndInvalidRowThrowsColumnException()
        {
            createdPosition = factoryUnderTest.Create(columnOffBoard, rowOffBoard);
        }

        // Turns an unexpected exception into a readable MSTest failure.
        private void FailWithException(Exception e)
        {
            Assert.Fail(Utils.BuildErrorMessage(e));
        }
    }
}
BadassGamingCrew/Chess-Might-and-Magic
BattleChessUnitTests/PositionTestCases/AbstractPositionFactoryTestCase.cs
C#
apache-2.0
2,808
/*
 * Hello world example of a TLS client: fetch an HTTPS page
 *
 * Copyright (C) 2006-2015, ARM Limited, All Rights Reserved
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * This file is part of mbed TLS (https://tls.mbed.org)
 */

/* Non-mbed builds get a tiny stub so the file compiles everywhere. */
#if !defined(TARGET_LIKE_MBED)

#include <stdio.h>

int main() {
    printf("this program only works on mbed OS\n");
    return 0;
}

#else

/** \file main.cpp
 *  \brief An example TLS Client application
 *  This application sends an HTTPS request to developer.mbed.org and searches for a string in
 *  the result.
 *
 *  This example is implemented as a logic class (HelloHTTPS) wrapping a TCP socket.
 *  The logic class handles all events, leaving the main loop to just check if the process
 *  has finished.
 */

/* Change to a number between 1 and 4 to debug the TLS connection */
#define DEBUG_LEVEL 0

/* Change to 1 to skip certificate verification (UNSAFE, for debug only!) */
#define UNSAFE 0

#include "mbed.h"
#include "EthernetInterface.h"
#include "mbed-net-sockets/TCPStream.h"
#include "test_env.h"
#include "minar/minar.h"
#include "lwipv4_init.h"

#include "mbedtls/ssl.h"
#include "mbedtls/entropy.h"
#include "mbedtls/ctr_drbg.h"
#include "mbedtls/error.h"

#if DEBUG_LEVEL > 0
#include "mbedtls/debug.h"
#endif

namespace {

/* Connection end-point and resource to fetch. */
const char *HTTPS_SERVER_NAME = "developer.mbed.org";
const int HTTPS_SERVER_PORT = 443;
const int RECV_BUFFER_SIZE = 600;

const char HTTPS_PATH[] = "/media/uploads/mbed_official/hello.txt";
const size_t HTTPS_PATH_LEN = sizeof(HTTPS_PATH) - 1; /* not referenced in this file */

/* Test related data */
const char *HTTPS_OK_STR = "200 OK";
const char *HTTPS_HELLO_STR = "Hello world!";

/* personalization string for the drbg */
const char *DRBG_PERS = "mbed TLS helloword client";

/* List of trusted root CA certificates
 * currently only GlobalSign, the CA for developer.mbed.org
 *
 * To add more than one root, just concatenate them.
 */
const char SSL_CA_PEM[] =
    /* GlobalSign Root R1 SHA1/RSA/2048
     * Serial no. 04 00 00 00 00 01 15 4b 5a c3 94 */
    "-----BEGIN CERTIFICATE-----\n"
    "MIIDdTCCAl2gAwIBAgILBAAAAAABFUtaw5QwDQYJKoZIhvcNAQEFBQAwVzELMAkG\n"
    "A1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2ExEDAOBgNVBAsTB1Jv\n"
    "b3QgQ0ExGzAZBgNVBAMTEkdsb2JhbFNpZ24gUm9vdCBDQTAeFw05ODA5MDExMjAw\n"
    "MDBaFw0yODAxMjgxMjAwMDBaMFcxCzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9i\n"
    "YWxTaWduIG52LXNhMRAwDgYDVQQLEwdSb290IENBMRswGQYDVQQDExJHbG9iYWxT\n"
    "aWduIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDaDuaZ\n"
    "jc6j40+Kfvvxi4Mla+pIH/EqsLmVEQS98GPR4mdmzxzdzxtIK+6NiY6arymAZavp\n"
    "xy0Sy6scTHAHoT0KMM0VjU/43dSMUBUc71DuxC73/OlS8pF94G3VNTCOXkNz8kHp\n"
    "1Wrjsok6Vjk4bwY8iGlbKk3Fp1S4bInMm/k8yuX9ifUSPJJ4ltbcdG6TRGHRjcdG\n"
    "snUOhugZitVtbNV4FpWi6cgKOOvyJBNPc1STE4U6G7weNLWLBYy5d4ux2x8gkasJ\n"
    "U26Qzns3dLlwR5EiUWMWea6xrkEmCMgZK9FGqkjWZCrXgzT/LCrBbBlDSgeF59N8\n"
    "9iFo7+ryUp9/k5DPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8E\n"
    "BTADAQH/MB0GA1UdDgQWBBRge2YaRQ2XyolQL30EzTSo//z9SzANBgkqhkiG9w0B\n"
    "AQUFAAOCAQEA1nPnfE920I2/7LqivjTFKDK1fPxsnCwrvQmeU79rXqoRSLblCKOz\n"
    "yj1hTdNGCbM+w6DjY1Ub8rrvrTnhQ7k4o+YviiY776BQVvnGCv04zcQLcFGUl5gE\n"
    "38NflNUVyRRBnMRddWQVDf9VMOyGj/8N7yy5Y0b2qvzfvGn9LhJIZJrglfCm7ymP\n"
    "AbEVtQwdpf5pLGkkeB6zpxxxYu7KyJesF12KwvhHhm4qxFYxldBniYUr+WymXUad\n"
    "DKqC5JlR3XC321Y9YeRq4VzW9v493kHMB65jUr9TU/Qr6cf9tveCX4XSQRjbgbME\n"
    "HMUfpIBvFSDJ3gyICh3WZlXi/EjJKSZp4A==\n"
    "-----END CERTIFICATE-----\n";

}

using namespace mbed::Sockets::v0;

/**
 * \brief HelloHTTPS implements the logic for fetching a file from a webserver
 * using a TCP socket and parsing the result.
 */
class HelloHTTPS {
public:
    /**
     * HelloHTTPS Constructor
     * Initializes the TCP socket, sets up event handlers and flags.
     *
     * @param[in] domain The domain name to fetch from
     * @param[in] port The port of the HTTPS server
     */
    HelloHTTPS(const char * domain, const uint16_t port) :
            _stream(SOCKET_STACK_LWIP_IPV4), _domain(domain), _port(port) {
        _error = false;
        _gothello = false;
        _got200 = false;
        _bpos = 0;
        _request_sent = 0;
        _stream.open(SOCKET_AF_INET4);
        /* Initialize every mbed TLS context up front so the destructor can
         * free them unconditionally. */
        mbedtls_entropy_init(&_entropy);
        mbedtls_ctr_drbg_init(&_ctr_drbg);
        mbedtls_x509_crt_init(&_cacert);
        mbedtls_ssl_init(&_ssl);
        mbedtls_ssl_config_init(&_ssl_conf);
    }

    /**
     * HelloHTTPS Destructor
     */
    ~HelloHTTPS() {
        mbedtls_entropy_free(&_entropy);
        mbedtls_ctr_drbg_free(&_ctr_drbg);
        mbedtls_x509_crt_free(&_cacert);
        mbedtls_ssl_free(&_ssl);
        mbedtls_ssl_config_free(&_ssl_conf);
    }

    /**
     * Initiate the test.
     *
     * Starts by clearing test flags, then resolves the address with DNS.
     *
     * @param[in] path The path of the file to fetch from the HTTPS server
     * @return SOCKET_ERROR_NONE on success, or an error code on failure
     */
    void startTest(const char *path) {
        /* Initialize the flags */
        _got200 = false;
        _gothello = false;
        _error = false;
        _disconnected = false;
        _request_sent = false;
        /* Fill the request buffer.
         * NOTE(review): HTTP/1.1 line endings should be CRLF ("\r\n"); the
         * bare "\n" is kept as-is since the test server accepts it. */
        _bpos = snprintf(_buffer, sizeof(_buffer) - 1, "GET %s HTTP/1.1\nHost: %s\n\n", path, HTTPS_SERVER_NAME);

        /*
         * Initialize TLS-related stuff.
         */
        int ret;
        /* NOTE(review): DRBG_PERS is a pointer, so sizeof (DRBG_PERS) is the
         * pointer size (4/8), not the string length — only the first few
         * characters are used as personalization data. Harmless but probably
         * meant strlen(); kept as-is. The error label below also says
         * "mbedtls_crt_drbg_init" although the call is mbedtls_ctr_drbg_seed. */
        if ((ret = mbedtls_ctr_drbg_seed(&_ctr_drbg, mbedtls_entropy_func, &_entropy,
                                         (const unsigned char *) DRBG_PERS, sizeof (DRBG_PERS))) != 0) {
            print_mbedtls_error("mbedtls_crt_drbg_init", ret);
            _error = true;
            return;
        }

        /* sizeof(SSL_CA_PEM) is correct here: for PEM input the length must
         * include the terminating NUL of the array. */
        if ((ret = mbedtls_x509_crt_parse(&_cacert, (const unsigned char *) SSL_CA_PEM,
                                          sizeof (SSL_CA_PEM))) != 0) {
            print_mbedtls_error("mbedtls_x509_crt_parse", ret);
            _error = true;
            return;
        }

        if ((ret = mbedtls_ssl_config_defaults(&_ssl_conf,
                                               MBEDTLS_SSL_IS_CLIENT,
                                               MBEDTLS_SSL_TRANSPORT_STREAM,
                                               MBEDTLS_SSL_PRESET_DEFAULT)) != 0) {
            print_mbedtls_error("mbedtls_ssl_config_defaults", ret);
            _error = true;
            return;
        }

        mbedtls_ssl_conf_ca_chain(&_ssl_conf, &_cacert, NULL);
        mbedtls_ssl_conf_rng(&_ssl_conf, mbedtls_ctr_drbg_random, &_ctr_drbg);

#if UNSAFE
        /* Debug only: accept connections even when verification fails. */
        mbedtls_ssl_conf_authmode(&_ssl_conf, MBEDTLS_SSL_VERIFY_OPTIONAL);
#endif

#if DEBUG_LEVEL > 0
        mbedtls_ssl_conf_verify(&_ssl_conf, my_verify, NULL);
        mbedtls_ssl_conf_dbg(&_ssl_conf, my_debug, NULL);
        mbedtls_debug_set_threshold(DEBUG_LEVEL);
#endif

        if ((ret = mbedtls_ssl_setup(&_ssl, &_ssl_conf)) != 0) {
            print_mbedtls_error("mbedtls_ssl_setup", ret);
            _error = true;
            return;
        }

        mbedtls_ssl_set_hostname(&_ssl, HTTPS_SERVER_NAME);

        /* All TLS I/O goes through the TCP stream via the two static callbacks. */
        mbedtls_ssl_set_bio(&_ssl, static_cast<void *>(&_stream), ssl_send, ssl_recv, NULL );

        /* Connect to the server */
        printf("Starting DNS lookup for %s\r\n", _domain);
        /* Resolve the domain name: */
        socket_error_t err = _stream.resolve(_domain, TCPStream::DNSHandler_t(this, &HelloHTTPS::onDNS));
        _stream.error_check(err);
    }

    /**
     * Check if the test has completed.
     * @return Returns true if done, false otherwise.
     */
    bool done() {
        return _error || (_got200 && _gothello);
    }

    /**
     * Check if there was an error
     * @return Returns true if there was an error, false otherwise.
     */
    bool error() {
        return _error;
    }

    /**
     * Closes the TCP socket
     */
    void close() {
        _stream.close();
        /* NOTE(review): nothing in this file ever sets _disconnected to true
         * (onDisconnect() does not), so this wait depends on behavior outside
         * this view — confirm, otherwise it can spin forever. */
        while (!_disconnected)
            __WFI();
    }

protected:
    /**
     * Helper for pretty-printing mbed TLS error codes
     */
    static void print_mbedtls_error(const char *name, int err) {
        char buf[128];
        mbedtls_strerror(err, buf, sizeof (buf));
        printf("%s() failed: -0x%04x (%d): %s\r\n", name, -err, err, buf);
    }

#if DEBUG_LEVEL > 0
    /**
     * Debug callback for mbed TLS
     * Just prints on the USB serial port
     */
    static void my_debug(void *ctx, int level, const char *file, int line, const char *str) {
        const char *p, *basename;
        (void) ctx;

        /* Extract basename from file */
        for (p = basename = file; *p != '\0'; p++) {
            if (*p == '/' || *p == '\\') {
                basename = p + 1;
            }
        }

        printf("%s:%04d: |%d| %s", basename, line, level, str);
    }

    /**
     * Certificate verification callback for mbed TLS
     * Here we only use it to display information on each cert in the chain
     */
    static int my_verify(void *data, mbedtls_x509_crt *crt, int depth, uint32_t *flags) {
        char buf[1024];
        (void) data;

        printf("\nVerifying certificate at depth %d:\n", depth);
        mbedtls_x509_crt_info(buf, sizeof (buf) - 1, " ", crt);
        printf("%s", buf);

        if (*flags == 0)
            printf("No verification issue for this certificate\n");
        else {
            mbedtls_x509_crt_verify_info(buf, sizeof (buf), " ! ", *flags);
            printf("%s\n", buf);
        }

        return 0;
    }
#endif

    /**
     * Receive callback for mbed TLS
     */
    static int ssl_recv(void *ctx, unsigned char *buf, size_t len) {
        TCPStream *stream = static_cast<TCPStream *>(ctx);
        socket_error_t err = stream->recv(buf, &len);

        if (err == SOCKET_ERROR_NONE) {
            return static_cast<int>(len);
        } else if (err == SOCKET_ERROR_WOULD_BLOCK) {
            return MBEDTLS_ERR_SSL_WANT_READ;
        } else {
            return -1;
        }
    }

    /**
     * Send callback for mbed TLS
     */
    static int ssl_send(void *ctx, const unsigned char *buf, size_t len) {
        TCPStream *stream = static_cast<TCPStream *>(ctx);

        socket_error_t err = stream->send(buf, len);

        if (err == SOCKET_ERROR_NONE) {
            return static_cast<int>(len);
        } else if (err == SOCKET_ERROR_WOULD_BLOCK) {
            return MBEDTLS_ERR_SSL_WANT_WRITE;
        } else {
            return -1;
        }
    }

    /* Socket error handler: report, close, and fail the host test. */
    void onError(Socket *s, socket_error_t err) {
        (void) s;
        printf("MBED: Socket Error: %s (%d)\r\n", socket_strerror(err), err);
        _stream.close();
        _error = true;
        MBED_HOSTTEST_RESULT(false);
    }

    /**
     * On Connect handler
     * Starts the TLS handshake
     */
    void onConnect(TCPStream *s) {
        char buf[16];
        _remoteAddr.fmtIPv4(buf, sizeof(buf));
        printf("Connected to %s:%d\r\n", buf, _port);

        s->setOnReadable(TCPStream::ReadableHandler_t(this, &HelloHTTPS::onReceive));
        s->setOnDisconnect(TCPStream::DisconnectHandler_t(this, &HelloHTTPS::onDisconnect));

        /* Start the handshake, the rest will be done in onReceive() */
        printf("Starting the TLS handshake...\r\n");
        int ret = mbedtls_ssl_handshake(&_ssl);
        if (ret < 0) {
            /* WANT_READ/WANT_WRITE only mean the handshake continues on the
             * next readable event; anything else is fatal. */
            if (ret != MBEDTLS_ERR_SSL_WANT_READ &&
                ret != MBEDTLS_ERR_SSL_WANT_WRITE) {
                print_mbedtls_error("mbedtls_ssl_handshake", ret);
                onError(s, SOCKET_ERROR_UNKNOWN);
            }
            return;
        }
    }

    /**
     * On Receive handler
     * Parses the response from the server, to check for the HTTPS 200 status code and the expected response ("Hello World!")
     */
    void onReceive(Socket *s) {
        /* Send request if not done yet */
        if (!_request_sent) {
            int ret = mbedtls_ssl_write(&_ssl, (const unsigned char *) _buffer, _bpos);
            if (ret < 0) {
                if (ret != MBEDTLS_ERR_SSL_WANT_READ &&
                    ret != MBEDTLS_ERR_SSL_WANT_WRITE) {
                    print_mbedtls_error("mbedtls_ssl_write", ret);
                    onError(s, SOCKET_ERROR_UNKNOWN);
                }
                return;
            }

            /* If we get here, the request was sent */
            _request_sent = 1;

            /* It also means the handshake is done, time to print info */
            printf("TLS connection to %s established\r\n", HTTPS_SERVER_NAME);

            {
                char buf[1024];
                mbedtls_x509_crt_info(buf, sizeof(buf), "\r ", mbedtls_ssl_get_peer_cert(&_ssl));
                printf("Server certificate:\r\n%s\r", buf);

                /* NOTE(review): UNSAFE is always defined (as 0 or 1), so this
                 * block is always compiled in; `#if UNSAFE` was likely meant.
                 * Harmless here — it only prints the verification result. */
#if defined(UNSAFE)
                uint32_t flags = mbedtls_ssl_get_verify_result(&_ssl);
                if( flags != 0 ) {
                    mbedtls_x509_crt_verify_info(buf, sizeof (buf), "\r ! ", flags);
                    printf("Certificate verification failed:\r\n%s\r\r\n", buf);
                }
                else
#endif
                    printf("Certificate verification passed\r\n\r\n");
            }
        }

        /* Read data out of the socket */
        int ret = mbedtls_ssl_read(&_ssl, (unsigned char *) _buffer, sizeof(_buffer));
        if (ret < 0) {
            if (ret != MBEDTLS_ERR_SSL_WANT_READ &&
                ret != MBEDTLS_ERR_SSL_WANT_WRITE) {
                print_mbedtls_error("mbedtls_ssl_read", ret);
                onError(s, SOCKET_ERROR_UNKNOWN);
            }
            return;
        }
        _bpos = static_cast<size_t>(ret);

        /* NOTE(review): when ret == sizeof(_buffer) this writes one byte past
         * the end of _buffer — consider reading sizeof(_buffer) - 1 instead. */
        _buffer[_bpos] = 0;

        /* Check each of the flags */
        _got200 = _got200 || strstr(_buffer, HTTPS_OK_STR) != NULL;
        _gothello = _gothello || strstr(_buffer, HTTPS_HELLO_STR) != NULL;

        /* Print status messages */
        printf("HTTPS: Received %d chars from server\r\n", _bpos);
        printf("HTTPS: Received 200 OK status ... %s\r\n", _got200 ? "[OK]" : "[FAIL]");
        printf("HTTPS: Received '%s' status ... %s\r\n", HTTPS_HELLO_STR, _gothello ? "[OK]" : "[FAIL]");
        printf("HTTPS: Received message:\r\n\r\n");
        printf("%s", _buffer);
        _error = !(_got200 && _gothello);

        s->close();
    }

    /**
     * On DNS Handler
     * Reads the address returned by DNS, then starts the connect process.
     */
    void onDNS(Socket *s, struct socket_addr addr, const char *domain) {
        /* Check that the result is a valid DNS response */
        if (socket_addr_is_any(&addr)) {
            /* Could not find DNS entry */
            printf("Could not find DNS entry for %s", HTTPS_SERVER_NAME);
            onError(s, SOCKET_ERROR_DNS_FAILED);
        } else {
            /* Start connecting to the remote host */
            char buf[16];
            _remoteAddr.setAddr(&addr);
            _remoteAddr.fmtIPv4(buf, sizeof(buf));
            printf("DNS Response Received:\r\n%s: %s\r\n", domain, buf);
            printf("Connecting to %s:%d\r\n", buf, _port);
            socket_error_t err = _stream.connect(_remoteAddr, _port, TCPStream::ConnectHandler_t(this, &HelloHTTPS::onConnect));

            if (err != SOCKET_ERROR_NONE) {
                onError(s, err);
            }
        }
    }

    void onDisconnect(TCPStream *s) {
        s->close();
        /* Report the overall verdict to the host test framework. */
        MBED_HOSTTEST_RESULT(!error());
    }

protected:
    TCPStream _stream;              /**< The TCP Socket */
    const char *_domain;            /**< The domain name of the HTTPS server */
    const uint16_t _port;           /**< The HTTPS server port */
    char _buffer[RECV_BUFFER_SIZE]; /**< The response buffer */
    size_t _bpos;                   /**< The current offset in the response buffer */
    SocketAddr _remoteAddr;         /**< The remote address */
    volatile bool _got200;          /**< Status flag for HTTPS 200 */
    volatile bool _gothello;        /**< Status flag for finding the test string */
    volatile bool _error;           /**< Status flag for an error */
    volatile bool _disconnected;    /**< Never set in this file — see close() */
    volatile bool _request_sent;    /**< Set once the HTTP request has been written */

    /* mbed TLS state: initialized in the constructor, freed in the destructor. */
    mbedtls_entropy_context _entropy;
    mbedtls_ctr_drbg_context _ctr_drbg;
    mbedtls_x509_crt _cacert;
    mbedtls_ssl_context _ssl;
    mbedtls_ssl_config _ssl_conf;
};

/**
 * The main loop of the HTTPS Hello World test
 */
EthernetInterface eth;
HelloHTTPS *hello;

void app_start(int, char*[]) {
    /* The default 9600 bps is too slow to print full TLS debug info and could
     * cause the other party to time out. Select a higher baud rate for
     * printf(), regardless of debug level for the sake of uniformity. */
    Serial pc(USBTX, USBRX);
    pc.baud(115200);

    MBED_HOSTTEST_TIMEOUT(120);
    MBED_HOSTTEST_SELECT(default);
    MBED_HOSTTEST_DESCRIPTION(mbed TLS example HTTPS client);
    MBED_HOSTTEST_START("MBEDTLS_EX_HTTPS_CLIENT");

    /* Initialise with DHCP, connect, and start up the stack */
    eth.init();
    eth.connect();
    lwipv4_socket_init();

    hello = new HelloHTTPS(HTTPS_SERVER_NAME, HTTPS_SERVER_PORT);

    printf("Client IP Address is %s\r\n", eth.getIPAddress());

    /* Defer the actual fetch to the minar event scheduler. */
    mbed::util::FunctionPointer1<void, const char*> fp(hello, &HelloHTTPS::startTest);
    minar::Scheduler::postCallback(fp.bind(HTTPS_PATH));
}

#endif /* TARGET_LIKE_MBED */
zcy421593/aliyun-openapi-cpp-sdk
aliyun_api_core/third_party/mbedtls-2.1.2/yotta/data/example-tls-client/main.cpp
C++
apache-2.0
17,752
<h1>Ti.StyledLabel Module</h1>
<h2>Description</h2>
<p>Gives you the power of HTML and CSS without the full weight of a WebView.</p>
<p>Lightly wraps around a UIWebView to make it easy and quick for you to display HTML. This is intended for displaying small snippets of HTML, like Tweets, to your users. It works best if you limit the number of Styled Labels you create.</p>
<h2>Solutions to Common Scenarios</h2>
<h3>Table of Tweets</h3>
<p>Problem: You need to display a lot of tweets (or other HTML) in a table in your app.</p>
<p>BAD Solution: Create one Styled Label per tweet. So if you have 200 tweets, you end up with 200 Styled Labels.</p>
<p>GOOD Solution: Create ONE Styled Label. Render all of the tweets together at once.</p>
<h3>Other Scenarios</h3>
<p>Are you facing a performance or functional issue with the Styled Label? Let us know and we will come up with a solution together!</p>
<p>Send an email to <a href="mailto:[email protected]?subject=iOS%20StyledLabel%20Module">[email protected]</a> RE: StyledLabel.</p>
<h2>Getting Started</h2>
<p>View the <a href="http://docs.appcelerator.com/titanium/2.0/#!/guide/Using_Titanium_Modules">Using Titanium Modules</a> document for instructions on getting started with using this module in your application.</p>
<h2>Accessing the Ti.StyledLabel Module</h2>
<p>To access this module from JavaScript, you would do the following:</p>
<pre><code>var StyledLabel = require('ti.styledlabel');
</code></pre>
<h2>Functions</h2>
<h3><a href="label.html">Ti.StyledLabel.Label</a> Ti.StyledLabel.createLabel({...})</h3>
<p>Creates a <a href="label.html">Ti.StyledLabel.Label</a>.</p>
<h2>Usage</h2>
<p>See example.</p>
<h2>Author</h2>
<p>Dawson Toth</p>
<h2>Module History</h2>
<p>View the <a href="changelog.html">change log</a> for this module.</p>
<h2>Feedback and Support</h2>
<p>Please direct all questions, feedback, and concerns to <a href="mailto:[email protected]?subject=iOS%20StyledLabel%20Module">[email protected]</a>.</p>
<h2>License</h2> <p>Copyright(c) 2010-2011 by Appcelerator, Inc. All Rights Reserved. Please see the LICENSE file included in the distribution for further details.</p>
karthi-anubavam/styledLabel-app
modules/iphone/ti.styledlabel/1.5.2/documentation/index.html
HTML
apache-2.0
2,202
package org.apache.pdfbox.pdmodel.documentinterchange.taggedpdf; import org.apache.pdfbox.cos.COSDictionary; /** * A Table attribute object. * * @author Johannes Koch */ public class PDTableAttributeObject extends PDStandardAttributeObject { /** * standard attribute owner: Table */ public static final String OWNER_TABLE = "Table"; protected static final String ROW_SPAN = "RowSpan"; protected static final String COL_SPAN = "ColSpan"; protected static final String HEADERS = "Headers"; protected static final String SCOPE = "Scope"; protected static final String SUMMARY = "Summary"; /** * Scope: Both */ public static final String SCOPE_BOTH = "Both"; /** * Scope: Column */ public static final String SCOPE_COLUMN = "Column"; /** * Scope: Row */ public static final String SCOPE_ROW = "Row"; /** * Default constructor. */ public PDTableAttributeObject() { this.setOwner(OWNER_TABLE); } /** * Creates a new Table attribute object with a given dictionary. * * @param dictionary the dictionary */ public PDTableAttributeObject(COSDictionary dictionary) { super(dictionary); } /** * Gets the number of rows in the enclosing table that shall be spanned by * the cell (RowSpan). The default value is 1. * * @return the row span */ public int getRowSpan() { return this.getInteger(ROW_SPAN, 1); } /** * Sets the number of rows in the enclosing table that shall be spanned by * the cell (RowSpan). * * @param rowSpan the row span */ public void setRowSpan(int rowSpan) { this.setInteger(ROW_SPAN, rowSpan); } /** * Gets the number of columns in the enclosing table that shall be spanned * by the cell (ColSpan). The default value is 1. * * @return the column span */ public int getColSpan() { return this.getInteger(COL_SPAN, 1); } /** * Sets the number of columns in the enclosing table that shall be spanned * by the cell (ColSpan). * * @param colSpan the column span */ public void setColSpan(int colSpan) { this.setInteger(COL_SPAN, colSpan); } /** * Gets the headers (Headers). 
An array of byte strings, where each string * shall be the element identifier (see the * {@link org.apache.pdfbox.pdmodel.documentinterchange.logicalstructure.PDStructureElement#getElementIdentifier()}) for a TH structure * element that shall be used as a header associated with this cell. * * @return the headers. */ public String[] getHeaders() { return this.getArrayOfString(HEADERS); } /** * Sets the headers (Headers). An array of byte strings, where each string * shall be the element identifier (see the * {@link org.apache.pdfbox.pdmodel.documentinterchange.logicalstructure.PDStructureElement#getElementIdentifier()}) for a TH structure * element that shall be used as a header associated with this cell. * * @param headers the headers */ public void setHeaders(String[] headers) { this.setArrayOfString(HEADERS, headers); } /** * Gets the scope (Scope). It shall reflect whether the header cell applies * to the rest of the cells in the row that contains it, the column that * contains it, or both the row and the column that contain it. * * @return the scope */ public String getScope() { return this.getName(SCOPE); } /** * Sets the scope (Scope). It shall reflect whether the header cell applies * to the rest of the cells in the row that contains it, the column that * contains it, or both the row and the column that contain it. The value * shall be one of the following: * <ul> * <li>{@link #SCOPE_ROW},</li> * <li>{@link #SCOPE_COLUMN}, or</li> * <li>{@link #SCOPE_BOTH}.</li> * </ul> * * @param scope the scope */ public void setScope(String scope) { this.setName(SCOPE, scope); } /** * Gets the summary of the table’s purpose and structure. * * @return the summary */ public String getSummary() { return this.getString(SUMMARY); } /** * Sets the summary of the table’s purpose and structure. 
* * @param summary the summary */ public void setSummary(String summary) { this.setString(SUMMARY, summary); } @Override public String toString() { StringBuilder sb = new StringBuilder().append(super.toString()); if (this.isSpecified(ROW_SPAN)) { sb.append(", RowSpan=").append(String.valueOf(this.getRowSpan())); } if (this.isSpecified(COL_SPAN)) { sb.append(", ColSpan=").append(String.valueOf(this.getColSpan())); } if (this.isSpecified(HEADERS)) { sb.append(", Headers=").append(arrayToString(this.getHeaders())); } if (this.isSpecified(SCOPE)) { sb.append(", Scope=").append(this.getScope()); } if (this.isSpecified(SUMMARY)) { sb.append(", Summary=").append(this.getSummary()); } return sb.toString(); } }
kzganesan/PdfBox-Android
library/src/main/java/org/apache/pdfbox/pdmodel/documentinterchange/taggedpdf/PDTableAttributeObject.java
Java
apache-2.0
5,686
#ifndef CPPVECTORBEAN
#define CPPVECTORBEAN

// JNI bridge bean pairing a native int array with the Java object it mirrors.
// NOTE: despite the .h extension this is a C++ header (class, std::mutex).
// Member semantics below are partly inferred from names — the defining .cpp
// is not visible from here; confirm against it.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <jni.h>
#include <stdint.h>
#include <iostream>
#include <mutex>
#include <memory>

class CPPVectorBean {
private:
    JNIEnv* env;              // JNI environment pointer — NOTE(review): JNIEnv* is only valid on its own thread; confirm usage
    jobject jniJavaClassRef;  // reference to the backing Java object (returned by getJavaObject())
    int jniCreated = 0;       // presumably flags whether the Java-side object was created here — TODO confirm in the .cpp
    int* data;                // native element buffer; ownership/lifetime defined by the .cpp (see destructor)
    jintArray dataArr;        // Java array counterpart of `data`
    int data_length;          // number of elements in `data`
    static std::mutex mtx;    // class-wide lock; what it guards is defined in the .cpp

public:
    int* getdata();
    int getdata_length();
    jobject getJavaObject();
    // Construct from an existing Java-side object.
    CPPVectorBean(jclass replaceMeClassName, jobject replaceMeObjectName, JNIEnv* env);
    // Construct from a native array (presumably creating the Java counterpart — confirm).
    CPPVectorBean(int* dataarg, int data_lengtharg, jclass jClass, JNIEnv* jniEnv);
    CPPVectorBean();
    ~CPPVectorBean();
};

#endif
tudorv91/SparkJNI
sparkjni-examples/src/main/cpp/examples/vectorOps/CPPVectorBean.h
C
apache-2.0
653
/*!
 * ${copyright}
 */

sap.ui.define(["sap/ui/fl/Utils", "jquery.sap.global"], function(FlexUtils, jQuery) {
	"use strict";

	/**
	 * Change handler for combining sap.m.Button(s) in a sap.m.MenuButton inside sap.m.Bar
	 *
	 * @alias sap.m.changeHandler.CombineButtons
	 * @author SAP SE
	 * @version ${version}
	 * @experimental Since 1.48
	 */
	var CombineButtons = { };

	/**
	 * Combines sap.m.Button(s) in a sap.m.MenuButton
	 *
	 * @param {sap.ui.fl.Change} oChange Change wrapper object with instructions to be applied on the control map
	 * @param {sap.m.Bar} oControl Containing the buttons
	 * @param {object} mPropertyBag Map of properties
	 * @param {object} mPropertyBag.modifier Modifier for the controls
	 * @return {boolean} true if change could be applied
	 *
	 * @public
	 */
	CombineButtons.applyChange = function(oChange, oControl, mPropertyBag) {
		// This handler attaches runtime press handlers and therefore can only
		// run on the JS control tree, not during XML view preprocessing.
		if (mPropertyBag.modifier.targets !== "jsControlTree") {
			throw new Error("Combine buttons change can't be applied on XML tree");
		}

		var oChangeDefinition = oChange.getDefinition(),
			oModifier = mPropertyBag.modifier,
			oView = FlexUtils.getViewForControl(oControl),
			oSourceControl = oModifier.bySelector(oChangeDefinition.content.combineButtonSelectors[0], mPropertyBag.appComponent),
			oAppComponent = mPropertyBag.appComponent,
			oParent = oModifier.getParent(oSourceControl),
			iAggregationIndex,
			sParentAggregation,
			aButtons,
			bIsRtl = sap.ui.getCore().getConfiguration().getRTL(),
			oMenu,
			oMenuButton,
			aMenuButtonName = [],
			// Everything revertChange() needs to undo this change.
			oRevertData = {
				menuButtonId: "",
				parentAggregation: "",
				insertIndex: 0
			};

		// Resolve the selectors stored in the change into the live buttons.
		aButtons = oChangeDefinition.content.combineButtonSelectors.map(function (oCombineButtonSelector) {
			return oModifier.bySelector(oCombineButtonSelector, oAppComponent);
		});

		sParentAggregation = aButtons[0].sParentAggregationName;
		oRevertData.parentAggregation = sParentAggregation;

		// The MenuButton is inserted where the first combined button was.
		iAggregationIndex = oModifier.findIndexInParentAggregation(oSourceControl);
		oRevertData.insertIndex = iAggregationIndex;

		oMenu = oModifier.createControl("sap.m.Menu", mPropertyBag.appComponent, oView);

		aButtons.forEach(function (oButton, index) {
			var sId = oView.createId(jQuery.sap.uid()),
				sButtonText = oModifier.getProperty(oButton, "text");

			var oMenuItem = oModifier.createControl("sap.m.MenuItem", mPropertyBag.appComponent, oView, sId);
			oModifier.setProperty(oMenuItem, "text", oButton.mProperties.text);
			oModifier.setProperty(oMenuItem, "icon", oButton.mProperties.icon);
			// Forward menu-item presses to the original (hidden) button so its
			// existing press handlers keep working.
			oMenuItem.attachPress(function(oEvent) {
				return oButton.firePress(oEvent);
			});

			// In RTL, prepend so the combined title reads in visual order.
			if (sButtonText) {
				bIsRtl ? aMenuButtonName.unshift(sButtonText) : aMenuButtonName.push(sButtonText);
			}

			// Remember the original button id on the menu item via CustomData.
			var oIdToSave = oModifier.createControl("sap.ui.core.CustomData", mPropertyBag.appComponent, oView, sId + "-originalButtonId");
			oModifier.setProperty(oIdToSave, "key", "originalButtonId");
			oModifier.setProperty(oIdToSave, "value", oModifier.getId(oButton));

			oModifier.removeAggregation(oParent, sParentAggregation, oButton);

			// adding each button control to the menuItem's dependents aggregation
			// this way we can save all relevant information it may have
			oModifier.insertAggregation(oMenuItem, "dependents", oButton);
			oModifier.insertAggregation(oMenuItem, "customData", oIdToSave);
			oModifier.insertAggregation(oMenu, "items", oMenuItem, index);
		});

		oMenuButton = oModifier.createControl("sap.m.MenuButton", mPropertyBag.appComponent, oView, oView.createId(jQuery.sap.uid()));
		oRevertData.menuButtonId = oModifier.getId(oMenuButton);

		oModifier.setProperty(oMenuButton, "text", aMenuButtonName.join("/"));
		oModifier.insertAggregation(oMenuButton, "menu", oMenu, 0);
		oModifier.insertAggregation(oParent, sParentAggregation, oMenuButton, iAggregationIndex);
		oChange.setRevertData(oRevertData);

		return true;
	};

	/**
	 * Reverts applied change
	 *
	 * @param {sap.ui.fl.Change} oChange change wrapper object with instructions to be applied on the control map
	 * @param {sap.m.IBar} oControl Bar - Bar that matches the change selector for applying the change
	 * @param {object} mPropertyBag - Property bag containing the modifier and the view
	 * @param {object} mPropertyBag.modifier - modifier for the controls
	 * @param {object} mPropertyBag.view - application view
	 * @return {boolean} True if successful
	 * @public
	 */
	CombineButtons.revertChange = function(oChange, oControl, mPropertyBag) {
		var oModifier = mPropertyBag.modifier,
			oRevertData = oChange.getRevertData(),
			oChangeDefinition = oChange.getDefinition(),
			oParent = oControl,
			sParentAggregation = oRevertData.parentAggregation,
			iAggregationIndex = oRevertData.insertIndex,
			// NOTE(review): menuButtonId was stored as a plain id string
			// (oModifier.getId), but is resolved here via bySelector, which
			// normally takes a selector object — confirm the modifier accepts
			// raw ids as well.
			oMenuButton = oModifier.bySelector(oRevertData.menuButtonId, mPropertyBag.appComponent),
			aButtonsIds = oChangeDefinition.content.combineButtonSelectors;

		// Re-insert the original buttons at their former position, in order.
		for (var i = 0; i < aButtonsIds.length; i++) {
			var oButton = oModifier.bySelector(aButtonsIds[i], mPropertyBag.appComponent);
			oModifier.insertAggregation(oParent, sParentAggregation, oButton, iAggregationIndex + i);
		}

		oModifier.removeAggregation(oParent, sParentAggregation, oMenuButton);
		oChange.resetRevertData();

		return true;
	};

	/**
	 * Completes the change by adding change handler specific content
	 *
	 * @param {sap.ui.fl.Change} oChange Change wrapper object to be completed
	 * @param {object} oSpecificChangeInfo Specific info object
	 * @param {object} oSpecificChangeInfo.combineFieldIds Ids of selected buttons to be combined
	 * @param {object} mPropertyBag Map of properties
	 * @param {object} mPropertyBag.modifier Modifier for the controls
	 *
	 * @public
	 */
	CombineButtons.completeChangeContent = function(oChange, oSpecificChangeInfo, mPropertyBag) {
		var oModifier = mPropertyBag.modifier,
			oAppComponent = mPropertyBag.appComponent,
			oChangeDefinition = oChange.getDefinition(),
			aCombineButtonIds = oSpecificChangeInfo.combineFieldIds;

		// Combining fewer than two buttons makes no sense; fail fast.
		if (aCombineButtonIds && aCombineButtonIds.length >= 2) {
			oChange.addDependentControl(aCombineButtonIds, "combinedButtons", mPropertyBag);
			oChangeDefinition.content.combineButtonSelectors = aCombineButtonIds.map(function(sCombineButtonId) {
				return oModifier.getSelector(sCombineButtonId, oAppComponent);
			});
		} else {
			throw new Error("Combine buttons action cannot be completed: oSpecificChangeInfo.combineFieldIds attribute required");
		}
	};

	return CombineButtons;
}, /* bExport= */true);
SQCLabs/openui5
src/sap.m/src/sap/m/changeHandler/CombineButtons.js
JavaScript
apache-2.0
6,687
// Copyright (c) Microsoft. All Rights Reserved. Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.

using System.Collections.Immutable;
using System.Diagnostics;
using Microsoft.CodeAnalysis.CSharp.Emit;
using Microsoft.CodeAnalysis.CSharp.Syntax;
using Microsoft.CodeAnalysis.PooledObjects;
using Roslyn.Utilities;

namespace Microsoft.CodeAnalysis.CSharp.Symbols
{
    /// <summary>
    /// Represents the compiler generated value parameter for property/event accessor.
    /// This parameter has no source location/syntax, but may have attributes.
    /// Attributes with 'param' target specifier on the accessor must be applied to the this parameter.
    /// </summary>
    internal sealed class SynthesizedAccessorValueParameterSymbol : SourceComplexParameterSymbol
    {
        /// <summary>
        /// Creates the synthesized "value" parameter for the given accessor.
        /// There is no syntax reference (the parameter is compiler generated); the
        /// default value slot starts as Unset because it can still be supplied via
        /// [param: DefaultParameterValue] applied on the accessor.
        /// </summary>
        public SynthesizedAccessorValueParameterSymbol(SourceMemberMethodSymbol accessor, TypeWithAnnotations paramType, int ordinal) :
            base(accessor, ordinal, paramType, RefKind.None, ParameterSymbol.ValueParameterName, accessor.Locations,
                 syntaxRef: null,
                 defaultSyntaxValue: ConstantValue.Unset, // the default value can be set via [param: DefaultParameterValue] applied on the accessor
                 isParams: false,
                 isExtensionMethodThis: false)
        {
        }

        /// <summary>
        /// Nullability annotations for the value parameter are inherited from the
        /// associated property: [AllowNull]/[DisallowNull] on the property flow to
        /// the setter's value parameter.
        /// </summary>
        internal override FlowAnalysisAnnotations FlowAnalysisAnnotations
        {
            get
            {
                var result = FlowAnalysisAnnotations.None;
                if (ContainingSymbol is SourcePropertyAccessorSymbol propertyAccessor && propertyAccessor.AssociatedSymbol is SourcePropertySymbol property)
                {
                    if (property.HasDisallowNull)
                    {
                        result |= FlowAnalysisAnnotations.DisallowNull;
                    }
                    if (property.HasAllowNull)
                    {
                        result |= FlowAnalysisAnnotations.AllowNull;
                    }
                }
                return result;
            }
        }

        // A synthesized value parameter never carries [NotNullIfNotNull] references.
        internal override ImmutableHashSet<string> NotNullIfParameterNotNull => ImmutableHashSet<string>.Empty;

        public override ImmutableArray<CustomModifier> RefCustomModifiers
        {
            get
            {
                return ImmutableArray<CustomModifier>.Empty; // since RefKind is always None.
            }
        }

        // No syntax exists for this parameter, so it is always implicitly declared.
        public override bool IsImplicitlyDeclared
        {
            get { return true; }
        }

        // Attribute ownership is delegated to the containing accessor: attributes
        // targeting this parameter are written with the 'param' specifier there.
        protected override IAttributeTargetSymbol AttributeOwner
        {
            get { return (SourceMemberMethodSymbol)this.ContainingSymbol; }
        }

        internal override OneOrMany<SyntaxList<AttributeListSyntax>> GetAttributeDeclarations()
        {
            // Bind the attributes on the accessor's attribute syntax list with "param" target specifier.
            var accessor = (SourceMemberMethodSymbol)this.ContainingSymbol;
            return accessor.GetAttributeDeclarations();
        }

        /// <summary>
        /// Emits [AllowNull]/[DisallowNull] onto the value parameter when the
        /// associated property carries those annotations (mirrors
        /// <see cref="FlowAnalysisAnnotations"/>).
        /// </summary>
        internal override void AddSynthesizedAttributes(PEModuleBuilder moduleBuilder, ref ArrayBuilder<SynthesizedAttributeData> attributes)
        {
            base.AddSynthesizedAttributes(moduleBuilder, ref attributes);

            if (ContainingSymbol is SourcePropertyAccessorSymbol propertyAccessor && propertyAccessor.AssociatedSymbol is SourcePropertySymbol property)
            {
                var annotations = FlowAnalysisAnnotations;
                if ((annotations & FlowAnalysisAnnotations.DisallowNull) != 0)
                {
                    AddSynthesizedAttribute(ref attributes, new SynthesizedAttributeData(property.DisallowNullAttributeIfExists));
                }
                if ((annotations & FlowAnalysisAnnotations.AllowNull) != 0)
                {
                    AddSynthesizedAttribute(ref attributes, new SynthesizedAttributeData(property.AllowNullAttributeIfExists));
                }
            }
        }
    }
}
nguerrera/roslyn
src/Compilers/CSharp/Portable/Symbols/Synthesized/SynthesizedAccessorValueParameterSymbol.cs
C#
apache-2.0
4,074
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package java.lang.reflect;

import java.lang.annotation.Annotation;

/**
 * Stub implementation of Executable. The actual implementation
 * is in Method.h and Method.m, so the declared methods in this
 * class should match the actual methods implemented in order
 * to catch unsupported API references.
 *
 * The concrete method bodies below are placeholders only (returning
 * 0/false/null); they exist so this stub compiles — the real behavior
 * is provided by the native implementation.
 *
 * @since 1.8
 * @see Object
 */
public abstract class Executable extends AccessibleObject implements GenericDeclaration, Member {

  // --- Abstract API surface every concrete executable must provide. ---

  public abstract Class<?> getDeclaringClass();

  public abstract String getName();

  public abstract int getModifiers();

  public abstract TypeVariable<?>[] getTypeParameters();

  public abstract Class<?>[] getParameterTypes();

  public abstract Class<?>[] getExceptionTypes();

  public abstract Annotation[][] getParameterAnnotations();

  public abstract String toGenericString();

  // --- Stub bodies; values are placeholders, not the runtime behavior. ---

  public int getParameterCount() {
    return 0; // stub
  }

  public Type[] getGenericParameterTypes() {
    return null; // stub
  }

  public Parameter[] getParameters() {
    return null; // stub
  }

  public Type[] getGenericExceptionTypes() {
    return null; // stub
  }

  public boolean isVarArgs() {
    return false; // stub
  }

  public boolean isSynthetic() {
    return false; // stub
  }

  public <T extends Annotation> T getAnnotation(Class<T> cls) {
    return null; // stub
  }

  public <T extends Annotation> T[] getAnnotationsByType(Class<T> cls) {
    return null; // stub
  }

  public Annotation[] getDeclaredAnnotations() {
    return null; // stub
  }

  // Package-private hooks mirrored from the JRE's Executable internals.

  boolean hasRealParameterData() {
    return false; // stub
  }

  Type[] getAllGenericParameterTypes() {
    return null; // stub
  }
}
bandcampdotcom/j2objc
jre_emul/stub_classes/java/lang/reflect/Executable.java
Java
apache-2.0
2,369
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.phoenix.expression.function; import java.util.List; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.phoenix.expression.Expression; import org.apache.phoenix.parse.FunctionParseNode.Argument; import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction; import org.apache.phoenix.schema.tuple.Tuple; import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PInteger; import org.apache.phoenix.schema.types.PTimestamp; import org.joda.time.DateTime; import org.joda.time.chrono.GJChronology; /** * Implementation of DayOfWeekFunction(Date/Timestamp) * * Returns an integer from 1 to 7. 
Each represents a day of the week as follows : * MONDAY = 1; * TUESDAY = 2; * WEDNESDAY = 3; * THURSDAY = 4; * FRIDAY = 5; * SATURDAY = 6; * SUNDAY = 7; * */ @BuiltInFunction(name=DayOfWeekFunction.NAME, args={@Argument(allowedTypes={PTimestamp.class})}) public class DayOfWeekFunction extends DateScalarFunction { public static final String NAME = "DAYOFWEEK"; public DayOfWeekFunction(){ } public DayOfWeekFunction(List<Expression> children){ super(children); } @Override public String getName() { return NAME; } @Override public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { Expression arg = getChildren().get(0); if (!arg.evaluate(tuple,ptr)) { return false; } if (ptr.getLength() == 0) { return true; } long dateTime = inputCodec.decodeLong(ptr, arg.getSortOrder()); DateTime jodaDT = new DateTime(dateTime, GJChronology.getInstanceUTC()); int day = jodaDT.getDayOfWeek(); PDataType returnDataType = getDataType(); byte[] byteValue = new byte[returnDataType.getByteSize()]; returnDataType.getCodec().encodeInt(day, byteValue, 0); ptr.set(byteValue); return true; } @Override public PDataType getDataType() { return PInteger.INSTANCE; } }
apache/phoenix
phoenix-core/src/main/java/org/apache/phoenix/expression/function/DayOfWeekFunction.java
Java
apache-2.0
2,864
/* * Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #include <aws/elasticfilesystem/model/ModifyMountTargetSecurityGroupsRequest.h> #include <aws/core/utils/json/JsonSerializer.h> #include <utility> using namespace Aws::EFS::Model; using namespace Aws::Utils::Json; using namespace Aws::Utils; ModifyMountTargetSecurityGroupsRequest::ModifyMountTargetSecurityGroupsRequest() : m_mountTargetIdHasBeenSet(false), m_securityGroupsHasBeenSet(false) { } Aws::String ModifyMountTargetSecurityGroupsRequest::SerializePayload() const { JsonValue payload; if(m_securityGroupsHasBeenSet) { Array<JsonValue> securityGroupsJsonList(m_securityGroups.size()); for(unsigned securityGroupsIndex = 0; securityGroupsIndex < securityGroupsJsonList.GetLength(); ++securityGroupsIndex) { securityGroupsJsonList[securityGroupsIndex].AsString(m_securityGroups[securityGroupsIndex]); } payload.WithArray("SecurityGroups", std::move(securityGroupsJsonList)); } return payload.View().WriteReadable(); }
JoyIfBam5/aws-sdk-cpp
aws-cpp-sdk-elasticfilesystem/source/model/ModifyMountTargetSecurityGroupsRequest.cpp
C++
apache-2.0
1,551
局域网中实现Ubuntu和Windows共享文件夹 ================================================================================ ![](http://itsfoss.itsfoss.netdna-cdn.com/wp-content/uploads/2015/03/Share_Folder_Ubuntu_windows.jpeg) 本文全面详细地向你描述了**在Ubuntu中如何实现局域网内的文件夹共享**。 你的家中是不是有多台电脑?当你需要从一台Ubuntu电脑向另一台电脑传输数据时,是不是必须用到U盘或SD卡?你是否也觉得这个方法很烦人?我想肯定是。本文的目的就是使你在局域网内快速方便地传输文件、文档和其它较大的数据,来节省你的宝贵时间。只需一次设置,然后轻点鼠标,你就可以自由地**在Ubuntu和Windows之间共享文件**,当然这对其它Linux系统同样使用。不要担心这很容易操作,不会花费太多时间。 除此之外,尽管本文是在Ubuntu上进行实践,但这个教程在其它Linux系统上同样有用。 #### 在Ubuntu上实现局域网共享文件夹 #### 如果你的系统是Ubuntu 14.04、14.10或12.04,有两个方法可以使你通过局域网在搭载Windows或其他Linux的电脑上共享本地文件。 - 对局域网中的每个用户提供无密码共享 - 仅限特定访问,提供文件夹密码保护 这篇文章包括两种方法,你可以选择你想用的那种。 ### 1. 局域网无密码共享文件夹 ### #### 步骤一:#### 为了在Ubuntu上实现局域网共享文件夹,右键点击打算共享的文件夹,并选择“Local Network Share”: ![](http://itsfoss.itsfoss.netdna-cdn.com/wp-content/uploads/2015/02/Share_Folder_Ubuntu.jpeg) **可能有用的故障方案**:如果在右键菜单中看不到“Local Network Share”的选项,那就新建一个终端,使用下面的命令去安装nautlius-share: sudo apt-get install nautilus-share 然后重启Nautilus。可以选择注销再登录,或者使用这个命令: nautilus -q #### 步骤二:#### 一旦点击“Local Network Share”,就会出现共享文件夹的选项。只需选中“Share this folder”这一项: ![](http://itsfoss.itsfoss.netdna-cdn.com/wp-content/uploads/2015/03/Share_Folder_Ubuntu_1.jpeg) 可能的故障方案:如果提示共享服务还未安装,就像下图所示,那就点击安装服务,按照提示操作。 ![](http://itsfoss.itsfoss.netdna-cdn.com/wp-content/uploads/2015/03/Share_Folder_Ubuntu_2.jpeg) #### 步骤三:#### 一旦选中“Share this folder”的选项,就会看到按钮“Create Share”变成可用了。你也可以允许其他用户在共享文件夹中编辑文件。选项“Guest access”也是如此。 ![](http://itsfoss.itsfoss.netdna-cdn.com/wp-content/uploads/2015/03/Share_folder_Ubuntu_3.png) 你会看到文件夹图标已经显示为共享的。如果要停止共享文件夹,只需取消“Share this floder”这个选项。 这个方法就是这么简单,使得局域网中的任何人都可以访问共享文件夹中的文件。在正常情况下,你会选择这种方式。因为,家用局域网中的电脑通常都是可信电脑。但情况也不总是这样。如果你只是想特定的用户才能访问怎么办?这个时候就需要Samba服务器了。我们在本文的第二部分讨论这种方法。 ### 2. 在Ubuntu上使用密码保护实现局域网共享文件夹### 为了达到目的,首先需要配置Samba服务器。事实上,在这篇教程的前一部分我们已经用到了Samba,只是我们没有刻意强调。在介绍如何在Ubuntu上搭建Samba服务器实现局域网共享的方法之前,先快速预览一下[Samba][1]到底是什么。 #### Samba是什么? 
####

Samba是一个允许用户通过网络共享文件、文档和打印机的软件包,无论是在Linux、Windows,还是Mac上。它适用于所有的主流平台,可以在所有支持系统上流畅运行。下面是维基百科的介绍:

> Samba是一款重新实现SMB/CIFS网络协议的自由软件,最初由安德鲁·垂鸠开发。在第三版中,Samba不仅支持通过不同的Windows客户端访问及分享SMB的文件夹及打印机,还可以集成到Windows Server域名,作为主要域名控制站(PDC)或者域名成员。它也可以作为Active Directory域名的一部分。

#### 在Ubuntu上安装Samba服务器 ####

你可以很方便地在Ubuntu电脑上安装Samba。安装前,请先更新系统以便安装任何可用的更新。

    sudo apt-get update && sudo apt-get upgrade

然后按照这条命令安装samba和少量所需的软件包:

    sudo apt-get install samba samba-common system-config-samba python-glade2 gksu

一旦安装完成Samba服务器,就可以从图形界面配置Samba来分享文件。

#### 在Ubuntu上配置Samba服务器 ####

从dash打开Samba配置工具:

![](http://itsfoss.itsfoss.netdna-cdn.com/wp-content/uploads/2015/02/Setup-Samba.png)

进入到**Preference->Server Settings**。尽管默认已经设置好,可能就是你需要的。但在某些情况下你可能需要做一些改动。

在Server Setting中可以看到两个选项卡,‘Basic’和‘Security’。在Basic选项卡下的选项含义如下:

- 工作组 - 用户要连接的电脑所在工作组的名字。比如,如果你想连接到一台Windows电脑,你就要输入Windows电脑的工作组名字。在Windows的Samba服务器设置中,已经默认设置好统一的工作组名字。但如果你有不同的工作组名字,就在这个字段中输入自定义的工作组名字。(在Windows 7中获取工作组名字,右击计算机图标,进到属性,就能看到Windows工作组名字。)
- 描述 - 其他用户看到的你的电脑名字。不要使用空格或计算机不支持(望更正!)的字符。

![](http://itsfoss.itsfoss.netdna-cdn.com/wp-content/uploads/2015/02/samba-server-settings.png)

设置samba服务器允许‘Guests’可用是不明智的,所以没必要修改安全设置,保持原样即可。

![](http://itsfoss.itsfoss.netdna-cdn.com/wp-content/uploads/2015/02/Samba-Security-Settings-compressed.jpg)

这就搞定了!你已经搭建好Samba服务器,距离实现网络共享第一个文件夹的目标不远了!
#### 为网络文件共享创建一个系统用户 #### 现在我们需要为网络共享文件创建一个系统用户。下面是非常简单的步骤: - 在Systems Settings下点击**User Accounts**。 - 点击**unlock**使其可用,以及+(**plus**)图标。 - 点击+(plus)图标,创建一个新的系统用户。 ![](http://itsfoss.itsfoss.netdna-cdn.com/wp-content/uploads/2015/02/create-system-user1.jpg) 如上图所示,需要输入‘Full name’。当你输入‘Full name’时,Username会自动填充为Full name。因为创建这个用户是为了共享文件,所以还要指定Account Type为‘**Standard**’。 完成上述步骤,点击添加,你就创建好一个系统用户。这个用户还没有被激活,所以需要为其设置密码来激活。确保Users accounts界面已经解锁。点击Account disabled。输入一个新密码,然后确认密码,点击Change。 ![](http://itsfoss.itsfoss.netdna-cdn.com/wp-content/uploads/2015/02/activate-system-user.jpg) 耶!到目前为止我们已经安装并配置好Samba,已经创建了一个系统用户以通过这个账户在网络上共享文件,而且也已经激活了新用户。现在来到配置Samba的最后一步,然后就可以共享文件夹了。 #### 添加Samba新用户 #### 打开Samba,在Preference先点击Samba Users。填写弹出的对话框,下面是其中几个字段的说明: **Unix Username** - 在这种情况下,我选择刚才创建的用户。 **Windows Username** - 输入你要访问的Windows电脑的用户名。 **Samba Password** - 输入你要访问的Windows电脑的密码。 ![](http://itsfoss.itsfoss.netdna-cdn.com/wp-content/uploads/2015/02/samba-user-setting.jpg) 完成后点击OK。现在做个深呼吸。你已经在Samba的帮助下成功创建一个网络。然后重启网络或Samba服务,准备好和其它电脑共享文件。 sudo restart smbd && sudo restart nmbd #### 通过网络共享文件夹或文件 #### 在图形用户界面下通过Samba共享文件是很简单的。点击Plus图标,会看到如图所示的对话框: ![](http://itsfoss.itsfoss.netdna-cdn.com/wp-content/uploads/2015/02/share-files-and-folders.jpg) 填写完这些字段。在‘Directory’中,浏览要共享的文件夹。你会看到的字段的含义如下: - **Share name** 是其它人会看到的文件夹名字。 - **Description** 是要共享内容的简单描述。 - **Writeable** 默认情况下共享的文件夹是‘read only’。如果允许网络上的其他用户修改它们,设置为writable。 - **Visiable** 当你点击Visiable时,就像它的名字一样,共享文件夹就对网络上的其他人可见。 现在你可以设置共享文件夹的权限。点击‘Access’选项,选择想要共享文件夹的用户。当你选择对所有人允许访问后,这个文件夹就对网络上的所有人可见。 ![](http://itsfoss.itsfoss.netdna-cdn.com/wp-content/uploads/2015/02/Screenshot-from-2015-02-28-202031.png) 最后点击OK,完成共享。现在这个文件夹就与你想要共享的用户实现共享。你已经完成了网络共享文件。还有其它要做的吗?对!还不知道如何从网络上移除文件夹? 
#### 移除共享文件夹 ####

在网络共享一段时间后,我们也需要移除其中的一些文件夹。操作很简单,下面就是我们要做的。

![](http://itsfoss.itsfoss.netdna-cdn.com/wp-content/uploads/2015/02/remove-shared-folder-from-network.jpg)

全部搞定!我们也可以使用终端进行网络文件共享,但这样没有本文介绍的方法这么容易。如果你确实想知道命令行操作,我会再写一篇关于在Linux上使用命令行实现网络文件共享的文章。

所以,你是怎么找到这篇教程的呢?我希望看了这篇教程你可以**很容易地在Ubuntu和Windows之间共享文件**。如果你有任何问题或建议,请在评论里说出来。

这篇教程是在Kalc的请求下写出的。如果你也想,你可以[请求你自己的教程][2]。我们很乐意帮助你和面临同样问题的读者解决问题。

--------------------------------------------------------------------------------

via: http://itsfoss.com/share-folders-local-network-ubuntu-windows/

作者:[Mohd Sohail][a]
译者:[KayGuoWhu](https://github.com/KayGuoWhu)
校对:[校对者ID](https://github.com/校对者ID)

本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出

[a]:http://itsfoss.com/author/sohail/
[1]:http://en.wikipedia.org/wiki/Samba_%28software%29
[2]:http://itsfoss.com/request-tutorial/
createyuan/TranslateProject
translated/tech/20150304 Share Folders On Local Network Between Ubuntu And Windows.md
Markdown
apache-2.0
11,011
<?php
/**
 * route
 * +-----------------------------------------
 * Longest-prefix URL router: matches $_SERVER['PATH_INFO'] against registered
 * rule prefixes and dispatches to the first matching rule's callback.
 * @category pt
 * @package pt\framework
 * @author page7 <[email protected]>
 * @version $Id$
 */

namespace pt\framework;

class route extends base
{
    // Rule table registered via route::add():
    //   rule prefix (e.g. '/blog/post/') => array('params' => ..., 'callback' => ...)
    static protected $rules = array();

    /**
     * Construct
     * Dispatches the current request: builds a regex that matches every
     * prefix of PATH_INFO against the rule keys, picks the longest match,
     * and invokes that rule's callback with the remaining path segments.
     * +-----------------------------------------
     * @access public
     * @param array $config  may carry extra rules under $config['routes']
     */
    public function __construct($config=array())
    {
        if (empty($config['routes']))
            $config['routes'] = array();
        // Rules registered through route::add() override config-supplied ones.
        $routes = array_merge($config['routes'], self::$rules);

        // NOTE(review): $_SERVER['PATH_INFO'] is not set on every SAPI/request —
        // confirm callers guarantee it, otherwise this raises an undefined-index notice.
        $path_info = $_SERVER['PATH_INFO'];
        $count = 0;
        // For PATH_INFO "a/b/c" this produces "a(\/b(\/c" ($count = number of '/'
        // replaced) and $pef closes each optional group plus one outer group:
        // the final pattern matches ",/", ",/a/", ",/a/b/" and ",/a/b/c/" —
        // i.e. every rule key that is a prefix of the request path.
        $reg = str_replace('/', '(\\/', trim($path_info, '/'), $count);
        $pef = str_repeat('){0,1}', $count+1);
        // Rule keys are joined as ",key1,key2,..." so each key is anchored by a comma.
        preg_match_all("/,\/({$reg}{$pef}\//", ','.implode(',', array_keys($routes)), $matchs);
        $matchs = array_filter($matchs[0]);
        rsort($matchs); // longest (most specific) prefix first
        if (!$matchs)
            $matchs = array('//'); // nothing matched: fall back to the root rule
        if (!empty($routes['/']))
            $routes['//'] = &$routes['/']; // alias so the '//' fallback resolves to '/'
        foreach ($matchs as $v)
            // substr($v, 1) strips the leading ',' anchor; dispatch to the first
            // registered rule whose callback is callable.
            if (($r = substr($v, 1)) && isset($routes[$r]) && is_callable($routes[$r]['callback']))
                return call_user_func_array($routes[$r]['callback'], self::params($path_info, $r, $routes[$r]['params']));
    }


    /**
     * Get Params
     * Splits the part of PATH_INFO after the matched rule into positional
     * segments; when the rule declares '$name' placeholders, also publishes
     * each segment into $_GET under the placeholder name.
     * +-----------------------------------------
     * @access protected
     * @param string $path_info  full PATH_INFO of the request
     * @param string $route      matched rule prefix
     * @param string $params     placeholder spec, e.g. '/$id/$name'
     * @return array             positional segment values passed to the callback
     */
    static protected function params($path_info, $route, $params)
    {
        $_params_val = explode('/', trim(substr($path_info, strlen($route)), '/'));
        // explode('') yields array(''): drop a falsy trailing element so an empty
        // remainder becomes an empty list.
        // NOTE(review): a legitimate trailing segment "0" is also dropped here
        // because '0' is falsy in PHP — confirm whether that is intended.
        if ($last = array_pop($_params_val))
            $_params_val[] = $last;
        if ($params)
        {
            $_params_key = explode('/', trim($params, '/'));
            foreach ($_params_key as $i => $k)
            {
                // '$name' placeholders copy the positional value into $_GET['name'].
                if ($k[0] == '$' && isset($_params_val[$i]))
                    $_GET[substr($k, 1)] = $_params_val[$i];
            }
        }
        return $_params_val;
    }


    /**
     * Add Route Rule
     * +-----------------------------------------
     * @access public
     * @param string $rule      rule prefix, e.g. '/blog/post/'
     * @param mixed $callback   callable invoked when the rule matches
     * @param string $params    optional placeholder spec, e.g. '/$id'
     * @return void
     */
    static function add($rule, $callback=null, $params=null)
    {
        self::$rules[$rule] = array(
            'params' => $params,
            'callback' => $callback,
        );
    }
}
page7/pt
class/pt/framework/route.class.php
PHP
apache-2.0
2,556
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.camel.processor; import java.io.ByteArrayInputStream; import java.nio.charset.UnsupportedCharsetException; import java.util.Date; import java.util.Locale; import org.apache.camel.ContextTestSupport; import org.apache.camel.Exchange; import org.apache.camel.InvalidPayloadException; import org.apache.camel.RuntimeCamelException; import org.apache.camel.builder.ExchangeBuilder; import org.apache.camel.builder.RouteBuilder; import org.apache.camel.component.mock.MockEndpoint; import org.junit.Test; public class ConvertBodyTest extends ContextTestSupport { @Test public void testConvertBodyTo() { try { context.addRoutes(new RouteBuilder() { public void configure() { // set an invalid charset from("direct:invalid").convertBodyTo(String.class, "ASSI").to("mock:endpoint"); } }); fail("Should have thrown an exception"); } catch (Exception e) { assertIsInstanceOf(UnsupportedCharsetException.class, e.getCause()); } } @Test public void testConvertBodyCharset() throws Exception { context.addRoutes(new RouteBuilder() { public void configure() { from("direct:foo").convertBodyTo(byte[].class, "iso-8859-1").to("mock:foo"); } }); getMockEndpoint("mock:foo").expectedMessageCount(1); // do not 
propagate charset to avoid side effects with double conversion etc getMockEndpoint("mock:foo").message(0).exchangeProperty(Exchange.CHARSET_NAME).isNull(); template.sendBody("direct:foo", "Hello World"); assertMockEndpointsSatisfied(); } @Test public void testConvertBodyCharsetWithExistingCharsetName() throws Exception { context.addRoutes(new RouteBuilder() { public void configure() { from("direct:foo").convertBodyTo(byte[].class, "iso-8859-1").to("mock:foo"); } }); getMockEndpoint("mock:foo").expectedMessageCount(1); // do not propagate charset to avoid side effects with double conversion etc getMockEndpoint("mock:foo").message(0).exchangeProperty(Exchange.CHARSET_NAME).isEqualTo("UTF-8"); Exchange srcExchange = ExchangeBuilder.anExchange(context).withProperty(Exchange.CHARSET_NAME, "UTF-8").withBody("Hello World").build(); template.send("direct:foo", srcExchange); assertMockEndpointsSatisfied(); } @Test public void testConvertToInteger() throws Exception { MockEndpoint result = getMockEndpoint("mock:result"); result.expectedBodiesReceived(11); template.sendBody("direct:start", "11"); assertMockEndpointsSatisfied(); } @Test public void testConvertNullBody() throws Exception { MockEndpoint result = getMockEndpoint("mock:result"); result.expectedMessageCount(1); result.message(0).body().isNull(); template.sendBody("direct:start", null); assertMockEndpointsSatisfied(); } @Test public void testConvertFailed() throws Exception { getMockEndpoint("mock:result").expectedMessageCount(0); try { template.sendBody("direct:invalid", "11"); fail("Should have thrown an exception"); } catch (RuntimeCamelException e) { assertTrue(e.getCause() instanceof InvalidPayloadException); } assertMockEndpointsSatisfied(); } @Test public void testConvertToBytesCharset() throws Exception { byte[] body = "Hello World".getBytes("iso-8859-1"); MockEndpoint result = getMockEndpoint("mock:result"); result.expectedBodiesReceived(body); template.sendBody("direct:charset", "Hello World"); 
assertMockEndpointsSatisfied(); } @Test public void testConvertToStringCharset() throws Exception { String body = "Hello World"; MockEndpoint result = getMockEndpoint("mock:result"); result.expectedBodiesReceived(body); template.sendBody("direct:charset3", new ByteArrayInputStream(body.getBytes("utf-16"))); assertMockEndpointsSatisfied(); } @Test public void testConvertToBytesCharsetFail() throws Exception { byte[] body = "Hello World".getBytes("utf-8"); MockEndpoint result = getMockEndpoint("mock:result"); result.expectedBodiesReceived(body); template.sendBody("direct:charset2", "Hello World"); // should NOT be okay as we expected utf-8 but got it in utf-16 result.assertIsNotSatisfied(); } @Test public void testConvertToStringCharsetFail() throws Exception { // does not work on AIX String osName = System.getProperty("os.name").toLowerCase(Locale.ENGLISH); boolean aix = osName.indexOf("aix") > -1; if (aix) { return; } String body = "Hell\u00F6 W\u00F6rld"; MockEndpoint result = getMockEndpoint("mock:result"); result.expectedBodiesReceived(body); template.sendBody("direct:charset3", new ByteArrayInputStream(body.getBytes("utf-8"))); // should NOT be okay as we expected utf-8 but got it in utf-16 result.assertIsNotSatisfied(); } protected RouteBuilder createRouteBuilder() { return new RouteBuilder() { public void configure() { from("direct:start").convertBodyTo(Integer.class).to("mock:result"); from("direct:invalid").convertBodyTo(Date.class).to("mock:result"); from("direct:charset").convertBodyTo(byte[].class, "iso-8859-1").to("mock:result"); from("direct:charset2").convertBodyTo(byte[].class, "utf-16").to("mock:result"); from("direct:charset3").convertBodyTo(String.class, "utf-16").to("mock:result"); } }; } }
Fabryprog/camel
core/camel-core/src/test/java/org/apache/camel/processor/ConvertBodyTest.java
Java
apache-2.0
6,756
<?php
namespace Edu\Cnm\MisquoteOfTheDay\Test;

use Edu\Cnm\MisquoteOfTheDay\ValidateUuid;
use PHPUnit\Framework\TestCase;
use Ramsey\Uuid\Uuid;

// grab the class under scrutiny
require_once(dirname(__DIR__) . "/autoload.php");

// grab the uuid generator
require_once(dirname(__DIR__, 2) . "/lib/uuid.php");

/**
 * Minimal UUID Class
 *
 * This class is a minimal class that uses the ValidateUuid trait in order to test the accessor and mutator methods
 **/
class UuidTestObject {
	use ValidateUuid;

	/**
	 * minimal uuid state variable
	 * @var Uuid $uuid
	 **/
	private $uuid;

	/**
	 * accessor method for uuid
	 *
	 * @return Uuid current value of uuid
	 **/
	public function getUuid() : Uuid {
		return($this->uuid);
	}

	/**
	 * mutator method for uuid
	 *
	 * Accepts raw bytes, a human readable string, or a Uuid object; anything
	 * else is rejected by the trait's validateUuid().
	 *
	 * @param $newUuid Uuid new value of uuid
	 **/
	public function setUuid($newUuid) {
		try {
			$uuid = self::validateUuid($newUuid);
		} catch(\InvalidArgumentException | \RangeException | \Exception | \TypeError $exception) {
			// rethrow as the same exception type so callers can distinguish failures
			$exceptionType = get_class($exception);
			throw(new $exceptionType($exception->getMessage(), 0, $exception));
		}

		// store the uuid
		$this->uuid = $uuid;
	}
}

/**
 * Full PHPUnit test for ValidateUuid trait
 *
 * This test class does not communicate with the database. Rather, it verifies that all the possible uuid inputs and
 * enforces that valid inputs will be allowed.
 **/
class ValidateUuidTest extends TestCase {
	/**
	 * valid uuid in raw bytes from mySQL
	 * @var string $VALID_BYTES
	 **/
	protected $VALID_BYTES = null;

	/**
	 * test object using the ValidateUuid trait
	 * @var UuidTestObject $VALID_OBJECT
	 **/
	protected $VALID_OBJECT = null;

	/**
	 * valid uuid in human readable string
	 * @var string $VALID_STRING
	 **/
	protected $VALID_STRING = "a300f5c8-ac56-430d-a683-343298ea88d2";

	/**
	 * valid uuid already in a Uuid object
	 * @var Uuid $VALID_UUID
	 **/
	protected $VALID_UUID = null;

	/**
	 * create dependent objects before running each test
	 **/
	public function setUp() {
		// seed bytes with an actual uuid
		// (the 16 raw bytes of the fixed uuid 7609b931-dd84-4f21-a50f-85a9ac51fb92)
		$this->VALID_BYTES = chr(118) . chr(9) . chr(185) . chr(49) . chr(221) . chr(132) . chr(79) . chr(33) . chr(165) . chr(15) . chr(133) . chr(169) . chr(172) . chr(81) . chr(251) . chr(146);
		$this->VALID_OBJECT = new UuidTestObject();
		// generateUuidV4() comes from lib/uuid.php required above
		$this->VALID_UUID = generateUuidV4();
	}

	/**
	 * test creating a uuid from raw bytes from mySQL
	 **/
	public function testInsertValidBytes() {
		// sanity check: the bytes themselves form a valid Uuid
		$uuid = Uuid::fromBytes($this->VALID_BYTES);
		$this->assertInstanceOf("Ramsey\\Uuid\\Uuid", $uuid);
		// the mutator must accept raw bytes and store a Uuid object
		$this->VALID_OBJECT->setUuid($this->VALID_BYTES);
		$this->assertInstanceOf("Ramsey\\Uuid\\Uuid", $this->VALID_OBJECT->getUuid());
	}

	/**
	 * test creating a uuid from another Uuid object
	 **/
	public function testInsertValidObject() {
		$this->assertInstanceOf("Ramsey\\Uuid\\Uuid", $this->VALID_UUID);
		// the mutator must accept an existing Uuid object unchanged
		$this->VALID_OBJECT->setUuid($this->VALID_UUID);
		$this->assertInstanceOf("Ramsey\\Uuid\\Uuid", $this->VALID_OBJECT->getUuid());
	}

	/**
	 * test creating a uuid from a human readable string
	 **/
	public function testInsertValidString() {
		// sanity check: the string parses as a valid Uuid
		$uuid = Uuid::fromString($this->VALID_STRING);
		$this->assertTrue(Uuid::isValid($this->VALID_STRING));
		$this->assertInstanceOf("Ramsey\\Uuid\\Uuid", $uuid);
		// the mutator must accept the string form and store a Uuid object
		$this->VALID_OBJECT->setUuid($this->VALID_STRING);
		$this->assertInstanceOf("Ramsey\\Uuid\\Uuid", $this->VALID_OBJECT->getUuid());
	}
}
dylan-mcdonald/misquote-of-the-day
php/classes/Test/ValidateUuidTest.php
PHP
apache-2.0
3,392
# Microsoft Azure SDK for Node.js - Network Management

This project provides a Node.js package that makes it easy to manage Microsoft Azure Network Resources.

- **Node.js version: 6.x.x or higher**
- **API version: 2017-03-01**

## Features

- Manage virtual network
- Manage subnet
- Manage network security group
- Manage network security rule
- Manage load balancer
- Manage network interface
- Manage publicIPAddress
- Manage application gateway
- Manage connections
- Manage express route
- Manage local network gateway

## How to Install

```bash
npm install azure-arm-network
```

## How to use

### Authentication, client creation and listing vnets in a resource group as an example

```javascript
var msRestAzure = require('ms-rest-azure');
var NetworkManagementClient = require('azure-arm-network');

// Interactive Login
// It provides a url and code that needs to be copied and pasted in a browser and authenticated over there. If successful,
// the user will get a DeviceTokenCredentials object.
msRestAzure.interactiveLogin(function(err, credentials) {
  var client = new NetworkManagementClient(credentials, 'your-subscription-id');
  var resourceGroupName = 'your-resource-group';
  client.virtualNetworks.list(resourceGroupName, function(err, result, request, response) {
    if (err) console.log(err);
    console.log(result);
  });
});
```

## Detailed Sample

A detailed sample for managing a load balancer that can be cloned and is ready to use can be found [here](https://github.com/Azure-Samples/network-node-manage-loadbalancer).

## Related projects

- [Microsoft Azure SDK for Node.js - All-up](https://github.com/WindowsAzure/azure-sdk-for-node)
begoldsm/azure-sdk-for-node
lib/services/networkManagement2/README.md
Markdown
apache-2.0
1,637
<?php
# Generated by the protocol buffer compiler.  DO NOT EDIT!
# source: google/cloud/recommender/v1/recommendation.proto

namespace Google\Cloud\Recommender\V1;

use Google\Protobuf\Internal\GPBType;
use Google\Protobuf\Internal\RepeatedField;
use Google\Protobuf\Internal\GPBUtil;

/**
 * Contains an operation for a resource loosely based on the JSON-PATCH format
 * with support for:
 * * Custom filters for describing partial array patch.
 * * Extended path values for describing nested arrays.
 * * Custom fields for describing the resource for which the operation is
 *   being described.
 * * Allows extension to custom operations not natively supported by RFC6902.
 * See https://tools.ietf.org/html/rfc6902 for details on the original RFC.
 *
 * Generated from protobuf message <code>google.cloud.recommender.v1.Operation</code>
 */
class Operation extends \Google\Protobuf\Internal\Message
{
    /**
     * Operation type: one of 'add', 'remove', 'replace', 'move', 'copy',
     * 'test' or a custom operation. Case-insensitive; always populated.
     *
     * Generated from protobuf field <code>string action = 1;</code>
     */
    private $action = '';

    /**
     * Type of GCP resource being modified/tested. Always populated.
     * Example: cloudresourcemanager.googleapis.com/Project,
     * compute.googleapis.com/Instance
     *
     * Generated from protobuf field <code>string resource_type = 2;</code>
     */
    private $resource_type = '';

    /**
     * Fully qualified resource name. Always populated.
     * Example: //cloudresourcemanager.googleapis.com/projects/foo.
     *
     * Generated from protobuf field <code>string resource = 3;</code>
     */
    private $resource = '';

    /**
     * Path to the target field being operated on ("/" for resource-level
     * operations). Always populated.
     *
     * Generated from protobuf field <code>string path = 4;</code>
     */
    private $path = '';

    /**
     * For action 'copy': the source resource whose configuration is copied.
     * Empty for all other values of `action`.
     *
     * Generated from protobuf field <code>string source_resource = 5;</code>
     */
    private $source_resource = '';

    /**
     * For actions 'copy' or 'move': the source field within resource or
     * source_resource. Ignored for other operation types.
     *
     * Generated from protobuf field <code>string source_path = 6;</code>
     */
    private $source_path = '';

    /**
     * Exact-match filters applied when `path` refers to (nested) array
     * elements, narrowing down to a single unique element. For advanced
     * matching use path_value_matchers. When both are set, an implicit AND
     * must be performed.
     *
     * Generated from protobuf field <code>map<string, .google.protobuf.Value> path_filters = 8;</code>
     */
    private $path_filters;

    /**
     * Like path_filters, but supports value matching beyond exact match.
     * When both are set, an implicit AND must be performed.
     *
     * Generated from protobuf field <code>map<string, .google.cloud.recommender.v1.ValueMatcher> path_value_matchers = 11;</code>
     */
    private $path_value_matchers;

    // Backing storage for the `path_value` oneof (fields `value` = 7 and
    // `value_matcher` = 10), accessed via readOneof()/writeOneof().
    protected $path_value;

    /**
     * Constructor.
     *
     * @param array $data {
     *     Optional. Data for populating the Message object.
     *
     *     @type string $action
     *           Operation type ('add', 'remove', 'replace', 'move', 'copy',
     *           'test' or custom). Case-insensitive; always populated.
     *     @type string $resource_type
     *           Type of GCP resource being modified/tested. Always populated.
     *     @type string $resource
     *           Fully qualified resource name. Always populated.
     *     @type string $path
     *           Path to the target field ("/" for resource-level operations).
     *     @type string $source_resource
     *           For action 'copy': source resource. Empty otherwise.
     *     @type string $source_path
     *           For actions 'copy'/'move': source field path.
     *     @type \Google\Protobuf\Value $value
     *           Value for the `path` field; set for 'add'/'replace', maybe
     *           for 'test'. Part of the `path_value` oneof.
     *     @type \Google\Cloud\Recommender\V1\ValueMatcher $value_matcher
     *           Advanced matcher for action 'test'. Part of the `path_value`
     *           oneof.
     *     @type array|\Google\Protobuf\Internal\MapField $path_filters
     *           Exact-match filters for array elements referenced by `path`.
     *     @type array|\Google\Protobuf\Internal\MapField $path_value_matchers
     *           Advanced-match filters for array elements referenced by `path`.
     * }
     */
    public function __construct($data = null) {
        \GPBMetadata\Google\Cloud\Recommender\V1\Recommendation::initOnce();
        parent::__construct($data);
    }

    /**
     * Operation type. Case-insensitive; always populated.
     *
     * Generated from protobuf field <code>string action = 1;</code>
     * @return string
     */
    public function getAction()
    {
        return $this->action;
    }

    /**
     * Operation type. Case-insensitive; always populated.
     *
     * Generated from protobuf field <code>string action = 1;</code>
     * @param string $var
     * @return $this
     */
    public function setAction($var)
    {
        GPBUtil::checkString($var, true);
        $this->action = $var;

        return $this;
    }

    /**
     * Type of GCP resource being modified/tested. Always populated.
     *
     * Generated from protobuf field <code>string resource_type = 2;</code>
     * @return string
     */
    public function getResourceType()
    {
        return $this->resource_type;
    }

    /**
     * Type of GCP resource being modified/tested. Always populated.
     *
     * Generated from protobuf field <code>string resource_type = 2;</code>
     * @param string $var
     * @return $this
     */
    public function setResourceType($var)
    {
        GPBUtil::checkString($var, true);
        $this->resource_type = $var;

        return $this;
    }

    /**
     * Fully qualified resource name. Always populated.
     *
     * Generated from protobuf field <code>string resource = 3;</code>
     * @return string
     */
    public function getResource()
    {
        return $this->resource;
    }

    /**
     * Fully qualified resource name. Always populated.
     *
     * Generated from protobuf field <code>string resource = 3;</code>
     * @param string $var
     * @return $this
     */
    public function setResource($var)
    {
        GPBUtil::checkString($var, true);
        $this->resource = $var;

        return $this;
    }

    /**
     * Path to the target field ("/" for resource-level operations).
     *
     * Generated from protobuf field <code>string path = 4;</code>
     * @return string
     */
    public function getPath()
    {
        return $this->path;
    }

    /**
     * Path to the target field ("/" for resource-level operations).
     *
     * Generated from protobuf field <code>string path = 4;</code>
     * @param string $var
     * @return $this
     */
    public function setPath($var)
    {
        GPBUtil::checkString($var, true);
        $this->path = $var;

        return $this;
    }

    /**
     * For action 'copy': source resource. Empty for other actions.
     *
     * Generated from protobuf field <code>string source_resource = 5;</code>
     * @return string
     */
    public function getSourceResource()
    {
        return $this->source_resource;
    }

    /**
     * For action 'copy': source resource. Empty for other actions.
     *
     * Generated from protobuf field <code>string source_resource = 5;</code>
     * @param string $var
     * @return $this
     */
    public function setSourceResource($var)
    {
        GPBUtil::checkString($var, true);
        $this->source_resource = $var;

        return $this;
    }

    /**
     * For actions 'copy'/'move': source field path. Ignored otherwise.
     *
     * Generated from protobuf field <code>string source_path = 6;</code>
     * @return string
     */
    public function getSourcePath()
    {
        return $this->source_path;
    }

    /**
     * For actions 'copy'/'move': source field path. Ignored otherwise.
     *
     * Generated from protobuf field <code>string source_path = 6;</code>
     * @param string $var
     * @return $this
     */
    public function setSourcePath($var)
    {
        GPBUtil::checkString($var, true);
        $this->source_path = $var;

        return $this;
    }

    /**
     * Value for the `path` field. Set for 'add'/'replace'; maybe set for
     * 'test' (either this or `value_matcher`). Part of the `path_value` oneof.
     *
     * Generated from protobuf field <code>.google.protobuf.Value value = 7;</code>
     * @return \Google\Protobuf\Value|null
     */
    public function getValue()
    {
        return $this->readOneof(7);
    }

    public function hasValue()
    {
        return $this->hasOneof(7);
    }

    /**
     * Value for the `path` field. Set for 'add'/'replace'; maybe set for
     * 'test' (either this or `value_matcher`). Part of the `path_value` oneof.
     *
     * Generated from protobuf field <code>.google.protobuf.Value value = 7;</code>
     * @param \Google\Protobuf\Value $var
     * @return $this
     */
    public function setValue($var)
    {
        GPBUtil::checkMessage($var, \Google\Protobuf\Value::class);
        $this->writeOneof(7, $var);

        return $this;
    }

    /**
     * Advanced matcher for action 'test' (either this or `value`). Part of
     * the `path_value` oneof.
     *
     * Generated from protobuf field <code>.google.cloud.recommender.v1.ValueMatcher value_matcher = 10;</code>
     * @return \Google\Cloud\Recommender\V1\ValueMatcher|null
     */
    public function getValueMatcher()
    {
        return $this->readOneof(10);
    }

    public function hasValueMatcher()
    {
        return $this->hasOneof(10);
    }

    /**
     * Advanced matcher for action 'test' (either this or `value`). Part of
     * the `path_value` oneof.
     *
     * Generated from protobuf field <code>.google.cloud.recommender.v1.ValueMatcher value_matcher = 10;</code>
     * @param \Google\Cloud\Recommender\V1\ValueMatcher $var
     * @return $this
     */
    public function setValueMatcher($var)
    {
        GPBUtil::checkMessage($var, \Google\Cloud\Recommender\V1\ValueMatcher::class);
        $this->writeOneof(10, $var);

        return $this;
    }

    /**
     * Exact-match filters applied when `path` refers to array elements.
     * ANDed with path_value_matchers when both are set.
     *
     * Generated from protobuf field <code>map<string, .google.protobuf.Value> path_filters = 8;</code>
     * @return \Google\Protobuf\Internal\MapField
     */
    public function getPathFilters()
    {
        return $this->path_filters;
    }

    /**
     * Exact-match filters applied when `path` refers to array elements.
     * ANDed with path_value_matchers when both are set.
     *
     * Generated from protobuf field <code>map<string, .google.protobuf.Value> path_filters = 8;</code>
     * @param array|\Google\Protobuf\Internal\MapField $var
     * @return $this
     */
    public function setPathFilters($var)
    {
        $arr = GPBUtil::checkMapField($var, GPBType::STRING, GPBType::MESSAGE, \Google\Protobuf\Value::class);
        $this->path_filters = $arr;

        return $this;
    }

    /**
     * Advanced-match filters applied when `path` refers to array elements.
     * ANDed with path_filters when both are set.
     *
     * Generated from protobuf field <code>map<string, .google.cloud.recommender.v1.ValueMatcher> path_value_matchers = 11;</code>
     * @return \Google\Protobuf\Internal\MapField
     */
    public function getPathValueMatchers()
    {
        return $this->path_value_matchers;
    }

    /**
     * Advanced-match filters applied when `path` refers to array elements.
     * ANDed with path_filters when both are set.
     *
     * Generated from protobuf field <code>map<string, .google.cloud.recommender.v1.ValueMatcher> path_value_matchers = 11;</code>
     * @param array|\Google\Protobuf\Internal\MapField $var
     * @return $this
     */
    public function setPathValueMatchers($var)
    {
        $arr = GPBUtil::checkMapField($var, GPBType::STRING, GPBType::MESSAGE, \Google\Cloud\Recommender\V1\ValueMatcher::class);
        $this->path_value_matchers = $arr;

        return $this;
    }

    /**
     * Name of the currently-set field of the `path_value` oneof
     * ("value" or "value_matcher"), or "" if neither is set.
     *
     * @return string
     */
    public function getPathValue()
    {
        return $this->whichOneof("path_value");
    }

}
googleapis/google-cloud-php-recommender
src/V1/Operation.php
PHP
apache-2.0
20,359
/*
 * Copyright 2021 www.seleniumtests.com
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.seleniumtests.core;

import org.testng.xml.XmlSuite;

/**
 * Marker subclass of TestNG's {@link XmlSuite}.
 * <p>
 * Adds no behavior of its own; it exists as a distinct type (presumably so the
 * framework can recognize suites it created itself — TODO confirm against
 * callers, which are outside this file).
 */
public class DefaultXmlSuite extends XmlSuite {

    // Explicit id so serialized instances stay compatible across builds
    // (XmlSuite is Serializable via its hierarchy).
    private static final long serialVersionUID = -5443556778899088771L;
}
TestingForum/seleniumtestsframework
src/main/java/com/seleniumtests/core/DefaultXmlSuite.java
Java
apache-2.0
783
/*
 * The contents of this file are subject to the license and copyright
 * detailed in the LICENSE and NOTICE files at the root of the source
 * tree and available online at
 *
 *     http://duracloud.org/license/
 */
package org.duracloud.common.error;

/**
 * Signals that a supplied username is not acceptable; the offending value is
 * embedded in the exception message.
 *
 * @author Andrew Woods
 *         Date: 4/20/11
 */
public class InvalidUsernameException extends DuraCloudRuntimeException {

    /**
     * @param username the rejected username, included verbatim in the message
     */
    public InvalidUsernameException(String username) {
        super("Invalid username: " + username);
    }
}
duracloud/duracloud
common/src/main/java/org/duracloud/common/error/InvalidUsernameException.java
Java
apache-2.0
490
package org.flexiblepower.efi;

import org.flexiblepower.efi.uncontrolled.UncontrolledAllocation;
import org.flexiblepower.efi.uncontrolled.UncontrolledRegistration;
import org.flexiblepower.efi.uncontrolled.UncontrolledUpdate;
import org.flexiblepower.messaging.Cardinality;
import org.flexiblepower.messaging.Port;
import org.flexiblepower.ral.ResourceManager;
import org.flexiblepower.ral.messages.AllocationRevoke;
import org.flexiblepower.ral.messages.AllocationStatusUpdate;
import org.flexiblepower.ral.messages.ControlSpaceRevoke;

/**
 * {@link ResourceManager} for "uncontrolled" EFI resources.
 * <p>
 * The interface declares no methods of its own; its whole contract is the
 * {@code @Port} annotation, which wires a single ("controller") port that
 * accepts {@link UncontrolledAllocation} and {@link AllocationRevoke}
 * messages and sends {@link UncontrolledRegistration},
 * {@link UncontrolledUpdate}, {@link AllocationStatusUpdate} and
 * {@link ControlSpaceRevoke} messages.
 */
@Port(name = "controller",
      accepts = { UncontrolledAllocation.class, AllocationRevoke.class },
      sends = { UncontrolledRegistration.class,
                UncontrolledUpdate.class,
                AllocationStatusUpdate.class,
                ControlSpaceRevoke.class },
      cardinality = Cardinality.SINGLE)
public interface UncontrolledResourceManager extends ResourceManager {

}
MalcolmK/fpai-core
flexiblepower.ral.efi/src/org/flexiblepower/efi/UncontrolledResourceManager.java
Java
apache-2.0
931
/* * Copyright 2000-2016 JetBrains s.r.o. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intellij.refactoring.introduceParameter; import com.intellij.analysis.AnalysisScope; import com.intellij.codeInsight.AnnotationUtil; import com.intellij.codeInsight.ChangeContextUtil; import com.intellij.codeInsight.generation.GenerateMembersUtil; import com.intellij.lang.findUsages.DescriptiveNameUtil; import com.intellij.openapi.application.ApplicationManager; import com.intellij.openapi.diagnostic.Logger; import com.intellij.openapi.progress.ProgressManager; import com.intellij.openapi.project.Project; import com.intellij.openapi.util.Pair; import com.intellij.openapi.util.Ref; import com.intellij.psi.*; import com.intellij.psi.search.GlobalSearchScope; import com.intellij.psi.search.searches.MethodReferencesSearch; import com.intellij.psi.search.searches.OverridingMethodsSearch; import com.intellij.psi.util.PsiTreeUtil; import com.intellij.refactoring.BaseRefactoringProcessor; import com.intellij.refactoring.IntroduceParameterRefactoring; import com.intellij.refactoring.RefactoringBundle; import com.intellij.refactoring.changeSignature.ChangeSignatureProcessor; import com.intellij.refactoring.introduceVariable.IntroduceVariableBase; import com.intellij.refactoring.listeners.RefactoringEventData; import com.intellij.refactoring.util.*; import com.intellij.refactoring.util.duplicates.MethodDuplicatesHandler; import 
com.intellij.refactoring.util.occurrences.ExpressionOccurrenceManager; import com.intellij.refactoring.util.occurrences.LocalVariableOccurrenceManager; import com.intellij.refactoring.util.occurrences.OccurrenceManager; import com.intellij.refactoring.util.usageInfo.DefaultConstructorImplicitUsageInfo; import com.intellij.refactoring.util.usageInfo.NoConstructorClassUsageInfo; import com.intellij.usageView.UsageInfo; import com.intellij.usageView.UsageViewDescriptor; import com.intellij.usageView.UsageViewUtil; import com.intellij.util.IncorrectOperationException; import com.intellij.util.containers.MultiMap; import gnu.trove.TIntArrayList; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import java.util.ArrayList; import java.util.Collections; import java.util.HashSet; import java.util.Set; /** * @author dsl * @since 07.05.2002 */ public class IntroduceParameterProcessor extends BaseRefactoringProcessor implements IntroduceParameterData { private static final Logger LOG = Logger.getInstance("#com.intellij.refactoring.introduceParameter.IntroduceParameterProcessor"); private final PsiMethod myMethodToReplaceIn; private final PsiMethod myMethodToSearchFor; private PsiExpression myParameterInitializer; private final PsiExpression myExpressionToSearch; private final PsiLocalVariable myLocalVariable; private final boolean myRemoveLocalVariable; private final String myParameterName; private final boolean myReplaceAllOccurrences; private int myReplaceFieldsWithGetters; private final boolean myDeclareFinal; private final boolean myGenerateDelegate; private PsiType myForcedType; private final TIntArrayList myParametersToRemove; private final PsiManager myManager; private JavaExpressionWrapper myInitializerWrapper; private boolean myHasConflicts; /** * if expressionToSearch is null, search for localVariable */ public IntroduceParameterProcessor(@NotNull Project project, PsiMethod methodToReplaceIn, @NotNull PsiMethod methodToSearchFor, 
PsiExpression parameterInitializer, PsiExpression expressionToSearch, PsiLocalVariable localVariable, boolean removeLocalVariable, String parameterName, boolean replaceAllOccurrences, int replaceFieldsWithGetters, boolean declareFinal, boolean generateDelegate, PsiType forcedType, @NotNull TIntArrayList parametersToRemove) { super(project); myMethodToReplaceIn = methodToReplaceIn; myMethodToSearchFor = methodToSearchFor; myParameterInitializer = parameterInitializer; myExpressionToSearch = expressionToSearch; myLocalVariable = localVariable; myRemoveLocalVariable = removeLocalVariable; myParameterName = parameterName; myReplaceAllOccurrences = replaceAllOccurrences; myReplaceFieldsWithGetters = replaceFieldsWithGetters; myDeclareFinal = declareFinal; myGenerateDelegate = generateDelegate; myForcedType = forcedType; myManager = PsiManager.getInstance(project); myParametersToRemove = parametersToRemove; myInitializerWrapper = expressionToSearch == null ? null : new JavaExpressionWrapper(expressionToSearch); } public void setParameterInitializer(PsiExpression parameterInitializer) { myParameterInitializer = parameterInitializer; } @Override @NotNull protected UsageViewDescriptor createUsageViewDescriptor(@NotNull UsageInfo[] usages) { return new IntroduceParameterViewDescriptor(myMethodToSearchFor); } @Override @NotNull public PsiType getForcedType() { return myForcedType; } private void setForcedType(PsiType forcedType) { myForcedType = forcedType; } @Override public int getReplaceFieldsWithGetters() { return myReplaceFieldsWithGetters; } public void setReplaceFieldsWithGetters(int replaceFieldsWithGetters) { myReplaceFieldsWithGetters = replaceFieldsWithGetters; } @Override @NotNull protected UsageInfo[] findUsages() { ArrayList<UsageInfo> result = new ArrayList<>(); PsiMethod[] overridingMethods = OverridingMethodsSearch.search(myMethodToSearchFor).toArray(PsiMethod.EMPTY_ARRAY); for (PsiMethod overridingMethod : overridingMethods) { result.add(new 
UsageInfo(overridingMethod)); } if (!myGenerateDelegate) { PsiReference[] refs = MethodReferencesSearch.search(myMethodToSearchFor, GlobalSearchScope.projectScope(myProject), true).toArray(PsiReference.EMPTY_ARRAY); for (PsiReference ref1 : refs) { PsiElement ref = ref1.getElement(); if (ref instanceof PsiMethod && ((PsiMethod)ref).isConstructor()) { DefaultConstructorImplicitUsageInfo implicitUsageInfo = new DefaultConstructorImplicitUsageInfo((PsiMethod)ref, ((PsiMethod)ref).getContainingClass(), myMethodToSearchFor); result.add(implicitUsageInfo); } else if (ref instanceof PsiClass) { result.add(new NoConstructorClassUsageInfo((PsiClass)ref)); } else if (!IntroduceParameterUtil.insideMethodToBeReplaced(ref, myMethodToReplaceIn)) { result.add(new ExternalUsageInfo(ref)); } else { result.add(new ChangedMethodCallInfo(ref)); } } } if (myReplaceAllOccurrences) { for (PsiElement expr : getOccurrences()) { result.add(new InternalUsageInfo(expr)); } } else { if (myExpressionToSearch != null && myExpressionToSearch.isValid()) { result.add(new InternalUsageInfo(myExpressionToSearch)); } } final UsageInfo[] usageInfos = result.toArray(UsageInfo.EMPTY_ARRAY); return UsageViewUtil.removeDuplicatedUsages(usageInfos); } protected PsiElement[] getOccurrences() { final OccurrenceManager occurrenceManager; if (myLocalVariable == null) { occurrenceManager = new ExpressionOccurrenceManager(myExpressionToSearch, myMethodToReplaceIn, null); } else { occurrenceManager = new LocalVariableOccurrenceManager(myLocalVariable, null); } return occurrenceManager.getOccurrences(); } public boolean hasConflicts() { return myHasConflicts; } private static class ReferencedElementsCollector extends JavaRecursiveElementWalkingVisitor { private final Set<PsiElement> myResult = new HashSet<>(); @Override public void visitReferenceExpression(PsiReferenceExpression expression) { visitReferenceElement(expression); } @Override public void visitReferenceElement(PsiJavaCodeReferenceElement reference) { 
super.visitReferenceElement(reference); final PsiElement element = reference.resolve(); if (element != null) { myResult.add(element); } } } @Override protected boolean preprocessUsages(@NotNull Ref<UsageInfo[]> refUsages) { UsageInfo[] usagesIn = refUsages.get(); MultiMap<PsiElement, String> conflicts = new MultiMap<>(); AnySameNameVariables anySameNameVariables = new AnySameNameVariables(); myMethodToReplaceIn.accept(anySameNameVariables); final Pair<PsiElement, String> conflictPair = anySameNameVariables.getConflict(); if (conflictPair != null) { conflicts.putValue(conflictPair.first, conflictPair.second); } if (!myGenerateDelegate) { detectAccessibilityConflicts(usagesIn, conflicts); } if (myParameterInitializer != null && !myMethodToReplaceIn.hasModifierProperty(PsiModifier.PRIVATE)) { final AnySupers anySupers = new AnySupers(); myParameterInitializer.accept(anySupers); if (anySupers.isResult()) { for (UsageInfo usageInfo : usagesIn) { PsiElement element = usageInfo.getElement(); if (!(element instanceof PsiMethod) && !(usageInfo instanceof InternalUsageInfo)) { if (!PsiTreeUtil.isAncestor(myMethodToReplaceIn.getContainingClass(), element, false)) { String message = RefactoringBundle.message("parameter.initializer.contains.0.but.not.all.calls.to.method.are.in.its.class", CommonRefactoringUtil.htmlEmphasize(PsiKeyword.SUPER)); conflicts.putValue(myParameterInitializer, message); break; } } } } } for (IntroduceParameterMethodUsagesProcessor processor : IntroduceParameterMethodUsagesProcessor.EP_NAME.getExtensions()) { processor.findConflicts(this, refUsages.get(), conflicts); } myHasConflicts = !conflicts.isEmpty(); return showConflicts(conflicts, usagesIn); } private void detectAccessibilityConflicts(final UsageInfo[] usageArray, MultiMap<PsiElement, String> conflicts) { if (myParameterInitializer != null) { final ReferencedElementsCollector collector = new ReferencedElementsCollector(); myParameterInitializer.accept(collector); final Set<PsiElement> result = 
collector.myResult; if (!result.isEmpty()) { for (final UsageInfo usageInfo : usageArray) { if (usageInfo instanceof ExternalUsageInfo && IntroduceParameterUtil.isMethodUsage(usageInfo)) { final PsiElement place = usageInfo.getElement(); for (PsiElement element : result) { if (element instanceof PsiField && myReplaceFieldsWithGetters != IntroduceParameterRefactoring.REPLACE_FIELDS_WITH_GETTERS_NONE) { //check getter access instead final PsiClass psiClass = ((PsiField)element).getContainingClass(); LOG.assertTrue(psiClass != null); final PsiMethod method = psiClass.findMethodBySignature(GenerateMembersUtil.generateGetterPrototype((PsiField)element), true); if (method != null){ element = method; } } if (element instanceof PsiMember && !JavaPsiFacade.getInstance(myProject).getResolveHelper().isAccessible((PsiMember)element, place, null)) { String message = RefactoringBundle.message( "0.is.not.accessible.from.1.value.for.introduced.parameter.in.that.method.call.will.be.incorrect", RefactoringUIUtil.getDescription(element, true), RefactoringUIUtil.getDescription(ConflictsUtil.getContainer(place), true)); conflicts.putValue(element, message); } } } } } } } public static class AnySupers extends JavaRecursiveElementWalkingVisitor { private boolean myResult; @Override public void visitSuperExpression(PsiSuperExpression expression) { myResult = true; } public boolean isResult() { return myResult; } @Override public void visitReferenceExpression(PsiReferenceExpression expression) { visitElement(expression); } } public class AnySameNameVariables extends JavaRecursiveElementWalkingVisitor { private Pair<PsiElement, String> conflict; public Pair<PsiElement, String> getConflict() { return conflict; } @Override public void visitVariable(PsiVariable variable) { if (variable == myLocalVariable) return; if (variable instanceof PsiParameter && ((PsiParameter)variable).getDeclarationScope() == myMethodToReplaceIn) { if 
(getParametersToRemove().contains(myMethodToReplaceIn.getParameterList().getParameterIndex((PsiParameter)variable))){ return; } } if (myParameterName.equals(variable.getName())) { String descr = RefactoringBundle.message("there.is.already.a.0.it.will.conflict.with.an.introduced.parameter", RefactoringUIUtil.getDescription(variable, true)); conflict = Pair.create(variable, CommonRefactoringUtil.capitalize(descr)); } } @Override public void visitReferenceExpression(PsiReferenceExpression expression) { } @Override public void visitElement(PsiElement element) { if(conflict != null) return; super.visitElement(element); } } @Nullable @Override protected String getRefactoringId() { return "refactoring.introduceParameter"; } @Nullable @Override protected RefactoringEventData getBeforeData() { RefactoringEventData data = new RefactoringEventData(); data.addElements(new PsiElement[] {myLocalVariable, myExpressionToSearch}); return data; } @Nullable @Override protected RefactoringEventData getAfterData(@NotNull UsageInfo[] usages) { final PsiParameter parameter = JavaIntroduceParameterMethodUsagesProcessor.getAnchorParameter(myMethodToReplaceIn); final RefactoringEventData afterData = new RefactoringEventData(); afterData.addElement(parameter); return afterData; } @Override protected void performRefactoring(@NotNull UsageInfo[] usages) { try { PsiElementFactory factory = JavaPsiFacade.getInstance(myManager.getProject()).getElementFactory(); PsiType initializerType = getInitializerType(myForcedType, myParameterInitializer, myLocalVariable); setForcedType(initializerType); // Converting myParameterInitializer if (myParameterInitializer == null) { LOG.assertTrue(myLocalVariable != null); myParameterInitializer = factory.createExpressionFromText(myLocalVariable.getName(), myLocalVariable); } else if (myParameterInitializer instanceof PsiArrayInitializerExpression){ final PsiExpression newExprArrayInitializer = 
RefactoringUtil.createNewExpressionFromArrayInitializer((PsiArrayInitializerExpression)myParameterInitializer, initializerType); myParameterInitializer = (PsiExpression)myParameterInitializer.replace(newExprArrayInitializer); } myInitializerWrapper = new JavaExpressionWrapper(myParameterInitializer); // Changing external occurrences (the tricky part) IntroduceParameterUtil.processUsages(usages, this); if (myGenerateDelegate) { generateDelegate(myMethodToReplaceIn); if (myMethodToReplaceIn != myMethodToSearchFor) { final PsiMethod method = generateDelegate(myMethodToSearchFor); if (method.getContainingClass().isInterface()) { final PsiCodeBlock block = method.getBody(); if (block != null) { block.delete(); } } } } // Changing signature of initial method // (signature of myMethodToReplaceIn will be either changed now or have already been changed) LOG.assertTrue(initializerType.isValid()); final FieldConflictsResolver fieldConflictsResolver = new FieldConflictsResolver(myParameterName, myMethodToReplaceIn.getBody()); IntroduceParameterUtil.changeMethodSignatureAndResolveFieldConflicts(new UsageInfo(myMethodToReplaceIn), usages, this); if (myMethodToSearchFor != myMethodToReplaceIn) { IntroduceParameterUtil.changeMethodSignatureAndResolveFieldConflicts(new UsageInfo(myMethodToSearchFor), usages, this); } else if (myGenerateDelegate && myMethodToReplaceIn.findSuperMethods().length == 0) { final PsiAnnotation annotation = AnnotationUtil.findAnnotation(myMethodToReplaceIn, true, Override.class.getName()); if (annotation != null) { annotation.delete(); } } ChangeContextUtil.clearContextInfo(myParameterInitializer); // Replacing expression occurrences for (UsageInfo usage : usages) { if (usage instanceof ChangedMethodCallInfo) { PsiElement element = usage.getElement(); processChangedMethodCall(element); } else if (usage instanceof InternalUsageInfo) { PsiElement element = usage.getElement(); if (element instanceof PsiExpression) { element = 
// NOTE(review): this chunk begins mid-method. The enclosing method (the processor's
// perform/rewrite routine) starts before this view, so `element`, `factory`,
// `fieldConflictsResolver` and the surrounding try/loop structure are declared above.
        RefactoringUtil.outermostParenthesizedExpression((PsiExpression)element);
      }
      if (element != null) {
        // If the occurrence is a whole expression statement, it becomes redundant once
        // the value travels in as a parameter — drop the statement entirely; otherwise
        // replace the expression with a reference to the new parameter.
        if (element.getParent() instanceof PsiExpressionStatement) {
          element.getParent().delete();
        }
        else {
          PsiExpression newExpr = factory.createExpressionFromText(myParameterName, element);
          IntroduceVariableBase.replace((PsiExpression)element, newExpr, myProject);
        }
      }
    }
  }
  // Optionally remove the local variable that the new parameter supersedes.
  if (myLocalVariable != null && myRemoveLocalVariable) {
    myLocalVariable.normalizeDeclaration();
    myLocalVariable.getParent().delete();
  }
  fieldConflictsResolver.fix();
  }
  catch (IncorrectOperationException ex) {
    LOG.error(ex);
  }

  // Duplicate detection must run outside the current write action; schedule it,
  // and skip it once the project is disposed.
  if (isReplaceDuplicates()) {
    ApplicationManager.getApplication().invokeLater(() -> processMethodsDuplicates(), myProject.getDisposed());
  }
}

/** Extension point: subclasses may return false to skip the duplicate search after the refactoring. */
protected boolean isReplaceDuplicates() {
  return true;
}

/**
 * Searches the containing file for duplicates of the changed method and offers to replace them.
 * Runs the search inside a read action under a synchronous modal progress dialog.
 */
private void processMethodsDuplicates() {
  final Runnable runnable = () -> {
    // The method may have been invalidated by the preceding PSI edits.
    if (!myMethodToReplaceIn.isValid()) return;
    MethodDuplicatesHandler.invokeOnScope(myProject, Collections.singleton(myMethodToReplaceIn),
                                          new AnalysisScope(myMethodToReplaceIn.getContainingFile()), true);
  };
  ProgressManager.getInstance().runProcessWithProgressSynchronously(() -> ApplicationManager.getApplication().runReadAction(runnable),
                                                                   "Search Method Duplicates...", true, myProject);
}

/**
 * Builds a delegate with the ORIGINAL signature that forwards to the changed method,
 * and inserts it before {@code methodToReplaceIn} in the containing class.
 * The delegate's body is emptied and replaced with a call that passes the existing
 * parameters (minus the removed ones) plus the initializer expression in the position
 * determined by {@link #getAnchorParameter}.
 *
 * @param methodToReplaceIn the method whose signature is being changed
 * @return the inserted delegate method
 */
private PsiMethod generateDelegate(final PsiMethod methodToReplaceIn) throws IncorrectOperationException {
  final PsiMethod delegate = (PsiMethod)methodToReplaceIn.copy();
  final PsiElementFactory elementFactory = JavaPsiFacade.getInstance(myManager.getProject()).getElementFactory();
  ChangeSignatureProcessor.makeEmptyBody(elementFactory, delegate);
  final PsiCallExpression callExpression = ChangeSignatureProcessor.addDelegatingCallTemplate(delegate, delegate.getName());
  final PsiExpressionList argumentList = callExpression.getArgumentList();
  assert argumentList != null;
  final PsiParameter[] psiParameters = methodToReplaceIn.getParameterList().getParameters();

  final PsiParameter anchorParameter = getAnchorParameter(methodToReplaceIn);
  if (psiParameters.length == 0) {
    // No existing parameters: the initializer is the sole argument.
    argumentList.add(myParameterInitializer);
  }
  else {
    if (anchorParameter == null) {
      // No anchor means the new argument goes first.
      argumentList.add(myParameterInitializer);
    }
    for (int i = 0; i < psiParameters.length; i++) {
      PsiParameter psiParameter = psiParameters[i];
      if (!myParametersToRemove.contains(i)) {
        final PsiExpression expression = elementFactory.createExpressionFromText(psiParameter.getName(), delegate);
        argumentList.add(expression);
      }
      // Insert the initializer right after the anchor parameter's argument.
      if (psiParameter == anchorParameter) {
        argumentList.add(myParameterInitializer);
      }
    }
  }

  return (PsiMethod)methodToReplaceIn.getContainingClass().addBefore(delegate, methodToReplaceIn);
}

/**
 * Resolves the type of the new parameter. Precedence: an explicitly forced type wins;
 * otherwise the local variable's declared type; otherwise the type inferred from the
 * initializer expression. Returns null (after logging an error) only when all three
 * sources are null.
 */
static PsiType getInitializerType(PsiType forcedType, PsiExpression parameterInitializer, PsiLocalVariable localVariable) {
  final PsiType initializerType;
  if (forcedType == null) {
    if (parameterInitializer == null) {
      if (localVariable == null) {
        LOG.error("all null");
        initializerType = null;
      }
      else {
        initializerType = localVariable.getType();
      }
    }
    else {
      if (localVariable == null) {
        initializerType = RefactoringUtil.getTypeByExpressionWithExpectedType(parameterInitializer);
      }
      else {
        initializerType = localVariable.getType();
      }
    }
  }
  else {
    initializerType = forcedType;
  }
  return initializerType;
}

/**
 * Updates one call site of the changed method: appends a reference to the new parameter
 * to the argument list (positioned before any varargs tail) and deletes arguments for
 * removed parameters.
 *
 * @param element the reference element at the call site; its parent must be a
 *                {@link PsiMethodCallExpression}, otherwise an error is logged
 */
private void processChangedMethodCall(PsiElement element) throws IncorrectOperationException {
  if (element.getParent() instanceof PsiMethodCallExpression) {
    PsiMethodCallExpression methodCall = (PsiMethodCallExpression)element.getParent();
    // Don't rewrite a call that is itself part of the initializer of the new parameter
    // (would produce a self-referential argument).
    if (myMethodToReplaceIn == myMethodToSearchFor && PsiTreeUtil.isAncestor(methodCall, myParameterInitializer, false)) return;

    PsiElementFactory factory = JavaPsiFacade.getInstance(methodCall.getProject()).getElementFactory();
    PsiExpression expression = factory.createExpressionFromText(myParameterName, null);
    final PsiExpressionList argList = methodCall.getArgumentList();
    final PsiExpression[] exprs = argList.getExpressions();

    boolean first = false;
    PsiElement anchor = null;
    if (myMethodToSearchFor.isVarArgs()) {
      // oldParamCount counts the fixed (non-vararg) parameters. The new argument must
      // land after the last fixed argument so the varargs tail stays intact.
      final int oldParamCount = myMethodToSearchFor.getParameterList().getParametersCount() - 1;
      if (exprs.length >= oldParamCount) {
        if (oldParamCount > 1) {
          anchor = exprs[oldParamCount - 2];
        }
        else {
          // No fixed argument to anchor on: the new argument becomes the first one.
          first = true;
          anchor = null;
        }
      }
      else {
        anchor = exprs[exprs.length -1];
      }
    }
    else if (exprs.length > 0) {
      anchor = exprs[exprs.length - 1];
    }

    if (anchor != null) {
      argList.addAfter(expression, anchor);
    }
    else {
      if (first && exprs.length > 0) {
        argList.addBefore(expression, exprs[0]);
      }
      else {
        argList.add(expression);
      }
    }

    removeParametersFromCall(argList);
  }
  else {
    LOG.error(element.getParent());
  }
}

/**
 * Deletes from the call's argument list the arguments whose indices are marked for
 * removal. Iterates in descending index order so earlier deletions do not shift the
 * indices still to be processed.
 */
private void removeParametersFromCall(final PsiExpressionList argList) {
  final PsiExpression[] exprs = argList.getExpressions();
  myParametersToRemove.forEachDescending(paramNum -> {
    if (paramNum < exprs.length) {
      try {
        exprs[paramNum].delete();
      }
      catch (IncorrectOperationException e) {
        LOG.error(e);
      }
    }
    return true;
  });
}

/** Undo/redo command name shown in the UI for this refactoring. */
@Override
@NotNull
protected String getCommandName() {
  return RefactoringBundle.message("introduce.parameter.command", DescriptiveNameUtil.getDescriptiveName(myMethodToReplaceIn));
}

/**
 * Picks the parameter after which the new one is inserted: the last parameter for a
 * normal method, or the parameter just before the varargs parameter for a varargs
 * method (so the new parameter never lands after the varargs). Returns null when
 * there is no suitable anchor.
 */
@Nullable
private static PsiParameter getAnchorParameter(PsiMethod methodToReplaceIn) {
  PsiParameterList parameterList = methodToReplaceIn.getParameterList();
  final PsiParameter anchorParameter;
  final PsiParameter[] parameters = parameterList.getParameters();
  final int length = parameters.length;
  if (!methodToReplaceIn.isVarArgs()) {
    anchorParameter = length > 0 ? parameters[length-1] : null;
  }
  else {
    LOG.assertTrue(length > 0);
    LOG.assertTrue(parameters[length-1].isVarArgs());
    anchorParameter = length > 1 ? parameters[length-2] : null;
  }
  return anchorParameter;
}

// --- Simple accessors over the processor's configuration ---

@Override
public PsiMethod getMethodToReplaceIn() {
  return myMethodToReplaceIn;
}

@Override
@NotNull
public PsiMethod getMethodToSearchFor() {
  return myMethodToSearchFor;
}

@Override
public JavaExpressionWrapper getParameterInitializer() {
  return myInitializerWrapper;
}

@Override
@NotNull
public String getParameterName() {
  return myParameterName;
}

@Override
public boolean isDeclareFinal() {
  return myDeclareFinal;
}

@Override
public boolean isGenerateDelegate() {
  return myGenerateDelegate;
}

@Override
@NotNull
public TIntArrayList getParametersToRemove() {
  return myParametersToRemove;
}

@Override
@NotNull
public Project getProject() {
  return myProject;
}
}
goodwinnk/intellij-community
java/java-impl/src/com/intellij/refactoring/introduceParameter/IntroduceParameterProcessor.java
Java
apache-2.0
25,821
// Copyright 2014 Serilog Contributors
// 
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// 
// http://www.apache.org/licenses/LICENSE-2.0
// 
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

using System;
using Serilog.Configuration;
using Serilog.Events;
using Serilog.Sinks.ElmahIO;

namespace Serilog
{
    /// <summary>
    /// Extends <see cref="LoggerConfiguration"/> with a WriteTo.ElmahIO() method.
    /// </summary>
    public static class LoggerConfigurationElmahIOExtensions
    {
        /// <summary>
        /// Configures a sink that forwards log events to the elmah.io webservice.
        /// Because elmah.io is primarily an error-tracking service, only events at
        /// <see cref="LogEventLevel.Error"/> or above are sent unless a lower
        /// minimum level is specified.
        /// </summary>
        /// <param name="loggerConfiguration">The logger configuration.</param>
        /// <param name="logId">The log id as found on the elmah.io website.</param>
        /// <param name="restrictedToMinimumLevel">The minimum log event level required in order to write an event to the sink.</param>
        /// <param name="formatProvider">Supplies culture-specific formatting information, or null.</param>
        /// <returns>Logger configuration, allowing configuration to continue.</returns>
        /// <exception cref="ArgumentNullException">A required parameter is null.</exception>
        public static LoggerConfiguration ElmahIO(
            this LoggerSinkConfiguration loggerConfiguration,
            Guid logId,
            LogEventLevel restrictedToMinimumLevel = LogEventLevel.Error,
            IFormatProvider formatProvider = null)
        {
            if (loggerConfiguration == null)
            {
                throw new ArgumentNullException("loggerConfiguration");
            }

            var sink = new ElmahIOSink(formatProvider, logId);
            return loggerConfiguration.Sink(sink, restrictedToMinimumLevel);
        }
    }
}
vossad01/serilog
src/Serilog.Sinks.ElmahIO/LoggerConfigurationElmahIOExtensions.cs
C#
apache-2.0
2,254
/** * Copyright 2005-2014 The Kuali Foundation * * Licensed under the Educational Community License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.opensource.org/licenses/ecl2.php * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package edu.sampleu.bookstore.document.web; import edu.sampleu.bookstore.bo.Book; import edu.sampleu.bookstore.bo.BookOrder; import edu.sampleu.bookstore.document.BookOrderDocument; import org.apache.struts.action.ActionForm; import org.apache.struts.action.ActionForward; import org.apache.struts.action.ActionMapping; import org.kuali.rice.core.api.util.type.KualiDecimal; import org.kuali.rice.kns.web.struts.action.KualiTransactionalDocumentActionBase; import org.kuali.rice.kns.web.struts.form.KualiForm; import org.kuali.rice.krad.service.KRADServiceLocator; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; /* * BookOrderAction class file for BookOrder maintenance Object * Actions prior to submit and post-Submit processes are handled. 
*/ public class BookOrderAction extends KualiTransactionalDocumentActionBase { public ActionForward addBookOrder(ActionMapping mapping, ActionForm actionForm, HttpServletRequest request, HttpServletResponse response) throws Exception { BookOrderForm form = (BookOrderForm) actionForm; BookOrderDocument document = form.getBookOrderDocument(); BookOrder newBookEntry = form.getNewBookOrder(); document.addBookOrder(newBookEntry); for (BookOrder entry : document.getBookOrders()) { if (entry.getBookId() != null) { Book book = KRADServiceLocator.getBusinessObjectService().findBySinglePrimaryKey(Book.class, entry.getBookId()); entry.setUnitPrice(book.getPrice()); Double totalPrice = 0.0d; if (book.getPrice() != null && entry.getQuantity() != null) { totalPrice = book.getPrice().doubleValue() * entry.getQuantity().intValue(); if (entry.getDiscount() != null && entry.getDiscount().doubleValue() > 0) { totalPrice = totalPrice - (totalPrice * entry.getDiscount().doubleValue() / 100); } } entry.setTotalPrice(new KualiDecimal(totalPrice)); } } // clear the used book order entry form.setNewBookOrder(new BookOrder()); return mapping.findForward("basic"); } public ActionForward deleteBookOrder(ActionMapping mapping, ActionForm actionForm, HttpServletRequest request, HttpServletResponse response) throws Exception { BookOrderForm form = (BookOrderForm) actionForm; BookOrderDocument document = form.getBookOrderDocument(); int deleteIndex = getLineToDelete(request); document.removeBookOrder(deleteIndex); return mapping.findForward("basic"); } @Override protected void doProcessingAfterPost(KualiForm actionForm, HttpServletRequest request) { super.doProcessingAfterPost(actionForm, request); BookOrderForm form = (BookOrderForm) actionForm; BookOrderDocument document = form.getBookOrderDocument(); for (BookOrder entry : document.getBookOrders()) { if(entry.getBookId() != null){ Book book = KRADServiceLocator.getBusinessObjectService().findBySinglePrimaryKey(Book.class, entry.getBookId()); 
entry.setUnitPrice(book.getPrice()); Double totalPrice = 0.0d; if (book.getPrice() != null && entry.getQuantity() != null) { totalPrice = book.getPrice().doubleValue() * entry.getQuantity().intValue(); if (entry.getDiscount() != null && entry.getDiscount().doubleValue() > 0) { totalPrice = totalPrice - (totalPrice * entry.getDiscount().doubleValue() / 100); } } entry.setTotalPrice(new KualiDecimal(totalPrice)); entry.setBook(book); } } } }
ua-eas/ua-rice-2.1.9
sampleapp/src/main/java/edu/sampleu/bookstore/document/web/BookOrderAction.java
Java
apache-2.0
4,178
/*
 * Copyright 2010-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * 
 * Licensed under the Apache License, Version 2.0 (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 * 
 *  http://aws.amazon.com/apache2.0
 * 
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */

/*
 * Do not modify this file. This file is generated from the opsworks-2013-02-18.normal.json service model.
 */
using System;
using System.Collections.Generic;
using System.Globalization;
using System.IO;
using System.Text;
using System.Xml.Serialization;

using Amazon.OpsWorks.Model;
using Amazon.Runtime;
using Amazon.Runtime.Internal;
using Amazon.Runtime.Internal.Transform;
using Amazon.Runtime.Internal.Util;
using ThirdParty.Json.LitJson;

namespace Amazon.OpsWorks.Model.Internal.MarshallTransformations
{
    /// <summary>
    /// DescribeLayers Request Marshaller
    /// </summary>       
    public class DescribeLayersRequestMarshaller : IMarshaller<IRequest, DescribeLayersRequest> , IMarshaller<IRequest,AmazonWebServiceRequest>
    {
        /// <summary>
        /// Marshals the request object to the HTTP request. Non-generic overload
        /// required by <see cref="IMarshaller{TRequest, TInput}"/>; simply downcasts
        /// and delegates to the typed overload.
        /// </summary>  
        /// <param name="input"></param>
        /// <returns></returns>
        public IRequest Marshall(AmazonWebServiceRequest input)
        {
            return this.Marshall((DescribeLayersRequest)input);
        }

        /// <summary>
        /// Marshals the request object to the HTTP request: a POST to "/" carrying the
        /// OpsWorks JSON-RPC target header and a JSON 1.1 body with the optional
        /// LayerIds array and StackId string.
        /// </summary>  
        /// <param name="publicRequest"></param>
        /// <returns></returns>
        public IRequest Marshall(DescribeLayersRequest publicRequest)
        {
            IRequest request = new DefaultRequest(publicRequest, "Amazon.OpsWorks");
            // X-Amz-Target routes the call to the DescribeLayers operation of the
            // 2013-02-18 OpsWorks API.
            string target = "OpsWorks_20130218.DescribeLayers";
            request.Headers["X-Amz-Target"] = target;
            request.Headers["Content-Type"] = "application/x-amz-json-1.1";
            request.HttpMethod = "POST";

            string uriResourcePath = "/";
            request.ResourcePath = uriResourcePath;
            using (StringWriter stringWriter = new StringWriter(CultureInfo.InvariantCulture))
            {
                JsonWriter writer = new JsonWriter(stringWriter);
                writer.WriteObjectStart();
                var context = new JsonMarshallerContext(request, writer);
                // Both properties are optional; only set members are serialized.
                if(publicRequest.IsSetLayerIds())
                {
                    context.Writer.WritePropertyName("LayerIds");
                    context.Writer.WriteArrayStart();
                    foreach(var publicRequestLayerIdsListValue in publicRequest.LayerIds)
                    {
                            context.Writer.Write(publicRequestLayerIdsListValue);
                    }
                    context.Writer.WriteArrayEnd();
                }

                if(publicRequest.IsSetStackId())
                {
                    context.Writer.WritePropertyName("StackId");
                    context.Writer.Write(publicRequest.StackId);
                }

        
                writer.WriteObjectEnd();
                // The finished JSON document becomes the UTF-8 request payload.
                string snippet = stringWriter.ToString();
                request.Content = System.Text.Encoding.UTF8.GetBytes(snippet);
            }


            return request;
        }


    }
}
rafd123/aws-sdk-net
sdk/src/Services/OpsWorks/Generated/Model/Internal/MarshallTransformations/DescribeLayersRequestMarshaller.cs
C#
apache-2.0
3,593
<?xml version='1.0' encoding='UTF-8'?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd"> <html> <head> <title>DistributedDbscan - org.alitouka.spark.dbscan.DistributedDbscan</title> <meta name="description" content="DistributedDbscan - org.alitouka.spark.dbscan.DistributedDbscan" /> <meta name="keywords" content="DistributedDbscan org.alitouka.spark.dbscan.DistributedDbscan" /> <meta http-equiv="content-type" content="text/html; charset=UTF-8" /> <link href="../../../../lib/template.css" media="screen" type="text/css" rel="stylesheet" /> <link href="../../../../lib/diagrams.css" media="screen" type="text/css" rel="stylesheet" id="diagrams-css" /> <script type="text/javascript"> if(top === self) { var url = '../../../../index.html'; var hash = 'org.alitouka.spark.dbscan.DistributedDbscan'; var anchor = window.location.hash; var anchor_opt = ''; if (anchor.length >= 1) anchor_opt = '@' + anchor.substring(1); window.location.href = url + '#' + hash + anchor_opt; } </script> </head> <body class="type"> <div id="definition"> <img src="../../../../lib/class_big.png" /> <p id="owner"><a href="../../../package.html" class="extype" name="org">org</a>.<a href="../../package.html" class="extype" name="org.alitouka">alitouka</a>.<a href="../package.html" class="extype" name="org.alitouka.spark">spark</a>.<a href="package.html" class="extype" name="org.alitouka.spark.dbscan">dbscan</a></p> <h1>DistributedDbscan</h1> </div> <h4 id="signature" class="signature"> <span class="modifier_kind"> <span class="modifier"></span> <span class="kind">class</span> </span> <span class="symbol"> <span class="name">DistributedDbscan</span><span class="result"> extends <a href="Dbscan.html" class="extype" name="org.alitouka.spark.dbscan.Dbscan">Dbscan</a> with <span class="extype" name="org.alitouka.spark.dbscan.spatial.DistanceCalculation">DistanceCalculation</span> with <span class="extype" name="org.apache.spark.Logging">Logging</span></span> 
</span> </h4> <div id="comment" class="fullcommenttop"><div class="comment cmt"><p>Implementation of the DBSCAN algorithm which is capable of parallel processing of the input data.</p><p> Parallel processing consists of 4 high-level steps:</p><ul><li>Density-based data partitioning</li><li>Calculating neighbors of each point</li><li>Clustering data in each partition separately</li><li>Merging clusters found in different partitions </li></ul></div><div class="toggleContainer block"> <span class="toggle">Linear Supertypes</span> <div class="superTypes hiddenContent"><span class="extype" name="org.alitouka.spark.dbscan.spatial.DistanceCalculation">DistanceCalculation</span>, <a href="Dbscan.html" class="extype" name="org.alitouka.spark.dbscan.Dbscan">Dbscan</a>, <span class="extype" name="org.apache.spark.Logging">Logging</span>, <span class="extype" name="scala.Serializable">Serializable</span>, <span class="extype" name="java.io.Serializable">Serializable</span>, <span class="extype" name="scala.AnyRef">AnyRef</span>, <span class="extype" name="scala.Any">Any</span></div> </div></div> <div id="mbrsel"> <div id="textfilter"><span class="pre"></span><span class="input"><input id="mbrsel-input" type="text" accesskey="/" /></span><span class="post"></span></div> <div id="order"> <span class="filtertype">Ordering</span> <ol> <li class="alpha in"><span>Alphabetic</span></li> <li class="inherit out"><span>By inheritance</span></li> </ol> </div> <div id="ancestors"> <span class="filtertype">Inherited<br /> </span> <ol id="linearization"> <li class="in" name="org.alitouka.spark.dbscan.DistributedDbscan"><span>DistributedDbscan</span></li><li class="in" name="org.alitouka.spark.dbscan.spatial.DistanceCalculation"><span>DistanceCalculation</span></li><li class="in" name="org.alitouka.spark.dbscan.Dbscan"><span>Dbscan</span></li><li class="in" name="org.apache.spark.Logging"><span>Logging</span></li><li class="in" name="scala.Serializable"><span>Serializable</span></li><li 
class="in" name="java.io.Serializable"><span>Serializable</span></li><li class="in" name="scala.AnyRef"><span>AnyRef</span></li><li class="in" name="scala.Any"><span>Any</span></li> </ol> </div><div id="ancestors"> <span class="filtertype"></span> <ol> <li class="hideall out"><span>Hide All</span></li> <li class="showall in"><span>Show all</span></li> </ol> <a href="http://docs.scala-lang.org/overviews/scaladoc/usage.html#members" target="_blank">Learn more about member selection</a> </div> <div id="visbl"> <span class="filtertype">Visibility</span> <ol><li class="public in"><span>Public</span></li><li class="all out"><span>All</span></li></ol> </div> </div> <div id="template"> <div id="allMembers"> <div id="constructors" class="members"> <h3>Instance Constructors</h3> <ol><li name="org.alitouka.spark.dbscan.DistributedDbscan#&lt;init&gt;" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped"> <a id="&lt;init&gt;(settings:org.alitouka.spark.dbscan.DbscanSettings,partitioningSettings:org.alitouka.spark.dbscan.spatial.rdd.PartitioningSettings):org.alitouka.spark.dbscan.DistributedDbscan"></a> <a id="&lt;init&gt;:DistributedDbscan"></a> <h4 class="signature"> <span class="modifier_kind"> <span class="modifier"></span> <span class="kind">new</span> </span> <span class="symbol"> <span class="name">DistributedDbscan</span><span class="params">(<span name="settings">settings: <a href="DbscanSettings.html" class="extype" name="org.alitouka.spark.dbscan.DbscanSettings">DbscanSettings</a></span>, <span name="partitioningSettings">partitioningSettings: <a href="spatial/rdd/PartitioningSettings.html" class="extype" name="org.alitouka.spark.dbscan.spatial.rdd.PartitioningSettings">PartitioningSettings</a> = <span class="defval" name="new org.alitouka.spark.dbscan.spatial.rdd.PartitioningSettings(rdd.this.PartitioningSettings.&lt;init&gt;$default$1, rdd.this.PartitioningSettings.&lt;init&gt;$default$2, rdd.this.PartitioningSettings.&lt;init&gt;$default$3, 
rdd.this.PartitioningSettings.&lt;init&gt;$default$4)">...</span></span>)</span> </span> </h4> <p class="shortcomment cmt"></p><div class="fullcomment"><div class="comment cmt"></div><dl class="paramcmts block"><dt class="param">settings</dt><dd class="cmt"><p>Parameters of the algorithm. See <a href="DbscanSettings.html" class="extype" name="org.alitouka.spark.dbscan.DbscanSettings">org.alitouka.spark.dbscan.DbscanSettings</a> for details</p></dd><dt class="param">partitioningSettings</dt><dd class="cmt"><p>Partitioning settings. See <a href="spatial/rdd/PartitioningSettings.html" class="extype" name="org.alitouka.spark.dbscan.spatial.rdd.PartitioningSettings">org.alitouka.spark.dbscan.spatial.rdd.PartitioningSettings</a> for details </p></dd></dl></div> </li></ol> </div> <div id="values" class="values members"> <h3>Value Members</h3> <ol><li name="scala.AnyRef#!=" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped"> <a id="!=(x$1:AnyRef):Boolean"></a> <a id="!=(AnyRef):Boolean"></a> <h4 class="signature"> <span class="modifier_kind"> <span class="modifier">final </span> <span class="kind">def</span> </span> <span class="symbol"> <span title="gt4s: $bang$eq" class="name">!=</span><span class="params">(<span name="arg0">arg0: <span class="extype" name="scala.AnyRef">AnyRef</span></span>)</span><span class="result">: <span class="extype" name="scala.Boolean">Boolean</span></span> </span> </h4> <div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd></dl></div> </li><li name="scala.Any#!=" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped"> <a id="!=(x$1:Any):Boolean"></a> <a id="!=(Any):Boolean"></a> <h4 class="signature"> <span class="modifier_kind"> <span class="modifier">final </span> <span class="kind">def</span> </span> <span class="symbol"> <span title="gt4s: $bang$eq" class="name">!=</span><span class="params">(<span name="arg0">arg0: <span class="extype" 
name="scala.Any">Any</span></span>)</span><span class="result">: <span class="extype" name="scala.Boolean">Boolean</span></span> </span> </h4> <div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>Any</dd></dl></div> </li><li name="scala.AnyRef###" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped"> <a id="##():Int"></a> <a id="##():Int"></a> <h4 class="signature"> <span class="modifier_kind"> <span class="modifier">final </span> <span class="kind">def</span> </span> <span class="symbol"> <span title="gt4s: $hash$hash" class="name">##</span><span class="params">()</span><span class="result">: <span class="extype" name="scala.Int">Int</span></span> </span> </h4> <div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef → Any</dd></dl></div> </li><li name="scala.AnyRef#==" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped"> <a id="==(x$1:AnyRef):Boolean"></a> <a id="==(AnyRef):Boolean"></a> <h4 class="signature"> <span class="modifier_kind"> <span class="modifier">final </span> <span class="kind">def</span> </span> <span class="symbol"> <span title="gt4s: $eq$eq" class="name">==</span><span class="params">(<span name="arg0">arg0: <span class="extype" name="scala.AnyRef">AnyRef</span></span>)</span><span class="result">: <span class="extype" name="scala.Boolean">Boolean</span></span> </span> </h4> <div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd></dl></div> </li><li name="scala.Any#==" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped"> <a id="==(x$1:Any):Boolean"></a> <a id="==(Any):Boolean"></a> <h4 class="signature"> <span class="modifier_kind"> <span class="modifier">final </span> <span class="kind">def</span> </span> <span class="symbol"> <span title="gt4s: $eq$eq" class="name">==</span><span class="params">(<span name="arg0">arg0: <span class="extype" name="scala.Any">Any</span></span>)</span><span 
class="result">: <span class="extype" name="scala.Boolean">Boolean</span></span> </span> </h4> <div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>Any</dd></dl></div> </li><li name="scala.Any#asInstanceOf" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped"> <a id="asInstanceOf[T0]:T0"></a> <a id="asInstanceOf[T0]:T0"></a> <h4 class="signature"> <span class="modifier_kind"> <span class="modifier">final </span> <span class="kind">def</span> </span> <span class="symbol"> <span class="name">asInstanceOf</span><span class="tparams">[<span name="T0">T0</span>]</span><span class="result">: <span class="extype" name="scala.Any.asInstanceOf.T0">T0</span></span> </span> </h4> <div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>Any</dd></dl></div> </li><li name="org.alitouka.spark.dbscan.spatial.DistanceCalculation#calculateDistance" visbl="prt" data-isabs="false" fullComment="yes" group="Ungrouped"> <a id="calculateDistance(pt1:org.alitouka.spark.dbscan.PointCoordinates,pt2:org.alitouka.spark.dbscan.PointCoordinates)(implicitdistanceMeasure:org.apache.commons.math3.ml.distance.DistanceMeasure):Double"></a> <a id="calculateDistance(PointCoordinates,PointCoordinates)(DistanceMeasure):Double"></a> <h4 class="signature"> <span class="modifier_kind"> <span class="modifier"></span> <span class="kind">def</span> </span> <span class="symbol"> <span class="name">calculateDistance</span><span class="params">(<span name="pt1">pt1: <a href="package.html#PointCoordinates=collection.mutable.WrappedArray.ofDouble" class="extmbr" name="org.alitouka.spark.dbscan.PointCoordinates">PointCoordinates</a></span>, <span name="pt2">pt2: <a href="package.html#PointCoordinates=collection.mutable.WrappedArray.ofDouble" class="extmbr" name="org.alitouka.spark.dbscan.PointCoordinates">PointCoordinates</a></span>)</span><span class="params">(<span class="implicit">implicit </span><span 
name="distanceMeasure">distanceMeasure: <span class="extype" name="org.apache.commons.math3.ml.distance.DistanceMeasure">DistanceMeasure</span></span>)</span><span class="result">: <span class="extype" name="scala.Double">Double</span></span> </span> </h4> <div class="fullcomment"><dl class="attributes block"> <dt>Attributes</dt><dd>protected </dd><dt>Definition Classes</dt><dd>DistanceCalculation</dd></dl></div> </li><li name="org.alitouka.spark.dbscan.spatial.DistanceCalculation#calculateDistance" visbl="prt" data-isabs="false" fullComment="yes" group="Ungrouped"> <a id="calculateDistance(pt1:org.alitouka.spark.dbscan.spatial.Point,pt2:org.alitouka.spark.dbscan.spatial.Point)(implicitdistanceMeasure:org.apache.commons.math3.ml.distance.DistanceMeasure):Double"></a> <a id="calculateDistance(Point,Point)(DistanceMeasure):Double"></a> <h4 class="signature"> <span class="modifier_kind"> <span class="modifier"></span> <span class="kind">def</span> </span> <span class="symbol"> <span class="name">calculateDistance</span><span class="params">(<span name="pt1">pt1: <a href="spatial/Point.html" class="extype" name="org.alitouka.spark.dbscan.spatial.Point">Point</a></span>, <span name="pt2">pt2: <a href="spatial/Point.html" class="extype" name="org.alitouka.spark.dbscan.spatial.Point">Point</a></span>)</span><span class="params">(<span class="implicit">implicit </span><span name="distanceMeasure">distanceMeasure: <span class="extype" name="org.apache.commons.math3.ml.distance.DistanceMeasure">DistanceMeasure</span></span>)</span><span class="result">: <span class="extype" name="scala.Double">Double</span></span> </span> </h4> <div class="fullcomment"><dl class="attributes block"> <dt>Attributes</dt><dd>protected </dd><dt>Definition Classes</dt><dd>DistanceCalculation</dd></dl></div> </li><li name="scala.AnyRef#clone" visbl="prt" data-isabs="false" fullComment="yes" group="Ungrouped"> <a id="clone():Object"></a> <a id="clone():AnyRef"></a> <h4 class="signature"> <span 
class="modifier_kind"> <span class="modifier"></span> <span class="kind">def</span> </span> <span class="symbol"> <span class="name">clone</span><span class="params">()</span><span class="result">: <span class="extype" name="scala.AnyRef">AnyRef</span></span> </span> </h4> <div class="fullcomment"><dl class="attributes block"> <dt>Attributes</dt><dd>protected[<a href="../../../../java$lang.html" class="extype" name="java.lang">java.lang</a>] </dd><dt>Definition Classes</dt><dd>AnyRef</dd><dt>Annotations</dt><dd> <span class="name">@throws</span><span class="args">(<span> <span class="defval" name="classOf[java.lang.CloneNotSupportedException]">...</span> </span>)</span> </dd></dl></div> </li><li name="org.alitouka.spark.dbscan.Dbscan#distanceAnalyzer" visbl="prt" data-isabs="false" fullComment="yes" group="Ungrouped"> <a id="distanceAnalyzer:org.alitouka.spark.dbscan.spatial.DistanceAnalyzer"></a> <a id="distanceAnalyzer:DistanceAnalyzer"></a> <h4 class="signature"> <span class="modifier_kind"> <span class="modifier"></span> <span class="kind">val</span> </span> <span class="symbol"> <span class="name">distanceAnalyzer</span><span class="result">: <span class="extype" name="org.alitouka.spark.dbscan.spatial.DistanceAnalyzer">DistanceAnalyzer</span></span> </span> </h4> <div class="fullcomment"><dl class="attributes block"> <dt>Attributes</dt><dd>protected </dd><dt>Definition Classes</dt><dd><a href="Dbscan.html" class="extype" name="org.alitouka.spark.dbscan.Dbscan">Dbscan</a></dd></dl></div> </li><li name="scala.AnyRef#eq" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped"> <a id="eq(x$1:AnyRef):Boolean"></a> <a id="eq(AnyRef):Boolean"></a> <h4 class="signature"> <span class="modifier_kind"> <span class="modifier">final </span> <span class="kind">def</span> </span> <span class="symbol"> <span class="name">eq</span><span class="params">(<span name="arg0">arg0: <span class="extype" name="scala.AnyRef">AnyRef</span></span>)</span><span 
class="result">: <span class="extype" name="scala.Boolean">Boolean</span></span> </span> </h4> <div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd></dl></div> </li><li name="scala.AnyRef#equals" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped"> <a id="equals(x$1:Any):Boolean"></a> <a id="equals(Any):Boolean"></a> <h4 class="signature"> <span class="modifier_kind"> <span class="modifier"></span> <span class="kind">def</span> </span> <span class="symbol"> <span class="name">equals</span><span class="params">(<span name="arg0">arg0: <span class="extype" name="scala.Any">Any</span></span>)</span><span class="result">: <span class="extype" name="scala.Boolean">Boolean</span></span> </span> </h4> <div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef → Any</dd></dl></div> </li><li name="scala.AnyRef#finalize" visbl="prt" data-isabs="false" fullComment="yes" group="Ungrouped"> <a id="finalize():Unit"></a> <a id="finalize():Unit"></a> <h4 class="signature"> <span class="modifier_kind"> <span class="modifier"></span> <span class="kind">def</span> </span> <span class="symbol"> <span class="name">finalize</span><span class="params">()</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span> </span> </h4> <div class="fullcomment"><dl class="attributes block"> <dt>Attributes</dt><dd>protected[<a href="../../../../java$lang.html" class="extype" name="java.lang">java.lang</a>] </dd><dt>Definition Classes</dt><dd>AnyRef</dd><dt>Annotations</dt><dd> <span class="name">@throws</span><span class="args">(<span> <span class="symbol">classOf[java.lang.Throwable]</span> </span>)</span> </dd></dl></div> </li><li name="scala.AnyRef#getClass" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped"> <a id="getClass():Class[_]"></a> <a id="getClass():Class[_]"></a> <h4 class="signature"> <span class="modifier_kind"> <span class="modifier">final </span> 
<span class="kind">def</span> </span> <span class="symbol"> <span class="name">getClass</span><span class="params">()</span><span class="result">: <span class="extype" name="java.lang.Class">Class</span>[_]</span> </span> </h4> <div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef → Any</dd></dl></div> </li><li name="scala.AnyRef#hashCode" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped"> <a id="hashCode():Int"></a> <a id="hashCode():Int"></a> <h4 class="signature"> <span class="modifier_kind"> <span class="modifier"></span> <span class="kind">def</span> </span> <span class="symbol"> <span class="name">hashCode</span><span class="params">()</span><span class="result">: <span class="extype" name="scala.Int">Int</span></span> </span> </h4> <div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef → Any</dd></dl></div> </li><li name="scala.Any#isInstanceOf" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped"> <a id="isInstanceOf[T0]:Boolean"></a> <a id="isInstanceOf[T0]:Boolean"></a> <h4 class="signature"> <span class="modifier_kind"> <span class="modifier">final </span> <span class="kind">def</span> </span> <span class="symbol"> <span class="name">isInstanceOf</span><span class="tparams">[<span name="T0">T0</span>]</span><span class="result">: <span class="extype" name="scala.Boolean">Boolean</span></span> </span> </h4> <div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>Any</dd></dl></div> </li><li name="org.alitouka.spark.dbscan.spatial.DistanceCalculation#isPointCloseToAnyBound" visbl="prt" data-isabs="false" fullComment="yes" group="Ungrouped"> <a id="isPointCloseToAnyBound(pt:org.alitouka.spark.dbscan.spatial.Point,box:org.alitouka.spark.dbscan.spatial.Box,threshold:Double):Boolean"></a> <a id="isPointCloseToAnyBound(Point,Box,Double):Boolean"></a> <h4 class="signature"> <span class="modifier_kind"> <span 
class="modifier"></span> <span class="kind">def</span> </span> <span class="symbol"> <span class="name">isPointCloseToAnyBound</span><span class="params">(<span name="pt">pt: <a href="spatial/Point.html" class="extype" name="org.alitouka.spark.dbscan.spatial.Point">Point</a></span>, <span name="box">box: <span class="extype" name="org.alitouka.spark.dbscan.spatial.Box">Box</span></span>, <span name="threshold">threshold: <span class="extype" name="scala.Double">Double</span></span>)</span><span class="result">: <span class="extype" name="scala.Boolean">Boolean</span></span> </span> </h4> <div class="fullcomment"><dl class="attributes block"> <dt>Attributes</dt><dd>protected </dd><dt>Definition Classes</dt><dd>DistanceCalculation</dd></dl></div> </li><li name="org.alitouka.spark.dbscan.spatial.DistanceCalculation#isPointCloseToBound" visbl="prt" data-isabs="false" fullComment="yes" group="Ungrouped"> <a id="isPointCloseToBound(pt:org.alitouka.spark.dbscan.spatial.Point,bound:org.alitouka.spark.dbscan.spatial.BoundsInOneDimension,dimension:Int,threshold:Double):Boolean"></a> <a id="isPointCloseToBound(Point,BoundsInOneDimension,Int,Double):Boolean"></a> <h4 class="signature"> <span class="modifier_kind"> <span class="modifier"></span> <span class="kind">def</span> </span> <span class="symbol"> <span class="name">isPointCloseToBound</span><span class="params">(<span name="pt">pt: <a href="spatial/Point.html" class="extype" name="org.alitouka.spark.dbscan.spatial.Point">Point</a></span>, <span name="bound">bound: <span class="extype" name="org.alitouka.spark.dbscan.spatial.BoundsInOneDimension">BoundsInOneDimension</span></span>, <span name="dimension">dimension: <span class="extype" name="scala.Int">Int</span></span>, <span name="threshold">threshold: <span class="extype" name="scala.Double">Double</span></span>)</span><span class="result">: <span class="extype" name="scala.Boolean">Boolean</span></span> </span> </h4> <div class="fullcomment"><dl class="attributes 
block"> <dt>Attributes</dt><dd>protected </dd><dt>Definition Classes</dt><dd>DistanceCalculation</dd></dl></div> </li><li name="org.apache.spark.Logging#isTraceEnabled" visbl="prt" data-isabs="false" fullComment="yes" group="Ungrouped"> <a id="isTraceEnabled():Boolean"></a> <a id="isTraceEnabled():Boolean"></a> <h4 class="signature"> <span class="modifier_kind"> <span class="modifier"></span> <span class="kind">def</span> </span> <span class="symbol"> <span class="name">isTraceEnabled</span><span class="params">()</span><span class="result">: <span class="extype" name="scala.Boolean">Boolean</span></span> </span> </h4> <div class="fullcomment"><dl class="attributes block"> <dt>Attributes</dt><dd>protected </dd><dt>Definition Classes</dt><dd>Logging</dd></dl></div> </li><li name="org.apache.spark.Logging#log" visbl="prt" data-isabs="false" fullComment="yes" group="Ungrouped"> <a id="log:org.slf4j.Logger"></a> <a id="log:Logger"></a> <h4 class="signature"> <span class="modifier_kind"> <span class="modifier"></span> <span class="kind">def</span> </span> <span class="symbol"> <span class="name">log</span><span class="result">: <span class="extype" name="org.slf4j.Logger">Logger</span></span> </span> </h4> <div class="fullcomment"><dl class="attributes block"> <dt>Attributes</dt><dd>protected </dd><dt>Definition Classes</dt><dd>Logging</dd></dl></div> </li><li name="org.apache.spark.Logging#logDebug" visbl="prt" data-isabs="false" fullComment="yes" group="Ungrouped"> <a id="logDebug(msg:&lt;?&gt;,throwable:&lt;?&gt;):Unit"></a> <a id="logDebug(⇒String,Throwable):Unit"></a> <h4 class="signature"> <span class="modifier_kind"> <span class="modifier"></span> <span class="kind">def</span> </span> <span class="symbol"> <span class="name">logDebug</span><span class="params">(<span name="msg">msg: ⇒ <span class="extype" name="scala.Predef.String">String</span></span>, <span name="throwable">throwable: <span class="extype" 
name="scala.Throwable">Throwable</span></span>)</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span> </span> </h4> <div class="fullcomment"><dl class="attributes block"> <dt>Attributes</dt><dd>protected </dd><dt>Definition Classes</dt><dd>Logging</dd></dl></div> </li><li name="org.apache.spark.Logging#logDebug" visbl="prt" data-isabs="false" fullComment="yes" group="Ungrouped"> <a id="logDebug(msg:&lt;?&gt;):Unit"></a> <a id="logDebug(⇒String):Unit"></a> <h4 class="signature"> <span class="modifier_kind"> <span class="modifier"></span> <span class="kind">def</span> </span> <span class="symbol"> <span class="name">logDebug</span><span class="params">(<span name="msg">msg: ⇒ <span class="extype" name="scala.Predef.String">String</span></span>)</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span> </span> </h4> <div class="fullcomment"><dl class="attributes block"> <dt>Attributes</dt><dd>protected </dd><dt>Definition Classes</dt><dd>Logging</dd></dl></div> </li><li name="org.apache.spark.Logging#logError" visbl="prt" data-isabs="false" fullComment="yes" group="Ungrouped"> <a id="logError(msg:&lt;?&gt;,throwable:&lt;?&gt;):Unit"></a> <a id="logError(⇒String,Throwable):Unit"></a> <h4 class="signature"> <span class="modifier_kind"> <span class="modifier"></span> <span class="kind">def</span> </span> <span class="symbol"> <span class="name">logError</span><span class="params">(<span name="msg">msg: ⇒ <span class="extype" name="scala.Predef.String">String</span></span>, <span name="throwable">throwable: <span class="extype" name="scala.Throwable">Throwable</span></span>)</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span> </span> </h4> <div class="fullcomment"><dl class="attributes block"> <dt>Attributes</dt><dd>protected </dd><dt>Definition Classes</dt><dd>Logging</dd></dl></div> </li><li name="org.apache.spark.Logging#logError" visbl="prt" data-isabs="false" 
fullComment="yes" group="Ungrouped"> <a id="logError(msg:&lt;?&gt;):Unit"></a> <a id="logError(⇒String):Unit"></a> <h4 class="signature"> <span class="modifier_kind"> <span class="modifier"></span> <span class="kind">def</span> </span> <span class="symbol"> <span class="name">logError</span><span class="params">(<span name="msg">msg: ⇒ <span class="extype" name="scala.Predef.String">String</span></span>)</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span> </span> </h4> <div class="fullcomment"><dl class="attributes block"> <dt>Attributes</dt><dd>protected </dd><dt>Definition Classes</dt><dd>Logging</dd></dl></div> </li><li name="org.apache.spark.Logging#logInfo" visbl="prt" data-isabs="false" fullComment="yes" group="Ungrouped"> <a id="logInfo(msg:=&gt;String,throwable:Throwable):Unit"></a> <a id="logInfo(⇒String,Throwable):Unit"></a> <h4 class="signature"> <span class="modifier_kind"> <span class="modifier"></span> <span class="kind">def</span> </span> <span class="symbol"> <span class="name">logInfo</span><span class="params">(<span name="msg">msg: ⇒ <span class="extype" name="scala.Predef.String">String</span></span>, <span name="throwable">throwable: <span class="extype" name="scala.Throwable">Throwable</span></span>)</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span> </span> </h4> <div class="fullcomment"><dl class="attributes block"> <dt>Attributes</dt><dd>protected </dd><dt>Definition Classes</dt><dd>Logging</dd></dl></div> </li><li name="org.apache.spark.Logging#logInfo" visbl="prt" data-isabs="false" fullComment="yes" group="Ungrouped"> <a id="logInfo(msg:=&gt;String):Unit"></a> <a id="logInfo(⇒String):Unit"></a> <h4 class="signature"> <span class="modifier_kind"> <span class="modifier"></span> <span class="kind">def</span> </span> <span class="symbol"> <span class="name">logInfo</span><span class="params">(<span name="msg">msg: ⇒ <span class="extype" 
name="scala.Predef.String">String</span></span>)</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span> </span> </h4> <div class="fullcomment"><dl class="attributes block"> <dt>Attributes</dt><dd>protected </dd><dt>Definition Classes</dt><dd>Logging</dd></dl></div> </li><li name="org.apache.spark.Logging#logTrace" visbl="prt" data-isabs="false" fullComment="yes" group="Ungrouped"> <a id="logTrace(msg:&lt;?&gt;,throwable:&lt;?&gt;):Unit"></a> <a id="logTrace(⇒String,Throwable):Unit"></a> <h4 class="signature"> <span class="modifier_kind"> <span class="modifier"></span> <span class="kind">def</span> </span> <span class="symbol"> <span class="name">logTrace</span><span class="params">(<span name="msg">msg: ⇒ <span class="extype" name="scala.Predef.String">String</span></span>, <span name="throwable">throwable: <span class="extype" name="scala.Throwable">Throwable</span></span>)</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span> </span> </h4> <div class="fullcomment"><dl class="attributes block"> <dt>Attributes</dt><dd>protected </dd><dt>Definition Classes</dt><dd>Logging</dd></dl></div> </li><li name="org.apache.spark.Logging#logTrace" visbl="prt" data-isabs="false" fullComment="yes" group="Ungrouped"> <a id="logTrace(msg:&lt;?&gt;):Unit"></a> <a id="logTrace(⇒String):Unit"></a> <h4 class="signature"> <span class="modifier_kind"> <span class="modifier"></span> <span class="kind">def</span> </span> <span class="symbol"> <span class="name">logTrace</span><span class="params">(<span name="msg">msg: ⇒ <span class="extype" name="scala.Predef.String">String</span></span>)</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span> </span> </h4> <div class="fullcomment"><dl class="attributes block"> <dt>Attributes</dt><dd>protected </dd><dt>Definition Classes</dt><dd>Logging</dd></dl></div> </li><li name="org.apache.spark.Logging#logWarning" visbl="prt" data-isabs="false" 
fullComment="yes" group="Ungrouped"> <a id="logWarning(msg:&lt;?&gt;,throwable:&lt;?&gt;):Unit"></a> <a id="logWarning(⇒String,Throwable):Unit"></a> <h4 class="signature"> <span class="modifier_kind"> <span class="modifier"></span> <span class="kind">def</span> </span> <span class="symbol"> <span class="name">logWarning</span><span class="params">(<span name="msg">msg: ⇒ <span class="extype" name="scala.Predef.String">String</span></span>, <span name="throwable">throwable: <span class="extype" name="scala.Throwable">Throwable</span></span>)</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span> </span> </h4> <div class="fullcomment"><dl class="attributes block"> <dt>Attributes</dt><dd>protected </dd><dt>Definition Classes</dt><dd>Logging</dd></dl></div> </li><li name="org.apache.spark.Logging#logWarning" visbl="prt" data-isabs="false" fullComment="yes" group="Ungrouped"> <a id="logWarning(msg:&lt;?&gt;):Unit"></a> <a id="logWarning(⇒String):Unit"></a> <h4 class="signature"> <span class="modifier_kind"> <span class="modifier"></span> <span class="kind">def</span> </span> <span class="symbol"> <span class="name">logWarning</span><span class="params">(<span name="msg">msg: ⇒ <span class="extype" name="scala.Predef.String">String</span></span>)</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span> </span> </h4> <div class="fullcomment"><dl class="attributes block"> <dt>Attributes</dt><dd>protected </dd><dt>Definition Classes</dt><dd>Logging</dd></dl></div> </li><li name="scala.AnyRef#ne" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped"> <a id="ne(x$1:AnyRef):Boolean"></a> <a id="ne(AnyRef):Boolean"></a> <h4 class="signature"> <span class="modifier_kind"> <span class="modifier">final </span> <span class="kind">def</span> </span> <span class="symbol"> <span class="name">ne</span><span class="params">(<span name="arg0">arg0: <span class="extype" 
name="scala.AnyRef">AnyRef</span></span>)</span><span class="result">: <span class="extype" name="scala.Boolean">Boolean</span></span> </span> </h4> <div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd></dl></div> </li><li name="scala.AnyRef#notify" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped"> <a id="notify():Unit"></a> <a id="notify():Unit"></a> <h4 class="signature"> <span class="modifier_kind"> <span class="modifier">final </span> <span class="kind">def</span> </span> <span class="symbol"> <span class="name">notify</span><span class="params">()</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span> </span> </h4> <div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd></dl></div> </li><li name="scala.AnyRef#notifyAll" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped"> <a id="notifyAll():Unit"></a> <a id="notifyAll():Unit"></a> <h4 class="signature"> <span class="modifier_kind"> <span class="modifier">final </span> <span class="kind">def</span> </span> <span class="symbol"> <span class="name">notifyAll</span><span class="params">()</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span> </span> </h4> <div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd></dl></div> </li><li name="org.alitouka.spark.dbscan.DistributedDbscan#run" visbl="prt" data-isabs="false" fullComment="yes" group="Ungrouped"> <a id="run(data:org.alitouka.spark.dbscan.RawDataSet):org.alitouka.spark.dbscan.DbscanModel"></a> <a id="run(RawDataSet):DbscanModel"></a> <h4 class="signature"> <span class="modifier_kind"> <span class="modifier"></span> <span class="kind">def</span> </span> <span class="symbol"> <span class="name">run</span><span class="params">(<span name="data">data: <a 
href="package.html#RawDataSet=org.apache.spark.rdd.RDD[org.alitouka.spark.dbscan.spatial.Point]" class="extmbr" name="org.alitouka.spark.dbscan.RawDataSet">RawDataSet</a></span>)</span><span class="result">: <a href="DbscanModel.html" class="extype" name="org.alitouka.spark.dbscan.DbscanModel">DbscanModel</a></span> </span> </h4> <p class="shortcomment cmt">Runs the clustering algorithm </p><div class="fullcomment"><div class="comment cmt"><p>Runs the clustering algorithm </p></div><dl class="paramcmts block"><dt class="param">data</dt><dd class="cmt"><p>A data set to be clustered. See <a href="package.html#RawDataSet=org.apache.spark.rdd.RDD[org.alitouka.spark.dbscan.spatial.Point]" class="extmbr" name="org.alitouka.spark.dbscan.RawDataSet">org.alitouka.spark.dbscan.RawDataSet</a> for details</p></dd><dt>returns</dt><dd class="cmt"><p>A <a href="DbscanModel.html" class="extype" name="org.alitouka.spark.dbscan.DbscanModel">org.alitouka.spark.dbscan.DbscanModel</a> object which represents clustering results </p></dd></dl><dl class="attributes block"> <dt>Attributes</dt><dd>protected </dd><dt>Definition Classes</dt><dd><a href="" class="extype" name="org.alitouka.spark.dbscan.DistributedDbscan">DistributedDbscan</a> → <a href="Dbscan.html" class="extype" name="org.alitouka.spark.dbscan.Dbscan">Dbscan</a></dd></dl></div> </li><li name="scala.AnyRef#synchronized" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped"> <a id="synchronized[T0](x$1:=&gt;T0):T0"></a> <a id="synchronized[T0](⇒T0):T0"></a> <h4 class="signature"> <span class="modifier_kind"> <span class="modifier">final </span> <span class="kind">def</span> </span> <span class="symbol"> <span class="name">synchronized</span><span class="tparams">[<span name="T0">T0</span>]</span><span class="params">(<span name="arg0">arg0: ⇒ <span class="extype" name="java.lang.AnyRef.synchronized.T0">T0</span></span>)</span><span class="result">: <span class="extype" 
name="java.lang.AnyRef.synchronized.T0">T0</span></span> </span> </h4> <div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd></dl></div> </li><li name="scala.AnyRef#toString" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped"> <a id="toString():String"></a> <a id="toString():String"></a> <h4 class="signature"> <span class="modifier_kind"> <span class="modifier"></span> <span class="kind">def</span> </span> <span class="symbol"> <span class="name">toString</span><span class="params">()</span><span class="result">: <span class="extype" name="java.lang.String">String</span></span> </span> </h4> <div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef → Any</dd></dl></div> </li><li name="scala.AnyRef#wait" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped"> <a id="wait():Unit"></a> <a id="wait():Unit"></a> <h4 class="signature"> <span class="modifier_kind"> <span class="modifier">final </span> <span class="kind">def</span> </span> <span class="symbol"> <span class="name">wait</span><span class="params">()</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span> </span> </h4> <div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd><dt>Annotations</dt><dd> <span class="name">@throws</span><span class="args">(<span> <span class="defval" name="classOf[java.lang.InterruptedException]">...</span> </span>)</span> </dd></dl></div> </li><li name="scala.AnyRef#wait" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped"> <a id="wait(x$1:Long,x$2:Int):Unit"></a> <a id="wait(Long,Int):Unit"></a> <h4 class="signature"> <span class="modifier_kind"> <span class="modifier">final </span> <span class="kind">def</span> </span> <span class="symbol"> <span class="name">wait</span><span class="params">(<span name="arg0">arg0: <span class="extype" name="scala.Long">Long</span></span>, <span 
name="arg1">arg1: <span class="extype" name="scala.Int">Int</span></span>)</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span> </span> </h4> <div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd><dt>Annotations</dt><dd> <span class="name">@throws</span><span class="args">(<span> <span class="defval" name="classOf[java.lang.InterruptedException]">...</span> </span>)</span> </dd></dl></div> </li><li name="scala.AnyRef#wait" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped"> <a id="wait(x$1:Long):Unit"></a> <a id="wait(Long):Unit"></a> <h4 class="signature"> <span class="modifier_kind"> <span class="modifier">final </span> <span class="kind">def</span> </span> <span class="symbol"> <span class="name">wait</span><span class="params">(<span name="arg0">arg0: <span class="extype" name="scala.Long">Long</span></span>)</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span> </span> </h4> <div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd><dt>Annotations</dt><dd> <span class="name">@throws</span><span class="args">(<span> <span class="defval" name="classOf[java.lang.InterruptedException]">...</span> </span>)</span> </dd></dl></div> </li></ol> </div> </div> <div id="inheritedMembers"> <div class="parent" name="org.alitouka.spark.dbscan.spatial.DistanceCalculation"> <h3>Inherited from <span class="extype" name="org.alitouka.spark.dbscan.spatial.DistanceCalculation">DistanceCalculation</span></h3> </div><div class="parent" name="org.alitouka.spark.dbscan.Dbscan"> <h3>Inherited from <a href="Dbscan.html" class="extype" name="org.alitouka.spark.dbscan.Dbscan">Dbscan</a></h3> </div><div class="parent" name="org.apache.spark.Logging"> <h3>Inherited from <span class="extype" name="org.apache.spark.Logging">Logging</span></h3> </div><div class="parent" name="scala.Serializable"> <h3>Inherited from <span 
class="extype" name="scala.Serializable">Serializable</span></h3> </div><div class="parent" name="java.io.Serializable"> <h3>Inherited from <span class="extype" name="java.io.Serializable">Serializable</span></h3> </div><div class="parent" name="scala.AnyRef"> <h3>Inherited from <span class="extype" name="scala.AnyRef">AnyRef</span></h3> </div><div class="parent" name="scala.Any"> <h3>Inherited from <span class="extype" name="scala.Any">Any</span></h3> </div> </div> <div id="groupedMembers"> <div class="group" name="Ungrouped"> <h3>Ungrouped</h3> </div> </div> </div> <div id="tooltip"></div> <div id="footer"> </div> <script defer="defer" type="text/javascript" id="jquery-js" src="../../../../lib/jquery.js"></script><script defer="defer" type="text/javascript" id="jquery-ui-js" src="../../../../lib/jquery-ui.js"></script><script defer="defer" type="text/javascript" id="tools-tooltip-js" src="../../../../lib/tools.tooltip.js"></script><script defer="defer" type="text/javascript" id="template-js" src="../../../../lib/template.js"></script> </body> </html>
zerosign/spark_dbscan
scaladoc/org/alitouka/spark/dbscan/DistributedDbscan.html
HTML
apache-2.0
45,469
# Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import mock from oslo_utils import timeutils from nova import context from nova import db from nova import exception from nova import objects from nova.objects import fields from nova.objects import instance from nova.objects import pci_device from nova.tests.unit.objects import test_objects from nova.tests import uuidsentinel as uuids dev_dict = { 'compute_node_id': 1, 'address': 'a', 'product_id': 'p', 'vendor_id': 'v', 'numa_node': 0, 'dev_type': fields.PciDeviceType.STANDARD, 'parent_addr': None, 'status': fields.PciDeviceStatus.AVAILABLE} fake_db_dev = { 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': None, 'parent_addr': None, 'id': 1, 'compute_node_id': 1, 'address': 'a', 'vendor_id': 'v', 'product_id': 'p', 'numa_node': 0, 'dev_type': fields.PciDeviceType.STANDARD, 'status': fields.PciDeviceStatus.AVAILABLE, 'dev_id': 'i', 'label': 'l', 'instance_uuid': None, 'extra_info': '{}', 'request_id': None, } fake_db_dev_1 = { 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': None, 'id': 2, 'parent_addr': 'a', 'compute_node_id': 1, 'address': 'a1', 'vendor_id': 'v1', 'product_id': 'p1', 'numa_node': 1, 'dev_type': fields.PciDeviceType.STANDARD, 'status': fields.PciDeviceStatus.AVAILABLE, 'dev_id': 'i', 'label': 'l', 'instance_uuid': None, 'extra_info': '{}', 'request_id': None, } fake_db_dev_old = { 'created_at': 
None, 'updated_at': None, 'deleted_at': None, 'deleted': None, 'id': 2, 'parent_addr': None, 'compute_node_id': 1, 'address': 'a1', 'vendor_id': 'v1', 'product_id': 'p1', 'numa_node': 1, 'dev_type': fields.PciDeviceType.SRIOV_VF, 'status': fields.PciDeviceStatus.AVAILABLE, 'dev_id': 'i', 'label': 'l', 'instance_uuid': None, 'extra_info': '{"phys_function": "blah"}', 'request_id': None, } class _TestPciDeviceObject(object): def _create_fake_instance(self): self.inst = instance.Instance() self.inst.uuid = uuids.instance self.inst.pci_devices = pci_device.PciDeviceList() def _create_fake_pci_device(self, ctxt=None): if not ctxt: ctxt = context.get_admin_context() self.mox.StubOutWithMock(db, 'pci_device_get_by_addr') db.pci_device_get_by_addr(ctxt, 1, 'a').AndReturn(fake_db_dev) self.mox.ReplayAll() self.pci_device = pci_device.PciDevice.get_by_dev_addr(ctxt, 1, 'a') def test_create_pci_device(self): self.pci_device = pci_device.PciDevice.create(None, dev_dict) self.assertEqual(self.pci_device.product_id, 'p') self.assertEqual(self.pci_device.obj_what_changed(), set(['compute_node_id', 'product_id', 'vendor_id', 'numa_node', 'status', 'address', 'extra_info', 'dev_type', 'parent_addr'])) def test_pci_device_extra_info(self): self.dev_dict = copy.copy(dev_dict) self.dev_dict['k1'] = 'v1' self.dev_dict['k2'] = 'v2' self.pci_device = pci_device.PciDevice.create(None, self.dev_dict) extra_value = self.pci_device.extra_info self.assertEqual(extra_value.get('k1'), 'v1') self.assertEqual(set(extra_value.keys()), set(('k1', 'k2'))) self.assertEqual(self.pci_device.obj_what_changed(), set(['compute_node_id', 'address', 'product_id', 'vendor_id', 'numa_node', 'status', 'extra_info', 'dev_type', 'parent_addr'])) def test_update_device(self): self.pci_device = pci_device.PciDevice.create(None, dev_dict) self.pci_device.obj_reset_changes() changes = {'product_id': 'p2', 'vendor_id': 'v2'} self.pci_device.update_device(changes) self.assertEqual(self.pci_device.vendor_id, 'v2') 
self.assertEqual(self.pci_device.obj_what_changed(), set(['vendor_id', 'product_id', 'parent_addr'])) def test_update_device_same_value(self): self.pci_device = pci_device.PciDevice.create(None, dev_dict) self.pci_device.obj_reset_changes() changes = {'product_id': 'p', 'vendor_id': 'v2'} self.pci_device.update_device(changes) self.assertEqual(self.pci_device.product_id, 'p') self.assertEqual(self.pci_device.vendor_id, 'v2') self.assertEqual(self.pci_device.obj_what_changed(), set(['vendor_id', 'product_id', 'parent_addr'])) def test_get_by_dev_addr(self): ctxt = context.get_admin_context() self.mox.StubOutWithMock(db, 'pci_device_get_by_addr') db.pci_device_get_by_addr(ctxt, 1, 'a').AndReturn(fake_db_dev) self.mox.ReplayAll() self.pci_device = pci_device.PciDevice.get_by_dev_addr(ctxt, 1, 'a') self.assertEqual(self.pci_device.product_id, 'p') self.assertEqual(self.pci_device.obj_what_changed(), set()) def test_get_by_dev_id(self): ctxt = context.get_admin_context() self.mox.StubOutWithMock(db, 'pci_device_get_by_id') db.pci_device_get_by_id(ctxt, 1).AndReturn(fake_db_dev) self.mox.ReplayAll() self.pci_device = pci_device.PciDevice.get_by_dev_id(ctxt, 1) self.assertEqual(self.pci_device.product_id, 'p') self.assertEqual(self.pci_device.obj_what_changed(), set()) def test_from_db_obj_pre_1_5_format(self): ctxt = context.get_admin_context() fake_dev_pre_1_5 = copy.deepcopy(fake_db_dev_old) fake_dev_pre_1_5['status'] = fields.PciDeviceStatus.UNAVAILABLE dev = pci_device.PciDevice._from_db_object( ctxt, pci_device.PciDevice(), fake_dev_pre_1_5) self.assertRaises(exception.ObjectActionError, dev.obj_to_primitive, '1.4') def test_save_empty_parent_addr(self): ctxt = context.get_admin_context() dev = pci_device.PciDevice._from_db_object( ctxt, pci_device.PciDevice(), fake_db_dev) dev.parent_addr = None with mock.patch.object(db, 'pci_device_update', return_value=fake_db_dev): dev.save() self.assertIsNone(dev.parent_addr) self.assertEqual({}, dev.extra_info) def 
test_save(self): ctxt = context.get_admin_context() self._create_fake_pci_device(ctxt=ctxt) return_dev = dict(fake_db_dev, status=fields.PciDeviceStatus.AVAILABLE, instance_uuid=uuids.instance3) self.pci_device.status = fields.PciDeviceStatus.ALLOCATED self.pci_device.instance_uuid = uuids.instance2 expected_updates = dict(status=fields.PciDeviceStatus.ALLOCATED, extra_info='{}', instance_uuid=uuids.instance2) self.mox.StubOutWithMock(db, 'pci_device_update') db.pci_device_update(ctxt, 1, 'a', expected_updates).AndReturn(return_dev) self.mox.ReplayAll() self.pci_device.save() self.assertEqual(self.pci_device.status, fields.PciDeviceStatus.AVAILABLE) self.assertEqual(self.pci_device.instance_uuid, uuids.instance3) def test_save_no_extra_info(self): return_dev = dict(fake_db_dev, status=fields.PciDeviceStatus.AVAILABLE, instance_uuid=uuids.instance3) def _fake_update(ctxt, node_id, addr, updates): self.extra_info = updates.get('extra_info') return return_dev ctxt = context.get_admin_context() self.stub_out('nova.db.pci_device_update', _fake_update) self.pci_device = pci_device.PciDevice.create(None, dev_dict) self.pci_device._context = ctxt self.pci_device.save() self.assertEqual(self.extra_info, '{}') def test_save_removed(self): ctxt = context.get_admin_context() self._create_fake_pci_device(ctxt=ctxt) self.pci_device.status = fields.PciDeviceStatus.REMOVED self.mox.StubOutWithMock(db, 'pci_device_destroy') db.pci_device_destroy(ctxt, 1, 'a') self.mox.ReplayAll() self.pci_device.save() self.assertEqual(self.pci_device.status, fields.PciDeviceStatus.DELETED) def test_save_deleted(self): def _fake_destroy(ctxt, node_id, addr): self.called = True def _fake_update(ctxt, node_id, addr, updates): self.called = True self.stub_out('nova.db.pci_device_destroy', _fake_destroy) self.stub_out('nova.db.pci_device_update', _fake_update) self._create_fake_pci_device() self.pci_device.status = fields.PciDeviceStatus.DELETED self.called = False self.pci_device.save() 
self.assertFalse(self.called) def test_update_numa_node(self): self.pci_device = pci_device.PciDevice.create(None, dev_dict) self.assertEqual(0, self.pci_device.numa_node) self.dev_dict = copy.copy(dev_dict) self.dev_dict['numa_node'] = '1' self.pci_device = pci_device.PciDevice.create(None, self.dev_dict) self.assertEqual(1, self.pci_device.numa_node) def test_pci_device_equivalent(self): pci_device1 = pci_device.PciDevice.create(None, dev_dict) pci_device2 = pci_device.PciDevice.create(None, dev_dict) self.assertEqual(pci_device1, pci_device2) def test_pci_device_equivalent_with_ignore_field(self): pci_device1 = pci_device.PciDevice.create(None, dev_dict) pci_device2 = pci_device.PciDevice.create(None, dev_dict) pci_device2.updated_at = timeutils.utcnow() self.assertEqual(pci_device1, pci_device2) def test_pci_device_not_equivalent1(self): pci_device1 = pci_device.PciDevice.create(None, dev_dict) dev_dict2 = copy.copy(dev_dict) dev_dict2['address'] = 'b' pci_device2 = pci_device.PciDevice.create(None, dev_dict2) self.assertNotEqual(pci_device1, pci_device2) def test_pci_device_not_equivalent2(self): pci_device1 = pci_device.PciDevice.create(None, dev_dict) pci_device2 = pci_device.PciDevice.create(None, dev_dict) delattr(pci_device2, 'address') self.assertNotEqual(pci_device1, pci_device2) def test_pci_device_not_equivalent_with_none(self): pci_device1 = pci_device.PciDevice.create(None, dev_dict) pci_device2 = pci_device.PciDevice.create(None, dev_dict) pci_device1.instance_uuid = 'aaa' pci_device2.instance_uuid = None self.assertNotEqual(pci_device1, pci_device2) def test_pci_device_not_equivalent_with_not_pci_device(self): pci_device1 = pci_device.PciDevice.create(None, dev_dict) self.assertNotEqual(pci_device1, None) self.assertNotEqual(pci_device1, 'foo') self.assertNotEqual(pci_device1, 1) self.assertNotEqual(pci_device1, objects.PciDeviceList()) def test_claim_device(self): self._create_fake_instance() devobj = pci_device.PciDevice.create(None, dev_dict) 
devobj.claim(self.inst.uuid) self.assertEqual(devobj.status, fields.PciDeviceStatus.CLAIMED) self.assertEqual(devobj.instance_uuid, self.inst.uuid) self.assertEqual(len(self.inst.pci_devices), 0) def test_claim_device_fail(self): self._create_fake_instance() devobj = pci_device.PciDevice.create(None, dev_dict) devobj.status = fields.PciDeviceStatus.ALLOCATED self.assertRaises(exception.PciDeviceInvalidStatus, devobj.claim, self.inst) def test_allocate_device(self): self._create_fake_instance() devobj = pci_device.PciDevice.create(None, dev_dict) devobj.claim(self.inst.uuid) devobj.allocate(self.inst) self.assertEqual(devobj.status, fields.PciDeviceStatus.ALLOCATED) self.assertEqual(devobj.instance_uuid, uuids.instance) self.assertEqual(len(self.inst.pci_devices), 1) self.assertEqual(self.inst.pci_devices[0].vendor_id, 'v') self.assertEqual(self.inst.pci_devices[0].status, fields.PciDeviceStatus.ALLOCATED) def test_allocate_device_fail_status(self): self._create_fake_instance() devobj = pci_device.PciDevice.create(None, dev_dict) devobj.status = 'removed' self.assertRaises(exception.PciDeviceInvalidStatus, devobj.allocate, self.inst) def test_allocate_device_fail_owner(self): self._create_fake_instance() inst_2 = instance.Instance() inst_2.uuid = uuids.instance_2 devobj = pci_device.PciDevice.create(None, dev_dict) devobj.claim(self.inst.uuid) self.assertRaises(exception.PciDeviceInvalidOwner, devobj.allocate, inst_2) def test_free_claimed_device(self): self._create_fake_instance() devobj = pci_device.PciDevice.create(None, dev_dict) devobj.claim(self.inst.uuid) devobj.free(self.inst) self.assertEqual(devobj.status, fields.PciDeviceStatus.AVAILABLE) self.assertIsNone(devobj.instance_uuid) def test_free_allocated_device(self): self._create_fake_instance() ctx = context.get_admin_context() devobj = pci_device.PciDevice._from_db_object( ctx, pci_device.PciDevice(), fake_db_dev) devobj.claim(self.inst.uuid) devobj.allocate(self.inst) 
self.assertEqual(len(self.inst.pci_devices), 1) devobj.free(self.inst) self.assertEqual(len(self.inst.pci_devices), 0) self.assertEqual(devobj.status, fields.PciDeviceStatus.AVAILABLE) self.assertIsNone(devobj.instance_uuid) def test_free_device_fail(self): self._create_fake_instance() devobj = pci_device.PciDevice.create(None, dev_dict) devobj.status = fields.PciDeviceStatus.REMOVED self.assertRaises(exception.PciDeviceInvalidStatus, devobj.free) def test_remove_device(self): self._create_fake_instance() devobj = pci_device.PciDevice.create(None, dev_dict) devobj.remove() self.assertEqual(devobj.status, fields.PciDeviceStatus.REMOVED) self.assertIsNone(devobj.instance_uuid) def test_remove_device_fail(self): self._create_fake_instance() devobj = pci_device.PciDevice.create(None, dev_dict) devobj.claim(self.inst.uuid) self.assertRaises(exception.PciDeviceInvalidStatus, devobj.remove) class TestPciDeviceObject(test_objects._LocalTest, _TestPciDeviceObject): pass class TestPciDeviceObjectRemote(test_objects._RemoteTest, _TestPciDeviceObject): pass fake_pci_devs = [fake_db_dev, fake_db_dev_1] class _TestPciDeviceListObject(object): def test_create_pci_device_list(self): ctxt = context.get_admin_context() devobj = pci_device.PciDevice.create(ctxt, dev_dict) pci_device_list = objects.PciDeviceList( context=ctxt, objects=[devobj]) self.assertEqual(1, len(pci_device_list)) self.assertIsInstance(pci_device_list[0], pci_device.PciDevice) def test_get_by_compute_node(self): ctxt = context.get_admin_context() self.mox.StubOutWithMock(db, 'pci_device_get_all_by_node') db.pci_device_get_all_by_node(ctxt, 1).AndReturn(fake_pci_devs) self.mox.ReplayAll() devs = pci_device.PciDeviceList.get_by_compute_node(ctxt, 1) for i in range(len(fake_pci_devs)): self.assertIsInstance(devs[i], pci_device.PciDevice) self.assertEqual(fake_pci_devs[i]['vendor_id'], devs[i].vendor_id) def test_get_by_instance_uuid(self): ctxt = context.get_admin_context() fake_db_1 = dict(fake_db_dev, 
address='a1', status=fields.PciDeviceStatus.ALLOCATED, instance_uuid='1') fake_db_2 = dict(fake_db_dev, address='a2', status=fields.PciDeviceStatus.ALLOCATED, instance_uuid='1') self.mox.StubOutWithMock(db, 'pci_device_get_all_by_instance_uuid') db.pci_device_get_all_by_instance_uuid(ctxt, '1').AndReturn( [fake_db_1, fake_db_2]) self.mox.ReplayAll() devs = pci_device.PciDeviceList.get_by_instance_uuid(ctxt, '1') self.assertEqual(len(devs), 2) for i in range(len(fake_pci_devs)): self.assertIsInstance(devs[i], pci_device.PciDevice) self.assertEqual(devs[0].vendor_id, 'v') self.assertEqual(devs[1].vendor_id, 'v') class TestPciDeviceListObject(test_objects._LocalTest, _TestPciDeviceListObject): pass class TestPciDeviceListObjectRemote(test_objects._RemoteTest, _TestPciDeviceListObject): pass class _TestSRIOVPciDeviceObject(object): def _create_pci_devices(self, vf_product_id=1515, pf_product_id=1528, num_pfs=2, num_vfs=8): self.sriov_pf_devices = [] for dev in range(num_pfs): pci_dev = {'compute_node_id': 1, 'address': '0000:81:00.%d' % dev, 'vendor_id': '8086', 'product_id': '%d' % pf_product_id, 'status': 'available', 'request_id': None, 'dev_type': fields.PciDeviceType.SRIOV_PF, 'parent_addr': None, 'numa_node': 0} pci_dev_obj = objects.PciDevice.create(None, pci_dev) pci_dev_obj.id = dev + 81 pci_dev_obj.child_devices = [] self.sriov_pf_devices.append(pci_dev_obj) self.sriov_vf_devices = [] for dev in range(num_vfs): pci_dev = {'compute_node_id': 1, 'address': '0000:81:10.%d' % dev, 'vendor_id': '8086', 'product_id': '%d' % vf_product_id, 'status': 'available', 'request_id': None, 'dev_type': fields.PciDeviceType.SRIOV_VF, 'parent_addr': '0000:81:00.%d' % int(dev / 4), 'numa_node': 0} pci_dev_obj = objects.PciDevice.create(None, pci_dev) pci_dev_obj.id = dev + 1 pci_dev_obj.parent_device = self.sriov_pf_devices[int(dev / 4)] pci_dev_obj.parent_device.child_devices.append(pci_dev_obj) self.sriov_vf_devices.append(pci_dev_obj) def _create_fake_instance(self): 
self.inst = instance.Instance() self.inst.uuid = uuids.instance self.inst.pci_devices = pci_device.PciDeviceList() def _create_fake_pci_device(self, ctxt=None): if not ctxt: ctxt = context.get_admin_context() self.mox.StubOutWithMock(db, 'pci_device_get_by_addr') db.pci_device_get_by_addr(ctxt, 1, 'a').AndReturn(fake_db_dev) self.mox.ReplayAll() self.pci_device = pci_device.PciDevice.get_by_dev_addr(ctxt, 1, 'a') def _get_children_by_parent_address(self, addr): vf_devs = [] for dev in self.sriov_vf_devices: if dev.parent_addr == addr: vf_devs.append(dev) return vf_devs def _get_parent_by_address(self, addr): for dev in self.sriov_pf_devices: if dev.address == addr: return dev def test_claim_PF(self): self._create_fake_instance() self._create_pci_devices() devobj = self.sriov_pf_devices[0] devobj.claim(self.inst.uuid) self.assertEqual(devobj.status, fields.PciDeviceStatus.CLAIMED) self.assertEqual(devobj.instance_uuid, self.inst.uuid) self.assertEqual(len(self.inst.pci_devices), 0) # check if the all the dependants are UNCLAIMABLE self.assertTrue(all( [dev.status == fields.PciDeviceStatus.UNCLAIMABLE for dev in self._get_children_by_parent_address( self.sriov_pf_devices[0].address)])) def test_claim_VF(self): self._create_fake_instance() self._create_pci_devices() devobj = self.sriov_vf_devices[0] devobj.claim(self.inst.uuid) self.assertEqual(devobj.status, fields.PciDeviceStatus.CLAIMED) self.assertEqual(devobj.instance_uuid, self.inst.uuid) self.assertEqual(len(self.inst.pci_devices), 0) # check if parent device status has been changed to UNCLAIMABLE parent = self._get_parent_by_address(devobj.parent_addr) self.assertEqual(fields.PciDeviceStatus.UNCLAIMABLE, parent.status) def test_allocate_PF(self): self._create_fake_instance() self._create_pci_devices() devobj = self.sriov_pf_devices[0] devobj.claim(self.inst.uuid) devobj.allocate(self.inst) self.assertEqual(devobj.status, fields.PciDeviceStatus.ALLOCATED) self.assertEqual(devobj.instance_uuid, self.inst.uuid) 
self.assertEqual(len(self.inst.pci_devices), 1) # check if the all the dependants are UNAVAILABLE self.assertTrue(all( [dev.status == fields.PciDeviceStatus.UNAVAILABLE for dev in self._get_children_by_parent_address( self.sriov_pf_devices[0].address)])) def test_allocate_VF(self): self._create_fake_instance() self._create_pci_devices() devobj = self.sriov_vf_devices[0] devobj.claim(self.inst.uuid) devobj.allocate(self.inst) self.assertEqual(devobj.status, fields.PciDeviceStatus.ALLOCATED) self.assertEqual(devobj.instance_uuid, self.inst.uuid) self.assertEqual(len(self.inst.pci_devices), 1) # check if parent device status has been changed to UNAVAILABLE parent = self._get_parent_by_address(devobj.parent_addr) self.assertTrue(fields.PciDeviceStatus.UNAVAILABLE, parent.status) def test_claim_PF_fail(self): self._create_fake_instance() self._create_pci_devices() devobj = self.sriov_pf_devices[0] self.sriov_vf_devices[0].status = fields.PciDeviceStatus.CLAIMED self.assertRaises(exception.PciDeviceVFInvalidStatus, devobj.claim, self.inst) def test_claim_VF_fail(self): self._create_fake_instance() self._create_pci_devices() devobj = self.sriov_vf_devices[0] parent = self._get_parent_by_address(devobj.parent_addr) parent.status = fields.PciDeviceStatus.CLAIMED self.assertRaises(exception.PciDevicePFInvalidStatus, devobj.claim, self.inst) def test_allocate_PF_fail(self): self._create_fake_instance() self._create_pci_devices() devobj = self.sriov_pf_devices[0] self.sriov_vf_devices[0].status = fields.PciDeviceStatus.CLAIMED self.assertRaises(exception.PciDeviceVFInvalidStatus, devobj.allocate, self.inst) def test_allocate_VF_fail(self): self._create_fake_instance() self._create_pci_devices() devobj = self.sriov_vf_devices[0] parent = self._get_parent_by_address(devobj.parent_addr) parent.status = fields.PciDeviceStatus.CLAIMED self.assertRaises(exception.PciDevicePFInvalidStatus, devobj.allocate, self.inst) def test_free_allocated_PF(self): self._create_fake_instance() 
self._create_pci_devices() devobj = self.sriov_pf_devices[0] devobj.claim(self.inst.uuid) devobj.allocate(self.inst) devobj.free(self.inst) self.assertEqual(devobj.status, fields.PciDeviceStatus.AVAILABLE) self.assertIsNone(devobj.instance_uuid) # check if the all the dependants are AVAILABLE self.assertTrue(all( [dev.status == fields.PciDeviceStatus.AVAILABLE for dev in self._get_children_by_parent_address( self.sriov_pf_devices[0].address)])) def test_free_allocated_VF(self): self._create_fake_instance() self._create_pci_devices() vf = self.sriov_vf_devices[0] dependents = self._get_children_by_parent_address(vf.parent_addr) for devobj in dependents: devobj.claim(self.inst.uuid) devobj.allocate(self.inst) self.assertEqual(devobj.status, fields.PciDeviceStatus.ALLOCATED) for devobj in dependents[:-1]: devobj.free(self.inst) # check if parent device status is still UNAVAILABLE parent = self._get_parent_by_address(devobj.parent_addr) self.assertEqual(fields.PciDeviceStatus.UNAVAILABLE, parent.status) devobj = dependents[-1] devobj.free(self.inst) # check if parent device status is now AVAILABLE parent = self._get_parent_by_address(devobj.parent_addr) self.assertEqual(fields.PciDeviceStatus.AVAILABLE, parent.status) class TestSRIOVPciDeviceListObject(test_objects._LocalTest, _TestSRIOVPciDeviceObject): pass class TestSRIOVPciDeviceListObjectRemote(test_objects._RemoteTest, _TestSRIOVPciDeviceObject): pass
cloudbase/nova
nova/tests/unit/objects/test_pci_device.py
Python
apache-2.0
26,912
/* * Copyright 2001-2009 Artima, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.scalatest.fixture import org.scalatest._ import events.TestFailed import org.scalatest.exceptions.DuplicateTestNameException import org.scalatest.exceptions.TestFailedException import org.scalatest.exceptions.TestRegistrationClosedException class PropSpecSpec extends org.scalatest.FunSpec with PrivateMethodTester with SharedHelpers { describe("A fixture.PropSpec") { it("should return the test names in order of registration from testNames") { val a = new PropSpec { type FixtureParam = String def withFixture(test: OneArgTest) {} property("that") { fixture => } property("this") { fixture => } } expectResult(List("that", "this")) { a.testNames.iterator.toList } val b = new PropSpec { type FixtureParam = String def withFixture(test: OneArgTest) {} } expectResult(List[String]()) { b.testNames.iterator.toList } val c = new PropSpec { type FixtureParam = String def withFixture(test: OneArgTest) {} property("this") { fixture => } property("that") { fixture => } } expectResult(List("this", "that")) { c.testNames.iterator.toList } } it("should throw NotAllowedException if a duplicate test name registration is attempted") { intercept[DuplicateTestNameException] { new PropSpec { type FixtureParam = String def withFixture(test: OneArgTest) {} property("test this") { fixture => } property("test this") { fixture => } } } intercept[DuplicateTestNameException] { new PropSpec { type FixtureParam = 
String def withFixture(test: OneArgTest) {} property("test this") { fixture => } ignore("test this") { fixture => } } } intercept[DuplicateTestNameException] { new PropSpec { type FixtureParam = String def withFixture(test: OneArgTest) {} ignore("test this") { fixture => } ignore("test this") { fixture => } } } intercept[DuplicateTestNameException] { new PropSpec { type FixtureParam = String def withFixture(test: OneArgTest) {} ignore("test this") { fixture => } property("test this") { fixture => } } } } it("should pass in the fixture to every test method") { val a = new PropSpec { type FixtureParam = String val hello = "Hello, world!" def withFixture(test: OneArgTest) { test(hello) } property("this") { fixture => assert(fixture === hello) } property("that") { fixture => assert(fixture === hello) } } val rep = new EventRecordingReporter a.run(None, Args(rep)) assert(!rep.eventsReceived.exists(_.isInstanceOf[TestFailed])) } it("should throw NullPointerException if a null test tag is provided") { // test intercept[NullPointerException] { new PropSpec { type FixtureParam = String def withFixture(test: OneArgTest) {} property("hi", null) { fixture => } } } val caught = intercept[NullPointerException] { new PropSpec { type FixtureParam = String def withFixture(test: OneArgTest) {} property("hi", mytags.SlowAsMolasses, null) { fixture => } } } assert(caught.getMessage === "a test tag was null") intercept[NullPointerException] { new PropSpec { type FixtureParam = String def withFixture(test: OneArgTest) {} property("hi", mytags.SlowAsMolasses, null, mytags.WeakAsAKitten) { fixture => } } } // ignore intercept[NullPointerException] { new PropSpec { type FixtureParam = String def withFixture(test: OneArgTest) {} ignore("hi", null) { fixture => } } } val caught2 = intercept[NullPointerException] { new PropSpec { type FixtureParam = String def withFixture(test: OneArgTest) {} ignore("hi", mytags.SlowAsMolasses, null) { fixture => } } } assert(caught2.getMessage === "a test 
tag was null") intercept[NullPointerException] { new PropSpec { type FixtureParam = String def withFixture(test: OneArgTest) {} ignore("hi", mytags.SlowAsMolasses, null, mytags.WeakAsAKitten) { fixture => } } } } class TestWasCalledSuite extends PropSpec { type FixtureParam = String def withFixture(test: OneArgTest) { test("hi") } var theTestThisCalled = false var theTestThatCalled = false property("this") { fixture => theTestThisCalled = true } property("that") { fixture => theTestThatCalled = true } } it("should execute all tests when run is called with testName None") { val b = new TestWasCalledSuite b.run(None, Args(SilentReporter)) assert(b.theTestThisCalled) assert(b.theTestThatCalled) } it("should execute one test when run is called with a defined testName") { val a = new TestWasCalledSuite a.run(Some("this"), Args(SilentReporter)) assert(a.theTestThisCalled) assert(!a.theTestThatCalled) } it("should report as ignored, and not run, tests marked ignored") { val a = new PropSpec { type FixtureParam = String def withFixture(test: OneArgTest) { test("hi") } var theTestThisCalled = false var theTestThatCalled = false property("test this") { fixture => theTestThisCalled = true } property("test that") { fixture => theTestThatCalled = true } } val repA = new TestIgnoredTrackingReporter a.run(None, Args(repA)) assert(!repA.testIgnoredReceived) assert(a.theTestThisCalled) assert(a.theTestThatCalled) val b = new PropSpec { type FixtureParam = String def withFixture(test: OneArgTest) { test("hi") } var theTestThisCalled = false var theTestThatCalled = false ignore("test this") { fixture => theTestThisCalled = true } property("test that") { fixture => theTestThatCalled = true } } val repB = new TestIgnoredTrackingReporter b.run(None, Args(repB)) assert(repB.testIgnoredReceived) assert(repB.lastEvent.isDefined) assert(repB.lastEvent.get.testName endsWith "test this") assert(!b.theTestThisCalled) assert(b.theTestThatCalled) val c = new PropSpec { type FixtureParam = String 
def withFixture(test: OneArgTest) { test("hi") } var theTestThisCalled = false var theTestThatCalled = false property("test this") { fixture => theTestThisCalled = true } ignore("test that") { fixture => theTestThatCalled = true } } val repC = new TestIgnoredTrackingReporter c.run(None, Args(repC)) assert(repC.testIgnoredReceived) assert(repC.lastEvent.isDefined) assert(repC.lastEvent.get.testName endsWith "test that", repC.lastEvent.get.testName) assert(c.theTestThisCalled) assert(!c.theTestThatCalled) // The order I want is order of appearance in the file. // Will try and implement that tomorrow. Subtypes will be able to change the order. val d = new PropSpec { type FixtureParam = String def withFixture(test: OneArgTest) { test("hi") } var theTestThisCalled = false var theTestThatCalled = false ignore("test this") { fixture => theTestThisCalled = true } ignore("test that") { fixture => theTestThatCalled = true } } val repD = new TestIgnoredTrackingReporter d.run(None, Args(repD)) assert(repD.testIgnoredReceived) assert(repD.lastEvent.isDefined) assert(repD.lastEvent.get.testName endsWith "test that") // last because should be in order of appearance assert(!d.theTestThisCalled) assert(!d.theTestThatCalled) } it("should ignore a test marked as ignored if run is invoked with that testName") { // If I provide a specific testName to run, then it should ignore an Ignore on that test // method and actually invoke it. 
val e = new PropSpec { type FixtureParam = String def withFixture(test: OneArgTest) { test("hi") } var theTestThisCalled = false var theTestThatCalled = false ignore("test this") { fixture => theTestThisCalled = true } property("test that") { fixture => theTestThatCalled = true } } val repE = new TestIgnoredTrackingReporter e.run(Some("test this"), Args(repE)) assert(repE.testIgnoredReceived) assert(!e.theTestThisCalled) assert(!e.theTestThatCalled) } it("should run only those tests selected by the tags to include and exclude sets") { // Nothing is excluded val a = new PropSpec { type FixtureParam = String def withFixture(test: OneArgTest) { test("hi") } var theTestThisCalled = false var theTestThatCalled = false property("test this", mytags.SlowAsMolasses) { fixture => theTestThisCalled = true } property("test that") { fixture => theTestThatCalled = true } } val repA = new TestIgnoredTrackingReporter a.run(None, Args(repA)) assert(!repA.testIgnoredReceived) assert(a.theTestThisCalled) assert(a.theTestThatCalled) // SlowAsMolasses is included, one test should be excluded val b = new PropSpec { type FixtureParam = String def withFixture(test: OneArgTest) { test("hi") } var theTestThisCalled = false var theTestThatCalled = false property("test this", mytags.SlowAsMolasses) { fixture => theTestThisCalled = true } property("test that") { fixture => theTestThatCalled = true } } val repB = new TestIgnoredTrackingReporter b.run(None, Args(repB, Stopper.default, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set()), Map(), None, new Tracker, Set.empty)) assert(!repB.testIgnoredReceived) assert(b.theTestThisCalled) assert(!b.theTestThatCalled) // SlowAsMolasses is included, and both tests should be included val c = new PropSpec { type FixtureParam = String def withFixture(test: OneArgTest) { test("hi") } var theTestThisCalled = false var theTestThatCalled = false property("test this", mytags.SlowAsMolasses) { fixture => theTestThisCalled = true } property("test that", 
mytags.SlowAsMolasses) { fixture => theTestThatCalled = true } } val repC = new TestIgnoredTrackingReporter c.run(None, Args(repB, Stopper.default, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set()), Map(), None, new Tracker, Set.empty)) assert(!repC.testIgnoredReceived) assert(c.theTestThisCalled) assert(c.theTestThatCalled) // SlowAsMolasses is included. both tests should be included but one ignored val d = new PropSpec { type FixtureParam = String def withFixture(test: OneArgTest) { test("hi") } var theTestThisCalled = false var theTestThatCalled = false ignore("test this", mytags.SlowAsMolasses) { fixture => theTestThisCalled = true } property("test that", mytags.SlowAsMolasses) { fixture => theTestThatCalled = true } } val repD = new TestIgnoredTrackingReporter d.run(None, Args(repD, Stopper.default, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.Ignore")), Map(), None, new Tracker, Set.empty)) assert(repD.testIgnoredReceived) assert(!d.theTestThisCalled) assert(d.theTestThatCalled) // SlowAsMolasses included, FastAsLight excluded val e = new PropSpec { type FixtureParam = String def withFixture(test: OneArgTest) { test("hi") } var theTestThisCalled = false var theTestThatCalled = false var theTestTheOtherCalled = false property("test this", mytags.SlowAsMolasses, mytags.FastAsLight) { fixture => theTestThisCalled = true } property("test that", mytags.SlowAsMolasses) { fixture => theTestThatCalled = true } property("test the other") { fixture => theTestTheOtherCalled = true } } val repE = new TestIgnoredTrackingReporter e.run(None, Args(repE, Stopper.default, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.FastAsLight")), Map(), None, new Tracker, Set.empty)) assert(!repE.testIgnoredReceived) assert(!e.theTestThisCalled) assert(e.theTestThatCalled) assert(!e.theTestTheOtherCalled) // An Ignored test that was both included and excluded should not generate a TestIgnored event val f = new PropSpec { type 
FixtureParam = String def withFixture(test: OneArgTest) { test("hi") } var theTestThisCalled = false var theTestThatCalled = false var theTestTheOtherCalled = false ignore("test this", mytags.SlowAsMolasses, mytags.FastAsLight) { fixture => theTestThisCalled = true } property("test that", mytags.SlowAsMolasses) { fixture => theTestThatCalled = true } property("test the other") { fixture => theTestTheOtherCalled = true } } val repF = new TestIgnoredTrackingReporter f.run(None, Args(repF, Stopper.default, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.FastAsLight")), Map(), None, new Tracker, Set.empty)) assert(!repF.testIgnoredReceived) assert(!f.theTestThisCalled) assert(f.theTestThatCalled) assert(!f.theTestTheOtherCalled) // An Ignored test that was not included should not generate a TestIgnored event val g = new PropSpec { type FixtureParam = String def withFixture(test: OneArgTest) { test("hi") } var theTestThisCalled = false var theTestThatCalled = false var theTestTheOtherCalled = false property("test this", mytags.SlowAsMolasses, mytags.FastAsLight) { fixture => theTestThisCalled = true } property("test that", mytags.SlowAsMolasses) { fixture => theTestThatCalled = true } ignore("test the other") { fixture => theTestTheOtherCalled = true } } val repG = new TestIgnoredTrackingReporter g.run(None, Args(repG, Stopper.default, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.FastAsLight")), Map(), None, new Tracker, Set.empty)) assert(!repG.testIgnoredReceived) assert(!g.theTestThisCalled) assert(g.theTestThatCalled) assert(!g.theTestTheOtherCalled) // No tagsToInclude set, FastAsLight excluded val h = new PropSpec { type FixtureParam = String def withFixture(test: OneArgTest) { test("hi") } var theTestThisCalled = false var theTestThatCalled = false var theTestTheOtherCalled = false property("test this", mytags.SlowAsMolasses, mytags.FastAsLight) { fixture => theTestThisCalled = true } property("test that", 
mytags.SlowAsMolasses) { fixture => theTestThatCalled = true } property("test the other") { fixture => theTestTheOtherCalled = true } } val repH = new TestIgnoredTrackingReporter h.run(None, Args(repH, Stopper.default, Filter(None, Set("org.scalatest.FastAsLight")), Map(), None, new Tracker, Set.empty)) assert(!repH.testIgnoredReceived) assert(!h.theTestThisCalled) assert(h.theTestThatCalled) assert(h.theTestTheOtherCalled) // No tagsToInclude set, SlowAsMolasses excluded val i = new PropSpec { type FixtureParam = String def withFixture(test: OneArgTest) { test("hi") } var theTestThisCalled = false var theTestThatCalled = false var theTestTheOtherCalled = false property("test this", mytags.SlowAsMolasses, mytags.FastAsLight) { fixture => theTestThisCalled = true } property("test that", mytags.SlowAsMolasses) { fixture => theTestThatCalled = true } property("test the other") { fixture => theTestTheOtherCalled = true } } val repI = new TestIgnoredTrackingReporter i.run(None, Args(repI, Stopper.default, Filter(None, Set("org.scalatest.SlowAsMolasses")), Map(), None, new Tracker, Set.empty)) assert(!repI.testIgnoredReceived) assert(!i.theTestThisCalled) assert(!i.theTestThatCalled) assert(i.theTestTheOtherCalled) // No tagsToInclude set, SlowAsMolasses excluded, TestIgnored should not be received on excluded ones val j = new PropSpec { type FixtureParam = String def withFixture(test: OneArgTest) { test("hi") } var theTestThisCalled = false var theTestThatCalled = false var theTestTheOtherCalled = false ignore("test this", mytags.SlowAsMolasses, mytags.FastAsLight) { fixture => theTestThisCalled = true } ignore("test that", mytags.SlowAsMolasses) { fixture => theTestThatCalled = true } property("test the other") { fixture => theTestTheOtherCalled = true } } val repJ = new TestIgnoredTrackingReporter j.run(None, Args(repJ, Stopper.default, Filter(None, Set("org.scalatest.SlowAsMolasses")), Map(), None, new Tracker, Set.empty)) assert(!repI.testIgnoredReceived) 
assert(!j.theTestThisCalled) assert(!j.theTestThatCalled) assert(j.theTestTheOtherCalled) // Same as previous, except Ignore specifically mentioned in excludes set val k = new PropSpec { type FixtureParam = String def withFixture(test: OneArgTest) { test("hi") } var theTestThisCalled = false var theTestThatCalled = false var theTestTheOtherCalled = false ignore("test this", mytags.SlowAsMolasses, mytags.FastAsLight) { fixture => theTestThisCalled = true } ignore("test that", mytags.SlowAsMolasses) { fixture => theTestThatCalled = true } ignore("test the other") { fixture => theTestTheOtherCalled = true } } val repK = new TestIgnoredTrackingReporter k.run(None, Args(repK, Stopper.default, Filter(None, Set("org.scalatest.SlowAsMolasses", "org.scalatest.Ignore")), Map(), None, new Tracker, Set.empty)) assert(repK.testIgnoredReceived) assert(!k.theTestThisCalled) assert(!k.theTestThatCalled) assert(!k.theTestTheOtherCalled) } it("should return the correct test count from its expectedTestCount method") { val a = new PropSpec { type FixtureParam = String def withFixture(test: OneArgTest) { test("hi") } property("test this") { fixture => } property("test that") { fixture => } } assert(a.expectedTestCount(Filter()) === 2) val b = new PropSpec { type FixtureParam = String def withFixture(test: OneArgTest) { test("hi") } ignore("test this") { fixture => } property("test that") { fixture => } } assert(b.expectedTestCount(Filter()) === 1) val c = new PropSpec { type FixtureParam = String def withFixture(test: OneArgTest) { test("hi") } property("test this", mytags.FastAsLight) { fixture => } property("test that") { fixture => } } assert(c.expectedTestCount(Filter(Some(Set("org.scalatest.FastAsLight")), Set())) === 1) assert(c.expectedTestCount(Filter(None, Set("org.scalatest.FastAsLight"))) === 1) val d = new PropSpec { type FixtureParam = String def withFixture(test: OneArgTest) { test("hi") } property("test this", mytags.FastAsLight, mytags.SlowAsMolasses) { fixture => } 
property("test that", mytags.SlowAsMolasses) { fixture => } property("test the other thing") { fixture => } } assert(d.expectedTestCount(Filter(Some(Set("org.scalatest.FastAsLight")), Set())) === 1) assert(d.expectedTestCount(Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.FastAsLight"))) === 1) assert(d.expectedTestCount(Filter(None, Set("org.scalatest.SlowAsMolasses"))) === 1) assert(d.expectedTestCount(Filter()) === 3) val e = new PropSpec { type FixtureParam = String def withFixture(test: OneArgTest) { test("hi") } property("test this", mytags.FastAsLight, mytags.SlowAsMolasses) { fixture => } property("test that", mytags.SlowAsMolasses) { fixture => } ignore("test the other thing") { fixture => } } assert(e.expectedTestCount(Filter(Some(Set("org.scalatest.FastAsLight")), Set())) === 1) assert(e.expectedTestCount(Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.FastAsLight"))) === 1) assert(e.expectedTestCount(Filter(None, Set("org.scalatest.SlowAsMolasses"))) === 0) assert(e.expectedTestCount(Filter()) === 2) val f = new Suites(a, b, c, d, e) assert(f.expectedTestCount(Filter()) === 10) } it("should generate a TestPending message when the test body is (pending)") { val a = new PropSpec { type FixtureParam = String val hello = "Hello, world!" def withFixture(test: OneArgTest) { test(hello) } property("should do this") (pending) property("should do that") { fixture => assert(fixture === hello) } property("should do something else") { fixture => assert(fixture === hello) pending } } val rep = new EventRecordingReporter a.run(None, Args(rep)) val tp = rep.testPendingEventsReceived assert(tp.size === 2) } it("should allow tests without fixtures to be combined with tests with fixtures") { val a = new PropSpec { var theTestWithFixtureWasRun = false var theTestWithoutFixtureWasRun = false type FixtureParam = String val hello = "Hello, world!" 
def withFixture(test: OneArgTest) { test(hello) } property("should do this") (pending) property("should do that") { fixture => assert(fixture === hello) theTestWithFixtureWasRun = true } property("should do something else") { fixture => assert(fixture === hello) pending } property("should do that without a fixture") { () => assert(2 + 2 === 4) theTestWithoutFixtureWasRun = true } } val rep = new EventRecordingReporter a.run(None, Args(rep)) val tp = rep.testPendingEventsReceived assert(tp.size === 2) assert(a.theTestWithFixtureWasRun) assert(a.theTestWithoutFixtureWasRun) } it("should generate a test failure if a Throwable, or an Error other than direct Error subtypes " + "known in JDK 1.5, excluding AssertionError") { val a = new PropSpec { type FixtureParam = String val hello = "Hello, world!" def withFixture(test: OneArgTest) { test(hello) } property("throws AssertionError") { s => throw new AssertionError } property("throws plain old Error") { s => throw new Error } property("throws Throwable") { s => throw new Throwable } } val rep = new EventRecordingReporter a.run(None, Args(rep)) val tf = rep.testFailedEventsReceived assert(tf.size === 3) } it("should propagate out Errors that are direct subtypes of Error in JDK 1.5, other than " + "AssertionError, causing Suites and Runs to abort.") { val a = new PropSpec { type FixtureParam = String val hello = "Hello, world!" 
def withFixture(test: OneArgTest) { test(hello) } property("throws AssertionError") { s => throw new OutOfMemoryError } } intercept[OutOfMemoryError] { a.run(None, Args(SilentReporter)) } } it("should allow both tests that take fixtures and tests that don't") { val a = new PropSpec { type FixtureParam = String def withFixture(test: OneArgTest) { test("Hello, world!") } var takesNoArgsInvoked = false property("take no args") { () => takesNoArgsInvoked = true } var takesAFixtureInvoked = false property("takes a fixture") { s => takesAFixtureInvoked = true } } a.run(None, Args(SilentReporter)) assert(a.testNames.size === 2, a.testNames) assert(a.takesNoArgsInvoked) assert(a.takesAFixtureInvoked) } it("should work with test functions whose inferred result type is not Unit") { val a = new PropSpec { type FixtureParam = String def withFixture(test: OneArgTest) { test("Hello, world!") } var takesNoArgsInvoked = false property("take no args") { () => takesNoArgsInvoked = true; true } var takesAFixtureInvoked = false property("takes a fixture") { s => takesAFixtureInvoked = true; true } } assert(!a.takesNoArgsInvoked) assert(!a.takesAFixtureInvoked) a.run(None, Args(SilentReporter)) assert(a.testNames.size === 2, a.testNames) assert(a.takesNoArgsInvoked) assert(a.takesAFixtureInvoked) } it("should work with ignored tests whose inferred result type is not Unit") { val a = new PropSpec { type FixtureParam = String def withFixture(test: OneArgTest) { test("hi") } var theTestThisCalled = false var theTestThatCalled = false ignore("test this") { () => theTestThisCalled = true; "hi" } ignore("test that") { fixture => theTestThatCalled = true; 42 } } assert(!a.theTestThisCalled) assert(!a.theTestThatCalled) val reporter = new EventRecordingReporter a.run(None, Args(reporter)) assert(reporter.testIgnoredEventsReceived.size === 2) assert(!a.theTestThisCalled) assert(!a.theTestThatCalled) } it("should pass a NoArgTest to withFixture for tests that take no fixture") { class MySuite 
extends PropSpec { type FixtureParam = String var aNoArgTestWasPassed = false var aOneArgTestWasPassed = false override def withFixture(test: NoArgTest) { aNoArgTestWasPassed = true } def withFixture(test: OneArgTest) { aOneArgTestWasPassed = true } property("something") { () => assert(1 + 1 === 2) } } val s = new MySuite s.run(None, Args(SilentReporter)) assert(s.aNoArgTestWasPassed) assert(!s.aOneArgTestWasPassed) } it("should not pass a NoArgTest to withFixture for tests that take a Fixture") { class MySuite extends PropSpec { type FixtureParam = String var aNoArgTestWasPassed = false var aOneArgTestWasPassed = false override def withFixture(test: NoArgTest) { aNoArgTestWasPassed = true } def withFixture(test: OneArgTest) { aOneArgTestWasPassed = true } property("something") { fixture => assert(1 + 1 === 2) } } val s = new MySuite s.run(None, Args(SilentReporter)) assert(!s.aNoArgTestWasPassed) assert(s.aOneArgTestWasPassed) } it("should pass a NoArgTest that invokes the no-arg test when the " + "NoArgTest's no-arg apply method is invoked") { class MySuite extends PropSpec { type FixtureParam = String var theNoArgTestWasInvoked = false def withFixture(test: OneArgTest) { // Shouldn't be called, but just in case don't invoke a OneArgTest } property("something") { () => theNoArgTestWasInvoked = true } } val s = new MySuite s.run(None, Args(SilentReporter)) assert(s.theNoArgTestWasInvoked) } it("should pass the correct test name in the OneArgTest passed to withFixture") { val a = new PropSpec { type FixtureParam = String var correctTestNameWasPassed = false def withFixture(test: OneArgTest) { correctTestNameWasPassed = test.name == "something" test("hi") } property("something") { fixture => } } a.run(None, Args(SilentReporter)) assert(a.correctTestNameWasPassed) } it("should pass the correct config map in the OneArgTest passed to withFixture") { val a = new PropSpec { type FixtureParam = String var correctConfigMapWasPassed = false def withFixture(test: OneArgTest) 
{ correctConfigMapWasPassed = (test.configMap == Map("hi" -> 7)) test("hi") } property("something") { fixture => } } a.run(None, Args(SilentReporter, Stopper.default, Filter(), Map("hi" -> 7), None, new Tracker(), Set.empty)) assert(a.correctConfigMapWasPassed) } describe("(when a nesting rule has been violated)") { it("should, if they call a nested it from within an it clause, result in a TestFailedException when running the test") { class MySuite extends PropSpec { type FixtureParam = String def withFixture(test: OneArgTest) { test("hi") } property("should blow up") { fixture => property("should never run") { fixture => assert(1 === 1) } } } val spec = new MySuite ensureTestFailedEventReceived(spec, "should blow up") } it("should, if they call a nested it with tags from within an it clause, result in a TestFailedException when running the test") { class MySuite extends PropSpec { type FixtureParam = String def withFixture(test: OneArgTest) { test("hi") } property("should blow up") { fixture => property("should never run", mytags.SlowAsMolasses) { fixture => assert(1 === 1) } } } val spec = new MySuite ensureTestFailedEventReceived(spec, "should blow up") } it("should, if they call a nested ignore from within an it clause, result in a TestFailedException when running the test") { class MySuite extends PropSpec { type FixtureParam = String def withFixture(test: OneArgTest) { test("hi") } property("should blow up") { fixture => ignore("should never run") { fixture => assert(1 === 1) } } } val spec = new MySuite ensureTestFailedEventReceived(spec, "should blow up") } it("should, if they call a nested ignore with tags from within an it clause, result in a TestFailedException when running the test") { class MySuite extends PropSpec { type FixtureParam = String def withFixture(test: OneArgTest) { test("hi") } property("should blow up") { fixture => ignore("should never run", mytags.SlowAsMolasses) { fixture => assert(1 === 1) } } } val spec = new MySuite 
ensureTestFailedEventReceived(spec, "should blow up") } } it("should throw IllegalArgumentException if passed a testName that doesn't exist") { class MySuite extends PropSpec { type FixtureParam = String def withFixture(test: OneArgTest) { test("hi") } property("one") {s => () } property("two") {s => () } } val suite = new MySuite intercept[IllegalArgumentException] { suite.run(Some("three"), Args(SilentReporter)) } } } describe("when failure happens") { it("should fire TestFailed event with correct stack depth info when test failed") { class TestSpec extends PropSpec { type FixtureParam = String def withFixture(test: OneArgTest) { test("hi") } property("fail scenario") { fixture => assert(1 === 2) } } val rep = new EventRecordingReporter val s1 = new TestSpec s1.run(None, Args(rep)) assert(rep.testFailedEventsReceived.size === 1) assert(rep.testFailedEventsReceived(0).throwable.get.asInstanceOf[TestFailedException].failedCodeFileName.get === "PropSpecSpec.scala") assert(rep.testFailedEventsReceived(0).throwable.get.asInstanceOf[TestFailedException].failedCodeLineNumber.get === thisLineNumber - 8) } it("should generate TestRegistrationClosedException with correct stack depth info when has a property nested inside a property") { class TestSpec extends PropSpec { var registrationClosedThrown = false type FixtureParam = String property("a scenario") { fixture => property("nested scenario") { fixture => assert(1 === 2) } } override def withFixture(test: OneArgTest) { try { test.apply("hi") } catch { case e: TestRegistrationClosedException => registrationClosedThrown = true throw e } } } val rep = new EventRecordingReporter val s = new TestSpec s.run(None, Args(rep)) assert(s.registrationClosedThrown == true) val testFailedEvents = rep.testFailedEventsReceived assert(testFailedEvents.size === 1) assert(testFailedEvents(0).throwable.get.getClass() === classOf[TestRegistrationClosedException]) val trce = 
testFailedEvents(0).throwable.get.asInstanceOf[TestRegistrationClosedException] assert("PropSpecSpec.scala" === trce.failedCodeFileName.get) assert(trce.failedCodeLineNumber.get === thisLineNumber - 24) } } }
vivosys/scalatest
src/test/scala/org/scalatest/fixture/PropSpecSpec.scala
Scala
apache-2.0
34,782
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.soteradefense.dga.graphx.wcc

import com.esotericsoftware.kryo.Kryo
import com.esotericsoftware.kryo.io.{Output, Input}
import org.apache.spark.graphx.Graph

import scala.reflect.ClassTag

/**
 * Weakly-connected-components runner used by tests: instead of persisting
 * results anywhere, `save` hands the computed graph straight back so the
 * test can make assertions against it, and the Kryo hooks are no-ops
 * because this runner carries no serializable state.
 */
class WCCTestRunner extends AbstractWCCRunner {

  // The "save" result type is the graph itself rather than, say, a path or Unit.
  override type S = Graph[Long, Long]

  // Returns the input graph unchanged. NOTE(review): unchecked asInstanceOf
  // cast — assumes the WCC computation produced Long vertex and edge
  // attributes; confirm against AbstractWCCRunner's contract.
  override def save[VD: ClassTag, ED: ClassTag](graph: Graph[VD, ED]): S = graph.asInstanceOf[Graph[Long, Long]]

  // Kryo serialization hook: this runner has no state to serialize.
  override def write(p1: Kryo, p2: Output): Unit = {
    // Nothing needs written.
  }

  // Kryo deserialization hook: this runner has no state to restore.
  override def read(p1: Kryo, p2: Input): Unit = {
    // Nothing needs read.
  }
}
atomicjets/distributed-graph-analytics
dga-graphx/src/test/scala/com/soteradefense/dga/graphx/wcc/WCCTestRunner.scala
Scala
apache-2.0
1,396
#ifndef LCB_AUTH_PRIV_H #define LCB_AUTH_PRIV_H #include <libcouchbase/auth.h> #ifdef __cplusplus #include <string> #include <map> namespace lcb { class Authenticator { public: typedef std::map<std::string,std::string> Map; // Gets the "global" username const std::string& username() const { return m_username; } // Gets the "global" password const std::string& password() const { return m_password; } // Get the username and password for a specific bucket const std::string& username_for(const char *bucket) const; const std::string& password_for(const char *bucket) const; const Map& buckets() const { return m_buckets; } Authenticator() : m_refcount(1), m_mode(LCBAUTH_MODE_CLASSIC) {} Authenticator(const Authenticator&); size_t refcount() const { return m_refcount; } void incref() { ++m_refcount; } void decref() { if (!--m_refcount) { delete this; } } lcb_error_t set_mode(lcbauth_MODE mode_) { if (m_buckets.size() || m_username.size() || m_password.size()) { return LCB_ERROR; } else { m_mode = mode_; return LCB_SUCCESS; } } lcbauth_MODE mode() const { return m_mode; } lcb_error_t add(const char *user, const char *pass, int flags); lcb_error_t add(const std::string& user, const std::string& pass, int flags) { return add(user.c_str(), pass.c_str(), flags); } private: Map m_buckets; std::string m_username; std::string m_password; size_t m_refcount; lcbauth_MODE m_mode; }; } #endif #endif /* LCB_AUTH_H */
mody/libcouchbase
src/auth-priv.h
C
apache-2.0
1,595