repo_name
stringlengths
4
116
path
stringlengths
3
942
size
stringlengths
1
7
content
stringlengths
3
1.05M
license
stringclasses
15 values
ThiagoGarciaAlves/drools
drools-workbench-models/drools-workbench-models-guided-dtree/src/main/java/org/drools/workbench/models/guided/dtree/shared/model/nodes/TypeNode.java
791
/* * Copyright 2014 JBoss Inc * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.drools.workbench.models.guided.dtree.shared.model.nodes; public interface TypeNode extends BoundNode { String getClassName(); void setClassName( final String className ); }
apache-2.0
likelyzhao/mxnet
cmake/Utils.cmake
14281
# For cmake_parse_arguments include(CMakeParseArguments) ################################################################################################ # Command alias for debugging messages # Usage: # dmsg(<message>) function(dmsg) message(STATUS ${ARGN}) endfunction() ################################################################################################ # Removes duplicates from list(s) # Usage: # mxnet_list_unique(<list_variable> [<list_variable>] [...]) macro(mxnet_list_unique) foreach(__lst ${ARGN}) if(${__lst}) list(REMOVE_DUPLICATES ${__lst}) endif() endforeach() endmacro() ################################################################################################ # Clears variables from list # Usage: # mxnet_clear_vars(<variables_list>) macro(mxnet_clear_vars) foreach(_var ${ARGN}) unset(${_var}) endforeach() endmacro() ################################################################################################ # Removes duplicates from string # Usage: # mxnet_string_unique(<string_variable>) function(mxnet_string_unique __string) if(${__string}) set(__list ${${__string}}) separate_arguments(__list) list(REMOVE_DUPLICATES __list) foreach(__e ${__list}) set(__str "${__str} ${__e}") endforeach() set(${__string} ${__str} PARENT_SCOPE) endif() endfunction() ################################################################################################ # Prints list element per line # Usage: # mxnet_print_list(<list>) function(mxnet_print_list) foreach(e ${ARGN}) message(STATUS ${e}) endforeach() endfunction() ################################################################################################ # Function merging lists of compiler flags to single string. # Usage: # mxnet_merge_flag_lists(out_variable <list1> [<list2>] [<list3>] ...) 
function(mxnet_merge_flag_lists out_var) set(__result "") foreach(__list ${ARGN}) foreach(__flag ${${__list}}) string(STRIP ${__flag} __flag) set(__result "${__result} ${__flag}") endforeach() endforeach() string(STRIP ${__result} __result) set(${out_var} ${__result} PARENT_SCOPE) endfunction() ################################################################################################ # Converts all paths in list to absolute # Usage: # mxnet_convert_absolute_paths(<list_variable>) function(mxnet_convert_absolute_paths variable) set(__dlist "") foreach(__s ${${variable}}) get_filename_component(__abspath ${__s} ABSOLUTE) list(APPEND __list ${__abspath}) endforeach() set(${variable} ${__list} PARENT_SCOPE) endfunction() ################################################################################################ # Reads set of version defines from the header file # Usage: # mxnet_parse_header(<file> <define1> <define2> <define3> ..) macro(mxnet_parse_header FILENAME FILE_VAR) set(vars_regex "") set(__parnet_scope OFF) set(__add_cache OFF) foreach(name ${ARGN}) if("${name}" STREQUAL "PARENT_SCOPE") set(__parnet_scope ON) elseif("${name}" STREQUAL "CACHE") set(__add_cache ON) elseif(vars_regex) set(vars_regex "${vars_regex}|${name}") else() set(vars_regex "${name}") endif() endforeach() if(EXISTS "${FILENAME}") file(STRINGS "${FILENAME}" ${FILE_VAR} REGEX "#define[ \t]+(${vars_regex})[ \t]+[0-9]+" ) else() unset(${FILE_VAR}) endif() foreach(name ${ARGN}) if(NOT "${name}" STREQUAL "PARENT_SCOPE" AND NOT "${name}" STREQUAL "CACHE") if(${FILE_VAR}) if(${FILE_VAR} MATCHES ".+[ \t]${name}[ \t]+([0-9]+).*") string(REGEX REPLACE ".+[ \t]${name}[ \t]+([0-9]+).*" "\\1" ${name} "${${FILE_VAR}}") else() set(${name} "") endif() if(__add_cache) set(${name} ${${name}} CACHE INTERNAL "${name} parsed from ${FILENAME}" FORCE) elseif(__parnet_scope) set(${name} "${${name}}" PARENT_SCOPE) endif() else() unset(${name} CACHE) endif() endif() endforeach() endmacro() 
################################################################################################ # Reads single version define from the header file and parses it # Usage: # mxnet_parse_header_single_define(<library_name> <file> <define_name>) function(mxnet_parse_header_single_define LIBNAME HDR_PATH VARNAME) set(${LIBNAME}_H "") if(EXISTS "${HDR_PATH}") file(STRINGS "${HDR_PATH}" ${LIBNAME}_H REGEX "^#define[ \t]+${VARNAME}[ \t]+\"[^\"]*\".*$" LIMIT_COUNT 1) endif() if(${LIBNAME}_H) string(REGEX REPLACE "^.*[ \t]${VARNAME}[ \t]+\"([0-9]+).*$" "\\1" ${LIBNAME}_VERSION_MAJOR "${${LIBNAME}_H}") string(REGEX REPLACE "^.*[ \t]${VARNAME}[ \t]+\"[0-9]+\\.([0-9]+).*$" "\\1" ${LIBNAME}_VERSION_MINOR "${${LIBNAME}_H}") string(REGEX REPLACE "^.*[ \t]${VARNAME}[ \t]+\"[0-9]+\\.[0-9]+\\.([0-9]+).*$" "\\1" ${LIBNAME}_VERSION_PATCH "${${LIBNAME}_H}") set(${LIBNAME}_VERSION_MAJOR ${${LIBNAME}_VERSION_MAJOR} ${ARGN} PARENT_SCOPE) set(${LIBNAME}_VERSION_MINOR ${${LIBNAME}_VERSION_MINOR} ${ARGN} PARENT_SCOPE) set(${LIBNAME}_VERSION_PATCH ${${LIBNAME}_VERSION_PATCH} ${ARGN} PARENT_SCOPE) set(${LIBNAME}_VERSION_STRING "${${LIBNAME}_VERSION_MAJOR}.${${LIBNAME}_VERSION_MINOR}.${${LIBNAME}_VERSION_PATCH}" PARENT_SCOPE) # append a TWEAK version if it exists: set(${LIBNAME}_VERSION_TWEAK "") if("${${LIBNAME}_H}" MATCHES "^.*[ \t]${VARNAME}[ \t]+\"[0-9]+\\.[0-9]+\\.[0-9]+\\.([0-9]+).*$") set(${LIBNAME}_VERSION_TWEAK "${CMAKE_MATCH_1}" ${ARGN} PARENT_SCOPE) endif() if(${LIBNAME}_VERSION_TWEAK) set(${LIBNAME}_VERSION_STRING "${${LIBNAME}_VERSION_STRING}.${${LIBNAME}_VERSION_TWEAK}" ${ARGN} PARENT_SCOPE) else() set(${LIBNAME}_VERSION_STRING "${${LIBNAME}_VERSION_STRING}" ${ARGN} PARENT_SCOPE) endif() endif() endfunction() ######################################################################################################## # An option that the user can select. Can accept condition to control when option is available for user. 
# Usage: # mxnet_option(<option_variable> "doc string" <initial value or boolean expression> [IF <condition>]) function(mxnet_option variable description value) set(__value ${value}) set(__condition "") set(__varname "__value") foreach(arg ${ARGN}) if(arg STREQUAL "IF" OR arg STREQUAL "if") set(__varname "__condition") else() list(APPEND ${__varname} ${arg}) endif() endforeach() unset(__varname) if("${__condition}" STREQUAL "") set(__condition 2 GREATER 1) endif() if(${__condition}) if("${__value}" MATCHES ";") if(${__value}) option(${variable} "${description}" ON) else() option(${variable} "${description}" OFF) endif() elseif(DEFINED ${__value}) if(${__value}) option(${variable} "${description}" ON) else() option(${variable} "${description}" OFF) endif() else() option(${variable} "${description}" ${__value}) endif() else() unset(${variable} CACHE) endif() endfunction() ################################################################################################ # Utility macro for comparing two lists. Used for CMake debugging purposes # Usage: # mxnet_compare_lists(<list_variable> <list2_variable> [description]) function(mxnet_compare_lists list1 list2 desc) set(__list1 ${${list1}}) set(__list2 ${${list2}}) list(SORT __list1) list(SORT __list2) list(LENGTH __list1 __len1) list(LENGTH __list2 __len2) if(NOT ${__len1} EQUAL ${__len2}) message(FATAL_ERROR "Lists are not equal. ${__len1} != ${__len2}. ${desc}") endif() foreach(__i RANGE 1 ${__len1}) math(EXPR __index "${__i}- 1") list(GET __list1 ${__index} __item1) list(GET __list2 ${__index} __item2) if(NOT ${__item1} STREQUAL ${__item2}) message(FATAL_ERROR "Lists are not equal. Differ at element ${__index}. 
${desc}") endif() endforeach() endfunction() ################################################################################################ # Command for disabling warnings for different platforms (see below for gcc and VisualStudio) # Usage: # mxnet_warnings_disable(<CMAKE_[C|CXX]_FLAGS[_CONFIGURATION]> -Wshadow /wd4996 ..,) macro(mxnet_warnings_disable) set(_flag_vars "") set(_msvc_warnings "") set(_gxx_warnings "") foreach(arg ${ARGN}) if(arg MATCHES "^CMAKE_") list(APPEND _flag_vars ${arg}) elseif(arg MATCHES "^/wd") list(APPEND _msvc_warnings ${arg}) elseif(arg MATCHES "^-W") list(APPEND _gxx_warnings ${arg}) endif() endforeach() if(NOT _flag_vars) set(_flag_vars CMAKE_C_FLAGS CMAKE_CXX_FLAGS) endif() if(MSVC AND _msvc_warnings) foreach(var ${_flag_vars}) foreach(warning ${_msvc_warnings}) set(${var} "${${var}} ${warning}") endforeach() endforeach() elseif((CMAKE_COMPILER_IS_GNUCXX OR CMAKE_COMPILER_IS_CLANGXX) AND _gxx_warnings) foreach(var ${_flag_vars}) foreach(warning ${_gxx_warnings}) if(NOT warning MATCHES "^-Wno-") string(REPLACE "${warning}" "" ${var} "${${var}}") string(REPLACE "-W" "-Wno-" warning "${warning}") endif() set(${var} "${${var}} ${warning}") endforeach() endforeach() endif() mxnet_clear_vars(_flag_vars _msvc_warnings _gxx_warnings) endmacro() ################################################################################################ # Helper function get current definitions # Usage: # mxnet_get_current_definitions(<definitions_variable>) function(mxnet_get_current_definitions definitions_var) get_property(current_definitions DIRECTORY PROPERTY COMPILE_DEFINITIONS) set(result "") foreach(d ${current_definitions}) list(APPEND result -D${d}) endforeach() mxnet_list_unique(result) set(${definitions_var} ${result} PARENT_SCOPE) endfunction() ################################################################################################ # Helper function get current includes/definitions # Usage: # 
mxnet_get_current_cflags(<cflagslist_variable>) function(mxnet_get_current_cflags cflags_var) get_property(current_includes DIRECTORY PROPERTY INCLUDE_DIRECTORIES) mxnet_convert_absolute_paths(current_includes) mxnet_get_current_definitions(cflags) foreach(i ${current_includes}) list(APPEND cflags "-I${i}") endforeach() mxnet_list_unique(cflags) set(${cflags_var} ${cflags} PARENT_SCOPE) endfunction() ################################################################################################ # Helper function to parse current linker libs into link directories, libflags and osx frameworks # Usage: # mxnet_parse_linker_libs(<mxnet_LINKER_LIBS_var> <directories_var> <libflags_var> <frameworks_var>) function(mxnet_parse_linker_libs mxnet_LINKER_LIBS_variable folders_var flags_var frameworks_var) set(__unspec "") set(__debug "") set(__optimized "") set(__framework "") set(__varname "__unspec") # split libs into debug, optimized, unspecified and frameworks foreach(list_elem ${${mxnet_LINKER_LIBS_variable}}) if(list_elem STREQUAL "debug") set(__varname "__debug") elseif(list_elem STREQUAL "optimized") set(__varname "__optimized") elseif(list_elem MATCHES "^-framework[ \t]+([^ \t].*)") list(APPEND __framework -framework ${CMAKE_MATCH_1}) else() list(APPEND ${__varname} ${list_elem}) set(__varname "__unspec") endif() endforeach() # attach debug or optimized libs to unspecified according to current configuration if(CMAKE_BUILD_TYPE MATCHES "Debug") set(__libs ${__unspec} ${__debug}) else() set(__libs ${__unspec} ${__optimized}) endif() set(libflags "") set(folders "") # convert linker libraries list to link flags foreach(lib ${__libs}) if(TARGET ${lib}) list(APPEND folders $<TARGET_LINKER_FILE_DIR:${lib}>) list(APPEND libflags -l${lib}) elseif(lib MATCHES "^-l.*") list(APPEND libflags ${lib}) elseif(IS_ABSOLUTE ${lib}) get_filename_component(name_we ${lib} NAME_WE) get_filename_component(folder ${lib} PATH) string(REGEX MATCH "^lib(.*)" __match ${name_we}) list(APPEND 
libflags -l${CMAKE_MATCH_1}) list(APPEND folders ${folder}) else() message(FATAL_ERROR "Logic error. Need to update cmake script") endif() endforeach() mxnet_list_unique(libflags folders) set(${folders_var} ${folders} PARENT_SCOPE) set(${flags_var} ${libflags} PARENT_SCOPE) set(${frameworks_var} ${__framework} PARENT_SCOPE) endfunction() ################################################################################################ # Helper function to detect Darwin version, i.e. 10.8, 10.9, 10.10, .... # Usage: # mxnet_detect_darwin_version(<version_variable>) function(mxnet_detect_darwin_version output_var) if(APPLE) execute_process(COMMAND /usr/bin/sw_vers -productVersion RESULT_VARIABLE __sw_vers OUTPUT_VARIABLE __sw_vers_out ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE) set(${output_var} ${__sw_vers_out} PARENT_SCOPE) else() set(${output_var} "" PARENT_SCOPE) endif() endfunction() ################################################################################################ # Convenient command to setup source group for IDEs that support this feature (VS, XCode) # Usage: # caffe_source_group(<group> GLOB[_RECURSE] <globbing_expression>) function(mxnet_source_group group) message(WARNING "mxnet_source_group function is obsolete, it not do anything now.") endfunction() function(assign_source_group group) foreach(_source IN ITEMS ${ARGN}) if (IS_ABSOLUTE "${_source}") file(RELATIVE_PATH _source_rel "${CMAKE_CURRENT_SOURCE_DIR}" "${_source}") else() set(_source_rel "${_source}") endif() get_filename_component(_source_path "${_source_rel}" PATH) string(REPLACE "/" "\\" _source_path_msvc "${_source_path}") source_group("${group}\\${_source_path_msvc}" FILES "${_source}") endforeach() endfunction(assign_source_group)
apache-2.0
ninetian/ffmpeginstaller
gpac-0.7.1/extra_lib/include/ffmpeg_android/libavutil/murmur3.h
2444
/* * Copyright (C) 2013 Reimar Döffinger <[email protected]> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVUTIL_MURMUR3_H #define AVUTIL_MURMUR3_H #include <stdint.h> struct AVMurMur3 *av_murmur3_alloc(void); void av_murmur3_init_seeded(struct AVMurMur3 *c, uint64_t seed); void av_murmur3_init(struct AVMurMur3 *c); void av_murmur3_update(struct AVMurMur3 *c, const uint8_t *src, int len); void av_murmur3_final(struct AVMurMur3 *c, uint8_t dst[16]); #endif /* AVUTIL_MURMUR3_H */ /* * Copyright (C) 2013 Reimar Döffinger <[email protected]> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVUTIL_MURMUR3_H #define AVUTIL_MURMUR3_H #include <stdint.h> struct AVMurMur3 *av_murmur3_alloc(void); void av_murmur3_init_seeded(struct AVMurMur3 *c, uint64_t seed); void av_murmur3_init(struct AVMurMur3 *c); void av_murmur3_update(struct AVMurMur3 *c, const uint8_t *src, int len); void av_murmur3_final(struct AVMurMur3 *c, uint8_t dst[16]); #endif /* AVUTIL_MURMUR3_H */
apache-2.0
culmat/gitblit
src/main/java/com/gitblit/utils/GitBlitDiffFormatter.java
6842
/* * Copyright 2011 gitblit.com. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.gitblit.utils; import static org.eclipse.jgit.lib.Constants.encode; import static org.eclipse.jgit.lib.Constants.encodeASCII; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.OutputStream; import java.text.MessageFormat; import org.eclipse.jgit.diff.DiffEntry; import org.eclipse.jgit.diff.DiffFormatter; import org.eclipse.jgit.diff.RawText; import org.eclipse.jgit.util.RawParseUtils; import com.gitblit.models.PathModel.PathChangeModel; import com.gitblit.utils.DiffUtils.DiffStat; /** * Generates an html snippet of a diff in Gitblit's style, tracks changed paths, * and calculates diff stats. 
* * @author James Moger * */ public class GitBlitDiffFormatter extends DiffFormatter { private final OutputStream os; private final DiffStat diffStat; private PathChangeModel currentPath; private int left, right; public GitBlitDiffFormatter(OutputStream os, String commitId) { super(os); this.os = os; this.diffStat = new DiffStat(commitId); } @Override public void format(DiffEntry ent) throws IOException { currentPath = diffStat.addPath(ent); super.format(ent); } /** * Output a hunk header * * @param aStartLine * within first source * @param aEndLine * within first source * @param bStartLine * within second source * @param bEndLine * within second source * @throws IOException */ @Override protected void writeHunkHeader(int aStartLine, int aEndLine, int bStartLine, int bEndLine) throws IOException { os.write("<tr><th>..</th><th>..</th><td class='hunk_header'>".getBytes()); os.write('@'); os.write('@'); writeRange('-', aStartLine + 1, aEndLine - aStartLine); writeRange('+', bStartLine + 1, bEndLine - bStartLine); os.write(' '); os.write('@'); os.write('@'); os.write("</td></tr>\n".getBytes()); left = aStartLine + 1; right = bStartLine + 1; } protected void writeRange(final char prefix, final int begin, final int cnt) throws IOException { os.write(' '); os.write(prefix); switch (cnt) { case 0: // If the range is empty, its beginning number must // be the // line just before the range, or 0 if the range is // at the // start of the file stream. Here, begin is always 1 // based, // so an empty file would produce "0,0". // os.write(encodeASCII(begin - 1)); os.write(','); os.write('0'); break; case 1: // If the range is exactly one line, produce only // the number. 
// os.write(encodeASCII(begin)); break; default: os.write(encodeASCII(begin)); os.write(','); os.write(encodeASCII(cnt)); break; } } @Override protected void writeLine(final char prefix, final RawText text, final int cur) throws IOException { // update entry diffstat currentPath.update(prefix); // output diff os.write("<tr>".getBytes()); switch (prefix) { case '+': os.write(("<th></th><th>" + (right++) + "</th>").getBytes()); os.write("<td><div class=\"diff add2\">".getBytes()); break; case '-': os.write(("<th>" + (left++) + "</th><th></th>").getBytes()); os.write("<td><div class=\"diff remove2\">".getBytes()); break; default: os.write(("<th>" + (left++) + "</th><th>" + (right++) + "</th>").getBytes()); os.write("<td>".getBytes()); break; } os.write(prefix); String line = text.getString(cur); line = StringUtils.escapeForHtml(line, false); os.write(encode(line)); switch (prefix) { case '+': case '-': os.write("</div>".getBytes()); break; default: os.write("</td>".getBytes()); } os.write("</tr>\n".getBytes()); } /** * Workaround function for complex private methods in DiffFormatter. This * sets the html for the diff headers. 
* * @return */ public String getHtml() { ByteArrayOutputStream bos = (ByteArrayOutputStream) os; String html = RawParseUtils.decode(bos.toByteArray()); String[] lines = html.split("\n"); StringBuilder sb = new StringBuilder(); boolean inFile = false; String oldnull = "a/dev/null"; for (String line : lines) { if (line.startsWith("index")) { // skip index lines } else if (line.startsWith("new file")) { // skip new file lines } else if (line.startsWith("\\ No newline")) { // skip no new line } else if (line.startsWith("---") || line.startsWith("+++")) { // skip --- +++ lines } else if (line.startsWith("diff")) { line = StringUtils.convertOctal(line); if (line.indexOf(oldnull) > -1) { // a is null, use b line = line.substring(("diff --git " + oldnull).length()).trim(); // trim b/ line = line.substring(2).trim(); } else { // use a line = line.substring("diff --git ".length()).trim(); line = line.substring(line.startsWith("\"a/") ? 3 : 2); line = line.substring(0, line.indexOf(" b/") > -1 ? line.indexOf(" b/") : line.indexOf("\"b/")).trim(); } if (line.charAt(0) == '"') { line = line.substring(1); } if (line.charAt(line.length() - 1) == '"') { line = line.substring(0, line.length() - 1); } if (inFile) { sb.append("</tbody></table></div>\n"); inFile = false; } sb.append(MessageFormat.format("<div class='header'><div class=\"diffHeader\" id=\"{0}\"><i class=\"icon-file\"></i> ", line)).append(line).append("</div></div>"); sb.append("<div class=\"diff\">"); sb.append("<table><tbody>"); inFile = true; } else { boolean gitLinkDiff = line.length() > 0 && line.substring(1).startsWith("Subproject commit"); if (gitLinkDiff) { sb.append("<tr><th></th><th></th>"); if (line.charAt(0) == '+') { sb.append("<td><div class=\"diff add2\">"); } else { sb.append("<td><div class=\"diff remove2\">"); } } sb.append(line); if (gitLinkDiff) { sb.append("</div></td></tr>"); } } } sb.append("</table></div>"); return sb.toString(); } public DiffStat getDiffStat() { return diffStat; } }
apache-2.0
punkhorn/camel-upstream
components/camel-docker/src/test/java/org/apache/camel/component/docker/headers/WaitContainerCmdHeaderTest.java
2429
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.camel.component.docker.headers; import java.util.Map; import com.github.dockerjava.api.command.WaitContainerCmd; import com.github.dockerjava.core.command.WaitContainerResultCallback; import org.apache.camel.component.docker.DockerConstants; import org.apache.camel.component.docker.DockerOperation; import org.junit.Test; import org.mockito.Mock; import org.mockito.Mockito; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyString; /** * Validates Wait Container Request headers are applied properly */ public class WaitContainerCmdHeaderTest extends BaseDockerHeaderTest<WaitContainerCmd> { @Mock private WaitContainerCmd mockObject; @Mock private WaitContainerResultCallback callback; @Test public void waitContainerHeaderTest() { String containerId = "9c09acd48a25"; Map<String, Object> headers = getDefaultParameters(); headers.put(DockerConstants.DOCKER_CONTAINER_ID, containerId); template.sendBodyAndHeaders("direct:in", "", headers); Mockito.verify(dockerClient, Mockito.times(1)).waitContainerCmd(containerId); } @Override protected void setupMocks() { 
Mockito.when(dockerClient.waitContainerCmd(anyString())).thenReturn(mockObject); Mockito.when(mockObject.exec(any())).thenReturn(callback); Mockito.when(callback.awaitStatusCode()).thenReturn(anyInt()); } @Override protected DockerOperation getOperation() { return DockerOperation.WAIT_CONTAINER; } }
apache-2.0
tubemogul/druid
server/src/main/java/io/druid/server/coordinator/rules/LoadRule.java
9453
/* * Licensed to Metamarkets Group Inc. (Metamarkets) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. Metamarkets licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package io.druid.server.coordinator.rules; import com.google.common.collect.Lists; import com.google.common.collect.Maps; import com.google.common.collect.MinMaxPriorityQueue; import com.metamx.common.IAE; import com.metamx.emitter.EmittingLogger; import io.druid.server.coordinator.BalancerStrategy; import io.druid.server.coordinator.CoordinatorStats; import io.druid.server.coordinator.DruidCoordinator; import io.druid.server.coordinator.DruidCoordinatorRuntimeParams; import io.druid.server.coordinator.LoadPeonCallback; import io.druid.server.coordinator.ReplicationThrottler; import io.druid.server.coordinator.ServerHolder; import io.druid.timeline.DataSegment; import org.joda.time.DateTime; import java.util.List; import java.util.Map; import java.util.Set; /** * LoadRules indicate the number of replicants a segment should have in a given tier. 
*/ public abstract class LoadRule implements Rule { private static final EmittingLogger log = new EmittingLogger(LoadRule.class); private static final String assignedCount = "assignedCount"; private static final String droppedCount = "droppedCount"; @Override public CoordinatorStats run(DruidCoordinator coordinator, DruidCoordinatorRuntimeParams params, DataSegment segment) { final CoordinatorStats stats = new CoordinatorStats(); final Set<DataSegment> availableSegments = params.getAvailableSegments(); final Map<String, Integer> loadStatus = Maps.newHashMap(); int totalReplicantsInCluster = params.getSegmentReplicantLookup().getTotalReplicants(segment.getIdentifier()); for (Map.Entry<String, Integer> entry : getTieredReplicants().entrySet()) { final String tier = entry.getKey(); final int expectedReplicantsInTier = entry.getValue(); final int totalReplicantsInTier = params.getSegmentReplicantLookup() .getTotalReplicants(segment.getIdentifier(), tier); final int loadedReplicantsInTier = params.getSegmentReplicantLookup() .getLoadedReplicants(segment.getIdentifier(), tier); final MinMaxPriorityQueue<ServerHolder> serverQueue = params.getDruidCluster().getServersByTier(tier); if (serverQueue == null) { log.makeAlert("Tier[%s] has no servers! 
Check your cluster configuration!", tier).emit(); continue; } final List<ServerHolder> serverHolderList = Lists.newArrayList(serverQueue); final DateTime referenceTimestamp = params.getBalancerReferenceTimestamp(); final BalancerStrategy strategy = params.getBalancerStrategyFactory().createBalancerStrategy(referenceTimestamp); if (availableSegments.contains(segment)) { CoordinatorStats assignStats = assign( params.getReplicationManager(), tier, totalReplicantsInCluster, expectedReplicantsInTier, totalReplicantsInTier, strategy, serverHolderList, segment ); stats.accumulate(assignStats); totalReplicantsInCluster += assignStats.getPerTierStats().get(assignedCount).get(tier).get(); } loadStatus.put(tier, expectedReplicantsInTier - loadedReplicantsInTier); } // Remove over-replication stats.accumulate(drop(loadStatus, segment, params)); return stats; } private CoordinatorStats assign( final ReplicationThrottler replicationManager, final String tier, final int totalReplicantsInCluster, final int expectedReplicantsInTier, final int totalReplicantsInTier, final BalancerStrategy strategy, final List<ServerHolder> serverHolderList, final DataSegment segment ) { final CoordinatorStats stats = new CoordinatorStats(); stats.addToTieredStat(assignedCount, tier, 0); int currReplicantsInTier = totalReplicantsInTier; int currTotalReplicantsInCluster = totalReplicantsInCluster; while (currReplicantsInTier < expectedReplicantsInTier) { boolean replicate = currTotalReplicantsInCluster > 0; if (replicate && !replicationManager.canCreateReplicant(tier)) { break; } final ServerHolder holder = strategy.findNewSegmentHomeReplicator(segment, serverHolderList); if (holder == null) { log.warn( "Not enough [%s] servers or node capacity to assign segment[%s]! 
Expected Replicants[%d]", tier, segment.getIdentifier(), expectedReplicantsInTier ); break; } if (replicate) { replicationManager.registerReplicantCreation( tier, segment.getIdentifier(), holder.getServer().getHost() ); } holder.getPeon().loadSegment( segment, new LoadPeonCallback() { @Override public void execute() { replicationManager.unregisterReplicantCreation( tier, segment.getIdentifier(), holder.getServer().getHost() ); } } ); stats.addToTieredStat(assignedCount, tier, 1); ++currReplicantsInTier; ++currTotalReplicantsInCluster; } return stats; } private CoordinatorStats drop( final Map<String, Integer> loadStatus, final DataSegment segment, final DruidCoordinatorRuntimeParams params ) { CoordinatorStats stats = new CoordinatorStats(); // Make sure we have enough loaded replicants in the correct tiers in the cluster before doing anything for (Integer leftToLoad : loadStatus.values()) { if (leftToLoad > 0) { return stats; } } final ReplicationThrottler replicationManager = params.getReplicationManager(); // Find all instances of this segment across tiers Map<String, Integer> replicantsByTier = params.getSegmentReplicantLookup().getClusterTiers(segment.getIdentifier()); for (Map.Entry<String, Integer> entry : replicantsByTier.entrySet()) { final String tier = entry.getKey(); int loadedNumReplicantsForTier = entry.getValue(); int expectedNumReplicantsForTier = getNumReplicants(tier); stats.addToTieredStat(droppedCount, tier, 0); MinMaxPriorityQueue<ServerHolder> serverQueue = params.getDruidCluster().get(tier); if (serverQueue == null) { log.makeAlert("No holders found for tier[%s]", entry.getKey()).emit(); continue; } List<ServerHolder> droppedServers = Lists.newArrayList(); while (loadedNumReplicantsForTier > expectedNumReplicantsForTier) { final ServerHolder holder = serverQueue.pollLast(); if (holder == null) { log.warn("Wtf, holder was null? 
I have no servers serving [%s]?", segment.getIdentifier()); break; } if (holder.isServingSegment(segment)) { if (expectedNumReplicantsForTier > 0) { // don't throttle unless we are removing extra replicants if (!replicationManager.canDestroyReplicant(tier)) { serverQueue.add(holder); break; } replicationManager.registerReplicantTermination( tier, segment.getIdentifier(), holder.getServer().getHost() ); } holder.getPeon().dropSegment( segment, new LoadPeonCallback() { @Override public void execute() { replicationManager.unregisterReplicantTermination( tier, segment.getIdentifier(), holder.getServer().getHost() ); } } ); --loadedNumReplicantsForTier; stats.addToTieredStat(droppedCount, tier, 1); } droppedServers.add(holder); } serverQueue.addAll(droppedServers); } return stats; } protected void validateTieredReplicants(Map<String, Integer> tieredReplicants){ if(tieredReplicants.size() == 0) throw new IAE("A rule with empty tiered replicants is invalid"); for (Map.Entry<String, Integer> entry: tieredReplicants.entrySet()) { if (entry.getValue() == null) throw new IAE("Replicant value cannot be empty"); if (entry.getValue() < 0) throw new IAE("Replicant value [%d] is less than 0, which is not allowed", entry.getValue()); } } public abstract Map<String, Integer> getTieredReplicants(); public abstract int getNumReplicants(String tier); }
apache-2.0
rdicroce/deltaspike
deltaspike/core/api/src/main/java/org/apache/deltaspike/core/util/ServiceUtils.java
3022
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.deltaspike.core.util; import org.apache.deltaspike.core.spi.activation.Deactivatable; import javax.enterprise.inject.Typed; import java.util.ArrayList; import java.util.Iterator; import java.util.List; import java.util.ServiceLoader; import java.util.logging.Logger; /** * Allows handling the lookup (with fallbacks) in a central place. 
* See DELTASPIKE-97 */ @Typed() public abstract class ServiceUtils { private static final Logger LOG = Logger.getLogger(ServiceUtils.class.getName()); private ServiceUtils() { // prevent instantiation } public static <T> List<T> loadServiceImplementations(Class<T> serviceType) { return loadServiceImplementations(serviceType, false); } public static <T> List<T> loadServiceImplementations(Class<T> serviceType, boolean ignoreServicesWithMissingDependencies) { List<T> result = new ArrayList<T>(); Iterator<T> servicesIterator = ServiceLoader.load(serviceType).iterator(); if (!servicesIterator.hasNext()) { ClassLoader fallbackClassLoader = ServiceUtils.class.getClassLoader(); servicesIterator = ServiceLoader.load(serviceType, fallbackClassLoader).iterator(); } while (servicesIterator.hasNext()) { try { T service = servicesIterator.next(); if (service instanceof Deactivatable && !ClassDeactivationUtils.isActivated((Class<? extends Deactivatable>) service.getClass())) { LOG.info("deactivated service: " + service.getClass().getName()); continue; } result.add(service); } catch (Throwable t) { if (!ignoreServicesWithMissingDependencies) { throw ExceptionUtils.throwAsRuntimeException(t); } else { LOG.info("service filtered - caused by " + t.getMessage()); } } } return result; } }
apache-2.0
kidaa/jena
jena-core/src/main/java/jena/rdfcopy.java
5015
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package jena; import static org.apache.jena.atlas.logging.LogCtl.setCmdLogging; import java.net.*; import java.io.*; import org.apache.jena.rdf.model.* ; import org.apache.jena.shared.JenaException ; /** A program which read an RDF model and copy it to the standard output stream. * * <p>This program will read an RDF model, in a variety of languages, * and copy it to the output stream in a possibly different language. * Input can be read either from a URL or from a file. * The program writes its results to the standard output stream and sets * its exit code to 0 if the program terminate normally, and * to -1 if it encounters an error.</p> * * <p></p> * * <pre>java jena.rdfcopy model [inlang [outlang]] * * model1 and model2 can be file names or URL's * inlang and outlang specify the language of the input and output * respectively and can be: * RDF/XML * N-TRIPLE * TURTLE * N3 * The input language defaults to RDF/XML and the output language * defaults to N-TRIPLE. * </pre> */ public class rdfcopy extends java.lang.Object { static { setCmdLogging(); } /** * @param args the command line arguments */ public static void main(String ... 
args) { if ( ( args.length < 1 ) || ( "-h".equals(args[0]) ) ) { usage(); System.exit(-1); } String in = args[0]; String inlang = "RDF/XML"; int j; for (j = 1; j < args.length && args[j].contains( "=" ); j++) {} int lastInProp = j; if (j < args.length) { inlang = args[j]; } j++; String outlang = "N-TRIPLE"; for (; j < args.length && args[j].contains( "=" ); j++) {} int lastOutProp = j; if (j < args.length) { outlang = args[j]; } if (j + 1 < args.length) { // System.err.println(j+"<<"+args.length); usage(); System.exit(-1); } try { Model m = ModelFactory.createDefaultModel(); String base = in ; RDFReader rdr = m.getReader(inlang); for (j = 1; j < lastInProp; j++) { int eq = args[j].indexOf("="); rdr.setProperty( args[j].substring(0, eq), args[j].substring(eq + 1)); } try { rdr.read(m, in); } catch (JenaException ex) { if ( ! ( ex.getCause() instanceof MalformedURLException ) ) throw ex ; // Tried as a URL. Try as a file name. // Make absolute File f = new File(in) ; base = "file:///"+f.getCanonicalPath().replace('\\','/') ; rdr.read(m, new FileInputStream(in), base) ; } RDFWriter w = m.getWriter(outlang); j++; for (; j < lastOutProp; j++) { int eq = args[j].indexOf("="); w.setProperty( args[j].substring(0, eq), args[j].substring(eq + 1)); } w.write(m, System.out, null) ; System.exit(0); } catch (Exception e) { System.err.println("Unhandled exception:"); System.err.println(" " + e.toString()); System.exit(-1); } } protected static void usage() { System.err.println("usage:"); System.err.println(" java jena.rdfcopy in {inprop=inval}* [ inlang {outprop=outval}* outlang]]"); System.err.println(); System.err.println(" in can be a URL or a filename"); System.err.println(" inlang and outlang can take values:"); System.err.println(" RDF/XML"); System.err.println(" RDF/XML-ABBREV"); System.err.println(" N-TRIPLE"); System.err.println(" TURTLE"); System.err.println(" N3"); System.err.println( " inlang defaults to RDF/XML, outlang to N-TRIPLE"); System.err.println(" The legal 
values for inprop and outprop depend on inlang and outlang."); System.err.println(" The legal values for inval and outval depend on inprop and outprop."); System.err.println(); } protected static void read(Model model, String in, String lang) throws java.io.FileNotFoundException { try { URL url = new URL(in); model.read(in, lang); } catch (java.net.MalformedURLException e) { model.read(new FileInputStream(in), "", lang); } } }
apache-2.0
cxfeng1/incubator-weex
runtime/frameworks/legacy/app/register.js
3146
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ let nativeModules = {} // for testing /** * for testing */ export function getModule (moduleName) { return nativeModules[moduleName] } /** * for testing */ export function clearModules () { nativeModules = {} } // for framework /** * init modules for an app instance * the second param determines whether to replace an existed method */ export function initModules (modules, ifReplace) { for (const moduleName in modules) { // init `modules[moduleName][]` let methods = nativeModules[moduleName] if (!methods) { methods = {} nativeModules[moduleName] = methods } // push each non-existed new method modules[moduleName].forEach(function (method) { if (typeof method === 'string') { method = { name: method } } if (!methods[method.name] || ifReplace) { methods[method.name] = method } }) } } /** * init app methods */ export function initMethods (Vm, apis) { const p = Vm.prototype for (const apiName in apis) { if (!p.hasOwnProperty(apiName)) { p[apiName] = apis[apiName] } } } /** * get a module of methods for an app instance */ export function requireModule (app, name) { const methods = nativeModules[name] const target = {} for (const methodName in methods) { Object.defineProperty(target, methodName, { 
configurable: true, enumerable: true, get: function moduleGetter () { return (...args) => app.callTasks({ module: name, method: methodName, args: args }) }, set: function moduleSetter (value) { if (typeof value === 'function') { return app.callTasks({ module: name, method: methodName, args: [value] }) } } }) } return target } /** * get a custom component options */ export function requireCustomComponent (app, name) { const { customComponentMap } = app return customComponentMap[name] } /** * register a custom component options */ export function registerCustomComponent (app, name, def) { const { customComponentMap } = app if (customComponentMap[name]) { console.error(`[JS Framework] define a component(${name}) that already exists`) return } customComponentMap[name] = def }
apache-2.0
kasungayan/carbon-identity
components/application-mgt/org.wso2.carbon.identity.application.common/src/main/java/org/wso2/carbon/identity/application/common/model/InboundAuthenticationRequestConfig.java
4407
/* * Copyright (c) 2014, WSO2 Inc. (http://www.wso2.org) All Rights Reserved. * * WSO2 Inc. licenses this file to you under the Apache License, * Version 2.0 (the "License"); you may not use this file except * in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.wso2.carbon.identity.application.common.model; import org.apache.axiom.om.OMElement; import org.apache.commons.collections.CollectionUtils; import java.io.Serializable; import java.util.ArrayList; import java.util.Arrays; import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Set; public class InboundAuthenticationRequestConfig implements Serializable { /** * */ private static final long serialVersionUID = -4619706374988196634L; private String inboundAuthKey; private String inboundAuthType; private Property[] properties = new Property[0]; /* * <InboundAuthenticationRequestConfig> <InboundAuthKey></InboundAuthKey> * <InboundAuthType></InboundAuthType> <Properties></Properties> * </InboundAuthenticationRequestConfig> */ public static InboundAuthenticationRequestConfig build( OMElement inboundAuthenticationRequestConfigOM) { if (inboundAuthenticationRequestConfigOM == null) { return null; } InboundAuthenticationRequestConfig inboundAuthenticationRequestConfig; inboundAuthenticationRequestConfig = new InboundAuthenticationRequestConfig(); Iterator<?> members = inboundAuthenticationRequestConfigOM.getChildElements(); while (members.hasNext()) { OMElement member = (OMElement) members.next(); if ("InboundAuthKey".equalsIgnoreCase(member.getLocalName())) { 
inboundAuthenticationRequestConfig.setInboundAuthKey(member.getText()); } else if ("InboundAuthType".equalsIgnoreCase(member.getLocalName())) { inboundAuthenticationRequestConfig.setInboundAuthType(member.getText()); } else if ("Properties".equalsIgnoreCase(member.getLocalName())) { Iterator<?> propertiesIter = member.getChildElements(); List<Property> propertiesArrList = new ArrayList<Property>(); if (propertiesIter != null) { while (propertiesIter.hasNext()) { OMElement propertiesElement = (OMElement) (propertiesIter.next()); Property prop = Property.build(propertiesElement); if (prop != null) { propertiesArrList.add(prop); } } } if (CollectionUtils.isNotEmpty(propertiesArrList)) { Property[] propertiesArr = propertiesArrList.toArray(new Property[0]); inboundAuthenticationRequestConfig.setProperties(propertiesArr); } } } return inboundAuthenticationRequestConfig; } /** * @return */ public String getInboundAuthKey() { return inboundAuthKey; } /** * @param inboundAuthKey */ public void setInboundAuthKey(String inboundAuthKey) { this.inboundAuthKey = inboundAuthKey; } /** * @return */ public String getInboundAuthType() { return inboundAuthType; } /** * @param inboundAuthType */ public void setInboundAuthType(String inboundAuthType) { this.inboundAuthType = inboundAuthType; } /** * @return */ public Property[] getProperties() { return properties; } /** * @param properties */ public void setProperties(Property[] properties) { if (properties == null) { return; } Set<Property> propertySet = new HashSet<Property>(Arrays.asList(properties)); this.properties = propertySet.toArray(new Property[propertySet.size()]); } }
apache-2.0
liyu1990/tensorflow
third_party/eigen3/Eigen/src/Eigenvalues/EigenSolver.h
22623
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud <[email protected]> // Copyright (C) 2010,2012 Jitse Niesen <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_EIGENSOLVER_H #define EIGEN_EIGENSOLVER_H #include "./RealSchur.h" namespace Eigen { /** \eigenvalues_module \ingroup Eigenvalues_Module * * * \class EigenSolver * * \brief Computes eigenvalues and eigenvectors of general matrices * * \tparam _MatrixType the type of the matrix of which we are computing the * eigendecomposition; this is expected to be an instantiation of the Matrix * class template. Currently, only real matrices are supported. * * The eigenvalues and eigenvectors of a matrix \f$ A \f$ are scalars * \f$ \lambda \f$ and vectors \f$ v \f$ such that \f$ Av = \lambda v \f$. If * \f$ D \f$ is a diagonal matrix with the eigenvalues on the diagonal, and * \f$ V \f$ is a matrix with the eigenvectors as its columns, then \f$ A V = * V D \f$. The matrix \f$ V \f$ is almost always invertible, in which case we * have \f$ A = V D V^{-1} \f$. This is called the eigendecomposition. * * The eigenvalues and eigenvectors of a matrix may be complex, even when the * matrix is real. However, we can choose real matrices \f$ V \f$ and \f$ D * \f$ satisfying \f$ A V = V D \f$, just like the eigendecomposition, if the * matrix \f$ D \f$ is not required to be diagonal, but if it is allowed to * have blocks of the form * \f[ \begin{bmatrix} u & v \\ -v & u \end{bmatrix} \f] * (where \f$ u \f$ and \f$ v \f$ are real numbers) on the diagonal. These * blocks correspond to complex eigenvalue pairs \f$ u \pm iv \f$. We call * this variant of the eigendecomposition the pseudo-eigendecomposition. 
* * Call the function compute() to compute the eigenvalues and eigenvectors of * a given matrix. Alternatively, you can use the * EigenSolver(const MatrixType&, bool) constructor which computes the * eigenvalues and eigenvectors at construction time. Once the eigenvalue and * eigenvectors are computed, they can be retrieved with the eigenvalues() and * eigenvectors() functions. The pseudoEigenvalueMatrix() and * pseudoEigenvectors() methods allow the construction of the * pseudo-eigendecomposition. * * The documentation for EigenSolver(const MatrixType&, bool) contains an * example of the typical use of this class. * * \note The implementation is adapted from * <a href="http://math.nist.gov/javanumerics/jama/">JAMA</a> (public domain). * Their code is based on EISPACK. * * \sa MatrixBase::eigenvalues(), class ComplexEigenSolver, class SelfAdjointEigenSolver */ template<typename _MatrixType> class EigenSolver { public: /** \brief Synonym for the template parameter \p _MatrixType. */ typedef _MatrixType MatrixType; enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, ColsAtCompileTime = MatrixType::ColsAtCompileTime, Options = MatrixType::Options, MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime }; /** \brief Scalar type for matrices of type #MatrixType. */ typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits<Scalar>::Real RealScalar; typedef typename MatrixType::Index Index; /** \brief Complex scalar type for #MatrixType. * * This is \c std::complex<Scalar> if #Scalar is real (e.g., * \c float or \c double) and just \c Scalar if #Scalar is * complex. */ typedef std::complex<RealScalar> ComplexScalar; /** \brief Type for vector of eigenvalues as returned by eigenvalues(). * * This is a column vector with entries of type #ComplexScalar. * The length of the vector is the size of #MatrixType. 
*/ typedef Matrix<ComplexScalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> EigenvalueType; /** \brief Type for matrix of eigenvectors as returned by eigenvectors(). * * This is a square matrix with entries of type #ComplexScalar. * The size is the same as the size of #MatrixType. */ typedef Matrix<ComplexScalar, RowsAtCompileTime, ColsAtCompileTime, Options, MaxRowsAtCompileTime, MaxColsAtCompileTime> EigenvectorsType; /** \brief Default constructor. * * The default constructor is useful in cases in which the user intends to * perform decompositions via EigenSolver::compute(const MatrixType&, bool). * * \sa compute() for an example. */ EigenSolver() : m_eivec(), m_eivalues(), m_isInitialized(false), m_realSchur(), m_matT(), m_tmp() {} /** \brief Default constructor with memory preallocation * * Like the default constructor but with preallocation of the internal data * according to the specified problem \a size. * \sa EigenSolver() */ EigenSolver(Index size) : m_eivec(size, size), m_eivalues(size), m_isInitialized(false), m_eigenvectorsOk(false), m_realSchur(size), m_matT(size, size), m_tmp(size) {} /** \brief Constructor; computes eigendecomposition of given matrix. * * \param[in] matrix Square matrix whose eigendecomposition is to be computed. * \param[in] computeEigenvectors If true, both the eigenvectors and the * eigenvalues are computed; if false, only the eigenvalues are * computed. * * This constructor calls compute() to compute the eigenvalues * and eigenvectors. 
* * Example: \include EigenSolver_EigenSolver_MatrixType.cpp * Output: \verbinclude EigenSolver_EigenSolver_MatrixType.out * * \sa compute() */ EigenSolver(const MatrixType& matrix, bool computeEigenvectors = true) : m_eivec(matrix.rows(), matrix.cols()), m_eivalues(matrix.cols()), m_isInitialized(false), m_eigenvectorsOk(false), m_realSchur(matrix.cols()), m_matT(matrix.rows(), matrix.cols()), m_tmp(matrix.cols()) { compute(matrix, computeEigenvectors); } /** \brief Returns the eigenvectors of given matrix. * * \returns %Matrix whose columns are the (possibly complex) eigenvectors. * * \pre Either the constructor * EigenSolver(const MatrixType&,bool) or the member function * compute(const MatrixType&, bool) has been called before, and * \p computeEigenvectors was set to true (the default). * * Column \f$ k \f$ of the returned matrix is an eigenvector corresponding * to eigenvalue number \f$ k \f$ as returned by eigenvalues(). The * eigenvectors are normalized to have (Euclidean) norm equal to one. The * matrix returned by this function is the matrix \f$ V \f$ in the * eigendecomposition \f$ A = V D V^{-1} \f$, if it exists. * * Example: \include EigenSolver_eigenvectors.cpp * Output: \verbinclude EigenSolver_eigenvectors.out * * \sa eigenvalues(), pseudoEigenvectors() */ EigenvectorsType eigenvectors() const; /** \brief Returns the pseudo-eigenvectors of given matrix. * * \returns Const reference to matrix whose columns are the pseudo-eigenvectors. * * \pre Either the constructor * EigenSolver(const MatrixType&,bool) or the member function * compute(const MatrixType&, bool) has been called before, and * \p computeEigenvectors was set to true (the default). * * The real matrix \f$ V \f$ returned by this function and the * block-diagonal matrix \f$ D \f$ returned by pseudoEigenvalueMatrix() * satisfy \f$ AV = VD \f$. 
* * Example: \include EigenSolver_pseudoEigenvectors.cpp * Output: \verbinclude EigenSolver_pseudoEigenvectors.out * * \sa pseudoEigenvalueMatrix(), eigenvectors() */ const MatrixType& pseudoEigenvectors() const { eigen_assert(m_isInitialized && "EigenSolver is not initialized."); eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues."); return m_eivec; } /** \brief Returns the block-diagonal matrix in the pseudo-eigendecomposition. * * \returns A block-diagonal matrix. * * \pre Either the constructor * EigenSolver(const MatrixType&,bool) or the member function * compute(const MatrixType&, bool) has been called before. * * The matrix \f$ D \f$ returned by this function is real and * block-diagonal. The blocks on the diagonal are either 1-by-1 or 2-by-2 * blocks of the form * \f$ \begin{bmatrix} u & v \\ -v & u \end{bmatrix} \f$. * These blocks are not sorted in any particular order. * The matrix \f$ D \f$ and the matrix \f$ V \f$ returned by * pseudoEigenvectors() satisfy \f$ AV = VD \f$. * * \sa pseudoEigenvectors() for an example, eigenvalues() */ MatrixType pseudoEigenvalueMatrix() const; /** \brief Returns the eigenvalues of given matrix. * * \returns A const reference to the column vector containing the eigenvalues. * * \pre Either the constructor * EigenSolver(const MatrixType&,bool) or the member function * compute(const MatrixType&, bool) has been called before. * * The eigenvalues are repeated according to their algebraic multiplicity, * so there are as many eigenvalues as rows in the matrix. The eigenvalues * are not sorted in any particular order. 
* * Example: \include EigenSolver_eigenvalues.cpp * Output: \verbinclude EigenSolver_eigenvalues.out * * \sa eigenvectors(), pseudoEigenvalueMatrix(), * MatrixBase::eigenvalues() */ const EigenvalueType& eigenvalues() const { eigen_assert(m_isInitialized && "EigenSolver is not initialized."); return m_eivalues; } /** \brief Computes eigendecomposition of given matrix. * * \param[in] matrix Square matrix whose eigendecomposition is to be computed. * \param[in] computeEigenvectors If true, both the eigenvectors and the * eigenvalues are computed; if false, only the eigenvalues are * computed. * \returns Reference to \c *this * * This function computes the eigenvalues of the real matrix \p matrix. * The eigenvalues() function can be used to retrieve them. If * \p computeEigenvectors is true, then the eigenvectors are also computed * and can be retrieved by calling eigenvectors(). * * The matrix is first reduced to real Schur form using the RealSchur * class. The Schur decomposition is then used to compute the eigenvalues * and eigenvectors. * * The cost of the computation is dominated by the cost of the * Schur decomposition, which is very approximately \f$ 25n^3 \f$ * (where \f$ n \f$ is the size of the matrix) if \p computeEigenvectors * is true, and \f$ 10n^3 \f$ if \p computeEigenvectors is false. * * This method reuses of the allocated data in the EigenSolver object. * * Example: \include EigenSolver_compute.cpp * Output: \verbinclude EigenSolver_compute.out */ EigenSolver& compute(const MatrixType& matrix, bool computeEigenvectors = true); /** \returns NumericalIssue if the input contains INF or NaN values or overflow occured. Returns Success otherwise. */ ComputationInfo info() const { eigen_assert(m_isInitialized && "EigenSolver is not initialized."); return m_info; } /** \brief Sets the maximum number of iterations allowed. 
*/ EigenSolver& setMaxIterations(Index maxIters) { m_realSchur.setMaxIterations(maxIters); return *this; } /** \brief Returns the maximum number of iterations. */ Index getMaxIterations() { return m_realSchur.getMaxIterations(); } private: void doComputeEigenvectors(); protected: MatrixType m_eivec; EigenvalueType m_eivalues; bool m_isInitialized; bool m_eigenvectorsOk; ComputationInfo m_info; RealSchur<MatrixType> m_realSchur; MatrixType m_matT; typedef Matrix<Scalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> ColumnVectorType; ColumnVectorType m_tmp; }; template<typename MatrixType> MatrixType EigenSolver<MatrixType>::pseudoEigenvalueMatrix() const { eigen_assert(m_isInitialized && "EigenSolver is not initialized."); Index n = m_eivalues.rows(); MatrixType matD = MatrixType::Zero(n,n); for (Index i=0; i<n; ++i) { if (internal::isMuchSmallerThan(numext::imag(m_eivalues.coeff(i)), numext::real(m_eivalues.coeff(i)))) matD.coeffRef(i,i) = numext::real(m_eivalues.coeff(i)); else { matD.template block<2,2>(i,i) << numext::real(m_eivalues.coeff(i)), numext::imag(m_eivalues.coeff(i)), -numext::imag(m_eivalues.coeff(i)), numext::real(m_eivalues.coeff(i)); ++i; } } return matD; } template<typename MatrixType> typename EigenSolver<MatrixType>::EigenvectorsType EigenSolver<MatrixType>::eigenvectors() const { eigen_assert(m_isInitialized && "EigenSolver is not initialized."); eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues."); Index n = m_eivec.cols(); EigenvectorsType matV(n,n); for (Index j=0; j<n; ++j) { if (internal::isMuchSmallerThan(numext::imag(m_eivalues.coeff(j)), numext::real(m_eivalues.coeff(j))) || j+1==n) { // we have a real eigen value matV.col(j) = m_eivec.col(j).template cast<ComplexScalar>(); matV.col(j).normalize(); } else { // we have a pair of complex eigen values for (Index i=0; i<n; ++i) { matV.coeffRef(i,j) = ComplexScalar(m_eivec.coeff(i,j), m_eivec.coeff(i,j+1)); 
matV.coeffRef(i,j+1) = ComplexScalar(m_eivec.coeff(i,j), -m_eivec.coeff(i,j+1)); } matV.col(j).normalize(); matV.col(j+1).normalize(); ++j; } } return matV; } template<typename MatrixType> EigenSolver<MatrixType>& EigenSolver<MatrixType>::compute(const MatrixType& matrix, bool computeEigenvectors) { using std::sqrt; using std::abs; using std::max; using numext::isfinite; eigen_assert(matrix.cols() == matrix.rows()); // Reduce to real Schur form. m_realSchur.compute(matrix, computeEigenvectors); m_info = m_realSchur.info(); if (m_info == Success) { m_matT = m_realSchur.matrixT(); if (computeEigenvectors) m_eivec = m_realSchur.matrixU(); // Compute eigenvalues from matT m_eivalues.resize(matrix.cols()); Index i = 0; while (i < matrix.cols()) { if (i == matrix.cols() - 1 || m_matT.coeff(i+1, i) == Scalar(0)) { m_eivalues.coeffRef(i) = m_matT.coeff(i, i); if(!isfinite(m_eivalues.coeffRef(i))) { m_isInitialized = true; m_eigenvectorsOk = false; m_info = NumericalIssue; return *this; } ++i; } else { Scalar p = Scalar(0.5) * (m_matT.coeff(i, i) - m_matT.coeff(i+1, i+1)); Scalar z; // Compute z = sqrt(abs(p * p + m_matT.coeff(i+1, i) * m_matT.coeff(i, i+1))); // without overflow { Scalar t0 = m_matT.coeff(i+1, i); Scalar t1 = m_matT.coeff(i, i+1); Scalar maxval = (max)(abs(p),(max)(abs(t0),abs(t1))); t0 /= maxval; t1 /= maxval; Scalar p0 = p/maxval; z = maxval * sqrt(abs(p0 * p0 + t0 * t1)); } m_eivalues.coeffRef(i) = ComplexScalar(m_matT.coeff(i+1, i+1) + p, z); m_eivalues.coeffRef(i+1) = ComplexScalar(m_matT.coeff(i+1, i+1) + p, -z); if(!(isfinite(m_eivalues.coeffRef(i)) && isfinite(m_eivalues.coeffRef(i+1)))) { m_isInitialized = true; m_eigenvectorsOk = false; m_info = NumericalIssue; return *this; } i += 2; } } // Compute eigenvectors. if (computeEigenvectors) doComputeEigenvectors(); } m_isInitialized = true; m_eigenvectorsOk = computeEigenvectors; return *this; } // Complex scalar division. 
template<typename Scalar> std::complex<Scalar> cdiv(const Scalar& xr, const Scalar& xi, const Scalar& yr, const Scalar& yi) { using std::abs; Scalar r,d; if (abs(yr) > abs(yi)) { r = yi/yr; d = yr + r*yi; return std::complex<Scalar>((xr + r*xi)/d, (xi - r*xr)/d); } else { r = yr/yi; d = yi + r*yr; return std::complex<Scalar>((r*xr + xi)/d, (r*xi - xr)/d); } } template<typename MatrixType> void EigenSolver<MatrixType>::doComputeEigenvectors() { using std::abs; const Index size = m_eivec.cols(); const Scalar eps = NumTraits<Scalar>::epsilon(); // inefficient! this is already computed in RealSchur Scalar norm(0); for (Index j = 0; j < size; ++j) { norm += m_matT.row(j).segment((std::max)(j-1,Index(0)), size-(std::max)(j-1,Index(0))).cwiseAbs().sum(); } // Backsubstitute to find vectors of upper triangular form if (norm == 0.0) { return; } for (Index n = size-1; n >= 0; n--) { Scalar p = m_eivalues.coeff(n).real(); Scalar q = m_eivalues.coeff(n).imag(); // Scalar vector if (q == Scalar(0)) { Scalar lastr(0), lastw(0); Index l = n; m_matT.coeffRef(n,n) = 1.0; for (Index i = n-1; i >= 0; i--) { Scalar w = m_matT.coeff(i,i) - p; Scalar r = m_matT.row(i).segment(l,n-l+1).dot(m_matT.col(n).segment(l, n-l+1)); if (m_eivalues.coeff(i).imag() < 0.0) { lastw = w; lastr = r; } else { l = i; if (m_eivalues.coeff(i).imag() == 0.0) { if (w != 0.0) m_matT.coeffRef(i,n) = -r / w; else m_matT.coeffRef(i,n) = -r / (eps * norm); } else // Solve real equations { Scalar x = m_matT.coeff(i,i+1); Scalar y = m_matT.coeff(i+1,i); Scalar denom = (m_eivalues.coeff(i).real() - p) * (m_eivalues.coeff(i).real() - p) + m_eivalues.coeff(i).imag() * m_eivalues.coeff(i).imag(); Scalar t = (x * lastr - lastw * r) / denom; m_matT.coeffRef(i,n) = t; if (abs(x) > abs(lastw)) m_matT.coeffRef(i+1,n) = (-r - w * t) / x; else m_matT.coeffRef(i+1,n) = (-lastr - y * t) / lastw; } // Overflow control Scalar t = abs(m_matT.coeff(i,n)); if ((eps * t) * t > Scalar(1)) m_matT.col(n).tail(size-i) /= t; } } } else if 
(q < Scalar(0) && n > 0) // Complex vector { Scalar lastra(0), lastsa(0), lastw(0); Index l = n-1; // Last vector component imaginary so matrix is triangular if (abs(m_matT.coeff(n,n-1)) > abs(m_matT.coeff(n-1,n))) { m_matT.coeffRef(n-1,n-1) = q / m_matT.coeff(n,n-1); m_matT.coeffRef(n-1,n) = -(m_matT.coeff(n,n) - p) / m_matT.coeff(n,n-1); } else { std::complex<Scalar> cc = cdiv<Scalar>(0.0,-m_matT.coeff(n-1,n),m_matT.coeff(n-1,n-1)-p,q); m_matT.coeffRef(n-1,n-1) = numext::real(cc); m_matT.coeffRef(n-1,n) = numext::imag(cc); } m_matT.coeffRef(n,n-1) = 0.0; m_matT.coeffRef(n,n) = 1.0; for (Index i = n-2; i >= 0; i--) { Scalar ra = m_matT.row(i).segment(l, n-l+1).dot(m_matT.col(n-1).segment(l, n-l+1)); Scalar sa = m_matT.row(i).segment(l, n-l+1).dot(m_matT.col(n).segment(l, n-l+1)); Scalar w = m_matT.coeff(i,i) - p; if (m_eivalues.coeff(i).imag() < 0.0) { lastw = w; lastra = ra; lastsa = sa; } else { l = i; if (m_eivalues.coeff(i).imag() == RealScalar(0)) { std::complex<Scalar> cc = cdiv(-ra,-sa,w,q); m_matT.coeffRef(i,n-1) = numext::real(cc); m_matT.coeffRef(i,n) = numext::imag(cc); } else { // Solve complex equations Scalar x = m_matT.coeff(i,i+1); Scalar y = m_matT.coeff(i+1,i); Scalar vr = (m_eivalues.coeff(i).real() - p) * (m_eivalues.coeff(i).real() - p) + m_eivalues.coeff(i).imag() * m_eivalues.coeff(i).imag() - q * q; Scalar vi = (m_eivalues.coeff(i).real() - p) * Scalar(2) * q; if ((vr == 0.0) && (vi == 0.0)) vr = eps * norm * (abs(w) + abs(q) + abs(x) + abs(y) + abs(lastw)); std::complex<Scalar> cc = cdiv(x*lastra-lastw*ra+q*sa,x*lastsa-lastw*sa-q*ra,vr,vi); m_matT.coeffRef(i,n-1) = numext::real(cc); m_matT.coeffRef(i,n) = numext::imag(cc); if (abs(x) > (abs(lastw) + abs(q))) { m_matT.coeffRef(i+1,n-1) = (-ra - w * m_matT.coeff(i,n-1) + q * m_matT.coeff(i,n)) / x; m_matT.coeffRef(i+1,n) = (-sa - w * m_matT.coeff(i,n) - q * m_matT.coeff(i,n-1)) / x; } else { cc = cdiv(-lastra-y*m_matT.coeff(i,n-1),-lastsa-y*m_matT.coeff(i,n),lastw,q); 
m_matT.coeffRef(i+1,n-1) = numext::real(cc); m_matT.coeffRef(i+1,n) = numext::imag(cc); } } // Overflow control Scalar t = numext::maxi(abs(m_matT.coeff(i,n-1)),abs(m_matT.coeff(i,n))); if ((eps * t) * t > Scalar(1)) m_matT.block(i, n-1, size-i, 2) /= t; } } // We handled a pair of complex conjugate eigenvalues, so need to skip them both n--; } else { eigen_assert(0 && "Internal bug in EigenSolver (INF or NaN has not been detected)"); // this should not happen } } // Back transformation to get eigenvectors of original matrix for (Index j = size-1; j >= 0; j--) { m_tmp.noalias() = m_eivec.leftCols(j+1) * m_matT.col(j).segment(0, j+1); m_eivec.col(j) = m_tmp; } } } // end namespace Eigen #endif // EIGEN_EIGENSOLVER_H
apache-2.0
atpham256/azure-powershell
src/ServiceManagement/Services/Commands.Utilities/Websites/KuduRemoteClientBase.cs
2292
// ---------------------------------------------------------------------------------- // // Copyright Microsoft Corporation // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // http://www.apache.org/licenses/LICENSE-2.0 // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // ---------------------------------------------------------------------------------- using System; using System.Net; using System.Net.Http; using Microsoft.WindowsAzure.Commands.Common; using Microsoft.WindowsAzure.Commands.Utilities.Common; using Microsoft.Azure.Commands.Common.Authentication; using Microsoft.Azure.Commands.Common.Authentication.Factories; namespace Microsoft.WindowsAzure.Commands.Utilities.Websites { public abstract class KuduRemoteClientBase { /// <summary> /// Parameterless constructor for mocking /// </summary> protected KuduRemoteClientBase() { } protected KuduRemoteClientBase( string serviceUrl, ICredentials credentials = null, HttpMessageHandler handler = null) { if (serviceUrl == null) { throw new ArgumentNullException("serviceUrl"); } ServiceUrl = GeneralUtilities.EnsureTrailingSlash(serviceUrl); Credentials = credentials; if (credentials != null) { Client = AzureSession.Instance.ClientFactory.CreateHttpClient(serviceUrl, ClientFactory.CreateHttpClientHandler(serviceUrl, credentials)); } else { Client = AzureSession.Instance.ClientFactory.CreateHttpClient(serviceUrl, handler); } } public string ServiceUrl { get; private set; } public ICredentials Credentials { get; private set; } public HttpClient Client { get; private set; } } }
apache-2.0
dodsont/android-motion-detection
javadocs/com/jwetherell/motion_detection/detection/package-summary.html
6920
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> <!-- NewPage --> <html lang="en"> <head> <!-- Generated by javadoc (version 1.6.0_27) on Thu Dec 26 19:52:19 EST 2013 --> <title>com.jwetherell.motion_detection.detection</title> <meta name="date" content="2013-12-26"> <link rel="stylesheet" type="text/css" href="../../../../stylesheet.css" title="Style"> </head> <body> <script type="text/javascript"><!-- if (location.href.indexOf('is-external=true') == -1) { parent.document.title="com.jwetherell.motion_detection.detection"; } //--> </script> <noscript> <div>JavaScript is disabled on your browser.</div> </noscript> <!-- ========= START OF TOP NAVBAR ======= --> <div class="topNav"><a name="navbar_top"> <!-- --> </a><a href="#skip-navbar_top" title="Skip navigation links"></a><a name="navbar_top_firstrow"> <!-- --> </a> <ul class="navList" title="Navigation"> <li><a href="../../../../overview-summary.html">Overview</a></li> <li class="navBarCell1Rev">Package</li> <li>Class</li> <li><a href="package-tree.html">Tree</a></li> <li><a href="../../../../deprecated-list.html">Deprecated</a></li> <li><a href="../../../../index-all.html">Index</a></li> <li><a href="../../../../help-doc.html">Help</a></li> </ul> </div> <div class="subNav"> <ul class="navList"> <li><a href="../../../../com/jwetherell/motion_detection/data/package-summary.html">PREV PACKAGE</a></li> <li><a href="../../../../com/jwetherell/motion_detection/image/package-summary.html">NEXT PACKAGE</a></li> </ul> <ul class="navList"> <li><a href="../../../../index.html?com/jwetherell/motion_detection/detection/package-summary.html" target="_top">FRAMES</a></li> <li><a href="package-summary.html" target="_top">NO FRAMES</a></li> </ul> <ul class="navList" id="allclasses_navbar_top"> <li><a href="../../../../allclasses-noframe.html">All Classes</a></li> </ul> <div> <script type="text/javascript"><!-- allClassesLink = document.getElementById("allclasses_navbar_top"); 
if(window==top) { allClassesLink.style.display = "block"; } else { allClassesLink.style.display = "none"; } //--> </script> </div> <a name="skip-navbar_top"> <!-- --> </a></div> <!-- ========= END OF TOP NAVBAR ========= --> <div class="header"> <h1 title="Package" class="title">Package&nbsp;com.jwetherell.motion_detection.detection</h1> </div> <div class="contentContainer"> <ul class="blockList"> <li class="blockList"> <table class="packageSummary" border="0" cellpadding="3" cellspacing="0" summary="Interface Summary table, listing interfaces, and an explanation"> <caption><span>Interface Summary</span><span class="tabEnd">&nbsp;</span></caption> <tr> <th class="colFirst" scope="col">Interface</th> <th class="colLast" scope="col">Description</th> </tr> <tbody> <tr class="altColor"> <td class="colFirst"><a href="../../../../com/jwetherell/motion_detection/detection/IMotionDetection.html" title="interface in com.jwetherell.motion_detection.detection">IMotionDetection</a></td> <td class="colLast"> <div class="block">This interface is used to represent a class that can detect motion</div> </td> </tr> </tbody> </table> </li> <li class="blockList"> <table class="packageSummary" border="0" cellpadding="3" cellspacing="0" summary="Class Summary table, listing classes, and an explanation"> <caption><span>Class Summary</span><span class="tabEnd">&nbsp;</span></caption> <tr> <th class="colFirst" scope="col">Class</th> <th class="colLast" scope="col">Description</th> </tr> <tbody> <tr class="altColor"> <td class="colFirst"><a href="../../../../com/jwetherell/motion_detection/detection/AggregateLumaMotionDetection.html" title="class in com.jwetherell.motion_detection.detection">AggregateLumaMotionDetection</a></td> <td class="colLast"> <div class="block">This class is used to process integer arrays containing luma data and detects motion using an aggregate map.</div> </td> </tr> <tr class="rowColor"> <td class="colFirst"><a 
href="../../../../com/jwetherell/motion_detection/detection/Comparer.html" title="class in com.jwetherell.motion_detection.detection">Comparer</a></td> <td class="colLast"> <div class="block">This class is adapted from the web site below.</div> </td> </tr> <tr class="altColor"> <td class="colFirst"><a href="../../../../com/jwetherell/motion_detection/detection/LumaMotionDetection.html" title="class in com.jwetherell.motion_detection.detection">LumaMotionDetection</a></td> <td class="colLast"> <div class="block">This class is used to process integer arrays containing Luma data and detects motion.</div> </td> </tr> <tr class="rowColor"> <td class="colFirst"><a href="../../../../com/jwetherell/motion_detection/detection/RgbMotionDetection.html" title="class in com.jwetherell.motion_detection.detection">RgbMotionDetection</a></td> <td class="colLast"> <div class="block">This class is used to process integer arrays containing RGB data and detects motion.</div> </td> </tr> <tr class="altColor"> <td class="colFirst"><a href="../../../../com/jwetherell/motion_detection/detection/State.html" title="class in com.jwetherell.motion_detection.detection">State</a></td> <td class="colLast"> <div class="block">This class is adapted from the web site below.</div> </td> </tr> </tbody> </table> </li> </ul> </div> <!-- ======= START OF BOTTOM NAVBAR ====== --> <div class="bottomNav"><a name="navbar_bottom"> <!-- --> </a><a href="#skip-navbar_bottom" title="Skip navigation links"></a><a name="navbar_bottom_firstrow"> <!-- --> </a> <ul class="navList" title="Navigation"> <li><a href="../../../../overview-summary.html">Overview</a></li> <li class="navBarCell1Rev">Package</li> <li>Class</li> <li><a href="package-tree.html">Tree</a></li> <li><a href="../../../../deprecated-list.html">Deprecated</a></li> <li><a href="../../../../index-all.html">Index</a></li> <li><a href="../../../../help-doc.html">Help</a></li> </ul> </div> <div class="subNav"> <ul class="navList"> <li><a 
href="../../../../com/jwetherell/motion_detection/data/package-summary.html">PREV PACKAGE</a></li> <li><a href="../../../../com/jwetherell/motion_detection/image/package-summary.html">NEXT PACKAGE</a></li> </ul> <ul class="navList"> <li><a href="../../../../index.html?com/jwetherell/motion_detection/detection/package-summary.html" target="_top">FRAMES</a></li> <li><a href="package-summary.html" target="_top">NO FRAMES</a></li> </ul> <ul class="navList" id="allclasses_navbar_bottom"> <li><a href="../../../../allclasses-noframe.html">All Classes</a></li> </ul> <div> <script type="text/javascript"><!-- allClassesLink = document.getElementById("allclasses_navbar_bottom"); if(window==top) { allClassesLink.style.display = "block"; } else { allClassesLink.style.display = "none"; } //--> </script> </div> <a name="skip-navbar_bottom"> <!-- --> </a></div> <!-- ======== END OF BOTTOM NAVBAR ======= --> </body> </html>
apache-2.0
hengxin/cassandra
test/unit/org/apache/cassandra/utils/SerializationsTest.java
6082
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.cassandra.utils; import java.io.DataInputStream; import java.io.IOException; import org.junit.Assert; import org.junit.BeforeClass; import org.junit.Test; import org.apache.cassandra.AbstractSerializationsTester; import org.apache.cassandra.Util; import org.apache.cassandra.config.DatabaseDescriptor; import org.apache.cassandra.db.DecoratedKey; import org.apache.cassandra.db.marshal.Int32Type; import org.apache.cassandra.io.util.DataInputPlus.DataInputStreamPlus; import org.apache.cassandra.io.util.DataOutputStreamPlus; import org.apache.cassandra.dht.IPartitioner; import org.apache.cassandra.dht.Murmur3Partitioner; import java.io.File; import java.io.FileInputStream; public class SerializationsTest extends AbstractSerializationsTester { @BeforeClass public static void initDD() { DatabaseDescriptor.daemonInitialization(); } private static void testBloomFilterWrite(boolean offheap) throws IOException { IPartitioner partitioner = Util.testPartitioner(); try (IFilter bf = FilterFactory.getFilter(1000000, 0.0001, offheap)) { for (int i = 0; i < 100; i++) bf.add(partitioner.decorateKey(partitioner.getTokenFactory().toByteArray(partitioner.getRandomToken()))); try 
(DataOutputStreamPlus out = getOutput("3.0", "utils.BloomFilter.bin")) { FilterFactory.serialize(bf, out); } } } private static void testBloomFilterWrite1000(boolean offheap) throws IOException { try (IFilter bf = FilterFactory.getFilter(1000000, 0.0001, offheap)) { for (int i = 0; i < 1000; i++) bf.add(Util.dk(Int32Type.instance.decompose(i))); try (DataOutputStreamPlus out = getOutput("3.0", "utils.BloomFilter1000.bin")) { FilterFactory.serialize(bf, out); } } } @Test public void testBloomFilterRead1000() throws IOException { if (EXECUTE_WRITES) testBloomFilterWrite1000(true); try (DataInputStream in = getInput("3.0", "utils.BloomFilter1000.bin"); IFilter filter = FilterFactory.deserialize(in, true)) { boolean present; for (int i = 0 ; i < 1000 ; i++) { present = filter.isPresent(Util.dk(Int32Type.instance.decompose(i))); Assert.assertTrue(present); } for (int i = 1000 ; i < 2000 ; i++) { present = filter.isPresent(Util.dk(Int32Type.instance.decompose(i))); Assert.assertFalse(present); } } } @Test public void testBloomFilterTable() throws Exception { testBloomFilterTable("test/data/bloom-filter/la/foo/la-1-big-Filter.db"); } private static void testBloomFilterTable(String file) throws Exception { Murmur3Partitioner partitioner = new Murmur3Partitioner(); try (DataInputStream in = new DataInputStream(new FileInputStream(new File(file))); IFilter filter = FilterFactory.deserialize(in, true)) { for (int i = 1; i <= 10; i++) { DecoratedKey decoratedKey = partitioner.decorateKey(Int32Type.instance.decompose(i)); boolean present = filter.isPresent(decoratedKey); Assert.assertTrue(present); } int positives = 0; for (int i = 11; i <= 1000010; i++) { DecoratedKey decoratedKey = partitioner.decorateKey(Int32Type.instance.decompose(i)); boolean present = filter.isPresent(decoratedKey); if (present) positives++; } double fpr = positives; fpr /= 1000000; Assert.assertTrue(fpr <= 0.011d); } } private static void testEstimatedHistogramWrite() throws IOException { 
EstimatedHistogram hist0 = new EstimatedHistogram(); EstimatedHistogram hist1 = new EstimatedHistogram(5000); long[] offsets = new long[1000]; long[] data = new long[offsets.length + 1]; for (int i = 0; i < offsets.length; i++) { offsets[i] = i; data[i] = 10 * i; } data[offsets.length] = 100000; EstimatedHistogram hist2 = new EstimatedHistogram(offsets, data); try (DataOutputStreamPlus out = getOutput("utils.EstimatedHistogram.bin")) { EstimatedHistogram.serializer.serialize(hist0, out); EstimatedHistogram.serializer.serialize(hist1, out); EstimatedHistogram.serializer.serialize(hist2, out); } } @Test public void testEstimatedHistogramRead() throws IOException { if (EXECUTE_WRITES) testEstimatedHistogramWrite(); try (DataInputStreamPlus in = getInput("utils.EstimatedHistogram.bin")) { Assert.assertNotNull(EstimatedHistogram.serializer.deserialize(in)); Assert.assertNotNull(EstimatedHistogram.serializer.deserialize(in)); Assert.assertNotNull(EstimatedHistogram.serializer.deserialize(in)); } } }
apache-2.0
wjiangjay/origin
pkg/gitserver/initializer.go
5915
package gitserver import ( "fmt" "io/ioutil" "log" "net/http" "net/url" "os" "os/exec" "path/filepath" "regexp" "strings" "github.com/golang/glog" "github.com/openshift/origin/pkg/git" s2igit "github.com/openshift/source-to-image/pkg/scm/git" ) var lazyInitMatch = regexp.MustCompile("^/([^\\/]+?)/info/refs$") // lazyInitRepositoryHandler creates a handler that will initialize a Git repository // if it does not yet exist. func lazyInitRepositoryHandler(config *Config, handler http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != "GET" { handler.ServeHTTP(w, r) return } match := lazyInitMatch.FindStringSubmatch(r.URL.Path) if match == nil { handler.ServeHTTP(w, r) return } name := match[1] if name == "." || name == ".." { handler.ServeHTTP(w, r) return } if !strings.HasSuffix(name, ".git") { name += ".git" } path := filepath.Join(config.Home, name) _, err := os.Stat(path) if !os.IsNotExist(err) { handler.ServeHTTP(w, r) return } self := RepositoryURL(config, name, r) log.Printf("Lazily initializing bare repository %s", self.String()) defaultHooks, err := loadHooks(config.HookDirectory) if err != nil { log.Printf("error: unable to load default hooks: %v", err) http.Error(w, fmt.Sprintf("unable to initialize repository: %v", err), http.StatusInternalServerError) return } // TODO: capture init hook output for Git if _, err := newRepository(config, path, defaultHooks, self, nil); err != nil { log.Printf("error: unable to initialize repo %s: %v", path, err) http.Error(w, fmt.Sprintf("unable to initialize repository: %v", err), http.StatusInternalServerError) os.RemoveAll(path) return } eventCounter.WithLabelValues(name, "init").Inc() handler.ServeHTTP(w, r) }) } // RepositoryURL creates the public URL for the named git repo. If both config.URL and // request are nil, the returned URL will be nil. 
func RepositoryURL(config *Config, name string, r *http.Request) *url.URL { var url url.URL switch { case config.InternalURL != nil: url = *config.InternalURL case config.URL != nil: url = *config.URL case r != nil: url = *r.URL url.Host = r.Host url.Scheme = "http" default: return nil } url.Path = "/" + name url.RawQuery = "" url.Fragment = "" return &url } func newRepository(config *Config, path string, hooks map[string]string, self *url.URL, origin *s2igit.URL) ([]byte, error) { var out []byte repo := git.NewRepositoryForBinary(config.GitBinary) barePath := path if !strings.HasSuffix(barePath, ".git") { barePath += ".git" } aliasPath := strings.TrimSuffix(barePath, ".git") if origin != nil { if err := repo.CloneMirror(barePath, origin.StringNoFragment()); err != nil { return out, err } } else { if err := repo.Init(barePath, true); err != nil { return out, err } } if self != nil { if err := repo.AddLocalConfig(barePath, "gitserver.self.url", self.String()); err != nil { return out, err } } // remove all sample hooks, ignore errors here if files, err := ioutil.ReadDir(filepath.Join(barePath, "hooks")); err == nil { for _, file := range files { os.Remove(filepath.Join(barePath, "hooks", file.Name())) } } for name, hook := range hooks { dest := filepath.Join(barePath, "hooks", name) if err := os.Remove(dest); err != nil && !os.IsNotExist(err) { return out, err } glog.V(5).Infof("Creating hook symlink %s -> %s", dest, hook) if err := os.Symlink(hook, dest); err != nil { return out, err } } if initHook, ok := hooks["init"]; ok { glog.V(5).Infof("Init hook exists, invoking it") cmd := exec.Command(initHook) cmd.Dir = barePath result, err := cmd.CombinedOutput() glog.V(5).Infof("Init output:\n%s", result) if err != nil { return out, fmt.Errorf("init hook failed: %v\n%s", err, string(result)) } out = result } if err := os.Symlink(barePath, aliasPath); err != nil { return out, fmt.Errorf("cannot create alias path %s: %v", aliasPath, err) } return out, nil } // clone 
clones the provided git repositories func clone(config *Config) error { defaultHooks, err := loadHooks(config.HookDirectory) if err != nil { return err } errs := []error{} for name, v := range config.InitialClones { hooks := mergeHooks(defaultHooks, v.Hooks) path := filepath.Join(config.Home, name) ok, err := git.IsBareRoot(path) if err != nil { errs = append(errs, err) continue } if ok { if !config.CleanBeforeClone { continue } log.Printf("Removing %s", path) if err := os.RemoveAll(path); err != nil { errs = append(errs, err) continue } } log.Printf("Cloning %s into %s", v.URL.StringNoFragment(), path) self := RepositoryURL(config, name, nil) if _, err := newRepository(config, path, hooks, self, &v.URL); err != nil { // TODO: tear this directory down errs = append(errs, err) continue } } if len(errs) > 0 { s := []string{} for _, err := range errs { s = append(s, err.Error()) } return fmt.Errorf("initial clone failed:\n* %s", strings.Join(s, "\n* ")) } return nil } func loadHooks(path string) (map[string]string, error) { glog.V(5).Infof("Loading hooks from directory %s", path) hooks := make(map[string]string) if len(path) == 0 { return hooks, nil } files, err := ioutil.ReadDir(path) if err != nil { return nil, err } for _, file := range files { if file.IsDir() || (file.Mode().Perm()&0111) == 0 { continue } hook := filepath.Join(path, file.Name()) name := filepath.Base(hook) glog.V(5).Infof("Adding hook %s at %s", name, hook) hooks[name] = hook } return hooks, nil } func mergeHooks(hooks ...map[string]string) map[string]string { hook := make(map[string]string) for _, m := range hooks { for k, v := range m { hook[k] = v } } return hook }
apache-2.0
biwoodfengs/AndroidDemoProjects
VectorDrawables/app/src/androidTest/java/com/tutsplus/vectordrawables/ApplicationTest.java
359
package com.tutsplus.vectordrawables; import android.app.Application; import android.test.ApplicationTestCase; /** * <a href="http://d.android.com/tools/testing/testing_android.html">Testing Fundamentals</a> */ public class ApplicationTest extends ApplicationTestCase<Application> { public ApplicationTest() { super(Application.class); } }
apache-2.0
keycloak/keycloak
testsuite/integration-arquillian/tests/base/src/main/java/org/keycloak/testsuite/util/LDAPRule.java
15403
/* * Copyright 2016 Red Hat, Inc. and/or its affiliates * and other contributors as indicated by the @author tags. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.keycloak.testsuite.util; import org.jboss.logging.Logger; import org.junit.Assume; import org.junit.runners.model.Statement; import org.junit.runner.Description; import org.junit.rules.ExternalResource; import org.keycloak.models.LDAPConstants; import org.keycloak.util.ldap.LDAPEmbeddedServer; import java.io.File; import java.lang.annotation.Annotation; import java.lang.annotation.ElementType; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; import java.util.Map; import java.util.Properties; import static org.keycloak.testsuite.utils.io.IOUtil.PROJECT_BUILD_DIRECTORY; /** * @author <a href="mailto:[email protected]">Marek Posolda</a> */ public class LDAPRule extends ExternalResource { private static final Logger log = Logger.getLogger(LDAPRule.class); // Note: Be sure to annotate the testing class with the "EnableVault" annotation // to get the necessary FilePlaintext vault created automatically for the test private static final String VAULT_EXPRESSION = "${vault.ldap_bindCredential}"; public static final String LDAP_CONNECTION_PROPERTIES_LOCATION = "classpath:ldap/ldap-connection.properties"; private static final String PROPERTY_ENABLE_ACCESS_CONTROL = "enableAccessControl"; private static final String PROPERTY_ENABLE_ANONYMOUS_ACCESS 
= "enableAnonymousAccess"; private static final String PROPERTY_ENABLE_SSL = "enableSSL"; private static final String PROPERTY_ENABLE_STARTTLS = "enableStartTLS"; private static final String PROPERTY_KEYSTORE_FILE = "keystoreFile"; private static final String PRIVATE_KEY = "dependency/keystore/keycloak.jks"; private static final String PROPERTY_CERTIFICATE_PASSWORD = "certificatePassword"; LDAPTestConfiguration ldapTestConfiguration; private LDAPEmbeddedServer ldapEmbeddedServer; private LDAPAssume assume; protected Properties defaultProperties = new Properties(); public LDAPRule assumeTrue(LDAPAssume assume) { this.assume = assume; return this; } @Override protected void before() throws Throwable { String connectionPropsLocation = getConnectionPropertiesLocation(); ldapTestConfiguration = LDAPTestConfiguration.readConfiguration(connectionPropsLocation); Assume.assumeTrue("Assumption in LDAPRule is false. Skiping the test", assume==null || assume.assumeTrue(ldapTestConfiguration)); if (ldapTestConfiguration.isStartEmbeddedLdapServer()) { ldapEmbeddedServer = createServer(); ldapEmbeddedServer.init(); ldapEmbeddedServer.start(); } } @Override public Statement apply(Statement base, Description description) { // Default bind credential value defaultProperties.setProperty(LDAPConstants.BIND_CREDENTIAL, "secret"); // Default values of the authentication / access control method and connection encryption to use on the embedded // LDAP server upon start if not (re)set later via the LDAPConnectionParameters annotation directly on the test defaultProperties.setProperty(LDAPEmbeddedServer.PROPERTY_ENABLE_ACCESS_CONTROL, "true"); defaultProperties.setProperty(LDAPEmbeddedServer.PROPERTY_ENABLE_ANONYMOUS_ACCESS, "false"); defaultProperties.setProperty(LDAPEmbeddedServer.PROPERTY_ENABLE_SSL, "true"); defaultProperties.setProperty(LDAPEmbeddedServer.PROPERTY_ENABLE_STARTTLS, "false"); // Default LDAP server confidentiality required value 
defaultProperties.setProperty(LDAPEmbeddedServer.PROPERTY_SET_CONFIDENTIALITY_REQUIRED, "false"); // Don't auto-update LDAP connection URL read from properties file for LDAP over SSL case even if it's wrong // (AKA don't try to guess, let the user to get it corrected in the properties file first) defaultProperties.setProperty("AUTO_UPDATE_LDAP_CONNECTION_URL", "false"); Annotation ldapConnectionAnnotation = description.getAnnotation(LDAPConnectionParameters.class); if (ldapConnectionAnnotation != null) { // Mark the LDAP connection URL as auto-adjustable to correspond to specific annotation as necessary defaultProperties.setProperty("AUTO_UPDATE_LDAP_CONNECTION_URL", "true"); LDAPConnectionParameters connectionParameters = (LDAPConnectionParameters) ldapConnectionAnnotation; // Configure the bind credential type of the LDAP rule depending on the provided annotation arguments switch (connectionParameters.bindCredential()) { case SECRET: log.debug("Setting bind credential to secret."); defaultProperties.setProperty(LDAPConstants.BIND_CREDENTIAL, "secret"); break; case VAULT: log.debug("Setting bind credential to vault."); defaultProperties.setProperty(LDAPConstants.BIND_CREDENTIAL, VAULT_EXPRESSION); break; } // Configure the authentication method of the LDAP rule depending on the provided annotation arguments switch (connectionParameters.bindType()) { case NONE: log.debug("Enabling anonymous authentication method on the LDAP server."); defaultProperties.setProperty(LDAPEmbeddedServer.PROPERTY_ENABLE_ANONYMOUS_ACCESS, "true"); defaultProperties.setProperty(LDAPEmbeddedServer.PROPERTY_ENABLE_ACCESS_CONTROL, "false"); break; case SIMPLE: log.debug("Disabling anonymous authentication method on the LDAP server."); defaultProperties.setProperty(LDAPEmbeddedServer.PROPERTY_ENABLE_ANONYMOUS_ACCESS, "false"); defaultProperties.setProperty(LDAPEmbeddedServer.PROPERTY_ENABLE_ACCESS_CONTROL, "true"); break; } // Configure the connection encryption of the LDAP rule depending on 
the provided annotation arguments switch (connectionParameters.encryption()) { case NONE: log.debug("Disabling connection encryption on the LDAP server."); defaultProperties.setProperty(LDAPEmbeddedServer.PROPERTY_ENABLE_SSL, "false"); defaultProperties.setProperty(LDAPEmbeddedServer.PROPERTY_ENABLE_STARTTLS, "false"); break; case SSL: log.debug("Enabling SSL connection encryption on the LDAP server."); defaultProperties.setProperty(LDAPEmbeddedServer.PROPERTY_ENABLE_SSL, "true"); // Require the LDAP server to accept only secured connections with SSL enabled log.debug("Configuring the LDAP server to accepts only requests with a secured connection."); defaultProperties.setProperty(LDAPEmbeddedServer.PROPERTY_SET_CONFIDENTIALITY_REQUIRED, "true"); defaultProperties.setProperty(LDAPEmbeddedServer.PROPERTY_ENABLE_STARTTLS, "false"); break; case STARTTLS: log.debug("Enabling StartTLS connection encryption on the LDAP server."); defaultProperties.setProperty(LDAPEmbeddedServer.PROPERTY_ENABLE_STARTTLS, "true"); // Require the LDAP server to accept only secured connections with StartTLS enabled log.debug("Configuring the LDAP server to accepts only requests with a secured connection."); defaultProperties.setProperty(LDAPEmbeddedServer.PROPERTY_SET_CONFIDENTIALITY_REQUIRED, "true"); defaultProperties.setProperty(LDAPEmbeddedServer.PROPERTY_ENABLE_SSL, "false"); break; } } return super.apply(base, description); } @Override protected void after() { try { if (ldapEmbeddedServer != null) { ldapEmbeddedServer.stop(); ldapEmbeddedServer = null; ldapTestConfiguration = null; } } catch (Exception e) { throw new RuntimeException("Error tearDown Embedded LDAP server.", e); } } protected String getConnectionPropertiesLocation() { return LDAP_CONNECTION_PROPERTIES_LOCATION; } protected LDAPEmbeddedServer createServer() { defaultProperties.setProperty(LDAPEmbeddedServer.PROPERTY_DSF, LDAPEmbeddedServer.DSF_INMEMORY); defaultProperties.setProperty(LDAPEmbeddedServer.PROPERTY_LDIF_FILE, 
"classpath:ldap/users.ldif"); defaultProperties.setProperty(PROPERTY_CERTIFICATE_PASSWORD, "secret"); defaultProperties.setProperty(PROPERTY_KEYSTORE_FILE, new File(PROJECT_BUILD_DIRECTORY, PRIVATE_KEY).getAbsolutePath()); return new LDAPEmbeddedServer(defaultProperties); } public Map<String, String> getConfig() { Map<String, String> config = ldapTestConfiguration.getLDAPConfig(); String ldapConnectionUrl = config.get(LDAPConstants.CONNECTION_URL); if (ldapConnectionUrl != null && defaultProperties.getProperty("AUTO_UPDATE_LDAP_CONNECTION_URL").equals("true")) { if ( ldapConnectionUrl.startsWith("ldap://") && defaultProperties.getProperty(LDAPEmbeddedServer.PROPERTY_ENABLE_SSL).equals("true") ) { // Switch protocol prefix to "ldaps://" in connection URL if LDAP over SSL is requested String updatedUrl = ldapConnectionUrl.replaceAll("ldap://", "ldaps://"); // Flip port number from LDAP to LDAPS updatedUrl = updatedUrl.replaceAll( String.valueOf(ldapEmbeddedServer.getBindPort()), String.valueOf(ldapEmbeddedServer.getBindLdapsPort()) ); config.put(LDAPConstants.CONNECTION_URL, updatedUrl); log.debugf("Using LDAP over SSL \"%s\" connection URL form over: \"%s\" since SSL connection was requested.", updatedUrl, ldapConnectionUrl); } if ( ldapConnectionUrl.startsWith("ldaps://") && !defaultProperties.getProperty(LDAPEmbeddedServer.PROPERTY_ENABLE_SSL).equals("true") ) { // Switch protocol prefix back to "ldap://" in connection URL if LDAP over SSL flag is not set String updatedUrl = ldapConnectionUrl.replaceAll("ldaps://", "ldap://"); // Flip port number from LDAPS to LDAP updatedUrl = updatedUrl.replaceAll( String.valueOf(ldapEmbeddedServer.getBindLdapsPort()), String.valueOf(ldapEmbeddedServer.getBindPort()) ); config.put(LDAPConstants.CONNECTION_URL, updatedUrl); log.debugf("Using plaintext / startTLS \"%s\" connection URL form over: \"%s\" since plaintext / startTLS connection was requested.", updatedUrl, ldapConnectionUrl); } } switch 
(defaultProperties.getProperty(LDAPConstants.BIND_CREDENTIAL)) { case VAULT_EXPRESSION: config.put(LDAPConstants.BIND_CREDENTIAL, VAULT_EXPRESSION); break; } switch (defaultProperties.getProperty(LDAPEmbeddedServer.PROPERTY_ENABLE_ANONYMOUS_ACCESS)) { case "true": config.put(LDAPConstants.AUTH_TYPE, LDAPConstants.AUTH_TYPE_NONE); break; default: // Default to username + password LDAP authentication method config.put(LDAPConstants.AUTH_TYPE, LDAPConstants.AUTH_TYPE_SIMPLE); } switch (defaultProperties.getProperty(LDAPEmbeddedServer.PROPERTY_ENABLE_STARTTLS)) { case "true": config.put(LDAPConstants.START_TLS, "true"); // Use truststore from TruststoreSPI also for StartTLS connections config.put(LDAPConstants.USE_TRUSTSTORE_SPI, LDAPConstants.USE_TRUSTSTORE_ALWAYS); break; default: // Default to startTLS disabled config.put(LDAPConstants.START_TLS, "false"); // By default use truststore from TruststoreSPI only for LDAP over SSL connections config.put(LDAPConstants.USE_TRUSTSTORE_SPI, LDAPConstants.USE_TRUSTSTORE_LDAPS_ONLY); } switch (defaultProperties.getProperty(LDAPEmbeddedServer.PROPERTY_SET_CONFIDENTIALITY_REQUIRED)) { case "true": System.setProperty("PROPERTY_SET_CONFIDENTIALITY_REQUIRED", "true"); break; default: // Configure the LDAP server to accept not secured connections from clients by default System.setProperty("PROPERTY_SET_CONFIDENTIALITY_REQUIRED", "false"); } return config; } public int getSleepTime() { return ldapTestConfiguration.getSleepTime(); } public LDAPEmbeddedServer getLdapEmbeddedServer() { return ldapEmbeddedServer; } /** Allows to run particular LDAP test just under specific conditions (eg. 
some test running just on Active Directory) **/ public interface LDAPAssume { boolean assumeTrue(LDAPTestConfiguration ldapConfig); } @Retention(RetentionPolicy.RUNTIME) @Target(ElementType.METHOD) public @interface LDAPConnectionParameters { // Default to secret as the bind credential unless annotated otherwise BindCredential bindCredential() default LDAPConnectionParameters.BindCredential.SECRET; // Disable anonymous LDAP authentication by default unless annotated otherwise BindType bindType() default LDAPConnectionParameters.BindType.SIMPLE; // Enable SSL encrypted LDAP connections (along with the unencrypted ones) by default unless annotated otherwise Encryption encryption() default LDAPConnectionParameters.Encryption.SSL; public enum BindCredential { SECRET, VAULT } public enum BindType { NONE, SIMPLE } public enum Encryption { NONE, // Important: Choosing either of "SSL" or "STARTTLS" connection encryption methods below // will also configure the LDAP server to accept only a secured connection from clients // (IOW plaintext client connections will be prohibited). Use those two options with care! SSL, STARTTLS } } }
apache-2.0
GlenRSmith/elasticsearch
server/src/main/java/org/elasticsearch/search/aggregations/pipeline/SerialDiffPipelineAggregator.java
4241
/* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one * or more contributor license agreements. Licensed under the Elastic License * 2.0 and the Server Side Public License, v 1; you may not use this file except * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. */ package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.collect.EvictingQueue; import org.elasticsearch.core.Nullable; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.AggregationReduceContext; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket; import org.elasticsearch.search.aggregations.bucket.histogram.HistogramFactory; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.stream.Collectors; import java.util.stream.StreamSupport; import static org.elasticsearch.search.aggregations.pipeline.BucketHelpers.resolveBucketValue; public class SerialDiffPipelineAggregator extends PipelineAggregator { private DocValueFormat formatter; private GapPolicy gapPolicy; private int lag; SerialDiffPipelineAggregator( String name, String[] bucketsPaths, @Nullable DocValueFormat formatter, GapPolicy gapPolicy, int lag, Map<String, Object> metadata ) { super(name, bucketsPaths, metadata); this.formatter = formatter; this.gapPolicy = gapPolicy; this.lag = lag; } @Override public InternalAggregation reduce(InternalAggregation aggregation, AggregationReduceContext reduceContext) { @SuppressWarnings("rawtypes") InternalMultiBucketAggregation< ? extends InternalMultiBucketAggregation, ? 
extends InternalMultiBucketAggregation.InternalBucket> histo = (InternalMultiBucketAggregation< ? extends InternalMultiBucketAggregation, ? extends InternalMultiBucketAggregation.InternalBucket>) aggregation; List<? extends InternalMultiBucketAggregation.InternalBucket> buckets = histo.getBuckets(); HistogramFactory factory = (HistogramFactory) histo; List<Bucket> newBuckets = new ArrayList<>(); EvictingQueue<Double> lagWindow = new EvictingQueue<>(lag); int counter = 0; for (InternalMultiBucketAggregation.InternalBucket bucket : buckets) { Double thisBucketValue = resolveBucketValue(histo, bucket, bucketsPaths()[0], gapPolicy); Bucket newBucket = bucket; counter += 1; // Still under the initial lag period, add nothing and move on Double lagValue; if (counter <= lag) { lagValue = Double.NaN; } else { lagValue = lagWindow.peek(); // Peek here, because we rely on add'ing to always move the window } // Normalize null's to NaN if (thisBucketValue == null) { thisBucketValue = Double.NaN; } // Both have values, calculate diff and replace the "empty" bucket if (Double.isNaN(thisBucketValue) == false && Double.isNaN(lagValue) == false) { double diff = thisBucketValue - lagValue; List<InternalAggregation> aggs = StreamSupport.stream(bucket.getAggregations().spliterator(), false) .map((p) -> (InternalAggregation) p) .collect(Collectors.toList()); aggs.add(new InternalSimpleValue(name(), diff, formatter, metadata())); newBucket = factory.createBucket(factory.getKey(bucket), bucket.getDocCount(), InternalAggregations.from(aggs)); } newBuckets.add(newBucket); lagWindow.add(thisBucketValue); } return factory.createAggregation(newBuckets); } }
apache-2.0
joshfriend/homebrew
Library/brew.rb
4159
std_trap = trap("INT") { exit! 130 } # no backtrace thanks require "pathname" HOMEBREW_LIBRARY_PATH = Pathname.new(__FILE__).realpath.parent.join("Homebrew") $:.unshift(HOMEBREW_LIBRARY_PATH.to_s) require "global" if ARGV == %w[--version] || ARGV == %w[-v] puts "Homebrew #{Homebrew.homebrew_version_string}" puts "Homebrew/homebrew-core #{Homebrew.core_tap_version_string}" exit 0 end if OS.mac? && MacOS.version < "10.6" abort <<-EOABORT.undent Homebrew requires Snow Leopard or higher. For Tiger and Leopard support, see: https://github.com/mistydemeo/tigerbrew EOABORT end def require?(path) require path rescue LoadError => e # HACK: ( because we should raise on syntax errors but # not if the file doesn't exist. TODO make robust! raise unless e.to_s.include? path end begin trap("INT", std_trap) # restore default CTRL-C handler empty_argv = ARGV.empty? help_flag_list = %w[-h --help --usage -? help] help_flag = false internal_cmd = true cmd = nil ARGV.dup.each_with_index do |arg, i| if help_flag && cmd break elsif help_flag_list.include? arg help_flag = true elsif !cmd cmd = ARGV.delete_at(i) end end # Add contributed commands to PATH before checking. Dir["#{HOMEBREW_LIBRARY}/Taps/*/*/cmd"].each do |tap_cmd_dir| ENV["PATH"] += "#{File::PATH_SEPARATOR}#{tap_cmd_dir}" end # Add SCM wrappers. ENV["PATH"] += "#{File::PATH_SEPARATOR}#{HOMEBREW_SHIMS_PATH}/scm" if cmd internal_cmd = require? HOMEBREW_LIBRARY_PATH.join("cmd", cmd) if !internal_cmd && ARGV.homebrew_developer? internal_cmd = require? HOMEBREW_LIBRARY_PATH.join("dev-cmd", cmd) end end # Usage instructions should be displayed if and only if one of: # - a help flag is passed AND an internal command is matched # - a help flag is passed AND there is no command specified # - no arguments are passed # # It should never affect external commands so they can handle usage # arguments themselves. if empty_argv || (help_flag && (cmd.nil? 
|| internal_cmd)) # TODO: - `brew help cmd` should display subcommand help require "cmd/help" if empty_argv $stderr.puts ARGV.usage else puts ARGV.usage end exit ARGV.any? ? 0 : 1 end if internal_cmd Homebrew.send cmd.to_s.tr("-", "_").downcase elsif which "brew-#{cmd}" %w[CACHE LIBRARY_PATH].each do |e| ENV["HOMEBREW_#{e}"] = Object.const_get("HOMEBREW_#{e}").to_s end exec "brew-#{cmd}", *ARGV elsif (path = which("brew-#{cmd}.rb")) && require?(path) exit Homebrew.failed? ? 1 : 0 else require "tap" possible_tap = case cmd when "brewdle", "brewdler", "bundle", "bundler" Tap.fetch("Homebrew", "bundle") when "cask" Tap.fetch("caskroom", "cask") when "services" Tap.fetch("Homebrew", "services") end if possible_tap && !possible_tap.installed? brew_uid = HOMEBREW_BREW_FILE.stat.uid tap_commands = [] if Process.uid.zero? && !brew_uid.zero? tap_commands += %W[/usr/bin/sudo -u ##{brew_uid}] end tap_commands += %W[#{HOMEBREW_BREW_FILE} tap #{possible_tap}] safe_system *tap_commands exec HOMEBREW_BREW_FILE, cmd, *ARGV else onoe "Unknown command: #{cmd}" exit 1 end end rescue FormulaUnspecifiedError abort "This command requires a formula argument" rescue KegUnspecifiedError abort "This command requires a keg argument" rescue UsageError onoe "Invalid usage" abort ARGV.usage rescue SystemExit => e onoe "Kernel.exit" if ARGV.verbose? && !e.success? $stderr.puts e.backtrace if ARGV.debug? raise rescue Interrupt => e $stderr.puts # seemingly a newline is typical exit 130 rescue BuildError => e e.dump exit 1 rescue RuntimeError, SystemCallError => e raise if e.message.empty? onoe e $stderr.puts e.backtrace if ARGV.debug? exit 1 rescue Exception => e onoe e if internal_cmd $stderr.puts "#{Tty.white}Please report this bug:" $stderr.puts " #{Tty.em}#{OS::ISSUES_URL}#{Tty.reset}" end $stderr.puts e.backtrace exit 1 else exit 1 if Homebrew.failed? end
bsd-2-clause
NorthIsUp/homebrew-cask
Casks/querious.rb
255
class Querious < Cask version 'latest' sha256 :no_check url 'http://www.araelium.com/querious/downloads/Querious.dmg' appcast 'https://store.araelium.com/updates/querious' homepage 'http://www.araelium.com/querious/' link 'Querious.app' end
bsd-2-clause
bureau14/qdb-benchmark
thirdparty/boost/libs/hana/example/and.cpp
499
// Copyright Louis Dionne 2013-2016 // Distributed under the Boost Software License, Version 1.0. // (See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt) #include <boost/hana/and.hpp> #include <boost/hana/assert.hpp> #include <boost/hana/bool.hpp> namespace hana = boost::hana; BOOST_HANA_CONSTANT_CHECK(hana::and_(hana::true_c, hana::true_c, hana::true_c, hana::true_c)); static_assert(!hana::and_(hana::true_c, false, hana::true_c, hana::true_c), ""); int main() { }
bsd-2-clause
markYoungH/chromium.src
tools/perf/page_sets/big_js.py
935
# Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from telemetry.page import page as page_module from telemetry.page import page_set as page_set_module class BigJsPageSet(page_set_module.PageSet): """ Sites which load and run big JavaScript files.""" def __init__(self): super(BigJsPageSet, self).__init__( archive_data_file='data/big_js.json', bucket=page_set_module.PARTNER_BUCKET, user_agent_type='desktop') # Page sets with only one page don't work well, since we end up reusing a # renderer all the time and it keeps its memory caches alive (see # crbug.com/403735). Add a dummy second page here. urls_list = [ 'http://beta.unity3d.com/jonas/DT2/', 'http://www.foo.com', ] for url in urls_list: self.AddUserStory(page_module.Page(url, self))
bsd-3-clause
som-snytt/xsbt
sbt/src/sbt-test/source-dependencies/fbounded-existentials/fbounds.scala
495
class Dep { // The API representation for `bla`'s result type contains a cycle // (an existential's type variable's bound is the existential type itself) // This results in a stack overflow while showing the API diff. // Note that the actual result type in the compiler is not cyclic // (the f-bounded existential for Comparable is truncated) def bla(c: Boolean) = if (c) new Value else "bla" } class Value extends java.lang.Comparable[Value] { def compareTo(that: Value): Int = 1 }
bsd-3-clause
aYukiSekiguchi/ACCESS-Chromium
remoting/host/user_authenticator_mac.cc
2647
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "remoting/host/user_authenticator.h" #include <Security/Security.h> #include <string> #include "base/basictypes.h" #include "base/mac/mac_logging.h" namespace remoting { namespace { class UserAuthenticatorMac : public UserAuthenticator { public: UserAuthenticatorMac() {} virtual ~UserAuthenticatorMac() {} virtual bool Authenticate(const std::string& username, const std::string& password); private: DISALLOW_COPY_AND_ASSIGN(UserAuthenticatorMac); }; const char kAuthorizationRightName[] = "system.login.tty"; bool UserAuthenticatorMac::Authenticate(const std::string& username, const std::string& password) { // The authorization right being requested. This particular right allows // testing of a username/password, as if the user were logging on to the // system locally. AuthorizationItem right; right.name = kAuthorizationRightName; right.valueLength = 0; right.value = NULL; right.flags = 0; AuthorizationRights rights; rights.count = 1; rights.items = &right; // Passing the username/password as an "environment" parameter causes these // to be submitted to the Security Framework, instead of the interactive // password prompt appearing on the host system. Valid on OS X 10.4 and // later versions. 
AuthorizationItem environment_items[2]; environment_items[0].name = kAuthorizationEnvironmentUsername; environment_items[0].valueLength = username.size(); environment_items[0].value = const_cast<char*>(username.data()); environment_items[0].flags = 0; environment_items[1].name = kAuthorizationEnvironmentPassword; environment_items[1].valueLength = password.size(); environment_items[1].value = const_cast<char*>(password.data()); environment_items[1].flags = 0; AuthorizationEnvironment environment; environment.count = 2; environment.items = environment_items; OSStatus status = AuthorizationCreate(&rights, &environment, kAuthorizationFlagExtendRights, NULL); switch (status) { case errAuthorizationSuccess: return true; case errAuthorizationDenied: return false; default: OSSTATUS_LOG(ERROR, status) << "AuthorizationCreate"; return false; } } } // namespace // static UserAuthenticator* UserAuthenticator::Create() { return new UserAuthenticatorMac(); } } // namespace remoting
bsd-3-clause
Chilledheart/chromium
chrome/test/data/extensions/api_test/messaging/externally_connectable/sites/assertions.js
11033
// Copyright 2013 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. (function() { // We are going to kill all of the builtins, so hold onto the ones we need. var defineGetter = Object.prototype.__defineGetter__; var defineSetter = Object.prototype.__defineSetter__; var Error = window.Error; var forEach = Array.prototype.forEach; var push = Array.prototype.push; var hasOwnProperty = Object.prototype.hasOwnProperty; var getOwnPropertyNames = Object.getOwnPropertyNames; var stringify = JSON.stringify; // Kill all of the builtins functions to give us a fairly high confidence that // the environment our bindings run in can't interfere with our code. // These are taken from the ECMAScript spec. var builtinTypes = [ Object, Function, Array, String, Boolean, Number, Math, Date, RegExp, JSON, ]; function clobber(obj, name, qualifiedName) { // Clobbering constructors would break everything. // Clobbering toString is annoying. // Clobbering __proto__ breaks in ways that grep can't find. // Clobbering function name will break because // SafeBuiltins does not support getters yet. See crbug.com/463526. // Clobbering Function.call would make it impossible to implement these tests. // Clobbering Object.valueOf breaks v8. // Clobbering %FunctionPrototype%.caller and .arguments will break because // these properties are poisoned accessors in ES6. 
if (name == 'constructor' || name == 'toString' || name == '__proto__' || name == 'name' && typeof obj == 'function' || qualifiedName == 'Function.call' || (obj !== Function && qualifiedName == 'Function.caller') || (obj !== Function && qualifiedName == 'Function.arguments') || qualifiedName == 'Object.valueOf') { return; } if (typeof obj[name] == 'function') { obj[name] = function() { throw new Error('Clobbered ' + qualifiedName + ' function'); }; } else { defineGetter.call(obj, name, function() { throw new Error('Clobbered ' + qualifiedName + ' getter'); }); } } forEach.call(builtinTypes, function(builtin) { var prototype = builtin.prototype; var typename = '<unknown>'; if (prototype) { typename = prototype.constructor.name; forEach.call(getOwnPropertyNames(prototype), function(name) { clobber(prototype, name, typename + '.' + name); }); } forEach.call(getOwnPropertyNames(builtin), function(name) { clobber(builtin, name, typename + '.' + name); }); if (builtin.name) clobber(window, builtin.name, 'window.' + builtin.name); }); // Codes for test results. Must match ExternallyConnectableMessagingTest::Result // in c/b/extensions/extension_messages_apitest.cc. var results = { OK: 0, NAMESPACE_NOT_DEFINED: 1, FUNCTION_NOT_DEFINED: 2, COULD_NOT_ESTABLISH_CONNECTION_ERROR: 3, OTHER_ERROR: 4, INCORRECT_RESPONSE_SENDER: 5, INCORRECT_RESPONSE_MESSAGE: 6, }; // Make the messages sent vaguely complex, but unambiguously JSON-ifiable. var kMessage = [{'a': {'b': 10}}, 20, 'c\x10\x11']; // Our tab's location. Normally this would be our document's location but if // we're an iframe it will be the location of the parent - in which case, // expect to be told. 
var tabLocationHref = null; if (parent == window) { tabLocationHref = document.location.href; } else { window.addEventListener('message', function listener(event) { window.removeEventListener('message', listener); tabLocationHref = event.data; }); } function checkLastError(reply) { if (!chrome.runtime.lastError) return true; var kCouldNotEstablishConnection = 'Could not establish connection. Receiving end does not exist.'; if (chrome.runtime.lastError.message == kCouldNotEstablishConnection) reply(results.COULD_NOT_ESTABLISH_CONNECTION_ERROR); else reply(results.OTHER_ERROR); return false; } function checkResponse(response, reply, expectedMessage, isApp) { // The response will be an echo of both the original message *and* the // MessageSender (with the tab field stripped down). // // First check the sender was correct. var incorrectSender = false; if (!isApp) { // Only extensions get access to a 'tab' property. if (!hasOwnProperty.call(response.sender, 'tab')) { console.warn('Expected a tab, got none'); incorrectSender = true; } if (response.sender.tab.url != tabLocationHref) { console.warn('Expected tab url ' + tabLocationHref + ' got ' + response.sender.tab.url); incorrectSender = true; } } if (hasOwnProperty.call(response.sender, 'id')) { console.warn('Expected no id, got "' + response.sender.id + '"'); incorrectSender = true; } if (response.sender.url != document.location.href) { console.warn('Expected url ' + document.location.href + ' got ' + response.sender.url); incorrectSender = true; } if (incorrectSender) { reply(results.INCORRECT_RESPONSE_SENDER); return false; } // Check the correct content was echoed. 
var expectedJson = stringify(expectedMessage); var actualJson = stringify(response.message); if (actualJson == expectedJson) return true; console.warn('Expected message ' + expectedJson + ' got ' + actualJson); reply(results.INCORRECT_RESPONSE_MESSAGE); return false; } function sendToBrowser(msg) { domAutomationController.send(msg); } function sendToBrowserForTlsChannelId(result) { // Because the TLS channel ID tests read the TLS either an error code or the // TLS channel ID string from the same value, they require the result code // to be sent as a string. // String() is clobbered, so coerce string creation with +. sendToBrowser("" + result); } function checkRuntime(reply) { if (!reply) reply = sendToBrowser; if (!chrome.runtime) { reply(results.NAMESPACE_NOT_DEFINED); return false; } if (!chrome.runtime.connect || !chrome.runtime.sendMessage) { reply(results.FUNCTION_NOT_DEFINED); return false; } return true; } function checkRuntimeForTlsChannelId() { return checkRuntime(sendToBrowserForTlsChannelId); } function checkTlsChannelIdResponse(response) { if (chrome.runtime.lastError) { if (chrome.runtime.lastError.message == kCouldNotEstablishConnection) sendToBrowserForTlsChannelId( results.COULD_NOT_ESTABLISH_CONNECTION_ERROR); else sendToBrowserForTlsChannelId(results.OTHER_ERROR); return; } if (response.sender.tlsChannelId !== undefined) sendToBrowserForTlsChannelId(response.sender.tlsChannelId); else sendToBrowserForTlsChannelId(''); } window.actions = { appendIframe: function(src) { var iframe = document.createElement('iframe'); // When iframe has loaded, notify it of our tab location (probably // document.location) to use in its assertions, then continue. 
iframe.addEventListener('load', function listener() { iframe.removeEventListener('load', listener); iframe.contentWindow.postMessage(tabLocationHref, '*'); sendToBrowser(true); }); iframe.src = src; document.body.appendChild(iframe); } }; window.assertions = { canConnectAndSendMessages: function(extensionId, isApp, message) { if (!checkRuntime()) return; if (!message) message = kMessage; function canSendMessage(reply) { chrome.runtime.sendMessage(extensionId, message, function(response) { if (checkLastError(reply) && checkResponse(response, reply, message, isApp)) { reply(results.OK); } }); } function canConnectAndSendMessages(reply) { var port = chrome.runtime.connect(extensionId); port.postMessage(message, function() { checkLastError(reply); }); port.postMessage(message, function() { checkLastError(reply); }); var pendingResponses = 2; var ok = true; port.onMessage.addListener(function(response) { pendingResponses--; ok = ok && checkLastError(reply) && checkResponse(response, reply, message, isApp); if (pendingResponses == 0 && ok) reply(results.OK); }); } canSendMessage(function(result) { if (result != results.OK) sendToBrowser(result); else canConnectAndSendMessages(sendToBrowser); }); }, trySendMessage: function(extensionId) { chrome.runtime.sendMessage(extensionId, kMessage, function(response) { // The result is unimportant. All that matters is the attempt. }); }, tryIllegalArguments: function() { // Tests that illegal arguments to messaging functions throw exceptions. // Regression test for crbug.com/472700, where they crashed the renderer. 
function runIllegalFunction(fun) { try { fun(); } catch(e) { return true; } console.error('Function did not throw exception: ' + fun); sendToBrowser(false); return false; } var result = runIllegalFunction(chrome.runtime.connect) && runIllegalFunction(function() { chrome.runtime.connect(''); }) && runIllegalFunction(function() { chrome.runtime.connect(42); }) && runIllegalFunction(function() { chrome.runtime.connect('', 42); }) && runIllegalFunction(function() { chrome.runtime.connect({name: 'noname'}); }) && runIllegalFunction(chrome.runtime.sendMessage) && runIllegalFunction(function() { chrome.runtime.sendMessage(''); }) && runIllegalFunction(function() { chrome.runtime.sendMessage(42); }) && runIllegalFunction(function() { chrome.runtime.sendMessage('', 42); }) && sendToBrowser(true); }, areAnyRuntimePropertiesDefined: function(names) { var result = false; if (chrome.runtime) { forEach.call(names, function(name) { if (chrome.runtime[name]) { console.log('runtime.' + name + ' is defined'); result = true; } }); } sendToBrowser(result); }, getTlsChannelIdFromPortConnect: function(extensionId, includeTlsChannelId, message) { if (!checkRuntimeForTlsChannelId()) return; if (!message) message = kMessage; var port = chrome.runtime.connect(extensionId, {'includeTlsChannelId': includeTlsChannelId}); port.onMessage.addListener(checkTlsChannelIdResponse); port.postMessage(message); }, getTlsChannelIdFromSendMessage: function(extensionId, includeTlsChannelId, message) { if (!checkRuntimeForTlsChannelId()) return; if (!message) message = kMessage; chrome.runtime.sendMessage(extensionId, message, {'includeTlsChannelId': includeTlsChannelId}, checkTlsChannelIdResponse); } }; }());
bsd-3-clause
axinging/chromium-crosswalk
sync/engine/backoff_delay_provider.h
1935
// Copyright 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef SYNC_ENGINE_BACKOFF_DELAY_PROVIDER_H_ #define SYNC_ENGINE_BACKOFF_DELAY_PROVIDER_H_ #include "base/macros.h" #include "base/time/time.h" #include "sync/base/sync_export.h" namespace syncer { namespace sessions { struct ModelNeutralState; } // A component used to get time delays associated with exponential backoff. class SYNC_EXPORT BackoffDelayProvider { public: // Factory function to create a standard BackoffDelayProvider. static BackoffDelayProvider* FromDefaults(); // Similar to above, but causes sync to retry very quickly (see // polling_constants.h) when it encounters an error before exponential // backoff. // // *** NOTE *** This should only be used if kSyncShortInitialRetryOverride // was passed to command line. static BackoffDelayProvider* WithShortInitialRetryOverride(); virtual ~BackoffDelayProvider(); // DDOS avoidance function. Calculates how long we should wait before trying // again after a failed sync attempt, where the last delay was |base_delay|. // TODO(tim): Look at URLRequestThrottlerEntryInterface. virtual base::TimeDelta GetDelay(const base::TimeDelta& last_delay); // Helper to calculate the initial value for exponential backoff. // See possible values and comments in polling_constants.h. virtual base::TimeDelta GetInitialDelay( const sessions::ModelNeutralState& state) const; protected: BackoffDelayProvider(const base::TimeDelta& default_initial_backoff, const base::TimeDelta& short_initial_backoff); private: const base::TimeDelta default_initial_backoff_; const base::TimeDelta short_initial_backoff_; DISALLOW_COPY_AND_ASSIGN(BackoffDelayProvider); }; } // namespace syncer #endif // SYNC_ENGINE_BACKOFF_DELAY_PROVIDER_H_
bsd-3-clause
Honry/crosswalk-test-suite
webapi/webapi-appsecurity-external-tests/appsecurityapi/AppSecurityApi_getCreator_correct_instance.html
2771
<!DOCTYPE html> <!-- Copyright (c) 2015 Intel Corporation. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of works must retain the original copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the original copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this work without specific prior written permission. THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
Authors: Zhu, YongyongX <[email protected]> --> <meta charset='utf-8'> <title>Secure Data Test: AppSecurityApi_getCreator_correct_instance</title> <link rel="author" title="Intel" href="http://www.intel.com/"> <link rel="help" href="https://software.intel.com/en-us/app-security-api/api"> <script src="../resources/testharness.js"></script> <script src="../resources/testharnessreport.js"></script> <script src="../../../cordova.js"></script> <script src="js/appSecurityApi.js"></script> <script src="js/q.js"></script> <div id="log"></div> <script type="text/javascript"> setTimeout(function() { test(); }, 500); function test() { async_test(function(t) { intel.security.secureData.createFromData({'data': 'plaintext data', 'tag': 'Test', 'extraKey': 0, 'appAccessControl': 0, 'deviceLocality': 0, 'sensitivityLevel': 0, 'noStore': false, 'noRead': false, 'creator': 0, 'owners': [0], 'webOwners': []}) .then(intel.security.secureData.getCreator) .then(function(creator) { t.step(function() { assert_equals(creator, 0); }); t.done(); }) .catch(function (errorObj) { t.step(function() { assert_true(false, 'fail:code = ' + errorObj.code + ', message = ' + errorObj.message); }); t.done(); }); }, document.title); } </script>
bsd-3-clause
js0701/chromium-crosswalk
chrome/browser/google/google_update_win.h
5384
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CHROME_BROWSER_GOOGLE_GOOGLE_UPDATE_WIN_H_ #define CHROME_BROWSER_GOOGLE_GOOGLE_UPDATE_WIN_H_ #include "base/callback_forward.h" #include "base/memory/ref_counted.h" #include "base/memory/weak_ptr.h" #include "base/strings/string16.h" #include "base/win/scoped_comptr.h" #include "google_update/google_update_idl.h" #include "ui/gfx/native_widget_types.h" namespace base { class SingleThreadTaskRunner; } // namespace base // These values are used for a histogram. Do not reorder. enum GoogleUpdateErrorCode { // The upgrade completed successfully (or hasn't been started yet). GOOGLE_UPDATE_NO_ERROR = 0, // Google Update only supports upgrading if Chrome is installed in the default // location. This error will appear for developer builds and with // installations unzipped to random locations. CANNOT_UPGRADE_CHROME_IN_THIS_DIRECTORY = 1, // Failed to create Google Update JobServer COM class. DEPRECATED. // GOOGLE_UPDATE_JOB_SERVER_CREATION_FAILED = 2, // Failed to create Google Update OnDemand COM class. GOOGLE_UPDATE_ONDEMAND_CLASS_NOT_FOUND = 3, // Google Update OnDemand COM class reported an error during a check for // update (or while upgrading). GOOGLE_UPDATE_ONDEMAND_CLASS_REPORTED_ERROR = 4, // A call to GetResults failed. DEPRECATED. // GOOGLE_UPDATE_GET_RESULT_CALL_FAILED = 5, // A call to GetVersionInfo failed. DEPRECATED // GOOGLE_UPDATE_GET_VERSION_INFO_FAILED = 6, // An error occurred while upgrading (or while checking for update). // Check the Google Update log in %TEMP% for more details. GOOGLE_UPDATE_ERROR_UPDATING = 7, // Updates can not be downloaded because the administrator has disabled all // types of updating. GOOGLE_UPDATE_DISABLED_BY_POLICY = 8, // Updates can not be downloaded because the administrator has disabled // manual (on-demand) updates. 
Automatic background updates are allowed. GOOGLE_UPDATE_DISABLED_BY_POLICY_AUTO_ONLY = 9, NUM_ERROR_CODES }; // A delegate by which a caller of BeginUpdateCheck is notified of the status // and results of an update check. class UpdateCheckDelegate { public: virtual ~UpdateCheckDelegate() {} // Invoked following a successful update check. |new_version|, if not empty, // indicates the new version that is available. Otherwise (if |new_version| is // empty), Chrome is up to date. This method will only be invoked when // BeginUpdateCheck is called with |install_update_if_possible| == false. virtual void OnUpdateCheckComplete(const base::string16& new_version) = 0; // Invoked zero or more times during an upgrade. |progress|, a number between // 0 and 100 (inclusive), is an estimation as to what percentage of the // upgrade has completed. |new_version| indicates the version that is being // download and installed. This method will only be invoked when // BeginUpdateCheck is called with |install_update_if_possible| == true. virtual void OnUpgradeProgress(int progress, const base::string16& new_version) = 0; // Invoked following a successful upgrade. |new_version| indicates the version // to which Chrome was updated. This method will only be invoked when // BeginUpdateCheck is called with |install_update_if_possible| == true. virtual void OnUpgradeComplete(const base::string16& new_version) = 0; // Invoked following an unrecoverable error, indicated by |error_code|. // |html_error_message|, if not empty, must be a localized string containing // all information required by users to act on the error as well as for // support staff to diagnose it (i.e. |error_code| and any other related // state information). |new_version|, if not empty, indicates the version // to which an upgrade attempt was made. 
virtual void OnError(GoogleUpdateErrorCode error_code, const base::string16& html_error_message, const base::string16& new_version) = 0; protected: UpdateCheckDelegate() {} }; // Begins an asynchronous update check on |task_runner|. If a new version is // available and |install_update_if_possible| is true, the new version will be // automatically downloaded and installed. |elevation_window| is the window // which should own any necessary elevation UI. Methods on |delegate| will be // invoked on the caller's thread to provide feedback on the operation, with // messages localized to |locale| if possible. void BeginUpdateCheck( const scoped_refptr<base::SingleThreadTaskRunner>& task_runner, const std::string& locale, bool install_update_if_possible, gfx::AcceleratedWidget elevation_window, const base::WeakPtr<UpdateCheckDelegate>& delegate); // A type of callback supplied by tests to provide a custom IGoogleUpdate3Web // implementation (see src/google_update/google_update_idl.idl). typedef base::Callback<HRESULT(base::win::ScopedComPtr<IGoogleUpdate3Web>*)> GoogleUpdate3ClassFactory; // For use by tests that wish to provide a custom IGoogleUpdate3Web // implementation independent of Google Update's. void SetGoogleUpdateFactoryForTesting( const GoogleUpdate3ClassFactory& google_update_factory); #endif // CHROME_BROWSER_GOOGLE_GOOGLE_UPDATE_WIN_H_
bsd-3-clause
annatisch/autorest
src/generator/AutoRest.Python.Tests/Expected/AcceptanceTests/BodyFormData/autorestswaggerbatformdataservice/operations/formdata_operations.py
5590
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.pipeline import ClientRawResponse from .. import models class FormdataOperations(object): """FormdataOperations operations. :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. """ def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer self.config = config def upload_file( self, file_content, file_name, custom_headers=None, raw=False, callback=None, **operation_config): """Upload file. :param file_content: File to upload. :type file_content: Generator :param file_name: File name to upload. Name has to be spelled exactly as written here. :type file_name: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param callback: When specified, will be called with each chunk of data that is streamed. The callback should take two arguments, the bytes of the current chunk of data and the response object. If the data is uploading, response will be None. :type callback: Callable[Bytes, response=None] :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. 
:rtype: Generator :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if raw=true :raises: :class:`ErrorException<fixtures.acceptancetestsbodyformdata.models.ErrorException>` """ # Construct URL url = '/formdata/stream/uploadfile' # Construct parameters query_parameters = {} # Construct headers header_parameters = {} header_parameters['Content-Type'] = 'multipart/form-data' if custom_headers: header_parameters.update(custom_headers) # Construct form data form_data_content = { 'fileContent': file_content, 'fileName': file_name, } # Construct and send request request = self._client.post(url, query_parameters) response = self._client.send_formdata( request, header_parameters, form_data_content, **operation_config) if response.status_code not in [200]: raise models.ErrorException(self._deserialize, response) deserialized = None if response.status_code == 200: deserialized = self._client.stream_download(response, callback) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized def upload_file_via_body( self, file_content, custom_headers=None, raw=False, callback=None, **operation_config): """Upload file. :param file_content: File to upload. :type file_content: Generator :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param callback: When specified, will be called with each chunk of data that is streamed. The callback should take two arguments, the bytes of the current chunk of data and the response object. If the data is uploading, response will be None. :type callback: Callable[Bytes, response=None] :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. 
:rtype: Generator :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if raw=true :raises: :class:`ErrorException<fixtures.acceptancetestsbodyformdata.models.ErrorException>` """ # Construct URL url = '/formdata/stream/uploadfile' # Construct parameters query_parameters = {} # Construct headers header_parameters = {} header_parameters['Content-Type'] = 'application/octet-stream' if custom_headers: header_parameters.update(custom_headers) # Construct body body_content = self._client.stream_upload(file_content, callback) # Construct and send request request = self._client.put(url, query_parameters) response = self._client.send( request, header_parameters, body_content, **operation_config) if response.status_code not in [200]: raise models.ErrorException(self._deserialize, response) deserialized = None if response.status_code == 200: deserialized = self._client.stream_download(response, callback) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized
mit
odajima-yu/closure-rails
vendor/assets/javascripts/goog/events/eventtargettester.js
32410
// Copyright 2012 The Closure Library Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /** * @fileoverview goog.events.EventTarget tester. */ goog.provide('goog.events.eventTargetTester'); goog.setTestOnly('goog.events.eventTargetTester'); goog.provide('goog.events.eventTargetTester.KeyType'); goog.setTestOnly('goog.events.eventTargetTester.KeyType'); goog.provide('goog.events.eventTargetTester.UnlistenReturnType'); goog.setTestOnly('goog.events.eventTargetTester.UnlistenReturnType'); goog.require('goog.array'); goog.require('goog.events'); goog.require('goog.events.Event'); goog.require('goog.events.EventTarget'); goog.require('goog.testing.asserts'); goog.require('goog.testing.recordFunction'); /** * Setup step for the test functions. This needs to be called from the * test setUp. * @param {Function} listenFn Function that, given the same signature * as goog.events.listen, will add listener to the given event * target. * @param {Function} unlistenFn Function that, given the same * signature as goog.events.unlisten, will remove listener from * the given event target. * @param {Function} unlistenByKeyFn Function that, given 2 * parameters: src and key, will remove the corresponding * listener. * @param {Function} listenOnceFn Function that, given the same * signature as goog.events.listenOnce, will add a one-time * listener to the given event target. 
* @param {Function} dispatchEventFn Function that, given the same * signature as goog.events.dispatchEvent, will dispatch the event * on the given event target. * @param {Function} removeAllFn Function that, given the same * signature as goog.events.removeAll, will remove all listeners * according to the contract of goog.events.removeAll. * @param {Function} getListenersFn Function that, given the same * signature as goog.events.getListeners, will retrieve listeners. * @param {Function} getListenerFn Function that, given the same * signature as goog.events.getListener, will retrieve the * listener object. * @param {Function} hasListenerFn Function that, given the same * signature as goog.events.hasListener, will determine whether * listeners exist. * @param {goog.events.eventTargetTester.KeyType} listenKeyType The * key type returned by listen call. * @param {goog.events.eventTargetTester.UnlistenReturnType} * unlistenFnReturnType * Whether we should check return value from * unlisten call. If unlisten does not return a value, this should * be set to false. * @param {boolean} objectListenerSupported Whether listener of type * Object is supported. 
*/ goog.events.eventTargetTester.setUp = function( listenFn, unlistenFn, unlistenByKeyFn, listenOnceFn, dispatchEventFn, removeAllFn, getListenersFn, getListenerFn, hasListenerFn, listenKeyType, unlistenFnReturnType, objectListenerSupported) { listen = listenFn; unlisten = unlistenFn; unlistenByKey = unlistenByKeyFn; listenOnce = listenOnceFn; dispatchEvent = dispatchEventFn; removeAll = removeAllFn; getListeners = getListenersFn; getListener = getListenerFn; hasListener = hasListenerFn; keyType = listenKeyType; unlistenReturnType = unlistenFnReturnType; objectTypeListenerSupported = objectListenerSupported; listeners = []; for (var i = 0; i < goog.events.eventTargetTester.MAX_; i++) { listeners[i] = createListener(); } eventTargets = []; for (i = 0; i < goog.events.eventTargetTester.MAX_; i++) { eventTargets[i] = new goog.events.EventTarget(); } }; /** * Teardown step for the test functions. This needs to be called from * test teardown. */ goog.events.eventTargetTester.tearDown = function() { for (var i = 0; i < goog.events.eventTargetTester.MAX_; i++) { goog.dispose(eventTargets[i]); } }; /** * The type of key returned by key-returning functions (listen). * @enum {number} */ goog.events.eventTargetTester.KeyType = { /** * Returns number for key. */ NUMBER: 0, /** * Returns undefined (no return value). */ UNDEFINED: 1 }; /** * The type of unlisten function's return value. */ goog.events.eventTargetTester.UnlistenReturnType = { /** * Returns boolean indicating whether unlisten is successful. */ BOOLEAN: 0, /** * Returns undefind (no return value). */ UNDEFINED: 1 }; /** * Expando property used on "listener" function to determine if a * listener has already been checked. This is what allows us to * implement assertNoOtherListenerIsCalled. 
* @type {string} */ goog.events.eventTargetTester.ALREADY_CHECKED_PROP = '__alreadyChecked'; /** * Expando property used on "listener" function to record the number * of times it has been called the last time assertListenerIsCalled is * done. This allows us to verify that it has not been called more * times in assertNoOtherListenerIsCalled. */ goog.events.eventTargetTester.NUM_CALLED_PROP = '__numCalled'; /** * The maximum number of initialized event targets (in eventTargets * array) and listeners (in listeners array). * @type {number} * @private */ goog.events.eventTargetTester.MAX_ = 10; /** * Contains test event types. * @enum {string} */ var EventType = { A: goog.events.getUniqueId('a'), B: goog.events.getUniqueId('b'), C: goog.events.getUniqueId('c') }; var listen, unlisten, unlistenByKey, listenOnce, dispatchEvent; var removeAll, getListeners, getListener, hasListener; var keyType, unlistenReturnType, objectTypeListenerSupported; var eventTargets, listeners; /** * Custom event object for testing. * @constructor * @extends {goog.events.Event} */ var TestEvent = function() { goog.base(this, EventType.A); }; goog.inherits(TestEvent, goog.events.Event); /** * Creates a listener that executes the given function (optional). * @param {!Function=} opt_listenerFn The optional function to execute. * @return {!Function} The listener function. */ function createListener(opt_listenerFn) { return goog.testing.recordFunction(opt_listenerFn); } /** * Asserts that the given listener is called numCount number of times. * @param {!Function} listener The listener to check. * @param {number} numCount The number of times. See also the times() * function below. 
*/ function assertListenerIsCalled(listener, numCount) { assertEquals('Listeners is not called the correct number of times.', numCount, listener.getCallCount()); listener[goog.events.eventTargetTester.ALREADY_CHECKED_PROP] = true; listener[goog.events.eventTargetTester.NUM_CALLED_PROP] = numCount; } /** * Asserts that no other listeners, other than those verified via * assertListenerIsCalled, have been called since the last * resetListeners(). */ function assertNoOtherListenerIsCalled() { goog.array.forEach(listeners, function(l, index) { if (!l[goog.events.eventTargetTester.ALREADY_CHECKED_PROP]) { assertEquals( 'Listeners ' + index + ' is unexpectedly called.', 0, l.getCallCount()); } else { assertEquals( 'Listeners ' + index + ' is unexpectedly called.', l[goog.events.eventTargetTester.NUM_CALLED_PROP], l.getCallCount()); } }); } /** * Resets all listeners call count to 0. */ function resetListeners() { goog.array.forEach(listeners, function(l) { l.reset(); l[goog.events.eventTargetTester.ALREADY_CHECKED_PROP] = false; }); } /** * The number of times a listener should have been executed. This * exists to make assertListenerIsCalled more readable. This is used * like so: assertListenerIsCalled(listener, times(2)); * @param {number} n The number of times a listener should have been * executed. * @return {number} The number n. 
*/ function times(n) { return n; } function testNoListener() { dispatchEvent(eventTargets[0], EventType.A); assertNoOtherListenerIsCalled(); } function testOneListener() { listen(eventTargets[0], EventType.A, listeners[0]); dispatchEvent(eventTargets[0], EventType.A); assertListenerIsCalled(listeners[0], times(1)); assertNoOtherListenerIsCalled(); resetListeners(); dispatchEvent(eventTargets[0], EventType.B); dispatchEvent(eventTargets[0], EventType.C); assertNoOtherListenerIsCalled(); } function testTwoListenersOfSameType() { var key1 = listen(eventTargets[0], EventType.A, listeners[0]); var key2 = listen(eventTargets[0], EventType.A, listeners[1]); if (keyType == goog.events.eventTargetTester.KeyType.NUMBER) { assertNotEquals(key1, key2); } else { assertUndefined(key1); assertUndefined(key2); } dispatchEvent(eventTargets[0], EventType.A); assertListenerIsCalled(listeners[0], times(1)); assertListenerIsCalled(listeners[1], times(1)); assertNoOtherListenerIsCalled(); } function testInstallingSameListeners() { var key1 = listen(eventTargets[0], EventType.A, listeners[0]); var key2 = listen(eventTargets[0], EventType.A, listeners[0]); var key3 = listen(eventTargets[0], EventType.B, listeners[0]); if (keyType == goog.events.eventTargetTester.KeyType.NUMBER) { assertEquals(key1, key2); assertNotEquals(key1, key3); } else { assertUndefined(key1); assertUndefined(key2); assertUndefined(key3); } dispatchEvent(eventTargets[0], EventType.A); assertListenerIsCalled(listeners[0], times(1)); dispatchEvent(eventTargets[0], EventType.B); assertListenerIsCalled(listeners[0], times(2)); assertNoOtherListenerIsCalled(); } function testScope() { listeners[0] = createListener(function(e) { assertEquals('Wrong scope with undefined scope', eventTargets[0], this); }); listeners[1] = createListener(function(e) { assertEquals('Wrong scope with null scope', eventTargets[0], this); }); var scope = {}; listeners[2] = createListener(function(e) { assertEquals('Wrong scope with specific scope 
object', scope, this); }); listen(eventTargets[0], EventType.A, listeners[0]); listen(eventTargets[0], EventType.A, listeners[1], false, null); listen(eventTargets[0], EventType.A, listeners[2], false, scope); dispatchEvent(eventTargets[0], EventType.A); assertListenerIsCalled(listeners[0], times(1)); assertListenerIsCalled(listeners[1], times(1)); assertListenerIsCalled(listeners[2], times(1)); } function testDispatchEventDoesNotThrowWithDisposedEventTarget() { goog.dispose(eventTargets[0]); assertTrue(dispatchEvent(eventTargets[0], EventType.A)); } function testDispatchEventWithObjectLiteral() { listen(eventTargets[0], EventType.A, listeners[0]); assertTrue(dispatchEvent(eventTargets[0], {type: EventType.A})); assertListenerIsCalled(listeners[0], times(1)); assertNoOtherListenerIsCalled(); } function testDispatchEventWithCustomEventObject() { listen(eventTargets[0], EventType.A, listeners[0]); var e = new TestEvent(); assertTrue(dispatchEvent(eventTargets[0], e)); assertListenerIsCalled(listeners[0], times(1)); assertNoOtherListenerIsCalled(); var actualEvent = listeners[0].getLastCall().getArgument(0); assertEquals(e, actualEvent); assertEquals(eventTargets[0], actualEvent.target); } function testDisposingEventTargetRemovesListeners() { listen(eventTargets[0], EventType.A, listeners[0]); goog.dispose(eventTargets[0]); dispatchEvent(eventTargets[0], EventType.A); assertNoOtherListenerIsCalled(); } /** * Unlisten/unlistenByKey should still work after disposal. There are * many circumstances when this is actually necessary. For example, a * user may have listened to an event target and stored the key * (e.g. in a goog.events.EventHandler) and only unlisten after the * target has been disposed. 
*/ function testUnlistenWorksAfterDisposal() { var key = listen(eventTargets[0], EventType.A, listeners[0]); goog.dispose(eventTargets[0]); unlisten(eventTargets[0], EventType.A, listeners[1]); if (unlistenByKey) { unlistenByKey(eventTargets[0], key); } } function testRemovingListener() { var ret1 = unlisten(eventTargets[0], EventType.A, listeners[0]); listen(eventTargets[0], EventType.A, listeners[0]); var ret2 = unlisten(eventTargets[0], EventType.A, listeners[1]); var ret3 = unlisten(eventTargets[0], EventType.B, listeners[0]); var ret4 = unlisten(eventTargets[1], EventType.A, listeners[0]); dispatchEvent(eventTargets[0], EventType.A); assertListenerIsCalled(listeners[0], times(1)); var ret5 = unlisten(eventTargets[0], EventType.A, listeners[0]); var ret6 = unlisten(eventTargets[0], EventType.A, listeners[0]); dispatchEvent(eventTargets[0], EventType.A); assertListenerIsCalled(listeners[0], times(1)); assertNoOtherListenerIsCalled(); if (unlistenReturnType == goog.events.eventTargetTester.UnlistenReturnType.BOOLEAN) { assertFalse(ret1); assertFalse(ret2); assertFalse(ret3); assertFalse(ret4); assertTrue(ret5); assertFalse(ret6); } else { assertUndefined(ret1); assertUndefined(ret2); assertUndefined(ret3); assertUndefined(ret4); assertUndefined(ret5); assertUndefined(ret6); } } function testCapture() { eventTargets[0].setParentEventTarget(eventTargets[1]); eventTargets[1].setParentEventTarget(eventTargets[2]); eventTargets[9].setParentEventTarget(eventTargets[0]); var ordering = 0; listeners[0] = createListener( function(e) { assertEquals(eventTargets[2], e.currentTarget); assertEquals(eventTargets[0], e.target); assertEquals('First capture listener is not called first', 0, ordering); ordering++; }); listeners[1] = createListener( function(e) { assertEquals(eventTargets[1], e.currentTarget); assertEquals(eventTargets[0], e.target); assertEquals('2nd capture listener is not called 2nd', 1, ordering); ordering++; }); listeners[2] = createListener( function(e) { 
assertEquals(eventTargets[0], e.currentTarget); assertEquals(eventTargets[0], e.target); assertEquals('3rd capture listener is not called 3rd', 2, ordering); ordering++; }); listen(eventTargets[2], EventType.A, listeners[0], true); listen(eventTargets[1], EventType.A, listeners[1], true); listen(eventTargets[0], EventType.A, listeners[2], true); // These should not be called. listen(eventTargets[3], EventType.A, listeners[3], true); listen(eventTargets[0], EventType.B, listeners[4], true); listen(eventTargets[0], EventType.C, listeners[5], true); listen(eventTargets[1], EventType.B, listeners[6], true); listen(eventTargets[1], EventType.C, listeners[7], true); listen(eventTargets[2], EventType.B, listeners[8], true); listen(eventTargets[2], EventType.C, listeners[9], true); dispatchEvent(eventTargets[0], EventType.A); assertListenerIsCalled(listeners[0], times(1)); assertListenerIsCalled(listeners[1], times(1)); assertListenerIsCalled(listeners[2], times(1)); assertNoOtherListenerIsCalled(); } function testBubble() { eventTargets[0].setParentEventTarget(eventTargets[1]); eventTargets[1].setParentEventTarget(eventTargets[2]); eventTargets[9].setParentEventTarget(eventTargets[0]); var ordering = 0; listeners[0] = createListener( function(e) { assertEquals(eventTargets[0], e.currentTarget); assertEquals(eventTargets[0], e.target); assertEquals('First bubble listener is not called first', 0, ordering); ordering++; }); listeners[1] = createListener( function(e) { assertEquals(eventTargets[1], e.currentTarget); assertEquals(eventTargets[0], e.target); assertEquals('2nd bubble listener is not called 2nd', 1, ordering); ordering++; }); listeners[2] = createListener( function(e) { assertEquals(eventTargets[2], e.currentTarget); assertEquals(eventTargets[0], e.target); assertEquals('3rd bubble listener is not called 3rd', 2, ordering); ordering++; }); listen(eventTargets[0], EventType.A, listeners[0]); listen(eventTargets[1], EventType.A, listeners[1]); 
listen(eventTargets[2], EventType.A, listeners[2]); // These should not be called. listen(eventTargets[3], EventType.A, listeners[3]); listen(eventTargets[0], EventType.B, listeners[4]); listen(eventTargets[0], EventType.C, listeners[5]); listen(eventTargets[1], EventType.B, listeners[6]); listen(eventTargets[1], EventType.C, listeners[7]); listen(eventTargets[2], EventType.B, listeners[8]); listen(eventTargets[2], EventType.C, listeners[9]); dispatchEvent(eventTargets[0], EventType.A); assertListenerIsCalled(listeners[0], times(1)); assertListenerIsCalled(listeners[1], times(1)); assertListenerIsCalled(listeners[2], times(1)); assertNoOtherListenerIsCalled(); } function testCaptureAndBubble() { eventTargets[0].setParentEventTarget(eventTargets[1]); eventTargets[1].setParentEventTarget(eventTargets[2]); listen(eventTargets[0], EventType.A, listeners[0], true); listen(eventTargets[1], EventType.A, listeners[1], true); listen(eventTargets[2], EventType.A, listeners[2], true); listen(eventTargets[0], EventType.A, listeners[3]); listen(eventTargets[1], EventType.A, listeners[4]); listen(eventTargets[2], EventType.A, listeners[5]); dispatchEvent(eventTargets[0], EventType.A); assertListenerIsCalled(listeners[0], times(1)); assertListenerIsCalled(listeners[1], times(1)); assertListenerIsCalled(listeners[2], times(1)); assertListenerIsCalled(listeners[3], times(1)); assertListenerIsCalled(listeners[4], times(1)); assertListenerIsCalled(listeners[5], times(1)); assertNoOtherListenerIsCalled(); } function testPreventDefaultByReturningFalse() { listeners[0] = createListener(function(e) { return false; }); listeners[1] = createListener(function(e) { return true; }); listen(eventTargets[0], EventType.A, listeners[0]); listen(eventTargets[0], EventType.A, listeners[1]); var result = dispatchEvent(eventTargets[0], EventType.A); assertFalse(result); } function testPreventDefault() { listeners[0] = createListener(function(e) { e.preventDefault(); }); listeners[1] = 
createListener(function(e) { return true; }); listen(eventTargets[0], EventType.A, listeners[0]); listen(eventTargets[0], EventType.A, listeners[1]); var result = dispatchEvent(eventTargets[0], EventType.A); assertFalse(result); } function testPreventDefaultAtCapture() { listeners[0] = createListener(function(e) { e.preventDefault(); }); listeners[1] = createListener(function(e) { return true; }); listen(eventTargets[0], EventType.A, listeners[0], true); listen(eventTargets[0], EventType.A, listeners[1], true); var result = dispatchEvent(eventTargets[0], EventType.A); assertFalse(result); } function testStopPropagation() { eventTargets[0].setParentEventTarget(eventTargets[1]); eventTargets[1].setParentEventTarget(eventTargets[2]); listeners[0] = createListener(function(e) { e.stopPropagation(); }); listen(eventTargets[0], EventType.A, listeners[0]); listen(eventTargets[0], EventType.A, listeners[1]); listen(eventTargets[1], EventType.A, listeners[2]); listen(eventTargets[2], EventType.A, listeners[3]); dispatchEvent(eventTargets[0], EventType.A); assertListenerIsCalled(listeners[0], times(1)); assertListenerIsCalled(listeners[1], times(1)); assertNoOtherListenerIsCalled(); } function testStopPropagation2() { eventTargets[0].setParentEventTarget(eventTargets[1]); eventTargets[1].setParentEventTarget(eventTargets[2]); listeners[1] = createListener(function(e) { e.stopPropagation(); }); listen(eventTargets[0], EventType.A, listeners[0]); listen(eventTargets[0], EventType.A, listeners[1]); listen(eventTargets[1], EventType.A, listeners[2]); listen(eventTargets[2], EventType.A, listeners[3]); dispatchEvent(eventTargets[0], EventType.A); assertListenerIsCalled(listeners[0], times(1)); assertListenerIsCalled(listeners[1], times(1)); assertNoOtherListenerIsCalled(); } function testStopPropagation3() { eventTargets[0].setParentEventTarget(eventTargets[1]); eventTargets[1].setParentEventTarget(eventTargets[2]); listeners[2] = createListener(function(e) { e.stopPropagation(); 
}); listen(eventTargets[0], EventType.A, listeners[0]); listen(eventTargets[0], EventType.A, listeners[1]); listen(eventTargets[1], EventType.A, listeners[2]); listen(eventTargets[2], EventType.A, listeners[3]); dispatchEvent(eventTargets[0], EventType.A); assertListenerIsCalled(listeners[0], times(1)); assertListenerIsCalled(listeners[1], times(1)); assertListenerIsCalled(listeners[2], times(1)); assertNoOtherListenerIsCalled(); } function testStopPropagationAtCapture() { eventTargets[0].setParentEventTarget(eventTargets[1]); eventTargets[1].setParentEventTarget(eventTargets[2]); listeners[0] = createListener(function(e) { e.stopPropagation(); }); listen(eventTargets[2], EventType.A, listeners[0], true); listen(eventTargets[1], EventType.A, listeners[1], true); listen(eventTargets[0], EventType.A, listeners[2], true); listen(eventTargets[0], EventType.A, listeners[3]); listen(eventTargets[1], EventType.A, listeners[4]); listen(eventTargets[2], EventType.A, listeners[5]); dispatchEvent(eventTargets[0], EventType.A); assertListenerIsCalled(listeners[0], times(1)); assertNoOtherListenerIsCalled(); } function testHandleEvent() { if (!objectTypeListenerSupported) { return; } var obj = {}; obj.handleEvent = goog.testing.recordFunction(); listen(eventTargets[0], EventType.A, obj); dispatchEvent(eventTargets[0], EventType.A); assertEquals(1, obj.handleEvent.getCallCount()); } function testListenOnce() { if (!listenOnce) { return; } listenOnce(eventTargets[0], EventType.A, listeners[0], true); listenOnce(eventTargets[0], EventType.A, listeners[1]); listenOnce(eventTargets[0], EventType.B, listeners[2]); dispatchEvent(eventTargets[0], EventType.A); assertListenerIsCalled(listeners[0], times(1)); assertListenerIsCalled(listeners[1], times(1)); assertListenerIsCalled(listeners[2], times(0)); assertNoOtherListenerIsCalled(); resetListeners(); dispatchEvent(eventTargets[0], EventType.A); assertListenerIsCalled(listeners[0], times(0)); assertListenerIsCalled(listeners[1], 
times(0)); assertListenerIsCalled(listeners[2], times(0)); dispatchEvent(eventTargets[0], EventType.B); assertListenerIsCalled(listeners[2], times(1)); assertNoOtherListenerIsCalled(); } function testUnlistenInListen() { listeners[1] = createListener( function(e) { unlisten(eventTargets[0], EventType.A, listeners[1]); unlisten(eventTargets[0], EventType.A, listeners[2]); }); listen(eventTargets[0], EventType.A, listeners[0]); listen(eventTargets[0], EventType.A, listeners[1]); listen(eventTargets[0], EventType.A, listeners[2]); listen(eventTargets[0], EventType.A, listeners[3]); dispatchEvent(eventTargets[0], EventType.A); assertListenerIsCalled(listeners[0], times(1)); assertListenerIsCalled(listeners[1], times(1)); assertListenerIsCalled(listeners[2], times(0)); assertListenerIsCalled(listeners[3], times(1)); assertNoOtherListenerIsCalled(); resetListeners(); dispatchEvent(eventTargets[0], EventType.A); assertListenerIsCalled(listeners[0], times(1)); assertListenerIsCalled(listeners[1], times(0)); assertListenerIsCalled(listeners[2], times(0)); assertListenerIsCalled(listeners[3], times(1)); assertNoOtherListenerIsCalled(); } function testUnlistenByKeyInListen() { if (!unlistenByKey) { return; } var key1, key2; listeners[1] = createListener( function(e) { unlistenByKey(eventTargets[0], key1); unlistenByKey(eventTargets[0], key2); }); listen(eventTargets[0], EventType.A, listeners[0]); key1 = listen(eventTargets[0], EventType.A, listeners[1]); key2 = listen(eventTargets[0], EventType.A, listeners[2]); listen(eventTargets[0], EventType.A, listeners[3]); dispatchEvent(eventTargets[0], EventType.A); assertListenerIsCalled(listeners[0], times(1)); assertListenerIsCalled(listeners[1], times(1)); assertListenerIsCalled(listeners[2], times(0)); assertListenerIsCalled(listeners[3], times(1)); assertNoOtherListenerIsCalled(); resetListeners(); dispatchEvent(eventTargets[0], EventType.A); assertListenerIsCalled(listeners[0], times(1)); assertListenerIsCalled(listeners[1], 
times(0)); assertListenerIsCalled(listeners[2], times(0)); assertListenerIsCalled(listeners[3], times(1)); assertNoOtherListenerIsCalled(); } function testSetParentEventTarget() { assertNull(eventTargets[0].getParentEventTarget()); eventTargets[0].setParentEventTarget(eventTargets[1]); assertEquals(eventTargets[1], eventTargets[0].getParentEventTarget()); assertNull(eventTargets[1].getParentEventTarget()); eventTargets[0].setParentEventTarget(null); assertNull(eventTargets[0].getParentEventTarget()); } function testListenOnceAfterListenDoesNotChangeExistingListener() { if (!listenOnce) { return; } listen(eventTargets[0], EventType.A, listeners[0]); listenOnce(eventTargets[0], EventType.A, listeners[0]); dispatchEvent(eventTargets[0], EventType.A); dispatchEvent(eventTargets[0], EventType.A); dispatchEvent(eventTargets[0], EventType.A); assertListenerIsCalled(listeners[0], times(3)); assertNoOtherListenerIsCalled(); } function testListenOnceAfterListenOnceDoesNotChangeExistingListener() { if (!listenOnce) { return; } listenOnce(eventTargets[0], EventType.A, listeners[0]); listenOnce(eventTargets[0], EventType.A, listeners[0]); dispatchEvent(eventTargets[0], EventType.A); dispatchEvent(eventTargets[0], EventType.A); dispatchEvent(eventTargets[0], EventType.A); assertListenerIsCalled(listeners[0], times(1)); assertNoOtherListenerIsCalled(); } function testListenAfterListenOnceRemoveOnceness() { if (!listenOnce) { return; } listenOnce(eventTargets[0], EventType.A, listeners[0]); listen(eventTargets[0], EventType.A, listeners[0]); dispatchEvent(eventTargets[0], EventType.A); dispatchEvent(eventTargets[0], EventType.A); dispatchEvent(eventTargets[0], EventType.A); assertListenerIsCalled(listeners[0], times(3)); assertNoOtherListenerIsCalled(); } function testUnlistenAfterListenOnce() { if (!listenOnce) { return; } listenOnce(eventTargets[0], EventType.A, listeners[0]); unlisten(eventTargets[0], EventType.A, listeners[0]); dispatchEvent(eventTargets[0], EventType.A); 
listen(eventTargets[0], EventType.A, listeners[0]); listenOnce(eventTargets[0], EventType.A, listeners[0]); unlisten(eventTargets[0], EventType.A, listeners[0]); dispatchEvent(eventTargets[0], EventType.A); listenOnce(eventTargets[0], EventType.A, listeners[0]); listen(eventTargets[0], EventType.A, listeners[0]); unlisten(eventTargets[0], EventType.A, listeners[0]); dispatchEvent(eventTargets[0], EventType.A); listenOnce(eventTargets[0], EventType.A, listeners[0]); listenOnce(eventTargets[0], EventType.A, listeners[0]); unlisten(eventTargets[0], EventType.A, listeners[0]); dispatchEvent(eventTargets[0], EventType.A); assertNoOtherListenerIsCalled(); } function testRemoveAllWithType() { if (!removeAll) { return; } listen(eventTargets[0], EventType.A, listeners[0], true); listen(eventTargets[0], EventType.A, listeners[1]); listen(eventTargets[0], EventType.C, listeners[2], true); listen(eventTargets[0], EventType.C, listeners[3]); listen(eventTargets[0], EventType.B, listeners[4], true); listen(eventTargets[0], EventType.B, listeners[5], true); listen(eventTargets[0], EventType.B, listeners[6]); listen(eventTargets[0], EventType.B, listeners[7]); assertEquals(4, removeAll(eventTargets[0], EventType.B)); dispatchEvent(eventTargets[0], EventType.A); dispatchEvent(eventTargets[0], EventType.B); dispatchEvent(eventTargets[0], EventType.C); assertListenerIsCalled(listeners[0], times(1)); assertListenerIsCalled(listeners[1], times(1)); assertListenerIsCalled(listeners[2], times(1)); assertListenerIsCalled(listeners[3], times(1)); assertNoOtherListenerIsCalled(); } function testRemoveAll() { if (!removeAll) { return; } listen(eventTargets[0], EventType.A, listeners[0], true); listen(eventTargets[0], EventType.A, listeners[1]); listen(eventTargets[0], EventType.C, listeners[2], true); listen(eventTargets[0], EventType.C, listeners[3]); listen(eventTargets[0], EventType.B, listeners[4], true); listen(eventTargets[0], EventType.B, listeners[5], true); listen(eventTargets[0], 
EventType.B, listeners[6]); listen(eventTargets[0], EventType.B, listeners[7]); assertEquals(8, removeAll(eventTargets[0])); dispatchEvent(eventTargets[0], EventType.A); dispatchEvent(eventTargets[0], EventType.B); dispatchEvent(eventTargets[0], EventType.C); assertNoOtherListenerIsCalled(); } function testGetListeners() { if (!getListeners) { return; } listen(eventTargets[0], EventType.A, listeners[0], true); listen(eventTargets[0], EventType.A, listeners[1], true); listen(eventTargets[0], EventType.A, listeners[2]); listen(eventTargets[0], EventType.A, listeners[3]); var l = getListeners(eventTargets[0], EventType.A, true); assertEquals(2, l.length); assertEquals(listeners[0], l[0].listener); assertEquals(listeners[1], l[1].listener); l = getListeners(eventTargets[0], EventType.A, false); assertEquals(2, l.length); assertEquals(listeners[2], l[0].listener); assertEquals(listeners[3], l[1].listener); l = getListeners(eventTargets[0], EventType.B, true); assertEquals(0, l.length); } function testGetListener() { if (!getListener) { return; } listen(eventTargets[0], EventType.A, listeners[0], true); assertNotNull(getListener(eventTargets[0], EventType.A, listeners[0], true)); assertNull( getListener(eventTargets[0], EventType.A, listeners[0], true, {})); assertNull(getListener(eventTargets[1], EventType.A, listeners[0], true)); assertNull(getListener(eventTargets[0], EventType.B, listeners[0], true)); assertNull(getListener(eventTargets[0], EventType.A, listeners[1], true)); } function testHasListener() { if (!hasListener) { return; } assertFalse(hasListener(eventTargets[0])); listen(eventTargets[0], EventType.A, listeners[0], true); assertTrue(hasListener(eventTargets[0])); assertTrue(hasListener(eventTargets[0], EventType.A)); assertTrue(hasListener(eventTargets[0], EventType.A, true)); assertTrue(hasListener(eventTargets[0], undefined, true)); assertFalse(hasListener(eventTargets[0], EventType.A, false)); assertFalse(hasListener(eventTargets[0], undefined, 
false)); assertFalse(hasListener(eventTargets[0], EventType.B)); assertFalse(hasListener(eventTargets[0], EventType.B, true)); assertFalse(hasListener(eventTargets[1])); } function testFiringEventBeforeDisposeInternalWorks() { /** * @extends {goog.events.EventTarget} * @constructor */ var MockTarget = function() { goog.base(this); }; goog.inherits(MockTarget, goog.events.EventTarget); MockTarget.prototype.disposeInternal = function() { dispatchEvent(this, EventType.A); goog.base(this, 'disposeInternal'); }; var t = new MockTarget(); try { listen(t, EventType.A, listeners[0]); t.dispose(); assertListenerIsCalled(listeners[0], times(1)); } catch (e) { goog.dispose(t); } } function testLoopDetection() { var target = new goog.events.EventTarget(); target.setParentEventTarget(target); try { target.dispatchEvent('string'); fail('expected error'); } catch (e) { assertContains('infinite', e.message); } }
mit
madhavanks26/com.vliesaputra.deviceinformation
src/com/vliesaputra/cordova/plugins/android/support/samples/SupportAppNavigation/src/com/example/android/support/appnavigation/app/OutsideTaskActivity.java
1318
/* * Copyright (C) 2012 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.example.android.support.appnavigation.app; import com.example.android.support.appnavigation.R; import android.app.Activity; import android.content.Intent; import android.os.Bundle; import android.view.View; public class OutsideTaskActivity extends Activity { @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.outside_task); } public void onViewContent(View v) { Intent intent = new Intent(Intent.ACTION_VIEW) .setType("application/x-example") .addFlags(Intent.FLAG_ACTIVITY_CLEAR_WHEN_TASK_RESET); startActivity(intent); } }
mit
facebook/flow
src/parser/test/flow/invalid_syntax/migrated_0012.js
67
// No generics for getters and setters ({ set foo<T>(newFoo) {} })
mit
markogresak/DefinitelyTyped
types/poisson-disk-sampling/src/tiny-ndarray.d.ts
31
export const tinyNDArray: any;
mit
svenskan/pronounce
vendor/swftools/lib/h.263/h263tables.h
1492
/* h263tables.h Copyright (c) 2003 Matthias Kramm <[email protected]> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ struct huffcode { char*code; int len; int index; }; struct mcbpc_intra_params { int index; int mb_type; int cbpc; }; struct mcbpc_inter_params { int index; int mb_type; int cbpc; }; struct rle_params { int index; int last; int run; int level; }; #define MCBPC_INTRA_STUFFING 8 #define MCBPC_INTER_STUFFING 20 #define RLE_ESCAPE 102 extern struct huffcode mcbpc_intra[]; extern struct mcbpc_intra_params mcbpc_intra_params[]; extern struct mcbpc_inter_params mcbpc_inter_params[]; extern struct rle_params rle_params[]; extern struct huffcode mcbpc_inter[]; extern struct huffcode cbpy[]; extern struct huffcode mvd[]; extern struct huffcode rle[];
mit
sekcheong/referencesource
System.Data.SqlXml/System/Xml/Xsl/XsltOld/ValueOfAction.cs
3999
//------------------------------------------------------------------------------ // <copyright file="ValueOfAction.cs" company="Microsoft"> // Copyright (c) Microsoft Corporation. All rights reserved. // </copyright> // <owner current="true" primary="true">[....]</owner> //------------------------------------------------------------------------------ namespace System.Xml.Xsl.XsltOld { using Res = System.Xml.Utils.Res; using System; using System.Diagnostics; using System.Xml; using System.Xml.XPath; internal class ValueOfAction : CompiledAction { private const int ResultStored = 2; private int selectKey = Compiler.InvalidQueryKey; private bool disableOutputEscaping; private static Action s_BuiltInRule = new BuiltInRuleTextAction(); internal static Action BuiltInRule() { Debug.Assert(s_BuiltInRule != null); return s_BuiltInRule; } internal override void Compile(Compiler compiler) { CompileAttributes(compiler); CheckRequiredAttribute(compiler, selectKey != Compiler.InvalidQueryKey, "select"); CheckEmpty(compiler); } internal override bool CompileAttribute(Compiler compiler) { string name = compiler.Input.LocalName; string value = compiler.Input.Value; if (Ref.Equal(name, compiler.Atoms.Select)) { this.selectKey = compiler.AddQuery(value); } else if (Ref.Equal(name, compiler.Atoms.DisableOutputEscaping)) { this.disableOutputEscaping = compiler.GetYesNo(value); } else { return false; } return true; } internal override void Execute(Processor processor, ActionFrame frame) { Debug.Assert(processor != null && frame != null); switch (frame.State) { case Initialized: Debug.Assert(frame != null); Debug.Assert(frame.NodeSet != null); string value = processor.ValueOf(frame, this.selectKey); if (processor.TextEvent(value, disableOutputEscaping)) { frame.Finished(); } else { frame.StoredOutput = value; frame.State = ResultStored; } break; case ResultStored: Debug.Assert(frame.StoredOutput != null); processor.TextEvent(frame.StoredOutput); frame.Finished(); break; default: 
Debug.Fail("Invalid ValueOfAction execution state"); break; } } } internal class BuiltInRuleTextAction : Action { private const int ResultStored = 2; internal override void Execute(Processor processor, ActionFrame frame) { Debug.Assert(processor != null && frame != null); switch (frame.State) { case Initialized: Debug.Assert(frame != null); Debug.Assert(frame.NodeSet != null); string value = processor.ValueOf(frame.NodeSet.Current); if (processor.TextEvent(value, /*disableOutputEscaping:*/false)) { frame.Finished(); } else { frame.StoredOutput = value; frame.State = ResultStored; } break; case ResultStored: Debug.Assert(frame.StoredOutput != null); processor.TextEvent(frame.StoredOutput); frame.Finished(); break; default: Debug.Fail("Invalid BuiltInRuleTextAction execution state"); break; } } } }
mit
awerlang/angular
modules/@angular/upgrade/test/aot/integration/examples_spec.ts
3443
/** * @license * Copyright Google Inc. All Rights Reserved. * * Use of this source code is governed by an MIT-style license that can be * found in the LICENSE file at https://angular.io/license */ import {Component, Directive, ElementRef, Injector, Input, NgModule, destroyPlatform} from '@angular/core'; import {async} from '@angular/core/testing'; import {BrowserModule} from '@angular/platform-browser'; import {platformBrowserDynamic} from '@angular/platform-browser-dynamic'; import * as angular from '@angular/upgrade/src/angular_js'; import {UpgradeComponent, UpgradeModule, downgradeComponent} from '@angular/upgrade/static'; import {bootstrap, html, multiTrim} from '../test_helpers'; export function main() { describe('examples', () => { beforeEach(() => destroyPlatform()); afterEach(() => destroyPlatform()); it('should have angular 1 loaded', () => expect(angular.version.major).toBe(1)); it('should verify UpgradeAdapter example', async(() => { // This is wrapping (upgrading) an Angular 1 component to be used in an Angular 2 // component @Directive({selector: 'ng1'}) class Ng1Component extends UpgradeComponent { @Input() title: string; constructor(elementRef: ElementRef, injector: Injector) { super('ng1', elementRef, injector); } } // This is an Angular 2 component that will be downgraded @Component({ selector: 'ng2', template: 'ng2[<ng1 [title]="nameProp">transclude</ng1>](<ng-content></ng-content>)' }) class Ng2Component { @Input('name') nameProp: string; } // This module represents the Angular 2 pieces of the application @NgModule({ declarations: [Ng1Component, Ng2Component], entryComponents: [Ng2Component], imports: [BrowserModule, UpgradeModule] }) class Ng2Module { ngDoBootstrap() { /* this is a placeholder to stop the boostrapper from complaining */ } } // This module represents the Angular 1 pieces of the application const ng1Module = angular .module('myExample', []) // This is an Angular 1 component that will be upgraded .directive( 'ng1', () => { return { 
scope: {title: '='}, transclude: true, template: 'ng1[Hello {{title}}!](<span ng-transclude></span>)' }; }) // This is wrapping (downgrading) an Angular 2 component to be used in Angular 1 .directive( 'ng2', downgradeComponent({component: Ng2Component, inputs: ['nameProp: name']})); // This is the (Angular 1) application bootstrap element // Notice that it is actually a downgraded Angular 2 component const element = html('<ng2 name="World">project</ng2>'); // Let's use a helper function to make this simpler bootstrap(platformBrowserDynamic(), Ng2Module, element, ng1Module).then(upgrade => { expect(multiTrim(element.textContent)) .toBe('ng2[ng1[Hello World!](transclude)](project)'); }); })); }); }
mit
rokn/Count_Words_2015
testing/openjdk2/nashorn/src/jdk/nashorn/internal/objects/annotations/Getter.java
2016
/* * Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. Oracle designates this * particular file as subject to the "Classpath" exception as provided * by Oracle in the LICENSE file that accompanied this code. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. */ package jdk.nashorn.internal.objects.annotations; import static jdk.nashorn.internal.objects.annotations.Attribute.DEFAULT_ATTRIBUTES; import java.lang.annotation.ElementType; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; /** * Annotation to specify the getter method for a JavaScript "data" property. */ @Retention(RetentionPolicy.RUNTIME) @Target(ElementType.METHOD) public @interface Getter { /** * Name of the property. If empty, the name is inferred. */ public String name() default ""; /** * Attribute flags for this setter. */ public int attributes() default DEFAULT_ATTRIBUTES; /** * Where this getter lives? */ public Where where() default Where.INSTANCE; }
mit
Halleck45/PhpMetricsZendServer
zray/vendor/hoa/iterator/Glob.php
1879
<?php /** * Hoa * * * @license * * New BSD License * * Copyright © 2007-2015, Hoa community. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the Hoa nor the names of its contributors may be * used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS AND CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ namespace Hoa\Iterator; /** * Class \Hoa\Iterator\Glob. * * Extending the SPL GlobIterator class. * * @copyright Copyright © 2007-2015 Hoa community * @license New BSD License */ class Glob extends \GlobIterator { }
mit
base0225/-swift
1209ZJ 百思不得姐 精华标题/1129ZJ 百思不得姐/Other/AppDelegate.h
283
// // AppDelegate.h // 1129ZJ 百思不得姐 // // Created by base on 15/09/19. // Copyright © 2015年 base. All rights reserved. // #import <UIKit/UIKit.h> @interface AppDelegate : UIResponder <UIApplicationDelegate> @property (strong, nonatomic) UIWindow *window; @end
mit
pcu4dros/pandora-core
workspace/lib/python3.5/site-packages/alembic/testing/mock.py
791
# testing/mock.py # Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Import stub for mock library. NOTE: copied/adapted from SQLAlchemy master for backwards compatibility; this should be removable when Alembic targets SQLAlchemy 1.0.0 """ from __future__ import absolute_import from ..util.compat import py33 if py33: from unittest.mock import MagicMock, Mock, call, patch, ANY else: try: from mock import MagicMock, Mock, call, patch, ANY # noqa except ImportError: raise ImportError( "SQLAlchemy's test suite requires the " "'mock' library as of 0.8.2.")
mit
markogresak/DefinitelyTyped
types/bootstrap4-toggle/index.d.ts
852
// Type definitions for bootstrap4-toggle 3.6 // Project: https://github.com/gitbrent/bootstrap4-toggle, https://gitbrent.github.io/bootstrap4-toggle/ // Definitions by: Mitchell Grice <https://github.com/gricey432> // Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped // TypeScript Version: 2.7 /// <reference types="jquery"/> interface BootstrapToggleOptions { on?: string | undefined; off?: string | undefined; size?: string | undefined; onstyle?: string | undefined; offstyle?: string | undefined; style?: string | undefined; width?: number | string | null | undefined; height?: number | string | null | undefined; } interface JQuery { bootstrapToggle(options?: BootstrapToggleOptions): JQuery; bootstrapToggle(command: "destroy" | "on" | "off" | "toggle" | "enable" | "disable"): JQuery; }
mit
lmazuel/autorest
src/dev/TestServer/server/app.js
18237
var express = require('express'); var path = require('path'); var favicon = require('serve-favicon'); var logger = require('morgan'); var cookieParser = require('cookie-parser'); var bodyParser = require('body-parser'); var fs = require('fs'); var morgan = require('morgan'); var routes = require('./routes/index'); var number = require('./routes/number'); var array = require('./routes/array'); var bool = require('./routes/bool'); var integer = require('./routes/int'); var string = require('./routes/string'); var byte = require('./routes/byte'); var date = require('./routes/date'); var datetime = require('./routes/datetime'); var datetimeRfc1123 = require('./routes/datetime-rfc1123'); var duration = require('./routes/duration'); var complex = require('./routes/complex'); var report = require('./routes/report'); var dictionary = require('./routes/dictionary'); var paths = require('./routes/paths'); var queries = require('./routes/queries'); var pathitem = require('./routes/pathitem'); var header = require('./routes/header'); var reqopt = require('./routes/reqopt'); var httpResponses = require('./routes/httpResponses'); var files = require('./routes/files'); var formData = require('./routes/formData'); var lros = require('./routes/lros'); var paging = require('./routes/paging'); var modelFlatten = require('./routes/model-flatten'); var azureUrl = require('./routes/azureUrl'); var azureSpecial = require('./routes/azureSpecials'); var parameterGrouping = require('./routes/azureParameterGrouping.js'); var validation = require('./routes/validation.js'); var customUri = require('./routes/customUri.js'); var xml = require('./routes/xml.js'); // XML serialization var util = require('util'); var app = express(); //set up server log var now = new Date(); var logFileName = 'AccTestServer-' + now.getHours() + now.getMinutes() + now.getSeconds() + '.log'; var testResultDir = path.join(__dirname, '../../../../TestResults'); if (!fs.existsSync(testResultDir)) { 
fs.mkdirSync(testResultDir); } var logfile = fs.createWriteStream(path.join(testResultDir, logFileName), {flags: 'a'}); app.use(morgan('combined', {stream: logfile})); var azurecoverage = {}; var optionalCoverage = {}; var coverage = { "getArrayNull": 0, "getArrayEmpty": 0, "putArrayEmpty": 0, "getArrayInvalid": 0, "getArrayBooleanValid": 0, "putArrayBooleanValid": 0, "getArrayBooleanWithNull": 0, "getArrayBooleanWithString": 0, "getArrayIntegerValid": 0, "putArrayIntegerValid": 0, "getArrayIntegerWithNull": 0, "getArrayIntegerWithString": 0, "getArrayLongValid": 0, "putArrayLongValid": 0, "getArrayLongWithNull": 0, "getArrayLongWithString": 0, "getArrayFloatValid": 0, "putArrayFloatValid": 0, "getArrayFloatWithNull": 0, "getArrayFloatWithString": 0, "getArrayDoubleValid": 0, "putArrayDoubleValid": 0, "getArrayDoubleWithNull": 0, "getArrayDoubleWithString": 0, "getArrayStringValid": 0, "putArrayStringValid": 0, "getArrayStringWithNull": 0, "getArrayStringWithNumber": 0, "getArrayDateValid": 0, "putArrayDateValid": 0, "getArrayDateWithNull": 0, "getArrayDateWithInvalidChars": 0, "getArrayDateTimeValid": 0, "putArrayDateTimeValid": 0, "getArrayDateTimeWithNull": 0, "getArrayDateTimeWithInvalidChars": 0, "getArrayDateTimeRfc1123Valid": 0, "putArrayDateTimeRfc1123Valid": 0, "getArrayDurationValid": 0, "putArrayDurationValid": 0, "getArrayUuidValid": 0, "getArrayUuidWithInvalidChars": 0, "putArrayUuidValid": 0, "getArrayByteValid": 0, "putArrayByteValid": 0, "getArrayByteWithNull": 0, "getArrayArrayNull": 0, "getArrayArrayEmpty": 0, "getArrayArrayItemNull": 0, "getArrayArrayItemEmpty": 0, "getArrayArrayValid": 0, "putArrayArrayValid": 0, "getArrayComplexNull": 0, "getArrayComplexEmpty": 0, "getArrayComplexItemNull": 0, "getArrayComplexItemEmpty": 0, "getArrayComplexValid": 0, "putArrayComplexValid": 0, "getArrayDictionaryNull": 0, "getArrayDictionaryEmpty": 0, "getArrayDictionaryItemNull": 0, "getArrayDictionaryItemEmpty": 0, "getArrayDictionaryValid": 0, 
"putArrayDictionaryValid": 0, "getBoolTrue" : 0, "putBoolTrue" : 0, "getBoolFalse" : 0, "putBoolFalse" : 0, "getBoolInvalid" : 0, "getBoolNull" : 0, "getByteNull": 0, "getByteEmpty": 0, "getByteNonAscii": 0, "putByteNonAscii": 0, "getByteInvalid": 0, "getDateNull": 0, "getDateInvalid": 0, "getDateOverflow": 0, "getDateUnderflow": 0, "getDateMax": 0, "putDateMax": 0, "getDateMin": 0, "putDateMin": 0, "getDateTimeNull": 0, "getDateTimeInvalid": 0, "getDateTimeOverflow": 0, "getDateTimeUnderflow": 0, "putDateTimeMaxUtc": 0, "getDateTimeMaxUtcLowercase": 0, "getDateTimeMaxUtcUppercase": 0, "getDateTimeMaxLocalPositiveOffsetLowercase": 0, "getDateTimeMaxLocalPositiveOffsetUppercase": 0, "getDateTimeMaxLocalNegativeOffsetLowercase": 0, "getDateTimeMaxLocalNegativeOffsetUppercase": 0, "getDateTimeMinUtc": 0, "putDateTimeMinUtc": 0, "getDateTimeMinLocalPositiveOffset": 0, "getDateTimeMinLocalNegativeOffset": 0, "getDateTimeRfc1123Null": 0, "getDateTimeRfc1123Invalid": 0, "getDateTimeRfc1123Overflow": 0, "getDateTimeRfc1123Underflow": 0, "getDateTimeRfc1123MinUtc": 0, "putDateTimeRfc1123Max": 0, "putDateTimeRfc1123Min": 0, "getDateTimeRfc1123MaxUtcLowercase": 0, "getDateTimeRfc1123MaxUtcUppercase": 0, "getIntegerNull": 0, "getIntegerInvalid": 0, "getIntegerOverflow" : 0, "getIntegerUnderflow": 0, "getLongOverflow": 0, "getLongUnderflow": 0, "putIntegerMax": 0, "putLongMax": 0, "putIntegerMin": 0, "putLongMin": 0, "getNumberNull": 0, "getFloatInvalid": 0, "getDoubleInvalid": 0, "getFloatBigScientificNotation": 0, "putFloatBigScientificNotation": 0, "getDoubleBigScientificNotation": 0, "putDoubleBigScientificNotation": 0, "getDoubleBigPositiveDecimal" : 0, "putDoubleBigPositiveDecimal" : 0, "getDoubleBigNegativeDecimal" : 0, "putDoubleBigNegativeDecimal" : 0, "getFloatSmallScientificNotation" : 0, "putFloatSmallScientificNotation" : 0, "getDoubleSmallScientificNotation" : 0, "putDoubleSmallScientificNotation" : 0, "getStringNull": 0, "putStringNull": 0, "getStringEmpty": 0, 
"putStringEmpty": 0, "getStringMultiByteCharacters": 0, "putStringMultiByteCharacters": 0, "getStringWithLeadingAndTrailingWhitespace" : 0, "putStringWithLeadingAndTrailingWhitespace" : 0, "getStringNotProvided": 0, "getEnumNotExpandable": 0, "putEnumNotExpandable":0, "putComplexBasicValid": 0, "getComplexBasicValid": 0, "getComplexBasicEmpty": 0, "getComplexBasicNotProvided": 0, "getComplexBasicNull": 0, "getComplexBasicInvalid": 0, "putComplexPrimitiveInteger": 0, "putComplexPrimitiveLong": 0, "putComplexPrimitiveFloat": 0, "putComplexPrimitiveDouble": 0, "putComplexPrimitiveBool": 0, "putComplexPrimitiveString": 0, "putComplexPrimitiveDate": 0, "putComplexPrimitiveDateTime": 0, "putComplexPrimitiveDateTimeRfc1123": 0, "putComplexPrimitiveDuration": 0, "putComplexPrimitiveByte": 0, "getComplexPrimitiveInteger": 0, "getComplexPrimitiveLong": 0, "getComplexPrimitiveFloat": 0, "getComplexPrimitiveDouble": 0, "getComplexPrimitiveBool": 0, "getComplexPrimitiveString": 0, "getComplexPrimitiveDate": 0, "getComplexPrimitiveDateTime": 0, "getComplexPrimitiveDateTimeRfc1123": 0, "getComplexPrimitiveDuration": 0, "getComplexPrimitiveByte": 0, "putComplexArrayValid": 0, "putComplexArrayEmpty": 0, "getComplexArrayValid": 0, "getComplexArrayEmpty": 0, "getComplexArrayNotProvided": 0, "putComplexDictionaryValid": 0, "putComplexDictionaryEmpty": 0, "getComplexDictionaryValid": 0, "getComplexDictionaryEmpty": 0, "getComplexDictionaryNull": 0, "getComplexDictionaryNotProvided": 0, "putComplexInheritanceValid": 0, "getComplexInheritanceValid": 0, "putComplexPolymorphismValid": 0, "getComplexPolymorphismValid": 0, "putComplexPolymorphicRecursiveValid": 0, "getComplexPolymorphicRecursiveValid": 0, "putComplexReadOnlyPropertyValid": 0, "UrlPathsBoolFalse": 0, "UrlPathsBoolTrue": 0, "UrlPathsIntPositive": 0, "UrlPathsIntNegative": 0, "UrlPathsLongPositive": 0, "UrlPathsLongNegative": 0, "UrlPathsFloatPositive": 0, "UrlPathsFloatNegative": 0, "UrlPathsDoublePositive": 0, 
"UrlPathsDoubleNegative": 0, "UrlPathsStringUrlEncoded": 0, "UrlPathsStringEmpty": 0, "UrlPathsEnumValid":0, "UrlPathsByteMultiByte": 0, "UrlPathsByteEmpty": 0, "UrlPathsDateValid": 0, "UrlPathsDateTimeValid": 0, "UrlQueriesBoolFalse": 0, "UrlQueriesBoolTrue": 0, "UrlQueriesBoolNull": 0, "UrlQueriesIntPositive": 0, "UrlQueriesIntNegative": 0, "UrlQueriesIntNull": 0, "UrlQueriesLongPositive": 0, "UrlQueriesLongNegative": 0, "UrlQueriesLongNull": 0, "UrlQueriesFloatPositive": 0, "UrlQueriesFloatNegative": 0, "UrlQueriesFloatNull": 0, "UrlQueriesDoublePositive": 0, "UrlQueriesDoubleNegative": 0, "UrlQueriesDoubleNull": 0, "UrlQueriesStringUrlEncoded": 0, "UrlQueriesStringEmpty": 0, "UrlQueriesStringNull": 0, "UrlQueriesEnumValid": 0, "UrlQueriesEnumNull": 0, "UrlQueriesByteMultiByte": 0, "UrlQueriesByteEmpty": 0, "UrlQueriesByteNull": 0, "UrlQueriesDateValid": 0, "UrlQueriesDateNull": 0, "UrlQueriesDateTimeValid": 0, "UrlQueriesDateTimeNull": 0, "UrlQueriesArrayCsvNull": 0, "UrlQueriesArrayCsvEmpty": 0, "UrlQueriesArrayCsvValid": 0, //Once all the languages implement this test, the scenario counter should be reset to zero. 
It is currently implemented in C# and Python "UrlQueriesArrayMultiNull": 1, "UrlQueriesArrayMultiEmpty": 1, "UrlQueriesArrayMultiValid": 1, "UrlQueriesArraySsvValid": 0, "UrlQueriesArrayPipesValid": 0, "UrlQueriesArrayTsvValid": 0, "UrlPathItemGetAll": 0, "UrlPathItemGetGlobalNull": 0, "UrlPathItemGetGlobalAndLocalNull": 0, "UrlPathItemGetPathItemAndLocalNull": 0, "putDictionaryEmpty": 0, "getDictionaryNull": 0, "getDictionaryEmpty": 0, "getDictionaryInvalid": 0, "getDictionaryNullValue": 0, "getDictionaryNullkey": 0, "getDictionaryKeyEmptyString": 0, "getDictionaryBooleanValid": 0, "getDictionaryBooleanWithNull": 0, "getDictionaryBooleanWithString": 0, "getDictionaryIntegerValid": 0, "getDictionaryIntegerWithNull": 0, "getDictionaryIntegerWithString": 0, "getDictionaryLongValid": 0, "getDictionaryLongWithNull": 0, "getDictionaryLongWithString": 0, "getDictionaryFloatValid": 0, "getDictionaryFloatWithNull": 0, "getDictionaryFloatWithString": 0, "getDictionaryDoubleValid": 0, "getDictionaryDoubleWithNull": 0, "getDictionaryDoubleWithString": 0, "getDictionaryStringValid": 0, "getDictionaryStringWithNull": 0, "getDictionaryStringWithNumber": 0, "getDictionaryDateValid": 0, "getDictionaryDateWithNull": 0, "getDictionaryDateWithInvalidChars": 0, "getDictionaryDateTimeValid": 0, "getDictionaryDateTimeWithNull": 0, "getDictionaryDateTimeWithInvalidChars": 0, "getDictionaryDateTimeRfc1123Valid": 0, "getDictionaryDurationValid": 0, "getDictionaryByteValid": 0, "getDictionaryByteWithNull": 0, "putDictionaryBooleanValid": 0, "putDictionaryIntegerValid": 0, "putDictionaryLongValid": 0, "putDictionaryFloatValid": 0, "putDictionaryDoubleValid": 0, "putDictionaryStringValid": 0, "putDictionaryDateValid": 0, "putDictionaryDateTimeValid": 0, "putDictionaryDateTimeRfc1123Valid": 0, "putDictionaryDurationValid": 0, "putDictionaryByteValid": 0, "getDictionaryComplexNull": 0, "getDictionaryComplexEmpty": 0, "getDictionaryComplexItemNull": 0, "getDictionaryComplexItemEmpty": 0, 
"getDictionaryComplexValid": 0, "putDictionaryComplexValid": 0, "getDictionaryArrayNull": 0, "getDictionaryArrayEmpty": 0, "getDictionaryArrayItemNull": 0, "getDictionaryArrayItemEmpty": 0, "getDictionaryArrayValid": 0, "putDictionaryArrayValid": 0, "getDictionaryDictionaryNull": 0, "getDictionaryDictionaryEmpty": 0, "getDictionaryDictionaryItemNull": 0, "getDictionaryDictionaryItemEmpty": 0, "getDictionaryDictionaryValid": 0, "putDictionaryDictionaryValid": 0, "putDurationPositive": 0, "getDurationNull": 0, "getDurationInvalid": 0, "getDurationPositive": 0, "HeaderParameterExistingKey": 0, "HeaderResponseExistingKey": 0, "HeaderResponseProtectedKey": 0, "HeaderParameterIntegerPositive": 0, "HeaderParameterIntegerNegative": 0, "HeaderParameterLongPositive": 0, "HeaderParameterLongNegative": 0, "HeaderParameterFloatPositive": 0, "HeaderParameterFloatNegative": 0, "HeaderParameterDoublePositive": 0, "HeaderParameterDoubleNegative": 0, "HeaderParameterBoolTrue": 0, "HeaderParameterBoolFalse": 0, "HeaderParameterStringValid": 0, "HeaderParameterStringNull": 0, "HeaderParameterStringEmpty": 0, "HeaderParameterDateValid": 0, "HeaderParameterDateMin": 0, "HeaderParameterDateTimeValid": 0, "HeaderParameterDateTimeMin": 0, "HeaderParameterDateTimeRfc1123Valid": 0, "HeaderParameterDateTimeRfc1123Min": 0, "HeaderParameterBytesValid": 0, "HeaderParameterDurationValid": 0, "HeaderResponseIntegerPositive": 0, "HeaderResponseIntegerNegative": 0, "HeaderResponseLongPositive": 0, "HeaderResponseLongNegative": 0, "HeaderResponseFloatPositive": 0, "HeaderResponseFloatNegative": 0, "HeaderResponseDoublePositive": 0, "HeaderResponseDoubleNegative": 0, "HeaderResponseBoolTrue": 0, "HeaderResponseBoolFalse": 0, "HeaderResponseStringValid": 0, "HeaderResponseStringNull": 0, "HeaderResponseStringEmpty": 0, "HeaderParameterEnumValid": 0, "HeaderParameterEnumNull": 0, "HeaderResponseEnumValid": 0, "HeaderResponseEnumNull": 0, "HeaderResponseDateValid": 0, "HeaderResponseDateMin": 0, 
"HeaderResponseDateTimeValid": 0, "HeaderResponseDateTimeMin": 0, "HeaderResponseDateTimeRfc1123Valid": 0, "HeaderResponseDateTimeRfc1123Min": 0, "HeaderResponseBytesValid": 0, "HeaderResponseDurationValid": 0, "FormdataStreamUploadFile": 0, "StreamUploadFile": 0, "ConstantsInPath": 0, "ConstantsInBody": 0, "CustomBaseUri": 0, //Once all the languages implement this test, the scenario counter should be reset to zero. It is currently implemented in C#, Python and node.js "CustomBaseUriMoreOptions": 1, 'getModelFlattenArray': 0, 'putModelFlattenArray': 0, 'getModelFlattenDictionary': 0, 'putModelFlattenDictionary': 0, 'getModelFlattenResourceCollection': 0, 'putModelFlattenResourceCollection': 0, 'putModelFlattenCustomBase': 0, 'postModelFlattenCustomParameter': 0, 'putModelFlattenCustomGroupedParameter': 0, /* TODO: only C#, Python and node.js support the base64url format currently. Exclude these tests from code coverage until it is implemented in other languages */ "getStringBase64Encoded": 1, "getStringBase64UrlEncoded": 1, "putStringBase64UrlEncoded": 1, "getStringNullBase64UrlEncoding": 1, "getArrayBase64Url": 1, "getDictionaryBase64Url": 1, "UrlPathsStringBase64Url": 1, "UrlPathsArrayCSVInPath": 1, /* TODO: only C# and Python support the unixtime format currently. Exclude these tests from code coverage until it is implemented in other languages */ "getUnixTime": 1, "getInvalidUnixTime": 1, "getNullUnixTime": 1, "putUnixTime": 1, "UrlPathsIntUnixTime": 1, /* TODO: Once all the languages implement these tests, the scenario counters should be reset to zero. 
It is currently implemented in Python */ "getDecimalInvalid": 1, "getDecimalBig": 1, "getDecimalSmall": 1, "getDecimalBigPositiveDecimal" : 1, "getDecimalBigNegativeDecimal" : 1, "putDecimalBig": 1, "putDecimalSmall": 1, "putDecimalBigPositiveDecimal" : 1, "getEnumReferenced" : 1, "putEnumReferenced" : 1, "getEnumReferencedConstant" : 1, "putEnumReferencedConstant" : 1 }; // view engine setup app.set('views', path.join(__dirname, 'views')); app.set('view engine', 'jade'); // uncomment after placing your favicon in /public //app.use(favicon(__dirname + '/public/favicon.ico')); app.use(logger('dev')); app.use(bodyParser.json({strict: false})); app.use(bodyParser.urlencoded({ extended: false })); app.use(cookieParser()); app.use(express.static(path.join(__dirname, 'public'))); app.use('/', routes); app.use('/bool', new bool(coverage).router); app.use('/int', new integer(coverage).router); app.use('/number', new number(coverage).router); app.use('/string', new string(coverage).router); app.use('/byte', new byte(coverage).router); app.use('/date', new date(coverage).router); app.use('/datetime', new datetime(coverage, optionalCoverage).router); app.use('/datetimeRfc1123', new datetimeRfc1123(coverage).router); app.use('/duration', new duration(coverage, optionalCoverage).router); app.use('/array', new array(coverage).router); app.use('/complex', new complex(coverage).router); app.use('/dictionary', new dictionary(coverage).router); app.use('/paths', new paths(coverage).router); app.use('/queries', new queries(coverage).router); app.use('/pathitem', new pathitem(coverage).router); app.use('/header', new header(coverage, optionalCoverage).router); app.use('/reqopt', new reqopt(coverage).router); app.use('/files', new files(coverage).router); app.use('/formdata', new formData(coverage).router); app.use('/http', new httpResponses(coverage, optionalCoverage).router); app.use('/model-flatten', new modelFlatten(coverage).router); app.use('/lro', new 
lros(azurecoverage).router); app.use('/paging', new paging(azurecoverage).router); app.use('/azurespecials', new azureSpecial(azurecoverage).router); app.use('/report', new report(coverage, azurecoverage).router); app.use('/subscriptions', new azureUrl(azurecoverage).router); app.use('/parameterGrouping', new parameterGrouping(azurecoverage).router); app.use('/validation', new validation(coverage).router); app.use('/customUri', new customUri(coverage).router); app.use('/xml', new xml().router); // catch 404 and forward to error handler app.use(function(req, res, next) { var err = new Error('Not Found'); err.status = 404; next(err); }); app.use(function(err, req, res, next) { res.status(err.status || 500); res.end(JSON.stringify(err)); }); module.exports = app;
mit
theoweiss/openhab2
bundles/org.openhab.binding.miio/src/main/java/org/openhab/binding/miio/internal/basic/CommandParameterType.java
1157
/** * Copyright (c) 2010-2019 Contributors to the openHAB project * * See the NOTICE file(s) distributed with this work for additional * information. * * This program and the accompanying materials are made available under the * terms of the Eclipse Public License 2.0 which is available at * http://www.eclipse.org/legal/epl-2.0 * * SPDX-License-Identifier: EPL-2.0 */ package org.openhab.binding.miio.internal.basic; /** * Various types of parameters to be send * * @author Marcel Verpaalen - Initial contribution */ public enum CommandParameterType { NONE("none"), EMPTY("empty"), ONOFF("onoff"), ONOFFPARA("onoffpara"), STRING("string"), CUSTOMSTRING("customstring"), NUMBER("number"), COLOR("color"), UNKNOWN("unknown"); private String text; CommandParameterType(String text) { this.text = text; } public static CommandParameterType fromString(String text) { for (CommandParameterType param : CommandParameterType.values()) { if (param.text.equalsIgnoreCase(text)) { return param; } } return UNKNOWN; } }
epl-1.0
schnitzel25/pizza
sites/all/themes/pzz/libraries/responsive-nav/README.md
13085
# Responsive Nav ### Responsive navigation plugin without library dependencies and with fast touch screen support. [Responsive Nav](http://responsive-nav.com) is a tiny JavaScript plugin which weighs only 1.3kb minified and Gzip’ed, and helps you to create a toggled navigation for small screens. It uses touch events and CSS3 transitions for the best possible performance. It also contains a “clever” workaround that makes it possible to transition from `height: 0` to `height: auto`, which isn’t normally possible with CSS3 transitions. #### Features: * Simple, semantic markup. * Weighs only 1.3kb minified and Gzip’ed. * Doesn’t require any external library. * Uses CSS3 transitions and touch events. * Supports RequireJS and multiple instances. * Removes the 300ms delay between a physical tap and the click event. * Makes it possible to use CSS3 transitions with height: auto. * Built with accessibility in mind, meaning that everything works on screen readers and with JavaScript disabled, too. * Works in all major desktop and mobile browsers, including IE 6 and up. * Free to use under the MIT license. # Demos * [Fixed positioned one page demo](http://www.adtile.me/fixed-nav/) using smooth scrolling. View source on GitHub: [adtile/fixed-nav](https://github.com/adtile/fixed-nav) * [Advanced demo](http://responsive-nav.com/demo/) (View the [source code](https://github.com/viljamis/responsive-nav.js/tree/master/demos/advanced-left-navigation)) * [Official website](http://responsive-nav.com) (works as a demo, too!) * [Simple demo with IE support](http://responsive-nav.com/demo-respondjs/) (View the [source code](https://github.com/viljamis/responsive-nav.js/tree/master/demos/ie-support-using-respondjs)) * 11(!) additional examples included in this repository's [demos](https://github.com/viljamis/responsive-nav.js/tree/master/demos) folder. # Usage instructions Following the steps below you will be able to get the plugin up and running. 
If you notice any bugs, please post them to [GitHub issues](https://github.com/viljamis/responsive-nav.js/issues). 1. Link files: ```html <!-- Put these into the <head> --> <link rel="stylesheet" href="responsive-nav.css"> <script src="responsive-nav.js"></script> ``` 1. Add markup: ```html <nav class="nav-collapse"> <ul> <li><a href="#">Home</a></li> <li><a href="#">About</a></li> <li><a href="#">Projects</a></li> <li><a href="#">Contact</a></li> </ul> </nav> ``` 1. Hook up the plugin: ```html <!-- Put this right before the </body> closing tag --> <script> var nav = responsiveNav(".nav-collapse"); </script> ``` 1. Customizable options: ```javascript var nav = responsiveNav(".nav-collapse", { // Selector animate: true, // Boolean: Use CSS3 transitions, true or false transition: 284, // Integer: Speed of the transition, in milliseconds label: "Menu", // String: Label for the navigation toggle insert: "before", // String: Insert the toggle before or after the navigation customToggle: "", // Selector: Specify the ID of a custom toggle closeOnNavClick: false, // Boolean: Close the navigation when one of the links are clicked openPos: "relative", // String: Position of the opened nav, relative or static navClass: "nav-collapse", // String: Default CSS class. If changed, you need to edit the CSS too! navActiveClass: "js-nav-active", // String: Class that is added to <html> element when nav is active jsClass: "js", // String: 'JS enabled' class which is added to <html> element init: function(){}, // Function: Init callback open: function(){}, // Function: Open callback close: function(){} // Function: Close callback }); ``` # Public methods See the [example code here](https://github.com/viljamis/responsive-nav.js/blob/master/demos/public-events/index.html) for the usage. 
`nav.toggle();` `nav.open();` `nav.close();` `nav.destroy();` `nav.resize();` # Changing the breakpoint Breakpoint is defined in the [responsive-nav.css](https://github.com/viljamis/responsive-nav.js/blob/master/responsive-nav.css) file. Responsive Nav checks on window resize and on orientation change if the navigation toggle has `display: none;` and based on that switches between mobile and desktop states. # Supporting old IEs Even though Responsive Nav works even on IE6, you should remember that IE8 and under do not support media queries and thus can’t change between "small screen" and "large screen" styles. If needed, you can add Media Query support for those browsers using [respond.js](https://github.com/scottjehl/Respond). There’s an example [here](https://github.com/viljamis/responsive-nav.js/tree/master/demos/ie-support-using-respondjs). When old IE support is needed you should stick to using ID selector with Responsive Nav. That’s because the plugin uses `getElementById` method by default which is widely supported in all browsers. When using classes or element selectors `querySelector` will be used instead which isn’t supported in old IEs. # Things to keep in mind Calculated Max-height doesn't account for top/bottom padding on `.nav-collapse` (this is on purpose). If you need to add padding inside the nav, you can apply it to any other element, for example the `<ul>` inside `.nav-collapse`. # Tested on the following platforms * iOS 4.2.1+ * Android 1.6+ * Windows Phone 7.5+ * Blackberry 7.0+ * Blackberry Tablet 2.0+ * Jolla 1.0+ * Kindle 3.3+ * Maemo 5.0+ * Meego 1.2+ * Symbian 3 * Symbian Belle * Symbian S40 Asha * webOS 2.0+ * Windows XP+ * Mac OS X # Working on the repository [GruntJS](http://gruntjs.com/) is used for the build process, which means node and npm are required. 
If you already have those on your machine, you can install Grunt and all dependencies required for the build using: ```sh npm install -g grunt-cli npm install ``` ## Starting the server ```sh python -m SimpleHTTPServer 8000 ``` ## Git Hooks It is useful to setup a pre-commit and post-checkout hooks to smooth your workflow. On pre-commit we want to ensure that the project can build successfully, and on post-checkout we want to ensure that any new dependencies are installed via npm. ### Pre-Commit ```sh touch .git/hooks/pre-commit && echo -e '#!/bin/sh\ngrunt test' > .git/hooks/pre-commit && chmod +x .git/hooks/pre-commit ``` ### Post-Checkout ```sh touch .git/hooks/post-checkout && echo -e '#!/bin/sh\nnpm install\nexit 0' > .git/hooks/post-checkout && chmod +x .git/hooks/post-checkout ``` ## Building The Project To build the project, run unit tests etc. enter the following at the terminal: ```sh grunt ``` Grunt can also be used to monitor files and re-build the project on each change. For this we use Grunt's watch task: ```sh grunt watch ``` Next time you change the file, Grunt will perform all build tasks. ## Testing The test suite can be run with `grunt test` and is also part of the default Grunt task. This command runs all tests locally using PhantomJS. ### Running on multiple devices/browsers It's possible to run the test suite on multiple devices with Karma. The Karma server can be started with `grunt karma` and multiple browsers should then point to the machine running the server on port 9876 (e.g. http://localhost:9876). Once the browsers are connected, the test suite can be run with `grunt karma:all:run`. An easier way to test on multiple devices as part of the development cycle is to use karma with the watch task. Running `grunt karma watch` will automatically start the Karma server in the background and will run the tests automatically every time a file changes, on every connected device. 
# Special thanks In random order: * [Matteo Spinelli](https://twitter.com/cubiq) * [Matt Stow](https://twitter.com/stowball) * [Joao Carlos](https://twitter.com/jcxplorer) * [Vesa Vänskä](https://twitter.com/vesan) * [Andrea Carraro](https://github.com/toomuchdesign) * [Nick Williams](https://twitter.com/WickyNilliams) # License Licensed under the MIT license. Copyright (c) 2013 Viljami Salminen, http://viljamis.com/ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # Changelog `1.0.39` (2015-04-09) - Adds Browserify support. `1.0.38` (2015-04-02) - Fixes custom-toggle demo, adds feature detection for event.stopImmediatePropagation, removes all pointer-event hacks completely and also fixes an issue which caused multiple taps to sometimes freeze the nav completely. `1.0.34` (2014-12-16) - Fixes versioning. `1.0.33` (2014-12-15) - "closeOnNavClick" now works on old IEs too, so no more features that only work in modern browsers. Fixes a bug which caused the navigation to sometimes not toggle. 
Also fixes a bug in Safari that sometimes caused the navigation render incorrectly when switching between browser tabs. (+Adds more comments to the code.) `1.0.32` (2014-03-05) - Ditching the `[].forEach.call(NodeList)` hack to make the code more sensible and future-proof. `1.0.31` (2014-03-02) - Fixes Chrome Mobile rendering issue. `1.0.30` (2014-03-02) - Better performance. New [fixed navigation example](https://github.com/adtile/fixed-nav) provided by [Adtile](https://twitter.com/adtilehq). This release fixes an issue where multiple navigations on the same page got the same calculated height. Toggle now has an "active" class when the nav is open. Two new options are added: "closeOnNavClick" and "navActiveClass". Two new methods are also added: "open" and "close" (thanks to [@munkius](https://github.com/munkius) for the heads up on this!). This release adds also better "hamburger" icon for advanced demos which you can now style via css (size, color, shadow & etc). Includes also other bug fixes targeting older Android devices. `1.0.25` (2013-12-13) - Fixes ghost click issues on Android + a problem where calculated max-height might get overridden under certain circumstances. `1.0.24` (2013-11-27) - Adds new option called "navClass." All tests should also work now on real iOS, Windows Phone and Android devices (when using grunt-karma), and not just with PhantomJS. `1.0.23` (2013-09-25) - Fixes IE8 bugs + starts using automated builds and tests. `1.0.22` (2013-09-19) - Public resize method (to allow calling resize manually when needed). `1.0.21` (2013-09-18) - Multiple instances are now possible thanks to [@toomuchdesign](https://github.com/toomuchdesign). Uses classes instead of ID's now by default, but can be configured to use ID's if old IE support is needed (check the "ie-support" folder in demos). `1.0.20` (2013-08-12) - Uses now touchmove & touchend, which means that the menu doesn’t trigger anymore if the user starts moving finger instead of just tapping. 
Also fixes one Android bug and a bug which appeared when tapping the toggle really fast over and over. Plugin’s Functionality doesn’t depent on window load event anymore so it works now with tools like require.js too. `1.0.16` (2013-08-02) - Set `navOpen` state in the `_init` method. Thanks [@nicolashery](https://github.com/nicolashery)! `1.0.15` (2013-06-28) - Responsive Nav now automatically combines multiple navigations inside a container. `1.0.14` (2013-04-13) - Adds touchend listener and stopProganation + prevents ghost click from happening on some Android browsers. "tabIndex" and "debug" settings are being removed. `v1.11` (2013-04-09) - Performance optimization, bug fixes and 6 additional usage examples `v1.07` (2013-04-03) - Simplifies the codebase and fixes few bugs `v1.05` (2013-03-31) - Adds callback functionality and removes unnecessary CSS. `v1.03` (2013-03-28) - Adds option to disable CSS3 transitions + three other options called "tabIndex", "openPos" and "jsClass". `v1.00` (2013-03-25) - Release. Big thank you’s for the help go out to [@cubiq](https://twitter.com/cubiq), [@stowball](https://twitter.com/stowball), [@jcxplorer](https://twitter.com/jcxplorer) and [@vesan](https://twitter.com/vesan)! # Want to do a pull request? Great! New ideas are more than welcome, but please check the [Pull Request Guidelines](CONTRIBUTING.md) first before doing so.
gpl-2.0
isauragalafate/drupal8
vendor/symfony/validator/Tests/Constraints/RegexTest.php
3009
<?php /* * This file is part of the Symfony package. * * (c) Fabien Potencier <[email protected]> * * For the full copyright and license information, please view the LICENSE * file that was distributed with this source code. */ namespace Symfony\Component\Validator\Tests\Constraints; use PHPUnit\Framework\TestCase; use Symfony\Component\Validator\Constraints\Regex; /** * @author Bernhard Schussek <[email protected]> */ class RegexTest extends TestCase { public function testConstraintGetDefaultOption() { $constraint = new Regex('/^[0-9]+$/'); $this->assertSame('/^[0-9]+$/', $constraint->pattern); } public function provideHtmlPatterns() { return array( // HTML5 wraps the pattern in ^(?:pattern)$ array('/^[0-9]+$/', '[0-9]+'), array('/[0-9]+$/', '.*[0-9]+'), array('/^[0-9]+/', '[0-9]+.*'), array('/[0-9]+/', '.*[0-9]+.*'), // We need a smart way to allow matching of patterns that contain // ^ and $ at various sub-clauses of an or-clause // .*(pattern).* seems to work correctly array('/[0-9]$|[a-z]+/', '.*([0-9]$|[a-z]+).*'), array('/[0-9]$|^[a-z]+/', '.*([0-9]$|^[a-z]+).*'), array('/^[0-9]|[a-z]+$/', '.*(^[0-9]|[a-z]+$).*'), // Unescape escaped delimiters array('/^[0-9]+\/$/', '[0-9]+/'), array('#^[0-9]+\#$#', '[0-9]+#'), // Cannot be converted array('/^[0-9]+$/i', null), // Inverse matches are simple, just wrap in // ((?!pattern).)* array('/^[0-9]+$/', '((?!^[0-9]+$).)*', false), array('/[0-9]+$/', '((?![0-9]+$).)*', false), array('/^[0-9]+/', '((?!^[0-9]+).)*', false), array('/[0-9]+/', '((?![0-9]+).)*', false), array('/[0-9]$|[a-z]+/', '((?![0-9]$|[a-z]+).)*', false), array('/[0-9]$|^[a-z]+/', '((?![0-9]$|^[a-z]+).)*', false), array('/^[0-9]|[a-z]+$/', '((?!^[0-9]|[a-z]+$).)*', false), array('/^[0-9]+\/$/', '((?!^[0-9]+/$).)*', false), array('#^[0-9]+\#$#', '((?!^[0-9]+#$).)*', false), array('/^[0-9]+$/i', null, false), ); } /** * @dataProvider provideHtmlPatterns */ public function testGetHtmlPattern($pattern, $htmlPattern, $match = true) { $constraint = new 
Regex(array( 'pattern' => $pattern, 'match' => $match, )); $this->assertSame($pattern, $constraint->pattern); $this->assertSame($htmlPattern, $constraint->getHtmlPattern()); } public function testGetCustomHtmlPattern() { $constraint = new Regex(array( 'pattern' => '((?![0-9]$|[a-z]+).)*', 'htmlPattern' => 'foobar', )); $this->assertSame('((?![0-9]$|[a-z]+).)*', $constraint->pattern); $this->assertSame('foobar', $constraint->getHtmlPattern()); } }
gpl-2.0
foxsat-hdr/linux-kernel
net/ipv6/ip6_input.c
6156
/* * IPv6 input * Linux INET6 implementation * * Authors: * Pedro Roque <[email protected]> * Ian P. Morris <[email protected]> * * $Id: ip6_input.c,v 1.19 2000/12/13 18:31:50 davem Exp $ * * Based in linux/net/ipv4/ip_input.c * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ /* Changes * * Mitsuru KANDA @USAGI and * YOSHIFUJI Hideaki @USAGI: Remove ipv6_parse_exthdrs(). */ #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/sockios.h> #include <linux/sched.h> #include <linux/net.h> #include <linux/netdevice.h> #include <linux/in6.h> #include <linux/icmpv6.h> #include <linux/netfilter.h> #include <linux/netfilter_ipv6.h> #include <net/sock.h> #include <net/snmp.h> #include <net/ipv6.h> #include <net/protocol.h> #include <net/transp_v6.h> #include <net/rawv6.h> #include <net/ndisc.h> #include <net/ip6_route.h> #include <net/addrconf.h> #include <net/xfrm.h> static inline int ip6_rcv_finish( struct sk_buff *skb) { if (skb->dst == NULL) ip6_route_input(skb); return dst_input(skb); } int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt) { struct ipv6hdr *hdr; u32 pkt_len; if (skb->pkt_type == PACKET_OTHERHOST) goto drop; IP6_INC_STATS_BH(IPSTATS_MIB_INRECEIVES); if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) { IP6_INC_STATS_BH(IPSTATS_MIB_INDISCARDS); goto out; } /* * Store incoming device index. When the packet will * be queued, we cannot refer to skb->dev anymore. * * BTW, when we send a packet for our own local address on a * non-loopback interface (e.g. ethX), it is being delivered * via the loopback interface (lo) here; skb->dev = &loopback_dev. * It, however, should be considered as if it is being * arrived via the sending interface (ethX), because of the * nature of scoping architecture. 
--yoshfuji */ IP6CB(skb)->iif = skb->dst ? ((struct rt6_info *)skb->dst)->rt6i_idev->dev->ifindex : dev->ifindex; if (skb->len < sizeof(struct ipv6hdr)) goto err; if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) { IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS); goto drop; } hdr = skb->nh.ipv6h; if (hdr->version != 6) goto err; pkt_len = ntohs(hdr->payload_len); /* pkt_len may be zero if Jumbo payload option is present */ if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) { if (pkt_len + sizeof(struct ipv6hdr) > skb->len) goto truncated; if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr))) { IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS); goto drop; } hdr = skb->nh.ipv6h; } if (hdr->nexthdr == NEXTHDR_HOP) { skb->h.raw = (u8*)(hdr+1); if (ipv6_parse_hopopts(skb, offsetof(struct ipv6hdr, nexthdr)) < 0) { IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS); return 0; } hdr = skb->nh.ipv6h; } return NF_HOOK(PF_INET6,NF_IP6_PRE_ROUTING, skb, dev, NULL, ip6_rcv_finish); truncated: IP6_INC_STATS_BH(IPSTATS_MIB_INTRUNCATEDPKTS); err: IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS); drop: kfree_skb(skb); out: return 0; } /* * Deliver the packet to the host */ static inline int ip6_input_finish(struct sk_buff *skb) { struct inet6_protocol *ipprot; struct sock *raw_sk; unsigned int nhoff; int nexthdr; u8 hash; skb->h.raw = skb->nh.raw + sizeof(struct ipv6hdr); /* * Parse extension headers */ nexthdr = skb->nh.ipv6h->nexthdr; nhoff = offsetof(struct ipv6hdr, nexthdr); /* Skip hop-by-hop options, they are already parsed. 
*/ if (nexthdr == NEXTHDR_HOP) { nhoff = sizeof(struct ipv6hdr); nexthdr = skb->h.raw[0]; skb->h.raw += (skb->h.raw[1]+1)<<3; } rcu_read_lock(); resubmit: if (!pskb_pull(skb, skb->h.raw - skb->data)) goto discard; nexthdr = skb->nh.raw[nhoff]; raw_sk = sk_head(&raw_v6_htable[nexthdr & (MAX_INET_PROTOS - 1)]); if (raw_sk) ipv6_raw_deliver(skb, nexthdr); hash = nexthdr & (MAX_INET_PROTOS - 1); if ((ipprot = rcu_dereference(inet6_protos[hash])) != NULL) { int ret; if (ipprot->flags & INET6_PROTO_FINAL) { struct ipv6hdr *hdr; skb_postpull_rcsum(skb, skb->nh.raw, skb->h.raw - skb->nh.raw); hdr = skb->nh.ipv6h; if (ipv6_addr_is_multicast(&hdr->daddr) && !ipv6_chk_mcast_addr(skb->dev, &hdr->daddr, &hdr->saddr) && !ipv6_is_mld(skb, nexthdr)) goto discard; } if (!(ipprot->flags & INET6_PROTO_NOPOLICY) && !xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) goto discard; ret = ipprot->handler(&skb, &nhoff); if (ret > 0) goto resubmit; else if (ret == 0) IP6_INC_STATS_BH(IPSTATS_MIB_INDELIVERS); } else { if (!raw_sk) { if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) { IP6_INC_STATS_BH(IPSTATS_MIB_INUNKNOWNPROTOS); icmpv6_param_prob(skb, ICMPV6_UNK_NEXTHDR, nhoff); } } else { IP6_INC_STATS_BH(IPSTATS_MIB_INDELIVERS); kfree_skb(skb); } } rcu_read_unlock(); return 0; discard: IP6_INC_STATS_BH(IPSTATS_MIB_INDISCARDS); rcu_read_unlock(); kfree_skb(skb); return 0; } int ip6_input(struct sk_buff *skb) { return NF_HOOK(PF_INET6,NF_IP6_LOCAL_IN, skb, skb->dev, NULL, ip6_input_finish); } int ip6_mc_input(struct sk_buff *skb) { struct ipv6hdr *hdr; int deliver; IP6_INC_STATS_BH(IPSTATS_MIB_INMCASTPKTS); hdr = skb->nh.ipv6h; deliver = likely(!(skb->dev->flags & (IFF_PROMISC|IFF_ALLMULTI))) || ipv6_chk_mcast_addr(skb->dev, &hdr->daddr, NULL); /* * IPv6 multicast router mode isnt currently supported. 
*/ #if 0 if (ipv6_config.multicast_route) { int addr_type; addr_type = ipv6_addr_type(&hdr->daddr); if (!(addr_type & (IPV6_ADDR_LOOPBACK | IPV6_ADDR_LINKLOCAL))) { struct sk_buff *skb2; struct dst_entry *dst; dst = skb->dst; if (deliver) { skb2 = skb_clone(skb, GFP_ATOMIC); dst_output(skb2); } else { dst_output(skb); return 0; } } } #endif if (likely(deliver)) { ip6_input(skb); return 0; } /* discard */ kfree_skb(skb); return 0; }
gpl-2.0
poondog/KANGAROO-kernel
arch/s390/mm/fault.c
17263
/* * arch/s390/mm/fault.c * * S390 version * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation * Author(s): Hartmut Penner ([email protected]) * Ulrich Weigand ([email protected]) * * Derived from "arch/i386/mm/fault.c" * Copyright (C) 1995 Linus Torvalds */ #include <linux/kernel_stat.h> #include <linux/perf_event.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/types.h> #include <linux/ptrace.h> #include <linux/mman.h> #include <linux/mm.h> #include <linux/compat.h> #include <linux/smp.h> #include <linux/kdebug.h> #include <linux/init.h> #include <linux/console.h> #include <linux/module.h> #include <linux/hardirq.h> #include <linux/kprobes.h> #include <linux/uaccess.h> #include <linux/hugetlb.h> #include <asm/asm-offsets.h> #include <asm/system.h> #include <asm/pgtable.h> #include <asm/irq.h> #include <asm/mmu_context.h> #include <asm/compat.h> #include "../kernel/entry.h" #ifndef CONFIG_64BIT #define __FAIL_ADDR_MASK 0x7ffff000 #define __SUBCODE_MASK 0x0200 #define __PF_RES_FIELD 0ULL #else /* CONFIG_64BIT */ #define __FAIL_ADDR_MASK -4096L #define __SUBCODE_MASK 0x0600 #define __PF_RES_FIELD 0x8000000000000000ULL #endif /* CONFIG_64BIT */ #define VM_FAULT_BADCONTEXT 0x010000 #define VM_FAULT_BADMAP 0x020000 #define VM_FAULT_BADACCESS 0x040000 static unsigned long store_indication; void fault_init(void) { if (test_facility(2) && test_facility(75)) store_indication = 0xc00; } static inline int notify_page_fault(struct pt_regs *regs) { int ret = 0; /* kprobe_running() needs smp_processor_id() */ if (kprobes_built_in() && !user_mode(regs)) { preempt_disable(); if (kprobe_running() && kprobe_fault_handler(regs, 14)) ret = 1; preempt_enable(); } return ret; } /* * Unlock any spinlocks which will prevent us from getting the * message out. 
*/ void bust_spinlocks(int yes) { if (yes) { oops_in_progress = 1; } else { int loglevel_save = console_loglevel; console_unblank(); oops_in_progress = 0; /* * OK, the message is on the console. Now we call printk() * without oops_in_progress set so that printk will give klogd * a poke. Hold onto your hats... */ console_loglevel = 15; printk(" "); console_loglevel = loglevel_save; } } /* * Returns the address space associated with the fault. * Returns 0 for kernel space and 1 for user space. */ static inline int user_space_fault(unsigned long trans_exc_code) { /* * The lowest two bits of the translation exception * identification indicate which paging table was used. */ trans_exc_code &= 3; if (trans_exc_code == 2) /* Access via secondary space, set_fs setting decides */ return current->thread.mm_segment.ar4; if (user_mode == HOME_SPACE_MODE) /* User space if the access has been done via home space. */ return trans_exc_code == 3; /* * If the user space is not the home space the kernel runs in home * space. Access via secondary space has already been covered, * access via primary space or access register is from user space * and access via home space is from the kernel. */ return trans_exc_code != 3; } static inline void report_user_fault(struct pt_regs *regs, long int_code, int signr, unsigned long address) { if ((task_pid_nr(current) > 1) && !show_unhandled_signals) return; if (!unhandled_signal(current, signr)) return; if (!printk_ratelimit()) return; printk("User process fault: interruption code 0x%lX ", int_code); print_vma_addr(KERN_CONT "in ", regs->psw.addr & PSW_ADDR_INSN); printk("\n"); printk("failing address: %lX\n", address); show_regs(regs); } /* * Send SIGSEGV to task. This is an external routine * to keep the stack usage of do_page_fault small. 
*/ static noinline void do_sigsegv(struct pt_regs *regs, long int_code, int si_code, unsigned long trans_exc_code) { struct siginfo si; unsigned long address; address = trans_exc_code & __FAIL_ADDR_MASK; current->thread.prot_addr = address; current->thread.trap_no = int_code; report_user_fault(regs, int_code, SIGSEGV, address); si.si_signo = SIGSEGV; si.si_code = si_code; si.si_addr = (void __user *) address; force_sig_info(SIGSEGV, &si, current); } static noinline void do_no_context(struct pt_regs *regs, long int_code, unsigned long trans_exc_code) { const struct exception_table_entry *fixup; unsigned long address; /* Are we prepared to handle this kernel fault? */ fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN); if (fixup) { regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE; return; } /* * Oops. The kernel tried to access some bad page. We'll have to * terminate things with extreme prejudice. */ address = trans_exc_code & __FAIL_ADDR_MASK; if (!user_space_fault(trans_exc_code)) printk(KERN_ALERT "Unable to handle kernel pointer dereference" " at virtual kernel address %p\n", (void *)address); else printk(KERN_ALERT "Unable to handle kernel paging request" " at virtual user address %p\n", (void *)address); die("Oops", regs, int_code); do_exit(SIGKILL); } static noinline void do_low_address(struct pt_regs *regs, long int_code, unsigned long trans_exc_code) { /* Low-address protection hit in kernel mode means NULL pointer write access in kernel mode. */ if (regs->psw.mask & PSW_MASK_PSTATE) { /* Low-address protection hit in user mode 'cannot happen'. */ die ("Low-address protection", regs, int_code); do_exit(SIGKILL); } do_no_context(regs, int_code, trans_exc_code); } static noinline void do_sigbus(struct pt_regs *regs, long int_code, unsigned long trans_exc_code) { struct task_struct *tsk = current; unsigned long address; struct siginfo si; /* * Send a sigbus, regardless of whether we were in kernel * or user mode. 
*/ address = trans_exc_code & __FAIL_ADDR_MASK; tsk->thread.prot_addr = address; tsk->thread.trap_no = int_code; si.si_signo = SIGBUS; si.si_errno = 0; si.si_code = BUS_ADRERR; si.si_addr = (void __user *) address; force_sig_info(SIGBUS, &si, tsk); } static noinline void do_fault_error(struct pt_regs *regs, long int_code, unsigned long trans_exc_code, int fault) { int si_code; switch (fault) { case VM_FAULT_BADACCESS: case VM_FAULT_BADMAP: /* Bad memory access. Check if it is kernel or user space. */ if (regs->psw.mask & PSW_MASK_PSTATE) { /* User mode accesses just cause a SIGSEGV */ si_code = (fault == VM_FAULT_BADMAP) ? SEGV_MAPERR : SEGV_ACCERR; do_sigsegv(regs, int_code, si_code, trans_exc_code); return; } case VM_FAULT_BADCONTEXT: do_no_context(regs, int_code, trans_exc_code); break; default: /* fault & VM_FAULT_ERROR */ if (fault & VM_FAULT_OOM) { if (!(regs->psw.mask & PSW_MASK_PSTATE)) do_no_context(regs, int_code, trans_exc_code); else pagefault_out_of_memory(); } else if (fault & VM_FAULT_SIGBUS) { /* Kernel mode? Handle exceptions or die */ if (!(regs->psw.mask & PSW_MASK_PSTATE)) do_no_context(regs, int_code, trans_exc_code); else do_sigbus(regs, int_code, trans_exc_code); } else BUG(); break; } } /* * This routine handles page faults. It determines the address, * and the problem, and then passes it off to one of the appropriate * routines. * * interruption code (int_code): * 04 Protection -> Write-Protection (suprression) * 10 Segment translation -> Not present (nullification) * 11 Page translation -> Not present (nullification) * 3b Region third trans. 
-> Not present (nullification) */ static inline int do_exception(struct pt_regs *regs, int access, unsigned long trans_exc_code) { struct task_struct *tsk; struct mm_struct *mm; struct vm_area_struct *vma; unsigned long address; unsigned int flags; int fault; if (notify_page_fault(regs)) return 0; tsk = current; mm = tsk->mm; /* * Verify that the fault happened in user space, that * we are not in an interrupt and that there is a * user context. */ fault = VM_FAULT_BADCONTEXT; if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm)) goto out; address = trans_exc_code & __FAIL_ADDR_MASK; perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); flags = FAULT_FLAG_ALLOW_RETRY; if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400) flags |= FAULT_FLAG_WRITE; retry: down_read(&mm->mmap_sem); fault = VM_FAULT_BADMAP; vma = find_vma(mm, address); if (!vma) goto out_up; if (unlikely(vma->vm_start > address)) { if (!(vma->vm_flags & VM_GROWSDOWN)) goto out_up; if (expand_stack(vma, address)) goto out_up; } /* * Ok, we have a good vm_area for this memory access, so * we can handle it.. */ fault = VM_FAULT_BADACCESS; if (unlikely(!(vma->vm_flags & access))) goto out_up; if (is_vm_hugetlb_page(vma)) address &= HPAGE_MASK; /* * If for any reason at all we couldn't handle the fault, * make sure we exit gracefully rather than endlessly redo * the fault. */ fault = handle_mm_fault(mm, vma, address, flags); if (unlikely(fault & VM_FAULT_ERROR)) goto out_up; /* * Major/minor page fault accounting is only done on the * initial attempt. If we go through a retry, it is extremely * likely that the page will be found in page cache at that point. 
*/ if (flags & FAULT_FLAG_ALLOW_RETRY) { if (fault & VM_FAULT_MAJOR) { tsk->maj_flt++; perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, regs, address); } else { tsk->min_flt++; perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, regs, address); } if (fault & VM_FAULT_RETRY) { /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk * of starvation. */ flags &= ~FAULT_FLAG_ALLOW_RETRY; goto retry; } } /* * The instruction that caused the program check will * be repeated. Don't signal single step via SIGTRAP. */ clear_tsk_thread_flag(tsk, TIF_PER_TRAP); fault = 0; out_up: up_read(&mm->mmap_sem); out: return fault; } void __kprobes do_protection_exception(struct pt_regs *regs, long pgm_int_code, unsigned long trans_exc_code) { int fault; /* Protection exception is suppressing, decrement psw address. */ regs->psw.addr -= (pgm_int_code >> 16); /* * Check for low-address protection. This needs to be treated * as a special case because the translation exception code * field is not guaranteed to contain valid data in this case. 
*/ if (unlikely(!(trans_exc_code & 4))) { do_low_address(regs, pgm_int_code, trans_exc_code); return; } fault = do_exception(regs, VM_WRITE, trans_exc_code); if (unlikely(fault)) do_fault_error(regs, 4, trans_exc_code, fault); } void __kprobes do_dat_exception(struct pt_regs *regs, long pgm_int_code, unsigned long trans_exc_code) { int access, fault; access = VM_READ | VM_EXEC | VM_WRITE; fault = do_exception(regs, access, trans_exc_code); if (unlikely(fault)) do_fault_error(regs, pgm_int_code & 255, trans_exc_code, fault); } #ifdef CONFIG_64BIT void __kprobes do_asce_exception(struct pt_regs *regs, long pgm_int_code, unsigned long trans_exc_code) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm)) goto no_context; down_read(&mm->mmap_sem); vma = find_vma(mm, trans_exc_code & __FAIL_ADDR_MASK); up_read(&mm->mmap_sem); if (vma) { update_mm(mm, current); return; } /* User mode accesses just cause a SIGSEGV */ if (regs->psw.mask & PSW_MASK_PSTATE) { do_sigsegv(regs, pgm_int_code, SEGV_MAPERR, trans_exc_code); return; } no_context: do_no_context(regs, pgm_int_code, trans_exc_code); } #endif int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write) { struct pt_regs regs; int access, fault; regs.psw.mask = psw_kernel_bits; if (!irqs_disabled()) regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT; regs.psw.addr = (unsigned long) __builtin_return_address(0); regs.psw.addr |= PSW_ADDR_AMODE; uaddr &= PAGE_MASK; access = write ? VM_WRITE : VM_READ; fault = do_exception(&regs, access, uaddr | 2); if (unlikely(fault)) { if (fault & VM_FAULT_OOM) return -EFAULT; else if (fault & VM_FAULT_SIGBUS) do_sigbus(&regs, pgm_int_code, uaddr); } return fault ? -EFAULT : 0; } #ifdef CONFIG_PFAULT /* * 'pfault' pseudo page faults routines. 
*/ static int pfault_disable; static int __init nopfault(char *str) { pfault_disable = 1; return 1; } __setup("nopfault", nopfault); struct pfault_refbk { u16 refdiagc; u16 reffcode; u16 refdwlen; u16 refversn; u64 refgaddr; u64 refselmk; u64 refcmpmk; u64 reserved; } __attribute__ ((packed, aligned(8))); int pfault_init(void) { struct pfault_refbk refbk = { .refdiagc = 0x258, .reffcode = 0, .refdwlen = 5, .refversn = 2, .refgaddr = __LC_CURRENT_PID, .refselmk = 1ULL << 48, .refcmpmk = 1ULL << 48, .reserved = __PF_RES_FIELD }; int rc; if (!MACHINE_IS_VM || pfault_disable) return -1; asm volatile( " diag %1,%0,0x258\n" "0: j 2f\n" "1: la %0,8\n" "2:\n" EX_TABLE(0b,1b) : "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc"); return rc; } void pfault_fini(void) { struct pfault_refbk refbk = { .refdiagc = 0x258, .reffcode = 1, .refdwlen = 5, .refversn = 2, }; if (!MACHINE_IS_VM || pfault_disable) return; asm volatile( " diag %0,0,0x258\n" "0:\n" EX_TABLE(0b,0b) : : "a" (&refbk), "m" (refbk) : "cc"); } static DEFINE_SPINLOCK(pfault_lock); static LIST_HEAD(pfault_list); static void pfault_interrupt(unsigned int ext_int_code, unsigned int param32, unsigned long param64) { struct task_struct *tsk; __u16 subcode; pid_t pid; /* * Get the external interruption subcode & pfault * initial/completion signal bit. VM stores this * in the 'cpu address' field associated with the * external interrupt. */ subcode = ext_int_code >> 16; if ((subcode & 0xff00) != __SUBCODE_MASK) return; kstat_cpu(smp_processor_id()).irqs[EXTINT_PFL]++; if (subcode & 0x0080) { /* Get the token (= pid of the affected task). */ pid = sizeof(void *) == 4 ? 
param32 : param64; rcu_read_lock(); tsk = find_task_by_pid_ns(pid, &init_pid_ns); if (tsk) get_task_struct(tsk); rcu_read_unlock(); if (!tsk) return; } else { tsk = current; } spin_lock(&pfault_lock); if (subcode & 0x0080) { /* signal bit is set -> a page has been swapped in by VM */ if (tsk->thread.pfault_wait == 1) { /* Initial interrupt was faster than the completion * interrupt. pfault_wait is valid. Set pfault_wait * back to zero and wake up the process. This can * safely be done because the task is still sleeping * and can't produce new pfaults. */ tsk->thread.pfault_wait = 0; list_del(&tsk->thread.list); wake_up_process(tsk); put_task_struct(tsk); } else { /* Completion interrupt was faster than initial * interrupt. Set pfault_wait to -1 so the initial * interrupt doesn't put the task to sleep. */ tsk->thread.pfault_wait = -1; } put_task_struct(tsk); } else { /* signal bit not set -> a real page is missing. */ if (tsk->thread.pfault_wait == 1) { /* Already on the list with a reference: put to sleep */ set_task_state(tsk, TASK_UNINTERRUPTIBLE); set_tsk_need_resched(tsk); } else if (tsk->thread.pfault_wait == -1) { /* Completion interrupt was faster than the initial * interrupt (pfault_wait == -1). Set pfault_wait * back to zero and exit. */ tsk->thread.pfault_wait = 0; } else { /* Initial interrupt arrived before completion * interrupt. Let the task sleep. * An extra task reference is needed since a different * cpu may set the task state to TASK_RUNNING again * before the scheduler is reached. 
*/ get_task_struct(tsk); tsk->thread.pfault_wait = 1; list_add(&tsk->thread.list, &pfault_list); set_task_state(tsk, TASK_UNINTERRUPTIBLE); set_tsk_need_resched(tsk); } } spin_unlock(&pfault_lock); } static int __cpuinit pfault_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) { struct thread_struct *thread, *next; struct task_struct *tsk; switch (action) { case CPU_DEAD: case CPU_DEAD_FROZEN: spin_lock_irq(&pfault_lock); list_for_each_entry_safe(thread, next, &pfault_list, list) { thread->pfault_wait = 0; list_del(&thread->list); tsk = container_of(thread, struct task_struct, thread); wake_up_process(tsk); put_task_struct(tsk); } spin_unlock_irq(&pfault_lock); break; default: break; } return NOTIFY_OK; } static int __init pfault_irq_init(void) { int rc; if (!MACHINE_IS_VM) return 0; rc = register_external_interrupt(0x2603, pfault_interrupt); if (rc) goto out_extint; rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP; if (rc) goto out_pfault; service_subclass_irq_register(); hotcpu_notifier(pfault_cpu_notify, 0); return 0; out_pfault: unregister_external_interrupt(0x2603, pfault_interrupt); out_extint: pfault_disable = 1; return rc; } early_initcall(pfault_irq_init); #endif /* CONFIG_PFAULT */
gpl-2.0
Haynie-Research-and-Development/jarvis
web/webapi/sms/vendor/twilio/sdk/Twilio/Rest/Notify/V1/Service/BindingOptions.php
5550
<?php /** * This code was generated by * \ / _ _ _| _ _ * | (_)\/(_)(_|\/| |(/_ v1.0.0 * / / */ namespace Twilio\Rest\Notify\V1\Service; use Twilio\Options; use Twilio\Values; /** * PLEASE NOTE that this class contains beta products that are subject to change. Use them with caution. */ abstract class BindingOptions { /** * @param string $tag The tag * @param string $notificationProtocolVersion The notification_protocol_version * @param string $credentialSid The credential_sid * @param string $endpoint The endpoint * @return CreateBindingOptions Options builder */ public static function create($tag = Values::NONE, $notificationProtocolVersion = Values::NONE, $credentialSid = Values::NONE, $endpoint = Values::NONE) { return new CreateBindingOptions($tag, $notificationProtocolVersion, $credentialSid, $endpoint); } /** * @param \DateTime $startDate The start_date * @param \DateTime $endDate The end_date * @param string $identity The identity * @param string $tag The tag * @return ReadBindingOptions Options builder */ public static function read($startDate = Values::NONE, $endDate = Values::NONE, $identity = Values::NONE, $tag = Values::NONE) { return new ReadBindingOptions($startDate, $endDate, $identity, $tag); } } class CreateBindingOptions extends Options { /** * @param string $tag The tag * @param string $notificationProtocolVersion The notification_protocol_version * @param string $credentialSid The credential_sid * @param string $endpoint The endpoint */ public function __construct($tag = Values::NONE, $notificationProtocolVersion = Values::NONE, $credentialSid = Values::NONE, $endpoint = Values::NONE) { $this->options['tag'] = $tag; $this->options['notificationProtocolVersion'] = $notificationProtocolVersion; $this->options['credentialSid'] = $credentialSid; $this->options['endpoint'] = $endpoint; } /** * The tag * * @param string $tag The tag * @return $this Fluent Builder */ public function setTag($tag) { $this->options['tag'] = $tag; return $this; } /** * The 
notification_protocol_version * * @param string $notificationProtocolVersion The notification_protocol_version * @return $this Fluent Builder */ public function setNotificationProtocolVersion($notificationProtocolVersion) { $this->options['notificationProtocolVersion'] = $notificationProtocolVersion; return $this; } /** * The credential_sid * * @param string $credentialSid The credential_sid * @return $this Fluent Builder */ public function setCredentialSid($credentialSid) { $this->options['credentialSid'] = $credentialSid; return $this; } /** * The endpoint * * @param string $endpoint The endpoint * @return $this Fluent Builder */ public function setEndpoint($endpoint) { $this->options['endpoint'] = $endpoint; return $this; } /** * Provide a friendly representation * * @return string Machine friendly representation */ public function __toString() { $options = array(); foreach ($this->options as $key => $value) { if ($value != Values::NONE) { $options[] = "$key=$value"; } } return '[Twilio.Notify.V1.CreateBindingOptions ' . implode(' ', $options) . 
']'; } } class ReadBindingOptions extends Options { /** * @param \DateTime $startDate The start_date * @param \DateTime $endDate The end_date * @param string $identity The identity * @param string $tag The tag */ public function __construct($startDate = Values::NONE, $endDate = Values::NONE, $identity = Values::NONE, $tag = Values::NONE) { $this->options['startDate'] = $startDate; $this->options['endDate'] = $endDate; $this->options['identity'] = $identity; $this->options['tag'] = $tag; } /** * The start_date * * @param \DateTime $startDate The start_date * @return $this Fluent Builder */ public function setStartDate($startDate) { $this->options['startDate'] = $startDate; return $this; } /** * The end_date * * @param \DateTime $endDate The end_date * @return $this Fluent Builder */ public function setEndDate($endDate) { $this->options['endDate'] = $endDate; return $this; } /** * The identity * * @param string $identity The identity * @return $this Fluent Builder */ public function setIdentity($identity) { $this->options['identity'] = $identity; return $this; } /** * The tag * * @param string $tag The tag * @return $this Fluent Builder */ public function setTag($tag) { $this->options['tag'] = $tag; return $this; } /** * Provide a friendly representation * * @return string Machine friendly representation */ public function __toString() { $options = array(); foreach ($this->options as $key => $value) { if ($value != Values::NONE) { $options[] = "$key=$value"; } } return '[Twilio.Notify.V1.ReadBindingOptions ' . implode(' ', $options) . ']'; } }
gpl-2.0
vorvor/diszkont
sites/all/themes/fusion/fusion_core/css/fusion-responsive-default.css
5160
@media only screen and (min-width:1200px) { /** * Fusion Grid Layout - 1200px width, 12 column grid, 30px gutters * -------------------------------------------------------------------- */ /* grid widths */ body .grid12-1 {width: 100px;} body .grid12-2 {width: 200px;} body .grid12-3 {width: 300px;} body .grid12-4 {width: 400px;} body .grid12-5 {width: 500px;} body .grid12-6 {width: 600px;} body .grid12-7 {width: 700px;} body .grid12-8 {width: 800px;} body .grid12-9 {width: 900px;} body .grid12-10 {width: 1000px;} body .grid12-11 {width: 1100px;} body .grid12-12 {width: 1200px;} /* block indents */ body .grid12-indent-1 {margin-left: 100px;} body .grid12-indent-2 {margin-left: 200px;} body .grid12-indent-3 {margin-left: 300px;} body .grid12-indent-4 {margin-left: 400px;} body .grid12-indent-5 {margin-left: 500px;} body .grid12-indent-6 {margin-left: 600px;} body .grid12-indent-7 {margin-left: 700px;} body .grid12-indent-8 {margin-left: 800px;} body .grid12-indent-9 {margin-left: 900px;} body .grid12-indent-10 {margin-left: 1000px;} body .grid12-indent-11 {margin-left: 1100px;} /* gutters (2 x margin) */ .gutter { margin-left: 15px; margin-right: 15px; } /* page min width */ body, div.full-width { min-width: 1200px; } #grid-mask-overlay .block .gutter { background-color: rgba(0, 73, 225, .12); outline: none; } #label_sidebar { display: block; } } @media only screen and (max-device-width: 1024px) and (orientation:landscape), only screen and (min-width:960px) and (max-width:1199px) and (min-device-width: 1100px) { /** * Fusion Grid Layout - 960px width, 12 column grid, 20px gutters * -------------------------------------------------------------------- */ /* grid widths */ body .grid12-1 {width: 80px;} body .grid12-2 {width: 160px;} body .grid12-3 {width: 240px;} body .grid12-4 {width: 320px;} body .grid12-5 {width: 400px;} body .grid12-6 {width: 480px;} body .grid12-7 {width: 560px;} body .grid12-8 {width: 640px;} body .grid12-9 {width: 720px;} body .grid12-10 {width: 
800px;} body .grid12-11 {width: 880px;} body .grid12-12 {width: 960px;} /* block indents */ body .grid12-indent-1 {margin-left: 80px;} body .grid12-indent-2 {margin-left: 160px;} body .grid12-indent-3 {margin-left: 240px;} body .grid12-indent-4 {margin-left: 320px;} body .grid12-indent-5 {margin-left: 400px;} body .grid12-indent-6 {margin-left: 480px;} body .grid12-indent-7 {margin-left: 560px;} body .grid12-indent-8 {margin-left: 640px;} body .grid12-indent-9 {margin-left: 720px;} body .grid12-indent-10 {margin-left: 800px;} body .grid12-indent-11 {margin-left: 880px;} /* gutters (2 x margin) */ .gutter { margin-left: 10px; margin-right: 10px; } /* page min width */ body, div.full-width { min-width: 960px; } #grid-mask-overlay .block .gutter { background-color: rgba(213,95,28,.15); outline: none; } #label_sidebar_tablet_landscape { display: block; } } @media only screen and (max-device-width: 1024px) and (orientation:portrait), only screen and (min-width:768px) and (max-width:959px) and (min-device-width: 1100px) { /** * Fusion Grid Layout - 768px width, 12 column grid, 20px gutters * -------------------------------------------------------------------- */ /* grid widths */ body .grid12-1 {width: 64px;} body .grid12-2 {width: 128px;} body .grid12-3 {width: 192px;} body .grid12-4 {width: 256px;} body .grid12-5 {width: 320px;} body .grid12-6 {width: 384px;} body .grid12-7 {width: 448px;} body .grid12-8 {width: 512px;} body .grid12-9 {width: 576px;} body .grid12-10 {width: 640px;} body .grid12-11 {width: 704px;} body .grid12-12 {width: 768px;} /* block indents */ body .grid12-indent-1 {margin-left: 64px;} body .grid12-indent-2 {margin-left: 128px;} body .grid12-indent-3 {margin-left: 192px;} body .grid12-indent-4 {margin-left: 256px;} body .grid12-indent-5 {margin-left: 320px;} body .grid12-indent-6 {margin-left: 384px;} body .grid12-indent-7 {margin-left: 448px;} body .grid12-indent-8 {margin-left: 512px;} body .grid12-indent-9 {margin-left: 576px;} body 
.grid12-indent-10 {margin-left: 640px;} body .grid12-indent-11 {margin-left: 704px;} /* gutters (2 x margin) */ .gutter { margin-left: 10px; margin-right: 10px; } /* page min width */ body, div.full-width { min-width: 768px; } #grid-mask-overlay .block .gutter { background-color: rgba(11,186,40,.15); outline: none; } #label_sidebar_tablet_portrait { display: block; } } @media only screen and (max-width:759px) { /** * Fusion Grid Layout - Mobile layout, 12 column grid, 10px gutters * -------------------------------------------------------------------- */ /* grid widths */ body .grid12-1, body .grid12-2, body .grid12-3, body .grid12-4, body .grid12-5, body .grid12-6, body .grid12-7, body .grid12-8, body .grid12-9, body .grid12-10, body .grid12-11, body .grid12-12 { clear: both; width: 100%; } /* gutters (2 x margin) */ .gutter { margin-left: 5px; margin-right: 5px; } /* page min width */ body, div.full-width { min-width: 0; } #grid-mask-overlay .block .gutter { background-color: rgba(220,220,70,.1); outline: none; } #label_sidebar_phone_landscape { display: block; } }
gpl-2.0
aertmann/neos-development-collection
Neos.Neos/Classes/Controller/Module/UserController.php
460
<?php namespace Neos\Neos\Controller\Module; /* * This file is part of the Neos.Neos package. * * (c) Contributors of the Neos Project - www.neos.io * * This package is Open Source Software. For the full copyright and license * information, please view the LICENSE file which was distributed with this * source code. */ use Neos\Flow\Annotations as Flow; /** * @Flow\Scope("singleton") */ class UserController extends AbstractModuleController { }
gpl-3.0
selmentdev/selment-toolchain
source/gcc-latest/gcc/postreload-gcse.c
41806
/* Post reload partially redundant load elimination Copyright (C) 2004-2016 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "backend.h" #include "target.h" #include "rtl.h" #include "tree.h" #include "predict.h" #include "df.h" #include "tm_p.h" #include "insn-config.h" #include "emit-rtl.h" #include "recog.h" #include "cfgrtl.h" #include "profile.h" #include "expr.h" #include "params.h" #include "tree-pass.h" #include "dbgcnt.h" #include "gcse-common.h" /* The following code implements gcse after reload, the purpose of this pass is to cleanup redundant loads generated by reload and other optimizations that come after gcse. It searches for simple inter-block redundancies and tries to eliminate them by adding moves and loads in cold places. Perform partially redundant load elimination, try to eliminate redundant loads created by the reload pass. We try to look for full or partial redundant loads fed by one or more loads/stores in predecessor BBs, and try adding loads to make them fully redundant. We also check if it's worth adding loads to be able to delete the redundant load. Algorithm: 1. Build available expressions hash table: For each load/store instruction, if the loaded/stored memory didn't change until the end of the basic block add this memory expression to the hash table. 2. 
Perform Redundancy elimination: For each load instruction do the following: perform partial redundancy elimination, check if it's worth adding loads to make the load fully redundant. If so add loads and register copies and delete the load. 3. Delete instructions made redundant in step 2. Future enhancement: If the loaded register is used/defined between load and some store, look for some other free register between load and all its stores, and replace the load with a copy from this register to the loaded register. */ /* Keep statistics of this pass. */ static struct { int moves_inserted; int copies_inserted; int insns_deleted; } stats; /* We need to keep a hash table of expressions. The table entries are of type 'struct expr', and for each expression there is a single linked list of occurrences. */ /* Expression elements in the hash table. */ struct expr { /* The expression (SET_SRC for expressions, PATTERN for assignments). */ rtx expr; /* The same hash for this entry. */ hashval_t hash; /* Index in the transparent bitmaps. */ unsigned int bitmap_index; /* List of available occurrence in basic blocks in the function. */ struct occr *avail_occr; }; /* Hashtable helpers. */ struct expr_hasher : nofree_ptr_hash <expr> { static inline hashval_t hash (const expr *); static inline bool equal (const expr *, const expr *); }; /* Hash expression X. DO_NOT_RECORD_P is a boolean indicating if a volatile operand is found or if the expression contains something we don't want to insert in the table. */ static hashval_t hash_expr (rtx x, int *do_not_record_p) { *do_not_record_p = 0; return hash_rtx (x, GET_MODE (x), do_not_record_p, NULL, /*have_reg_qty=*/false); } /* Callback for hashtab. Return the hash value for expression EXP. We don't actually hash here, we just return the cached hash value. */ inline hashval_t expr_hasher::hash (const expr *exp) { return exp->hash; } /* Callback for hashtab. Return nonzero if exp1 is equivalent to exp2. 
*/ inline bool expr_hasher::equal (const expr *exp1, const expr *exp2) { int equiv_p = exp_equiv_p (exp1->expr, exp2->expr, 0, true); gcc_assert (!equiv_p || exp1->hash == exp2->hash); return equiv_p; } /* The table itself. */ static hash_table<expr_hasher> *expr_table; static struct obstack expr_obstack; /* Occurrence of an expression. There is at most one occurrence per basic block. If a pattern appears more than once, the last appearance is used. */ struct occr { /* Next occurrence of this expression. */ struct occr *next; /* The insn that computes the expression. */ rtx_insn *insn; /* Nonzero if this [anticipatable] occurrence has been deleted. */ char deleted_p; }; static struct obstack occr_obstack; /* The following structure holds the information about the occurrences of the redundant instructions. */ struct unoccr { struct unoccr *next; edge pred; rtx_insn *insn; }; static struct obstack unoccr_obstack; /* Array where each element is the CUID if the insn that last set the hard register with the number of the element, since the start of the current basic block. This array is used during the building of the hash table (step 1) to determine if a reg is killed before the end of a basic block. It is also used when eliminating partial redundancies (step 2) to see if a reg was modified since the start of a basic block. */ static int *reg_avail_info; /* A list of insns that may modify memory within the current basic block. */ struct modifies_mem { rtx_insn *insn; struct modifies_mem *next; }; static struct modifies_mem *modifies_mem_list; /* The modifies_mem structs also go on an obstack, only this obstack is freed each time after completing the analysis or transformations on a basic block. So we allocate a dummy modifies_mem_obstack_bottom object on the obstack to keep track of the bottom of the obstack. */ static struct obstack modifies_mem_obstack; static struct modifies_mem *modifies_mem_obstack_bottom; /* Mapping of insn UIDs to CUIDs. 
CUIDs are like UIDs except they increase monotonically in each basic block, have no gaps, and only apply to real insns. */ static int *uid_cuid; #define INSN_CUID(INSN) (uid_cuid[INSN_UID (INSN)]) /* Bitmap of blocks which have memory stores. */ static bitmap modify_mem_list_set; /* Bitmap of blocks which have calls. */ static bitmap blocks_with_calls; /* Vector indexed by block # with a list of all the insns that modify memory within the block. */ static vec<rtx_insn *> *modify_mem_list; /* Vector indexed by block # with a canonicalized list of insns that modify memory in the block. */ static vec<modify_pair> *canon_modify_mem_list; /* Vector of simple bitmaps indexed by block number. Each component sbitmap indicates which expressions are transparent through the block. */ static sbitmap *transp; /* Helpers for memory allocation/freeing. */ static void alloc_mem (void); static void free_mem (void); /* Support for hash table construction and transformations. */ static bool oprs_unchanged_p (rtx, rtx_insn *, bool); static void record_last_reg_set_info (rtx_insn *, rtx); static void record_last_reg_set_info_regno (rtx_insn *, int); static void record_last_mem_set_info (rtx_insn *); static void record_last_set_info (rtx, const_rtx, void *); static void record_opr_changes (rtx_insn *); static void find_mem_conflicts (rtx, const_rtx, void *); static int load_killed_in_block_p (int, rtx, bool); static void reset_opr_set_tables (void); /* Hash table support. */ static hashval_t hash_expr (rtx, int *); static void insert_expr_in_table (rtx, rtx_insn *); static struct expr *lookup_expr_in_table (rtx); static void dump_hash_table (FILE *); /* Helpers for eliminate_partially_redundant_load. 
*/ static bool reg_killed_on_edge (rtx, edge); static bool reg_used_on_edge (rtx, edge); static rtx get_avail_load_store_reg (rtx_insn *); static bool bb_has_well_behaved_predecessors (basic_block); static struct occr* get_bb_avail_insn (basic_block, struct occr *, int); static void hash_scan_set (rtx_insn *); static void compute_hash_table (void); /* The work horses of this pass. */ static void eliminate_partially_redundant_load (basic_block, rtx_insn *, struct expr *); static void eliminate_partially_redundant_loads (void); /* Allocate memory for the CUID mapping array and register/memory tracking tables. */ static void alloc_mem (void) { int i; basic_block bb; rtx_insn *insn; /* Find the largest UID and create a mapping from UIDs to CUIDs. */ uid_cuid = XCNEWVEC (int, get_max_uid () + 1); i = 1; FOR_EACH_BB_FN (bb, cfun) FOR_BB_INSNS (bb, insn) { if (INSN_P (insn)) uid_cuid[INSN_UID (insn)] = i++; else uid_cuid[INSN_UID (insn)] = i; } /* Allocate the available expressions hash table. We don't want to make the hash table too small, but unnecessarily making it too large also doesn't help. The i/4 is a gcse.c relic, and seems like a reasonable choice. */ expr_table = new hash_table<expr_hasher> (MAX (i / 4, 13)); /* We allocate everything on obstacks because we often can roll back the whole obstack to some point. Freeing obstacks is very fast. */ gcc_obstack_init (&expr_obstack); gcc_obstack_init (&occr_obstack); gcc_obstack_init (&unoccr_obstack); gcc_obstack_init (&modifies_mem_obstack); /* Working array used to track the last set for each register in the current block. */ reg_avail_info = (int *) xmalloc (FIRST_PSEUDO_REGISTER * sizeof (int)); /* Put a dummy modifies_mem object on the modifies_mem_obstack, so we can roll it back in reset_opr_set_tables. 
*/ modifies_mem_obstack_bottom = (struct modifies_mem *) obstack_alloc (&modifies_mem_obstack, sizeof (struct modifies_mem)); blocks_with_calls = BITMAP_ALLOC (NULL); modify_mem_list_set = BITMAP_ALLOC (NULL); modify_mem_list = (vec_rtx_heap *) xcalloc (last_basic_block_for_fn (cfun), sizeof (vec_rtx_heap)); canon_modify_mem_list = (vec_modify_pair_heap *) xcalloc (last_basic_block_for_fn (cfun), sizeof (vec_modify_pair_heap)); } /* Free memory allocated by alloc_mem. */ static void free_mem (void) { free (uid_cuid); delete expr_table; expr_table = NULL; obstack_free (&expr_obstack, NULL); obstack_free (&occr_obstack, NULL); obstack_free (&unoccr_obstack, NULL); obstack_free (&modifies_mem_obstack, NULL); unsigned i; bitmap_iterator bi; EXECUTE_IF_SET_IN_BITMAP (modify_mem_list_set, 0, i, bi) { modify_mem_list[i].release (); canon_modify_mem_list[i].release (); } BITMAP_FREE (blocks_with_calls); BITMAP_FREE (modify_mem_list_set); free (reg_avail_info); free (modify_mem_list); free (canon_modify_mem_list); } /* Insert expression X in INSN in the hash TABLE. If it is already present, record it as the last occurrence in INSN's basic block. */ static void insert_expr_in_table (rtx x, rtx_insn *insn) { int do_not_record_p; hashval_t hash; struct expr *cur_expr, **slot; struct occr *avail_occr, *last_occr = NULL; hash = hash_expr (x, &do_not_record_p); /* Do not insert expression in the table if it contains volatile operands, or if hash_expr determines the expression is something we don't want to or can't handle. */ if (do_not_record_p) return; /* We anticipate that redundant expressions are rare, so for convenience allocate a new hash table element here already and set its fields. If we don't do this, we need a hack with a static struct expr. Anyway, obstack_free is really fast and one more obstack_alloc doesn't hurt if we're going to see more expressions later on. 
*/ cur_expr = (struct expr *) obstack_alloc (&expr_obstack, sizeof (struct expr)); cur_expr->expr = x; cur_expr->hash = hash; cur_expr->avail_occr = NULL; slot = expr_table->find_slot_with_hash (cur_expr, hash, INSERT); if (! (*slot)) { /* The expression isn't found, so insert it. */ *slot = cur_expr; /* Anytime we add an entry to the table, record the index of the new entry. The bitmap index starts counting at zero. */ cur_expr->bitmap_index = expr_table->elements () - 1; } else { /* The expression is already in the table, so roll back the obstack and use the existing table entry. */ obstack_free (&expr_obstack, cur_expr); cur_expr = *slot; } /* Search for another occurrence in the same basic block. */ avail_occr = cur_expr->avail_occr; while (avail_occr && BLOCK_FOR_INSN (avail_occr->insn) != BLOCK_FOR_INSN (insn)) { /* If an occurrence isn't found, save a pointer to the end of the list. */ last_occr = avail_occr; avail_occr = avail_occr->next; } if (avail_occr) /* Found another instance of the expression in the same basic block. Prefer this occurrence to the currently recorded one. We want the last one in the block and the block is scanned from start to end. */ avail_occr->insn = insn; else { /* First occurrence of this expression in this basic block. */ avail_occr = (struct occr *) obstack_alloc (&occr_obstack, sizeof (struct occr)); /* First occurrence of this expression in any block? */ if (cur_expr->avail_occr == NULL) cur_expr->avail_occr = avail_occr; else last_occr->next = avail_occr; avail_occr->insn = insn; avail_occr->next = NULL; avail_occr->deleted_p = 0; } } /* Lookup pattern PAT in the expression hash table. The result is a pointer to the table entry, or NULL if not found. 
*/ static struct expr * lookup_expr_in_table (rtx pat) { int do_not_record_p; struct expr **slot, *tmp_expr; hashval_t hash = hash_expr (pat, &do_not_record_p); if (do_not_record_p) return NULL; tmp_expr = (struct expr *) obstack_alloc (&expr_obstack, sizeof (struct expr)); tmp_expr->expr = pat; tmp_expr->hash = hash; tmp_expr->avail_occr = NULL; slot = expr_table->find_slot_with_hash (tmp_expr, hash, INSERT); obstack_free (&expr_obstack, tmp_expr); if (!slot) return NULL; else return (*slot); } /* Dump all expressions and occurrences that are currently in the expression hash table to FILE. */ /* This helper is called via htab_traverse. */ int dump_expr_hash_table_entry (expr **slot, FILE *file) { struct expr *exprs = *slot; struct occr *occr; fprintf (file, "expr: "); print_rtl (file, exprs->expr); fprintf (file,"\nhashcode: %u\n", exprs->hash); fprintf (file,"list of occurrences:\n"); occr = exprs->avail_occr; while (occr) { rtx_insn *insn = occr->insn; print_rtl_single (file, insn); fprintf (file, "\n"); occr = occr->next; } fprintf (file, "\n"); return 1; } static void dump_hash_table (FILE *file) { fprintf (file, "\n\nexpression hash table\n"); fprintf (file, "size %ld, %ld elements, %f collision/search ratio\n", (long) expr_table->size (), (long) expr_table->elements (), expr_table->collisions ()); if (expr_table->elements () > 0) { fprintf (file, "\n\ntable entries:\n"); expr_table->traverse <FILE *, dump_expr_hash_table_entry> (file); } fprintf (file, "\n"); } /* Return true if register X is recorded as being set by an instruction whose CUID is greater than the one given. 
*/ static bool reg_changed_after_insn_p (rtx x, int cuid) { unsigned int regno, end_regno; regno = REGNO (x); end_regno = END_REGNO (x); do if (reg_avail_info[regno] > cuid) return true; while (++regno < end_regno); return false; } /* Return nonzero if the operands of expression X are unchanged 1) from the start of INSN's basic block up to but not including INSN if AFTER_INSN is false, or 2) from INSN to the end of INSN's basic block if AFTER_INSN is true. */ static bool oprs_unchanged_p (rtx x, rtx_insn *insn, bool after_insn) { int i, j; enum rtx_code code; const char *fmt; if (x == 0) return 1; code = GET_CODE (x); switch (code) { case REG: /* We are called after register allocation. */ gcc_assert (REGNO (x) < FIRST_PSEUDO_REGISTER); if (after_insn) return !reg_changed_after_insn_p (x, INSN_CUID (insn) - 1); else return !reg_changed_after_insn_p (x, 0); case MEM: if (load_killed_in_block_p (INSN_CUID (insn), x, after_insn)) return 0; else return oprs_unchanged_p (XEXP (x, 0), insn, after_insn); case PC: case CC0: /*FIXME*/ case CONST: CASE_CONST_ANY: case SYMBOL_REF: case LABEL_REF: case ADDR_VEC: case ADDR_DIFF_VEC: return 1; case PRE_DEC: case PRE_INC: case POST_DEC: case POST_INC: case PRE_MODIFY: case POST_MODIFY: if (after_insn) return 0; break; default: break; } for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--) { if (fmt[i] == 'e') { if (! oprs_unchanged_p (XEXP (x, i), insn, after_insn)) return 0; } else if (fmt[i] == 'E') for (j = 0; j < XVECLEN (x, i); j++) if (! oprs_unchanged_p (XVECEXP (x, i, j), insn, after_insn)) return 0; } return 1; } /* Used for communication between find_mem_conflicts and load_killed_in_block_p. Nonzero if find_mem_conflicts finds a conflict between two memory references. This is a bit of a hack to work around the limitations of note_stores. */ static int mems_conflict_p; /* DEST is the output of an instruction. 
If it is a memory reference, and possibly conflicts with the load found in DATA, then set mems_conflict_p to a nonzero value. */ static void find_mem_conflicts (rtx dest, const_rtx setter ATTRIBUTE_UNUSED, void *data) { rtx mem_op = (rtx) data; while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == STRICT_LOW_PART) dest = XEXP (dest, 0); /* If DEST is not a MEM, then it will not conflict with the load. Note that function calls are assumed to clobber memory, but are handled elsewhere. */ if (! MEM_P (dest)) return; if (true_dependence (dest, GET_MODE (dest), mem_op)) mems_conflict_p = 1; } /* Return nonzero if the expression in X (a memory reference) is killed in the current basic block before (if AFTER_INSN is false) or after (if AFTER_INSN is true) the insn with the CUID in UID_LIMIT. This function assumes that the modifies_mem table is flushed when the hash table construction or redundancy elimination phases start processing a new basic block. */ static int load_killed_in_block_p (int uid_limit, rtx x, bool after_insn) { struct modifies_mem *list_entry = modifies_mem_list; while (list_entry) { rtx_insn *setter = list_entry->insn; /* Ignore entries in the list that do not apply. */ if ((after_insn && INSN_CUID (setter) < uid_limit) || (! after_insn && INSN_CUID (setter) > uid_limit)) { list_entry = list_entry->next; continue; } /* If SETTER is a call everything is clobbered. Note that calls to pure functions are never put on the list, so we need not worry about them. */ if (CALL_P (setter)) return 1; /* SETTER must be an insn of some kind that sets memory. Call note_stores to examine each hunk of memory that is modified. It will set mems_conflict_p to nonzero if there may be a conflict between X and SETTER. */ mems_conflict_p = 0; note_stores (PATTERN (setter), find_mem_conflicts, x); if (mems_conflict_p) return 1; list_entry = list_entry->next; } return 0; } /* Record register first/last/block set information for REGNO in INSN. 
*/ static inline void record_last_reg_set_info (rtx_insn *insn, rtx reg) { unsigned int regno, end_regno; regno = REGNO (reg); end_regno = END_REGNO (reg); do reg_avail_info[regno] = INSN_CUID (insn); while (++regno < end_regno); } static inline void record_last_reg_set_info_regno (rtx_insn *insn, int regno) { reg_avail_info[regno] = INSN_CUID (insn); } /* Record memory modification information for INSN. We do not actually care about the memory location(s) that are set, or even how they are set (consider a CALL_INSN). We merely need to record which insns modify memory. */ static void record_last_mem_set_info (rtx_insn *insn) { struct modifies_mem *list_entry; list_entry = (struct modifies_mem *) obstack_alloc (&modifies_mem_obstack, sizeof (struct modifies_mem)); list_entry->insn = insn; list_entry->next = modifies_mem_list; modifies_mem_list = list_entry; record_last_mem_set_info_common (insn, modify_mem_list, canon_modify_mem_list, modify_mem_list_set, blocks_with_calls); } /* Called from compute_hash_table via note_stores to handle one SET or CLOBBER in an insn. DATA is really the instruction in which the SET is taking place. */ static void record_last_set_info (rtx dest, const_rtx setter ATTRIBUTE_UNUSED, void *data) { rtx_insn *last_set_insn = (rtx_insn *) data; if (GET_CODE (dest) == SUBREG) dest = SUBREG_REG (dest); if (REG_P (dest)) record_last_reg_set_info (last_set_insn, dest); else if (MEM_P (dest)) { /* Ignore pushes, they don't clobber memory. They may still clobber the stack pointer though. Some targets do argument pushes without adding REG_INC notes. See e.g. PR25196, where a pushsi2 on i386 doesn't have REG_INC notes. Note such changes here too. */ if (! push_operand (dest, GET_MODE (dest))) record_last_mem_set_info (last_set_insn); else record_last_reg_set_info_regno (last_set_insn, STACK_POINTER_REGNUM); } } /* Reset tables used to keep track of what's still available since the start of the block. 
*/ static void reset_opr_set_tables (void) { memset (reg_avail_info, 0, FIRST_PSEUDO_REGISTER * sizeof (int)); obstack_free (&modifies_mem_obstack, modifies_mem_obstack_bottom); modifies_mem_list = NULL; } /* Record things set by INSN. This data is used by oprs_unchanged_p. */ static void record_opr_changes (rtx_insn *insn) { rtx note; /* Find all stores and record them. */ note_stores (PATTERN (insn), record_last_set_info, insn); /* Also record autoincremented REGs for this insn as changed. */ for (note = REG_NOTES (insn); note; note = XEXP (note, 1)) if (REG_NOTE_KIND (note) == REG_INC) record_last_reg_set_info (insn, XEXP (note, 0)); /* Finally, if this is a call, record all call clobbers. */ if (CALL_P (insn)) { unsigned int regno; rtx link, x; hard_reg_set_iterator hrsi; EXECUTE_IF_SET_IN_HARD_REG_SET (regs_invalidated_by_call, 0, regno, hrsi) record_last_reg_set_info_regno (insn, regno); for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1)) if (GET_CODE (XEXP (link, 0)) == CLOBBER) { x = XEXP (XEXP (link, 0), 0); if (REG_P (x)) { gcc_assert (HARD_REGISTER_P (x)); record_last_reg_set_info (insn, x); } } if (! RTL_CONST_OR_PURE_CALL_P (insn)) record_last_mem_set_info (insn); } } /* Scan the pattern of INSN and add an entry to the hash TABLE. After reload we are interested in loads/stores only. */ static void hash_scan_set (rtx_insn *insn) { rtx pat = PATTERN (insn); rtx src = SET_SRC (pat); rtx dest = SET_DEST (pat); /* We are only interested in loads and stores. */ if (! MEM_P (src) && ! MEM_P (dest)) return; /* Don't mess with jumps and nops. */ if (JUMP_P (insn) || set_noop_p (pat)) return; if (REG_P (dest)) { if (/* Don't CSE something if we can't do a reg/reg copy. */ can_copy_p (GET_MODE (dest)) /* Is SET_SRC something we want to gcse? */ && general_operand (src, GET_MODE (src)) #ifdef STACK_REGS /* Never consider insns touching the register stack. It may create situations that reg-stack cannot handle (e.g. 
a stack register live across an abnormal edge). */ && (REGNO (dest) < FIRST_STACK_REG || REGNO (dest) > LAST_STACK_REG) #endif /* An expression is not available if its operands are subsequently modified, including this insn. */ && oprs_unchanged_p (src, insn, true)) { insert_expr_in_table (src, insn); } } else if (REG_P (src)) { /* Only record sets of pseudo-regs in the hash table. */ if (/* Don't CSE something if we can't do a reg/reg copy. */ can_copy_p (GET_MODE (src)) /* Is SET_DEST something we want to gcse? */ && general_operand (dest, GET_MODE (dest)) #ifdef STACK_REGS /* As above for STACK_REGS. */ && (REGNO (src) < FIRST_STACK_REG || REGNO (src) > LAST_STACK_REG) #endif && ! (flag_float_store && FLOAT_MODE_P (GET_MODE (dest))) /* Check if the memory expression is killed after insn. */ && ! load_killed_in_block_p (INSN_CUID (insn) + 1, dest, true) && oprs_unchanged_p (XEXP (dest, 0), insn, true)) { insert_expr_in_table (dest, insn); } } } /* Create hash table of memory expressions available at end of basic blocks. Basically you should think of this hash table as the representation of AVAIL_OUT. This is the set of expressions that is generated in a basic block and not killed before the end of the same basic block. Notice that this is really a local computation. */ static void compute_hash_table (void) { basic_block bb; FOR_EACH_BB_FN (bb, cfun) { rtx_insn *insn; /* First pass over the instructions records information used to determine when registers and memory are last set. Since we compute a "local" AVAIL_OUT, reset the tables that help us keep track of what has been modified since the start of the block. */ reset_opr_set_tables (); FOR_BB_INSNS (bb, insn) { if (INSN_P (insn)) record_opr_changes (insn); } /* The next pass actually builds the hash table. */ FOR_BB_INSNS (bb, insn) if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == SET) hash_scan_set (insn); } } /* Check if register REG is killed in any insn waiting to be inserted on edge E. 
This function is required to check that our data flow analysis is still valid prior to commit_edge_insertions. */ static bool reg_killed_on_edge (rtx reg, edge e) { rtx_insn *insn; for (insn = e->insns.r; insn; insn = NEXT_INSN (insn)) if (INSN_P (insn) && reg_set_p (reg, insn)) return true; return false; } /* Similar to above - check if register REG is used in any insn waiting to be inserted on edge E. Assumes no such insn can be a CALL_INSN; if so call reg_used_between_p with PREV(insn),NEXT(insn) instead of calling reg_overlap_mentioned_p. */ static bool reg_used_on_edge (rtx reg, edge e) { rtx_insn *insn; for (insn = e->insns.r; insn; insn = NEXT_INSN (insn)) if (INSN_P (insn) && reg_overlap_mentioned_p (reg, PATTERN (insn))) return true; return false; } /* Return the loaded/stored register of a load/store instruction. */ static rtx get_avail_load_store_reg (rtx_insn *insn) { if (REG_P (SET_DEST (PATTERN (insn)))) /* A load. */ return SET_DEST (PATTERN (insn)); else { /* A store. */ gcc_assert (REG_P (SET_SRC (PATTERN (insn)))); return SET_SRC (PATTERN (insn)); } } /* Return nonzero if the predecessors of BB are "well behaved". */ static bool bb_has_well_behaved_predecessors (basic_block bb) { edge pred; edge_iterator ei; if (EDGE_COUNT (bb->preds) == 0) return false; FOR_EACH_EDGE (pred, ei, bb->preds) { if ((pred->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (pred)) return false; if ((pred->flags & EDGE_ABNORMAL_CALL) && cfun->has_nonlocal_label) return false; if (tablejump_p (BB_END (pred->src), NULL, NULL)) return false; } return true; } /* Search for the occurrences of expression in BB. */ static struct occr* get_bb_avail_insn (basic_block bb, struct occr *orig_occr, int bitmap_index) { struct occr *occr = orig_occr; for (; occr != NULL; occr = occr->next) if (BLOCK_FOR_INSN (occr->insn) == bb) return occr; /* If we could not find an occurrence in BB, see if BB has a single predecessor with an occurrence that is transparent through BB. 
*/ if (single_pred_p (bb) && bitmap_bit_p (transp[bb->index], bitmap_index) && (occr = get_bb_avail_insn (single_pred (bb), orig_occr, bitmap_index))) { rtx avail_reg = get_avail_load_store_reg (occr->insn); if (!reg_set_between_p (avail_reg, PREV_INSN (BB_HEAD (bb)), NEXT_INSN (BB_END (bb))) && !reg_killed_on_edge (avail_reg, single_pred_edge (bb))) return occr; } return NULL; } /* This helper is called via htab_traverse. */ int compute_expr_transp (expr **slot, FILE *dump_file ATTRIBUTE_UNUSED) { struct expr *expr = *slot; compute_transp (expr->expr, expr->bitmap_index, transp, blocks_with_calls, modify_mem_list_set, canon_modify_mem_list); return 1; } /* This handles the case where several stores feed a partially redundant load. It checks if the redundancy elimination is possible and if it's worth it. Redundancy elimination is possible if, 1) None of the operands of an insn have been modified since the start of the current basic block. 2) In any predecessor of the current basic block, the same expression is generated. See the function body for the heuristics that determine if eliminating a redundancy is also worth doing, assuming it is possible. */ static void eliminate_partially_redundant_load (basic_block bb, rtx_insn *insn, struct expr *expr) { edge pred; rtx_insn *avail_insn = NULL; rtx avail_reg; rtx dest, pat; struct occr *a_occr; struct unoccr *occr, *avail_occrs = NULL; struct unoccr *unoccr, *unavail_occrs = NULL, *rollback_unoccr = NULL; int npred_ok = 0; gcov_type ok_count = 0; /* Redundant load execution count. */ gcov_type critical_count = 0; /* Execution count of critical edges. */ edge_iterator ei; bool critical_edge_split = false; /* The execution count of the loads to be added to make the load fully redundant. */ gcov_type not_ok_count = 0; basic_block pred_bb; pat = PATTERN (insn); dest = SET_DEST (pat); /* Check that the loaded register is not used, set, or killed from the beginning of the block. 
*/ if (reg_changed_after_insn_p (dest, 0) || reg_used_between_p (dest, PREV_INSN (BB_HEAD (bb)), insn)) return; /* Check potential for replacing load with copy for predecessors. */ FOR_EACH_EDGE (pred, ei, bb->preds) { rtx_insn *next_pred_bb_end; avail_insn = NULL; avail_reg = NULL_RTX; pred_bb = pred->src; for (a_occr = get_bb_avail_insn (pred_bb, expr->avail_occr, expr->bitmap_index); a_occr; a_occr = get_bb_avail_insn (pred_bb, a_occr->next, expr->bitmap_index)) { /* Check if the loaded register is not used. */ avail_insn = a_occr->insn; avail_reg = get_avail_load_store_reg (avail_insn); gcc_assert (avail_reg); /* Make sure we can generate a move from register avail_reg to dest. */ rtx_insn *move = gen_move_insn (copy_rtx (dest), copy_rtx (avail_reg)); extract_insn (move); if (! constrain_operands (1, get_preferred_alternatives (insn, pred_bb)) || reg_killed_on_edge (avail_reg, pred) || reg_used_on_edge (dest, pred)) { avail_insn = NULL; continue; } next_pred_bb_end = NEXT_INSN (BB_END (BLOCK_FOR_INSN (avail_insn))); if (!reg_set_between_p (avail_reg, avail_insn, next_pred_bb_end)) /* AVAIL_INSN remains non-null. */ break; else avail_insn = NULL; } if (EDGE_CRITICAL_P (pred)) critical_count += pred->count; if (avail_insn != NULL_RTX) { npred_ok++; ok_count += pred->count; if (! set_noop_p (PATTERN (gen_move_insn (copy_rtx (dest), copy_rtx (avail_reg))))) { /* Check if there is going to be a split. */ if (EDGE_CRITICAL_P (pred)) critical_edge_split = true; } else /* Its a dead move no need to generate. */ continue; occr = (struct unoccr *) obstack_alloc (&unoccr_obstack, sizeof (struct unoccr)); occr->insn = avail_insn; occr->pred = pred; occr->next = avail_occrs; avail_occrs = occr; if (! rollback_unoccr) rollback_unoccr = occr; } else { /* Adding a load on a critical edge will cause a split. 
*/ if (EDGE_CRITICAL_P (pred)) critical_edge_split = true; not_ok_count += pred->count; unoccr = (struct unoccr *) obstack_alloc (&unoccr_obstack, sizeof (struct unoccr)); unoccr->insn = NULL; unoccr->pred = pred; unoccr->next = unavail_occrs; unavail_occrs = unoccr; if (! rollback_unoccr) rollback_unoccr = unoccr; } } if (/* No load can be replaced by copy. */ npred_ok == 0 /* Prevent exploding the code. */ || (optimize_bb_for_size_p (bb) && npred_ok > 1) /* If we don't have profile information we cannot tell if splitting a critical edge is profitable or not so don't do it. */ || ((! profile_info || ! flag_branch_probabilities || targetm.cannot_modify_jumps_p ()) && critical_edge_split)) goto cleanup; /* Check if it's worth applying the partial redundancy elimination. */ if (ok_count < GCSE_AFTER_RELOAD_PARTIAL_FRACTION * not_ok_count) goto cleanup; if (ok_count < GCSE_AFTER_RELOAD_CRITICAL_FRACTION * critical_count) goto cleanup; /* Generate moves to the loaded register from where the memory is available. */ for (occr = avail_occrs; occr; occr = occr->next) { avail_insn = occr->insn; pred = occr->pred; /* Set avail_reg to be the register having the value of the memory. */ avail_reg = get_avail_load_store_reg (avail_insn); gcc_assert (avail_reg); insert_insn_on_edge (gen_move_insn (copy_rtx (dest), copy_rtx (avail_reg)), pred); stats.moves_inserted++; if (dump_file) fprintf (dump_file, "generating move from %d to %d on edge from %d to %d\n", REGNO (avail_reg), REGNO (dest), pred->src->index, pred->dest->index); } /* Regenerate loads where the memory is unavailable. 
*/ for (unoccr = unavail_occrs; unoccr; unoccr = unoccr->next) { pred = unoccr->pred; insert_insn_on_edge (copy_insn (PATTERN (insn)), pred); stats.copies_inserted++; if (dump_file) { fprintf (dump_file, "generating on edge from %d to %d a copy of load: ", pred->src->index, pred->dest->index); print_rtl (dump_file, PATTERN (insn)); fprintf (dump_file, "\n"); } } /* Delete the insn if it is not available in this block and mark it for deletion if it is available. If insn is available it may help discover additional redundancies, so mark it for later deletion. */ for (a_occr = get_bb_avail_insn (bb, expr->avail_occr, expr->bitmap_index); a_occr && (a_occr->insn != insn); a_occr = get_bb_avail_insn (bb, a_occr->next, expr->bitmap_index)) ; if (!a_occr) { stats.insns_deleted++; if (dump_file) { fprintf (dump_file, "deleting insn:\n"); print_rtl_single (dump_file, insn); fprintf (dump_file, "\n"); } delete_insn (insn); } else a_occr->deleted_p = 1; cleanup: if (rollback_unoccr) obstack_free (&unoccr_obstack, rollback_unoccr); } /* Performing the redundancy elimination as described before. */ static void eliminate_partially_redundant_loads (void) { rtx_insn *insn; basic_block bb; /* Note we start at block 1. */ if (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)) return; FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb->next_bb, EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb) { /* Don't try anything on basic blocks with strange predecessors. */ if (! bb_has_well_behaved_predecessors (bb)) continue; /* Do not try anything on cold basic blocks. */ if (optimize_bb_for_size_p (bb)) continue; /* Reset the table of things changed since the start of the current basic block. */ reset_opr_set_tables (); /* Look at all insns in the current basic block and see if there are any loads in it that we can record. */ FOR_BB_INSNS (bb, insn) { /* Is it a load - of the form (set (reg) (mem))? 
*/ if (NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == SET && REG_P (SET_DEST (PATTERN (insn))) && MEM_P (SET_SRC (PATTERN (insn)))) { rtx pat = PATTERN (insn); rtx src = SET_SRC (pat); struct expr *expr; if (!MEM_VOLATILE_P (src) && GET_MODE (src) != BLKmode && general_operand (src, GET_MODE (src)) /* Are the operands unchanged since the start of the block? */ && oprs_unchanged_p (src, insn, false) && !(cfun->can_throw_non_call_exceptions && may_trap_p (src)) && !side_effects_p (src) /* Is the expression recorded? */ && (expr = lookup_expr_in_table (src)) != NULL) { /* We now have a load (insn) and an available memory at its BB start (expr). Try to remove the loads if it is redundant. */ eliminate_partially_redundant_load (bb, insn, expr); } } /* Keep track of everything modified by this insn, so that we know what has been modified since the start of the current basic block. */ if (INSN_P (insn)) record_opr_changes (insn); } } commit_edge_insertions (); } /* Go over the expression hash table and delete insns that were marked for later deletion. */ /* This helper is called via htab_traverse. */ int delete_redundant_insns_1 (expr **slot, void *data ATTRIBUTE_UNUSED) { struct expr *exprs = *slot; struct occr *occr; for (occr = exprs->avail_occr; occr != NULL; occr = occr->next) { if (occr->deleted_p && dbg_cnt (gcse2_delete)) { delete_insn (occr->insn); stats.insns_deleted++; if (dump_file) { fprintf (dump_file, "deleting insn:\n"); print_rtl_single (dump_file, occr->insn); fprintf (dump_file, "\n"); } } } return 1; } static void delete_redundant_insns (void) { expr_table->traverse <void *, delete_redundant_insns_1> (NULL); if (dump_file) fprintf (dump_file, "\n"); } /* Main entry point of the GCSE after reload - clean some redundant loads due to spilling. */ static void gcse_after_reload_main (rtx f ATTRIBUTE_UNUSED) { memset (&stats, 0, sizeof (stats)); /* Allocate memory for this pass. Also computes and initializes the insns' CUIDs. 
*/ alloc_mem (); /* We need alias analysis. */ init_alias_analysis (); compute_hash_table (); if (dump_file) dump_hash_table (dump_file); if (expr_table->elements () > 0) { /* Knowing which MEMs are transparent through a block can signifiantly increase the number of redundant loads found. So compute transparency information for each memory expression in the hash table. */ df_analyze (); /* This can not be part of the normal allocation routine because we have to know the number of elements in the hash table. */ transp = sbitmap_vector_alloc (last_basic_block_for_fn (cfun), expr_table->elements ()); bitmap_vector_ones (transp, last_basic_block_for_fn (cfun)); expr_table->traverse <FILE *, compute_expr_transp> (dump_file); eliminate_partially_redundant_loads (); delete_redundant_insns (); sbitmap_vector_free (transp); if (dump_file) { fprintf (dump_file, "GCSE AFTER RELOAD stats:\n"); fprintf (dump_file, "copies inserted: %d\n", stats.copies_inserted); fprintf (dump_file, "moves inserted: %d\n", stats.moves_inserted); fprintf (dump_file, "insns deleted: %d\n", stats.insns_deleted); fprintf (dump_file, "\n\n"); } statistics_counter_event (cfun, "copies inserted", stats.copies_inserted); statistics_counter_event (cfun, "moves inserted", stats.moves_inserted); statistics_counter_event (cfun, "insns deleted", stats.insns_deleted); } /* We are finished with alias. 
*/ end_alias_analysis (); free_mem (); } static unsigned int rest_of_handle_gcse2 (void) { gcse_after_reload_main (get_insns ()); rebuild_jump_labels (get_insns ()); return 0; } namespace { const pass_data pass_data_gcse2 = { RTL_PASS, /* type */ "gcse2", /* name */ OPTGROUP_NONE, /* optinfo_flags */ TV_GCSE_AFTER_RELOAD, /* tv_id */ 0, /* properties_required */ 0, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ 0, /* todo_flags_finish */ }; class pass_gcse2 : public rtl_opt_pass { public: pass_gcse2 (gcc::context *ctxt) : rtl_opt_pass (pass_data_gcse2, ctxt) {} /* opt_pass methods: */ virtual bool gate (function *fun) { return (optimize > 0 && flag_gcse_after_reload && optimize_function_for_speed_p (fun)); } virtual unsigned int execute (function *) { return rest_of_handle_gcse2 (); } }; // class pass_gcse2 } // anon namespace rtl_opt_pass * make_pass_gcse2 (gcc::context *ctxt) { return new pass_gcse2 (ctxt); }
gpl-3.0
gguruss/mixerp
src/Libraries/Server Controls/Project/MixERP.Net.WebControls.Flag/FlagControl/IDisposable.cs
972
namespace MixERP.Net.WebControls.Flag { public partial class FlagControl { private bool disposed; public override void Dispose() { if (!this.disposed) { this.Dispose(true); base.Dispose(); } } private void Dispose(bool disposing) { if (!disposing) { return; } if (this.container != null) { this.container.Dispose(); this.container = null; } if (this.flagDropDownlist != null) { this.flagDropDownlist.Dispose(); this.flagDropDownlist = null; } if (this.updateButton != null) { this.updateButton.Dispose(); this.updateButton = null; } this.disposed = true; } } }
gpl-3.0
jiyuren/AmazeFileManager-1
src/main/java/com/amaze/filemanager/utils/RootHelper.java
7137
/* * Copyright (C) 2014 Arpit Khurana <[email protected]> * * This file is part of Amaze File Manager. * * Amaze File Manager is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package com.amaze.filemanager.utils; import com.stericson.RootTools.RootTools; import com.stericson.RootTools.execution.Command; import java.io.File; import java.util.ArrayList; public class RootHelper { public static String runAndWait(String cmd,boolean root) { Command c=new Command(0,cmd) { @Override public void commandOutput(int i, String s) { } @Override public void commandTerminated(int i, String s) { } @Override public void commandCompleted(int i, int i2) { } }; try {RootTools.getShell(root).add(c);} catch (Exception e) { // Logger.errorST("Exception when trying to run shell command", e); return null; } if (!waitForCommand(c)) { return null; } return c.toString(); } public static ArrayList<String> runAndWait1(String cmd, final boolean root) { final ArrayList<String> output=new ArrayList<String>(); Command cc=new Command(1,cmd) { @Override public void commandOutput(int i, String s) { output.add(s); // System.out.println("output "+root+s); } @Override public void commandTerminated(int i, String s) { System.out.println("error"+root+s); } @Override public void commandCompleted(int i, int i2) { } }; try { RootTools.getShell(root).add(cc); } catch (Exception e) { // Logger.errorST("Exception when trying to run shell command", e); 
e.printStackTrace(); return null; } if (!waitForCommand(cc)) { return null; } return output; } private static boolean waitForCommand(Command cmd) { while (!cmd.isFinished()) { synchronized (cmd) { try { if (!cmd.isFinished()) { cmd.wait(2000); } } catch (InterruptedException e) { e.printStackTrace(); } } if (!cmd.isExecuting() && !cmd.isFinished()) { // Logger.errorST("Error: Command is not executing and is not finished!"); return false; } } //Logger.debug("Command Finished!"); return true; } public static String getCommandLineString(String input) { return input.replaceAll(UNIX_ESCAPE_EXPRESSION, "\\\\$1"); } private static final String UNIX_ESCAPE_EXPRESSION = "(\\(|\\)|\\[|\\]|\\s|\'|\"|`|\\{|\\}|&|\\\\|\\?)"; static Futils futils=new Futils(); public static ArrayList<String[]> getFilesList(boolean showSize,String path,boolean showHidden){ File f=new File(path); ArrayList<String[]> files=new ArrayList<String[]>(); try { if(f.exists() && f.isDirectory()){ for(File x:f.listFiles()){ String k="",size=""; if(x.isDirectory()) {k="-1"; if(showSize)size=""+getCount(x); }else if(showSize)size=""+x.length(); if(showHidden){ files.add(new String[]{x.getPath(),"",parseFilePermission(x),k,x.lastModified()+"",size}); } else{if(!x.isHidden()){files.add(new String[]{x.getPath(),"",parseFilePermission(x),k,x.lastModified()+"",size});}} } }}catch (Exception e){} return files;} public static String[] addFile(File x,boolean showSize,boolean showHidden){ String k="",size=""; if(x.isDirectory()) { k="-1"; if(showSize)size=""+getCount(x); }else if(showSize) size=""+x.length(); if(showHidden){ return (new String[]{x.getPath(),"",parseFilePermission(x),k,x.lastModified()+"", size}); } else { if(!x.isHidden()) {return (new String[]{x.getPath(),"",parseFilePermission(x),k,x .lastModified()+"",size}); } } return null; } public static String parseFilePermission(File f){ String per=""; if(f.canRead()){per=per+"r";} if(f.canWrite()){per=per+"w";} if(f.canExecute()){per=per+"x";} return per;} 
public static ArrayList<String[]> getFilesList(String path,boolean root,boolean showHidden,boolean showSize) { String p = " "; if (showHidden) p = "a "; Futils futils = new Futils(); ArrayList<String[]> a = new ArrayList<String[]>(); ArrayList<String> ls = new ArrayList<String>(); if (root) { if (!path.startsWith("/storage")) { String cpath = getCommandLineString(path); ls = runAndWait1("ls -l" + p + cpath, root); if(ls!=null){ for (String file : ls) { if (!file.contains("Permission denied")) try { String[] array = futils.parseName(file); array[0] = path + "/" + array[0]; a.add(array); } catch (Exception e) { System.out.println(file); e.printStackTrace(); } }} } else if (futils.canListFiles(new File(path))) { a = getFilesList(showSize,path, showHidden); } else { a = new ArrayList<String[]>(); } } else if (futils.canListFiles(new File(path))) { a = getFilesList(showSize,path, showHidden); } else { a = new ArrayList<String[]>(); } if (a.size() == 0 && futils.canListFiles(new File(path))) { a = getFilesList(showSize,path, showHidden); } return a; } public static Integer getCount(File f){ if(f.exists() && f.canRead() && f.isDirectory()){ try{return f.listFiles().length;}catch(Exception e){return 0;} } return null;}}
gpl-3.0
bohlian/erpnext
erpnext/docs/user/manual/en/accounts/index.md
677
# Accounts At the end of sales and purchase cycle comes billing and payments. You may have an accountant in your team, or you may be doing accounting yourself, or you may have outsourced your accounting. In all the cases financial accounting forms the core of any business management system like an ERP. In ERPNext, your accounting operations consists of 3 main transactions: * Sales Invoice: The bills that you raise to your Customers for the products or services you provide. * Purchase Invoice: Bills that your Suppliers give you for their products or services. * Journal Entries: For accounting entries, like payments, credit and other types. ### Topics {index}
gpl-3.0
keen99/terraform
builtin/providers/aws/resource_aws_autoscaling_group_test.go
15294
package aws import ( "fmt" "reflect" "strings" "testing" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/autoscaling" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" ) func TestAccAWSAutoScalingGroup_basic(t *testing.T) { var group autoscaling.Group var lc autoscaling.LaunchConfiguration resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSAutoScalingGroupDestroy, Steps: []resource.TestStep{ resource.TestStep{ Config: testAccAWSAutoScalingGroupConfig, Check: resource.ComposeTestCheckFunc( testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group), testAccCheckAWSAutoScalingGroupHealthyCapacity(&group, 2), testAccCheckAWSAutoScalingGroupAttributes(&group), resource.TestCheckResourceAttr( "aws_autoscaling_group.bar", "availability_zones.2487133097", "us-west-2a"), resource.TestCheckResourceAttr( "aws_autoscaling_group.bar", "name", "foobar3-terraform-test"), resource.TestCheckResourceAttr( "aws_autoscaling_group.bar", "max_size", "5"), resource.TestCheckResourceAttr( "aws_autoscaling_group.bar", "min_size", "2"), resource.TestCheckResourceAttr( "aws_autoscaling_group.bar", "health_check_grace_period", "300"), resource.TestCheckResourceAttr( "aws_autoscaling_group.bar", "health_check_type", "ELB"), resource.TestCheckResourceAttr( "aws_autoscaling_group.bar", "desired_capacity", "4"), resource.TestCheckResourceAttr( "aws_autoscaling_group.bar", "force_delete", "true"), resource.TestCheckResourceAttr( "aws_autoscaling_group.bar", "termination_policies.912102603", "OldestInstance"), ), }, resource.TestStep{ Config: testAccAWSAutoScalingGroupConfigUpdate, Check: resource.ComposeTestCheckFunc( testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group), testAccCheckAWSLaunchConfigurationExists("aws_launch_configuration.new", &lc), resource.TestCheckResourceAttr( 
"aws_autoscaling_group.bar", "desired_capacity", "5"), testLaunchConfigurationName("aws_autoscaling_group.bar", &lc), testAccCheckAutoscalingTags(&group.Tags, "Bar", map[string]interface{}{ "value": "bar-foo", "propagate_at_launch": true, }), ), }, }, }) } func TestAccAWSAutoScalingGroup_tags(t *testing.T) { var group autoscaling.Group resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSAutoScalingGroupDestroy, Steps: []resource.TestStep{ resource.TestStep{ Config: testAccAWSAutoScalingGroupConfig, Check: resource.ComposeTestCheckFunc( testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group), testAccCheckAutoscalingTags(&group.Tags, "Foo", map[string]interface{}{ "value": "foo-bar", "propagate_at_launch": true, }), ), }, resource.TestStep{ Config: testAccAWSAutoScalingGroupConfigUpdate, Check: resource.ComposeTestCheckFunc( testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group), testAccCheckAutoscalingTagNotExists(&group.Tags, "Foo"), testAccCheckAutoscalingTags(&group.Tags, "Bar", map[string]interface{}{ "value": "bar-foo", "propagate_at_launch": true, }), ), }, }, }) } func TestAccAWSAutoScalingGroup_VpcUpdates(t *testing.T) { var group autoscaling.Group resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSAutoScalingGroupDestroy, Steps: []resource.TestStep{ resource.TestStep{ Config: testAccAWSAutoScalingGroupConfigWithAZ, Check: resource.ComposeTestCheckFunc( testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group), ), }, resource.TestStep{ Config: testAccAWSAutoScalingGroupConfigWithVPCIdent, Check: resource.ComposeTestCheckFunc( testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group), testAccCheckAWSAutoScalingGroupAttributesVPCZoneIdentifer(&group), ), }, }, }) } func TestAccAWSAutoScalingGroup_WithLoadBalancer(t 
*testing.T) { var group autoscaling.Group resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSAutoScalingGroupDestroy, Steps: []resource.TestStep{ resource.TestStep{ Config: testAccAWSAutoScalingGroupConfigWithLoadBalancer, Check: resource.ComposeTestCheckFunc( testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group), testAccCheckAWSAutoScalingGroupAttributesLoadBalancer(&group), ), }, }, }) } func testAccCheckAWSAutoScalingGroupDestroy(s *terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).autoscalingconn for _, rs := range s.RootModule().Resources { if rs.Type != "aws_autoscaling_group" { continue } // Try to find the Group describeGroups, err := conn.DescribeAutoScalingGroups( &autoscaling.DescribeAutoScalingGroupsInput{ AutoScalingGroupNames: []*string{aws.String(rs.Primary.ID)}, }) if err == nil { if len(describeGroups.AutoScalingGroups) != 0 && *describeGroups.AutoScalingGroups[0].AutoScalingGroupName == rs.Primary.ID { return fmt.Errorf("AutoScaling Group still exists") } } // Verify the error ec2err, ok := err.(awserr.Error) if !ok { return err } if ec2err.Code() != "InvalidGroup.NotFound" { return err } } return nil } func testAccCheckAWSAutoScalingGroupAttributes(group *autoscaling.Group) resource.TestCheckFunc { return func(s *terraform.State) error { if *group.AvailabilityZones[0] != "us-west-2a" { return fmt.Errorf("Bad availability_zones: %#v", group.AvailabilityZones[0]) } if *group.AutoScalingGroupName != "foobar3-terraform-test" { return fmt.Errorf("Bad name: %s", *group.AutoScalingGroupName) } if *group.MaxSize != 5 { return fmt.Errorf("Bad max_size: %d", *group.MaxSize) } if *group.MinSize != 2 { return fmt.Errorf("Bad max_size: %d", *group.MinSize) } if *group.HealthCheckType != "ELB" { return fmt.Errorf("Bad health_check_type,\nexpected: %s\ngot: %s", "ELB", *group.HealthCheckType) } if *group.HealthCheckGracePeriod != 300 { 
return fmt.Errorf("Bad health_check_grace_period: %d", *group.HealthCheckGracePeriod) } if *group.DesiredCapacity != 4 { return fmt.Errorf("Bad desired_capacity: %d", *group.DesiredCapacity) } if *group.LaunchConfigurationName == "" { return fmt.Errorf("Bad launch configuration name: %s", *group.LaunchConfigurationName) } t := &autoscaling.TagDescription{ Key: aws.String("Foo"), Value: aws.String("foo-bar"), PropagateAtLaunch: aws.Bool(true), ResourceType: aws.String("auto-scaling-group"), ResourceID: group.AutoScalingGroupName, } if !reflect.DeepEqual(group.Tags[0], t) { return fmt.Errorf( "Got:\n\n%#v\n\nExpected:\n\n%#v\n", group.Tags[0], t) } return nil } } func testAccCheckAWSAutoScalingGroupAttributesLoadBalancer(group *autoscaling.Group) resource.TestCheckFunc { return func(s *terraform.State) error { if *group.LoadBalancerNames[0] != "foobar-terraform-test" { return fmt.Errorf("Bad load_balancers: %#v", group.LoadBalancerNames[0]) } return nil } } func testAccCheckAWSAutoScalingGroupExists(n string, group *autoscaling.Group) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } if rs.Primary.ID == "" { return fmt.Errorf("No AutoScaling Group ID is set") } conn := testAccProvider.Meta().(*AWSClient).autoscalingconn describeGroups, err := conn.DescribeAutoScalingGroups( &autoscaling.DescribeAutoScalingGroupsInput{ AutoScalingGroupNames: []*string{aws.String(rs.Primary.ID)}, }) if err != nil { return err } if len(describeGroups.AutoScalingGroups) != 1 || *describeGroups.AutoScalingGroups[0].AutoScalingGroupName != rs.Primary.ID { return fmt.Errorf("AutoScaling Group not found") } *group = *describeGroups.AutoScalingGroups[0] return nil } } func testLaunchConfigurationName(n string, lc *autoscaling.LaunchConfiguration) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", 
n) } if *lc.LaunchConfigurationName != rs.Primary.Attributes["launch_configuration"] { return fmt.Errorf("Launch configuration names do not match") } return nil } } func testAccCheckAWSAutoScalingGroupHealthyCapacity( g *autoscaling.Group, exp int) resource.TestCheckFunc { return func(s *terraform.State) error { healthy := 0 for _, i := range g.Instances { if i.HealthStatus == nil { continue } if strings.EqualFold(*i.HealthStatus, "Healthy") { healthy++ } } if healthy < exp { return fmt.Errorf("Expected at least %d healthy, got %d.", exp, healthy) } return nil } } func testAccCheckAWSAutoScalingGroupAttributesVPCZoneIdentifer(group *autoscaling.Group) resource.TestCheckFunc { return func(s *terraform.State) error { // Grab Subnet Ids var subnets []string for _, rs := range s.RootModule().Resources { if rs.Type != "aws_subnet" { continue } subnets = append(subnets, rs.Primary.Attributes["id"]) } if group.VPCZoneIdentifier == nil { return fmt.Errorf("Bad VPC Zone Identifier\nexpected: %s\ngot nil", subnets) } zones := strings.Split(*group.VPCZoneIdentifier, ",") remaining := len(zones) for _, z := range zones { for _, s := range subnets { if z == s { remaining-- } } } if remaining != 0 { return fmt.Errorf("Bad VPC Zone Identifier match\nexpected: %s\ngot:%s", zones, subnets) } return nil } } const testAccAWSAutoScalingGroupConfig = ` resource "aws_launch_configuration" "foobar" { image_id = "ami-21f78e11" instance_type = "t1.micro" } resource "aws_autoscaling_group" "bar" { availability_zones = ["us-west-2a"] name = "foobar3-terraform-test" max_size = 5 min_size = 2 health_check_grace_period = 300 health_check_type = "ELB" desired_capacity = 4 force_delete = true termination_policies = ["OldestInstance"] launch_configuration = "${aws_launch_configuration.foobar.name}" tag { key = "Foo" value = "foo-bar" propagate_at_launch = true } } ` const testAccAWSAutoScalingGroupConfigUpdate = ` resource "aws_launch_configuration" "foobar" { image_id = "ami-21f78e11" 
instance_type = "t1.micro" } resource "aws_launch_configuration" "new" { image_id = "ami-21f78e11" instance_type = "t1.micro" } resource "aws_autoscaling_group" "bar" { availability_zones = ["us-west-2a"] name = "foobar3-terraform-test" max_size = 5 min_size = 2 health_check_grace_period = 300 health_check_type = "ELB" desired_capacity = 5 force_delete = true launch_configuration = "${aws_launch_configuration.new.name}" tag { key = "Bar" value = "bar-foo" propagate_at_launch = true } } ` const testAccAWSAutoScalingGroupConfigWithLoadBalancer = ` resource "aws_vpc" "foo" { cidr_block = "10.1.0.0/16" tags { Name = "tf-asg-test" } } resource "aws_internet_gateway" "gw" { vpc_id = "${aws_vpc.foo.id}" } resource "aws_subnet" "foo" { cidr_block = "10.1.1.0/24" vpc_id = "${aws_vpc.foo.id}" } resource "aws_security_group" "foo" { vpc_id="${aws_vpc.foo.id}" ingress { protocol = "-1" from_port = 0 to_port = 0 cidr_blocks = ["0.0.0.0/0"] } egress { protocol = "-1" from_port = 0 to_port = 0 cidr_blocks = ["0.0.0.0/0"] } } resource "aws_elb" "bar" { name = "foobar-terraform-test" subnets = ["${aws_subnet.foo.id}"] security_groups = ["${aws_security_group.foo.id}"] listener { instance_port = 80 instance_protocol = "http" lb_port = 80 lb_protocol = "http" } health_check { healthy_threshold = 2 unhealthy_threshold = 2 target = "HTTP:80/" interval = 5 timeout = 2 } depends_on = ["aws_internet_gateway.gw"] } resource "aws_launch_configuration" "foobar" { // need an AMI that listens on :80 at boot, this is: // bitnami-nginxstack-1.6.1-0-linux-ubuntu-14.04.1-x86_64-hvm-ebs-ami-99f5b1a9-3 image_id = "ami-b5b3fc85" instance_type = "t2.micro" security_groups = ["${aws_security_group.foo.id}"] } resource "aws_autoscaling_group" "bar" { availability_zones = ["${aws_subnet.foo.availability_zone}"] vpc_zone_identifier = ["${aws_subnet.foo.id}"] name = "foobar3-terraform-test" max_size = 2 min_size = 2 health_check_grace_period = 300 health_check_type = "ELB" min_elb_capacity = 2 force_delete 
= true launch_configuration = "${aws_launch_configuration.foobar.name}" load_balancers = ["${aws_elb.bar.name}"] } ` const testAccAWSAutoScalingGroupConfigWithAZ = ` resource "aws_vpc" "default" { cidr_block = "10.0.0.0/16" tags { Name = "terraform-test" } } resource "aws_subnet" "main" { vpc_id = "${aws_vpc.default.id}" cidr_block = "10.0.1.0/24" availability_zone = "us-west-2a" tags { Name = "terraform-test" } } resource "aws_subnet" "alt" { vpc_id = "${aws_vpc.default.id}" cidr_block = "10.0.2.0/24" availability_zone = "us-west-2b" tags { Name = "asg-vpc-thing" } } resource "aws_launch_configuration" "foobar" { name = "vpc-asg-test" image_id = "ami-b5b3fc85" instance_type = "t2.micro" } resource "aws_autoscaling_group" "bar" { availability_zones = ["us-west-2a"] name = "vpc-asg-test" max_size = 2 min_size = 1 health_check_grace_period = 300 health_check_type = "ELB" desired_capacity = 1 force_delete = true termination_policies = ["OldestInstance"] launch_configuration = "${aws_launch_configuration.foobar.name}" } ` const testAccAWSAutoScalingGroupConfigWithVPCIdent = ` resource "aws_vpc" "default" { cidr_block = "10.0.0.0/16" tags { Name = "terraform-test" } } resource "aws_subnet" "main" { vpc_id = "${aws_vpc.default.id}" cidr_block = "10.0.1.0/24" availability_zone = "us-west-2a" tags { Name = "terraform-test" } } resource "aws_subnet" "alt" { vpc_id = "${aws_vpc.default.id}" cidr_block = "10.0.2.0/24" availability_zone = "us-west-2b" tags { Name = "asg-vpc-thing" } } resource "aws_launch_configuration" "foobar" { name = "vpc-asg-test" image_id = "ami-b5b3fc85" instance_type = "t2.micro" } resource "aws_autoscaling_group" "bar" { vpc_zone_identifier = [ "${aws_subnet.main.id}", "${aws_subnet.alt.id}", ] name = "vpc-asg-test" max_size = 2 min_size = 1 health_check_grace_period = 300 health_check_type = "ELB" desired_capacity = 1 force_delete = true termination_policies = ["OldestInstance"] launch_configuration = "${aws_launch_configuration.foobar.name}" } `
mpl-2.0
MjAbuz/exist
extensions/debuggee/src/org/exist/debuggee/dbgp/packets/StepInto.java
1942
/* * eXist Open Source Native XML Database * Copyright (C) 2009-2011 The eXist Project * http://exist-db.org * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * $Id$ */ package org.exist.debuggee.dbgp.packets; import org.apache.mina.core.session.IoSession; /** * @author <a href="mailto:[email protected]">Dmitriy Shabanov</a> * */ public class StepInto extends AbstractCommandContinuation { public StepInto(IoSession session, String args) { super(session, args); } /* (non-Javadoc) * @see org.exist.debuggee.dgbp.packets.Command#exec() */ @Override public synchronized void exec() { getJoint().continuation(this); } public synchronized byte[] responseBytes() { String responce = xml_declaration + "<response " + namespaces + "command=\"step_into\" " + "status=\""+getStatus()+"\" " + "reason=\"ok\" " + "transaction_id=\""+transactionID+"\"/>"; return responce.getBytes(); } public byte[] commandBytes() { String command = "step_into -i "+transactionID; return command.getBytes(); } public int getType() { return STEP_INTO; } public boolean is(int type) { return (type == STEP_INTO); } public String toString() { return "step_into ["+transactionID+"]"; } }
lgpl-2.1
kenhys/groonga
vendor/nginx-1.19.2/src/http/ngx_http_upstream_round_robin.h
5089
/* * Copyright (C) Igor Sysoev * Copyright (C) Nginx, Inc. */ #ifndef _NGX_HTTP_UPSTREAM_ROUND_ROBIN_H_INCLUDED_ #define _NGX_HTTP_UPSTREAM_ROUND_ROBIN_H_INCLUDED_ #include <ngx_config.h> #include <ngx_core.h> #include <ngx_http.h> typedef struct ngx_http_upstream_rr_peer_s ngx_http_upstream_rr_peer_t; struct ngx_http_upstream_rr_peer_s { struct sockaddr *sockaddr; socklen_t socklen; ngx_str_t name; ngx_str_t server; ngx_int_t current_weight; ngx_int_t effective_weight; ngx_int_t weight; ngx_uint_t conns; ngx_uint_t max_conns; ngx_uint_t fails; time_t accessed; time_t checked; ngx_uint_t max_fails; time_t fail_timeout; ngx_msec_t slow_start; ngx_msec_t start_time; ngx_uint_t down; #if (NGX_HTTP_SSL || NGX_COMPAT) void *ssl_session; int ssl_session_len; #endif #if (NGX_HTTP_UPSTREAM_ZONE) ngx_atomic_t lock; #endif ngx_http_upstream_rr_peer_t *next; NGX_COMPAT_BEGIN(32) NGX_COMPAT_END }; typedef struct ngx_http_upstream_rr_peers_s ngx_http_upstream_rr_peers_t; struct ngx_http_upstream_rr_peers_s { ngx_uint_t number; #if (NGX_HTTP_UPSTREAM_ZONE) ngx_slab_pool_t *shpool; ngx_atomic_t rwlock; ngx_http_upstream_rr_peers_t *zone_next; #endif ngx_uint_t total_weight; unsigned single:1; unsigned weighted:1; ngx_str_t *name; ngx_http_upstream_rr_peers_t *next; ngx_http_upstream_rr_peer_t *peer; }; #if (NGX_HTTP_UPSTREAM_ZONE) #define ngx_http_upstream_rr_peers_rlock(peers) \ \ if (peers->shpool) { \ ngx_rwlock_rlock(&peers->rwlock); \ } #define ngx_http_upstream_rr_peers_wlock(peers) \ \ if (peers->shpool) { \ ngx_rwlock_wlock(&peers->rwlock); \ } #define ngx_http_upstream_rr_peers_unlock(peers) \ \ if (peers->shpool) { \ ngx_rwlock_unlock(&peers->rwlock); \ } #define ngx_http_upstream_rr_peer_lock(peers, peer) \ \ if (peers->shpool) { \ ngx_rwlock_wlock(&peer->lock); \ } #define ngx_http_upstream_rr_peer_unlock(peers, peer) \ \ if (peers->shpool) { \ ngx_rwlock_unlock(&peer->lock); \ } #else #define ngx_http_upstream_rr_peers_rlock(peers) #define 
ngx_http_upstream_rr_peers_wlock(peers) #define ngx_http_upstream_rr_peers_unlock(peers) #define ngx_http_upstream_rr_peer_lock(peers, peer) #define ngx_http_upstream_rr_peer_unlock(peers, peer) #endif typedef struct { ngx_uint_t config; ngx_http_upstream_rr_peers_t *peers; ngx_http_upstream_rr_peer_t *current; uintptr_t *tried; uintptr_t data; } ngx_http_upstream_rr_peer_data_t; ngx_int_t ngx_http_upstream_init_round_robin(ngx_conf_t *cf, ngx_http_upstream_srv_conf_t *us); ngx_int_t ngx_http_upstream_init_round_robin_peer(ngx_http_request_t *r, ngx_http_upstream_srv_conf_t *us); ngx_int_t ngx_http_upstream_create_round_robin_peer(ngx_http_request_t *r, ngx_http_upstream_resolved_t *ur); ngx_int_t ngx_http_upstream_get_round_robin_peer(ngx_peer_connection_t *pc, void *data); void ngx_http_upstream_free_round_robin_peer(ngx_peer_connection_t *pc, void *data, ngx_uint_t state); #if (NGX_HTTP_SSL) ngx_int_t ngx_http_upstream_set_round_robin_peer_session(ngx_peer_connection_t *pc, void *data); void ngx_http_upstream_save_round_robin_peer_session(ngx_peer_connection_t *pc, void *data); #endif #endif /* _NGX_HTTP_UPSTREAM_ROUND_ROBIN_H_INCLUDED_ */
lgpl-2.1
kedgeproject/kedge
vendor/github.com/openshift/origin/pkg/cmd/server/bootstrappolicy/all.go
384
package bootstrappolicy import ( rbacrest "k8s.io/kubernetes/pkg/registry/rbac/rest" ) func Policy() *rbacrest.PolicyData { return &rbacrest.PolicyData{ ClusterRoles: GetBootstrapClusterRoles(), ClusterRoleBindings: GetBootstrapClusterRoleBindings(), Roles: GetBootstrapNamespaceRoles(), RoleBindings: GetBootstrapNamespaceRoleBindings(), } }
apache-2.0
gburton1/metrics
metrics-healthchecks/src/main/java/com/codahale/metrics/health/jvm/ThreadDeadlockHealthCheck.java
1021
package com.codahale.metrics.health.jvm; import com.codahale.metrics.health.HealthCheck; import com.codahale.metrics.jvm.ThreadDeadlockDetector; import java.util.Set; /** * A health check which returns healthy if no threads are deadlocked. */ public class ThreadDeadlockHealthCheck extends HealthCheck { private final ThreadDeadlockDetector detector; /** * Creates a new health check. */ public ThreadDeadlockHealthCheck() { this(new ThreadDeadlockDetector()); } /** * Creates a new health check with the given detector. * * @param detector a thread deadlock detector */ public ThreadDeadlockHealthCheck(ThreadDeadlockDetector detector) { this.detector = detector; } @Override protected Result check() throws Exception { final Set<String> threads = detector.getDeadlockedThreads(); if (threads.isEmpty()) { return Result.healthy(); } return Result.unhealthy(threads.toString()); } }
apache-2.0
patrox/dropwizard
dropwizard-jdbi/src/main/java/io/dropwizard/jdbi/args/InstantArgument.java
1397
package io.dropwizard.jdbi.args; import org.skife.jdbi.v2.StatementContext; import org.skife.jdbi.v2.tweak.Argument; import java.sql.PreparedStatement; import java.sql.SQLException; import java.sql.Timestamp; import java.sql.Types; import java.time.Instant; import java.util.Calendar; import java.util.Optional; /** * An {@link Argument} for {@link Instant} objects. */ public class InstantArgument implements Argument { private final Instant instant; private final Optional<Calendar> calendar; protected InstantArgument(final Instant instant, final Optional<Calendar> calendar) { this.instant = instant; this.calendar = calendar; } @Override public void apply(int position, PreparedStatement statement, StatementContext ctx) throws SQLException { if (instant != null) { if (calendar.isPresent()) { // We need to make a clone, because Calendar is not thread-safe // and some JDBC drivers mutate it during time calculations final Calendar calendarClone = (Calendar) calendar.get().clone(); statement.setTimestamp(position, Timestamp.from(instant), calendarClone); } else { statement.setTimestamp(position, Timestamp.from(instant)); } } else { statement.setNull(position, Types.TIMESTAMP); } } }
apache-2.0
yepengxj/df_st_origin1
pkg/authorization/cache/clusterpolicybinding.go
4026
package cache import ( "time" kapi "k8s.io/kubernetes/pkg/api" errors "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/watch" authorizationapi "github.com/openshift/origin/pkg/authorization/api" "github.com/openshift/origin/pkg/authorization/client" clusterbindingregistry "github.com/openshift/origin/pkg/authorization/registry/clusterpolicybinding" ) type readOnlyClusterPolicyBindingCache struct { registry clusterbindingregistry.WatchingRegistry indexer cache.Indexer reflector *cache.Reflector keyFunc cache.KeyFunc } func NewReadOnlyClusterPolicyBindingCache(registry clusterbindingregistry.WatchingRegistry) *readOnlyClusterPolicyBindingCache { ctx := kapi.WithNamespace(kapi.NewContext(), kapi.NamespaceAll) indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc}) reflector := cache.NewReflector( &cache.ListWatch{ ListFunc: func() (runtime.Object, error) { return registry.ListClusterPolicyBindings(ctx, labels.Everything(), fields.Everything()) }, WatchFunc: func(resourceVersion string) (watch.Interface, error) { return registry.WatchClusterPolicyBindings(ctx, labels.Everything(), fields.Everything(), resourceVersion) }, }, &authorizationapi.ClusterPolicyBinding{}, indexer, 2*time.Minute, ) return &readOnlyClusterPolicyBindingCache{ registry: registry, indexer: indexer, reflector: reflector, keyFunc: cache.MetaNamespaceKeyFunc, } } // Run begins watching and synchronizing the cache func (c *readOnlyClusterPolicyBindingCache) Run() { c.reflector.Run() } // RunUntil starts a watch and handles watch events. Will restart the watch if it is closed. // RunUntil starts a goroutine and returns immediately. It will exit when stopCh is closed. 
func (c *readOnlyClusterPolicyBindingCache) RunUntil(stopChannel <-chan struct{}) { c.reflector.RunUntil(stopChannel) } // LastSyncResourceVersion exposes the LastSyncResourceVersion of the internal reflector func (c *readOnlyClusterPolicyBindingCache) LastSyncResourceVersion() string { return c.reflector.LastSyncResourceVersion() } func (c *readOnlyClusterPolicyBindingCache) List(label labels.Selector, field fields.Selector) (*authorizationapi.ClusterPolicyBindingList, error) { clusterPolicyBindingList := &authorizationapi.ClusterPolicyBindingList{} returnedList := c.indexer.List() for i := range returnedList { clusterPolicyBinding, castOK := returnedList[i].(*authorizationapi.ClusterPolicyBinding) if !castOK { return clusterPolicyBindingList, errors.NewInvalid("ClusterPolicyBinding", "clusterPolicyBinding", []error{}) } if label.Matches(labels.Set(clusterPolicyBinding.Labels)) && field.Matches(authorizationapi.ClusterPolicyBindingToSelectableFields(clusterPolicyBinding)) { clusterPolicyBindingList.Items = append(clusterPolicyBindingList.Items, *clusterPolicyBinding) } } return clusterPolicyBindingList, nil } func (c *readOnlyClusterPolicyBindingCache) Get(name string) (*authorizationapi.ClusterPolicyBinding, error) { keyObj := &authorizationapi.ClusterPolicyBinding{ObjectMeta: kapi.ObjectMeta{Name: name}} key, _ := c.keyFunc(keyObj) item, exists, getErr := c.indexer.GetByKey(key) if getErr != nil { return &authorizationapi.ClusterPolicyBinding{}, getErr } if !exists { existsErr := errors.NewNotFound("ClusterPolicyBinding", name) return &authorizationapi.ClusterPolicyBinding{}, existsErr } clusterPolicyBinding, castOK := item.(*authorizationapi.ClusterPolicyBinding) if !castOK { castErr := errors.NewInvalid("ClusterPolicyBinding", name, []error{}) return &authorizationapi.ClusterPolicyBinding{}, castErr } return clusterPolicyBinding, nil } func newReadOnlyClusterPolicyBindings(cache readOnlyAuthorizationCache) client.ReadOnlyClusterPolicyBindingInterface { return 
cache.readOnlyClusterPolicyBindingCache }
apache-2.0
weswigham/TypeScript
tests/baselines/reference/pathMappingBasedModuleResolution5_classic.js
1513
//// [tests/cases/compiler/pathMappingBasedModuleResolution5_classic.ts] //// //// [file1.ts] import {x} from "folder2/file1" import {y} from "folder3/file2" import {z} from "components/file3" import {z1} from "file4" declare function use(a: any): void; use(x.toExponential()); use(y.toExponential()); use(z.toExponential()); use(z1.toExponential()); //// [file1.ts] export var x = 1; //// [file2.ts] export var y = 1; //// [file3.ts] export var z = 1; //// [file4.ts] export var z1 = 1; //// [file1.js] define(["require", "exports"], function (require, exports) { "use strict"; exports.__esModule = true; exports.x = 1; }); //// [file2.js] define(["require", "exports"], function (require, exports) { "use strict"; exports.__esModule = true; exports.y = 1; }); //// [file3.js] define(["require", "exports"], function (require, exports) { "use strict"; exports.__esModule = true; exports.z = 1; }); //// [file4.js] define(["require", "exports"], function (require, exports) { "use strict"; exports.__esModule = true; exports.z1 = 1; }); //// [file1.js] define(["require", "exports", "folder2/file1", "folder3/file2", "components/file3", "file4"], function (require, exports, file1_1, file2_1, file3_1, file4_1) { "use strict"; exports.__esModule = true; use(file1_1.x.toExponential()); use(file2_1.y.toExponential()); use(file3_1.z.toExponential()); use(file4_1.z1.toExponential()); });
apache-2.0
yanzhijun/jclouds-aliyun
providers/aws-ec2/src/main/java/org/jclouds/aws/ec2/features/AWSInstanceApi.java
4022
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.jclouds.aws.ec2.features; import static org.jclouds.aws.reference.FormParameters.ACTION; import java.util.Set; import javax.inject.Named; import javax.ws.rs.FormParam; import javax.ws.rs.POST; import javax.ws.rs.Path; import org.jclouds.Fallbacks.EmptySetOnNotFoundOr404; import org.jclouds.aws.ec2.domain.AWSRunningInstance; import org.jclouds.aws.ec2.xml.AWSDescribeInstancesResponseHandler; import org.jclouds.aws.ec2.xml.AWSRunInstancesResponseHandler; import org.jclouds.aws.filters.FormSigner; import org.jclouds.ec2.binders.BindFiltersToIndexedFormParams; import org.jclouds.ec2.binders.BindInstanceIdsToIndexedFormParams; import org.jclouds.ec2.binders.IfNotNullBindAvailabilityZoneToFormParam; import org.jclouds.ec2.domain.Reservation; import org.jclouds.ec2.features.InstanceApi; import org.jclouds.ec2.options.RunInstancesOptions; import org.jclouds.javax.annotation.Nullable; import org.jclouds.location.functions.RegionToEndpointOrProviderIfNull; import org.jclouds.rest.annotations.BinderParam; import org.jclouds.rest.annotations.EndpointParam; import org.jclouds.rest.annotations.Fallback; import org.jclouds.rest.annotations.FormParams; import org.jclouds.rest.annotations.RequestFilters; 
import org.jclouds.rest.annotations.VirtualHost; import org.jclouds.rest.annotations.XMLResponseParser; import com.google.common.collect.Multimap; /** * Provides access to EC2 Instance Services via their REST API. * <p/> */ @RequestFilters(FormSigner.class) @VirtualHost public interface AWSInstanceApi extends InstanceApi { @Named("DescribeInstances") @Override @POST @Path("/") @FormParams(keys = ACTION, values = "DescribeInstances") @XMLResponseParser(AWSDescribeInstancesResponseHandler.class) @Fallback(EmptySetOnNotFoundOr404.class) Set<? extends Reservation<? extends AWSRunningInstance>> describeInstancesInRegion( @EndpointParam(parser = RegionToEndpointOrProviderIfNull.class) @Nullable String region, @BinderParam(BindInstanceIdsToIndexedFormParams.class) String... instanceIds); @Named("DescribeInstances") @POST @Path("/") @FormParams(keys = ACTION, values = "DescribeInstances") @XMLResponseParser(AWSDescribeInstancesResponseHandler.class) @Fallback(EmptySetOnNotFoundOr404.class) Set<? extends Reservation<? extends AWSRunningInstance>> describeInstancesInRegionWithFilter( @EndpointParam(parser = RegionToEndpointOrProviderIfNull.class) @Nullable String region, @BinderParam(BindFiltersToIndexedFormParams.class) Multimap<String, String> filter); @Named("RunInstances") @Override @POST @Path("/") @FormParams(keys = ACTION, values = "RunInstances") @XMLResponseParser(AWSRunInstancesResponseHandler.class) Reservation<? extends AWSRunningInstance> runInstancesInRegion( @EndpointParam(parser = RegionToEndpointOrProviderIfNull.class) @Nullable String region, @Nullable @BinderParam(IfNotNullBindAvailabilityZoneToFormParam.class) String nullableAvailabilityZone, @FormParam("ImageId") String imageId, @FormParam("MinCount") int minCount, @FormParam("MaxCount") int maxCount, RunInstancesOptions... options); }
apache-2.0
BiryukovVA/ignite
modules/core/src/main/java/org/apache/ignite/internal/direct/DirectMessageWriter.java
12148
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.ignite.internal.direct; import java.nio.ByteBuffer; import java.util.BitSet; import java.util.Collection; import java.util.Map; import java.util.UUID; import org.apache.ignite.internal.direct.state.DirectMessageState; import org.apache.ignite.internal.direct.state.DirectMessageStateItem; import org.apache.ignite.internal.direct.stream.DirectByteBufferStream; import org.apache.ignite.internal.direct.stream.v1.DirectByteBufferStreamImplV1; import org.apache.ignite.internal.direct.stream.v2.DirectByteBufferStreamImplV2; import org.apache.ignite.internal.direct.stream.v3.DirectByteBufferStreamImplV3; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.util.tostring.GridToStringInclude; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.lang.IgniteOutClosure; import org.apache.ignite.lang.IgniteUuid; import org.apache.ignite.plugin.extensions.communication.Message; import org.apache.ignite.plugin.extensions.communication.MessageCollectionItemType; import org.apache.ignite.plugin.extensions.communication.MessageWriter; import org.jetbrains.annotations.Nullable; /** * Message writer 
implementation. */ public class DirectMessageWriter implements MessageWriter { /** State. */ @GridToStringInclude private final DirectMessageState<StateItem> state; /** Protocol version. */ @GridToStringInclude private final byte protoVer; /** * @param protoVer Protocol version. */ public DirectMessageWriter(final byte protoVer) { state = new DirectMessageState<>(StateItem.class, new IgniteOutClosure<StateItem>() { @Override public StateItem apply() { return new StateItem(protoVer); } }); this.protoVer = protoVer; } /** {@inheritDoc} */ @Override public void setBuffer(ByteBuffer buf) { state.item().stream.setBuffer(buf); } /** {@inheritDoc} */ @Override public void setCurrentWriteClass(Class<? extends Message> msgCls) { // No-op. } /** {@inheritDoc} */ @Override public boolean writeHeader(short type, byte fieldCnt) { DirectByteBufferStream stream = state.item().stream; stream.writeShort(type); return stream.lastFinished(); } /** {@inheritDoc} */ @Override public boolean writeByte(String name, byte val) { DirectByteBufferStream stream = state.item().stream; stream.writeByte(val); return stream.lastFinished(); } /** {@inheritDoc} */ @Override public boolean writeShort(String name, short val) { DirectByteBufferStream stream = state.item().stream; stream.writeShort(val); return stream.lastFinished(); } /** {@inheritDoc} */ @Override public boolean writeInt(String name, int val) { DirectByteBufferStream stream = state.item().stream; stream.writeInt(val); return stream.lastFinished(); } /** {@inheritDoc} */ @Override public boolean writeLong(String name, long val) { DirectByteBufferStream stream = state.item().stream; stream.writeLong(val); return stream.lastFinished(); } /** {@inheritDoc} */ @Override public boolean writeFloat(String name, float val) { DirectByteBufferStream stream = state.item().stream; stream.writeFloat(val); return stream.lastFinished(); } /** {@inheritDoc} */ @Override public boolean writeDouble(String name, double val) { DirectByteBufferStream 
stream = state.item().stream; stream.writeDouble(val); return stream.lastFinished(); } /** {@inheritDoc} */ @Override public boolean writeChar(String name, char val) { DirectByteBufferStream stream = state.item().stream; stream.writeChar(val); return stream.lastFinished(); } /** {@inheritDoc} */ @Override public boolean writeBoolean(String name, boolean val) { DirectByteBufferStream stream = state.item().stream; stream.writeBoolean(val); return stream.lastFinished(); } /** {@inheritDoc} */ @Override public boolean writeByteArray(String name, @Nullable byte[] val) { DirectByteBufferStream stream = state.item().stream; stream.writeByteArray(val); return stream.lastFinished(); } /** {@inheritDoc} */ @Override public boolean writeByteArray(String name, byte[] val, long off, int len) { DirectByteBufferStream stream = state.item().stream; stream.writeByteArray(val, off, len); return stream.lastFinished(); } /** {@inheritDoc} */ @Override public boolean writeShortArray(String name, @Nullable short[] val) { DirectByteBufferStream stream = state.item().stream; stream.writeShortArray(val); return stream.lastFinished(); } /** {@inheritDoc} */ @Override public boolean writeIntArray(String name, @Nullable int[] val) { DirectByteBufferStream stream = state.item().stream; stream.writeIntArray(val); return stream.lastFinished(); } /** {@inheritDoc} */ @Override public boolean writeLongArray(String name, @Nullable long[] val) { DirectByteBufferStream stream = state.item().stream; stream.writeLongArray(val); return stream.lastFinished(); } /** {@inheritDoc} */ @Override public boolean writeLongArray(String name, long[] val, int len) { DirectByteBufferStream stream = state.item().stream; stream.writeLongArray(val, len); return stream.lastFinished(); } /** {@inheritDoc} */ @Override public boolean writeFloatArray(String name, @Nullable float[] val) { DirectByteBufferStream stream = state.item().stream; stream.writeFloatArray(val); return stream.lastFinished(); } /** {@inheritDoc} */ 
@Override public boolean writeDoubleArray(String name, @Nullable double[] val) { DirectByteBufferStream stream = state.item().stream; stream.writeDoubleArray(val); return stream.lastFinished(); } /** {@inheritDoc} */ @Override public boolean writeCharArray(String name, @Nullable char[] val) { DirectByteBufferStream stream = state.item().stream; stream.writeCharArray(val); return stream.lastFinished(); } /** {@inheritDoc} */ @Override public boolean writeBooleanArray(String name, @Nullable boolean[] val) { DirectByteBufferStream stream = state.item().stream; stream.writeBooleanArray(val); return stream.lastFinished(); } /** {@inheritDoc} */ @Override public boolean writeString(String name, String val) { DirectByteBufferStream stream = state.item().stream; stream.writeString(val); return stream.lastFinished(); } /** {@inheritDoc} */ @Override public boolean writeBitSet(String name, BitSet val) { DirectByteBufferStream stream = state.item().stream; stream.writeBitSet(val); return stream.lastFinished(); } /** {@inheritDoc} */ @Override public boolean writeUuid(String name, UUID val) { DirectByteBufferStream stream = state.item().stream; stream.writeUuid(val); return stream.lastFinished(); } /** {@inheritDoc} */ @Override public boolean writeIgniteUuid(String name, IgniteUuid val) { DirectByteBufferStream stream = state.item().stream; stream.writeIgniteUuid(val); return stream.lastFinished(); } /** {@inheritDoc} */ @Override public boolean writeAffinityTopologyVersion(String name, AffinityTopologyVersion val) { if (protoVer >= 3) { DirectByteBufferStream stream = state.item().stream; stream.writeAffinityTopologyVersion(val); return stream.lastFinished(); } return writeMessage(name, val); } /** {@inheritDoc} */ @Override public boolean writeMessage(String name, @Nullable Message msg) { DirectByteBufferStream stream = state.item().stream; stream.writeMessage(msg, this); return stream.lastFinished(); } /** {@inheritDoc} */ @Override public <T> boolean 
writeObjectArray(String name, T[] arr, MessageCollectionItemType itemType) { DirectByteBufferStream stream = state.item().stream; stream.writeObjectArray(arr, itemType, this); return stream.lastFinished(); } /** {@inheritDoc} */ @Override public <T> boolean writeCollection(String name, Collection<T> col, MessageCollectionItemType itemType) { DirectByteBufferStream stream = state.item().stream; stream.writeCollection(col, itemType, this); return stream.lastFinished(); } /** {@inheritDoc} */ @Override public <K, V> boolean writeMap(String name, Map<K, V> map, MessageCollectionItemType keyType, MessageCollectionItemType valType) { DirectByteBufferStream stream = state.item().stream; stream.writeMap(map, keyType, valType, this); return stream.lastFinished(); } /** {@inheritDoc} */ @Override public boolean isHeaderWritten() { return state.item().hdrWritten; } /** {@inheritDoc} */ @Override public void onHeaderWritten() { state.item().hdrWritten = true; } /** {@inheritDoc} */ @Override public int state() { return state.item().state; } /** {@inheritDoc} */ @Override public void incrementState() { state.item().state++; } /** {@inheritDoc} */ @Override public void beforeInnerMessageWrite() { state.forward(); } /** {@inheritDoc} */ @Override public void afterInnerMessageWrite(boolean finished) { state.backward(finished); } /** {@inheritDoc} */ @Override public void reset() { state.reset(); } /** {@inheritDoc} */ @Override public String toString() { return S.toString(DirectMessageWriter.class, this); } /** */ private static class StateItem implements DirectMessageStateItem { /** */ private final DirectByteBufferStream stream; /** */ private int state; /** */ private boolean hdrWritten; /** * @param protoVer Protocol version. 
*/ public StateItem(byte protoVer) { switch (protoVer) { case 1: stream = new DirectByteBufferStreamImplV1(null); break; case 2: stream = new DirectByteBufferStreamImplV2(null); break; case 3: stream = new DirectByteBufferStreamImplV3(null); break; default: throw new IllegalStateException("Invalid protocol version: " + protoVer); } } /** {@inheritDoc} */ @Override public void reset() { state = 0; hdrWritten = false; } /** {@inheritDoc} */ @Override public String toString() { return S.toString(StateItem.class, this); } } }
apache-2.0
Geovanny0401/bookStore
bookstore-front/node_modules/zone.js/dist/zone-bluebird.js
1237
/** * @license * Copyright Google Inc. All Rights Reserved. * * Use of this source code is governed by an MIT-style license that can be * found in the LICENSE file at https://angular.io/license */ (function (global, factory) { typeof exports === 'object' && typeof module !== 'undefined' ? factory() : typeof define === 'function' && define.amd ? define(factory) : (factory()); }(this, (function () { 'use strict'; /** * @license * Copyright Google Inc. All Rights Reserved. * * Use of this source code is governed by an MIT-style license that can be * found in the LICENSE file at https://angular.io/license */ Zone.__load_patch('bluebird', function (global, Zone) { // TODO: @JiaLiPassion, we can automatically patch bluebird // if global.Promise = Bluebird, but sometimes in nodejs, // global.Promise is not Bluebird, and Bluebird is just be // used by other libraries such as sequelize, so I think it is // safe to just expose a method to patch Bluebird explicitly var BLUEBIRD = 'bluebird'; Zone[Zone.__symbol__(BLUEBIRD)] = function patchBluebird(Bluebird) { Bluebird.setScheduler(function (fn) { Zone.current.scheduleMicroTask(BLUEBIRD, fn); }); }; }); })));
apache-2.0
asedunov/intellij-community
platform/lang-impl/src/com/intellij/testIntegration/TestFinderHelper.java
4188
/* * Copyright 2000-2009 JetBrains s.r.o. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intellij.testIntegration; import com.intellij.openapi.extensions.Extensions; import com.intellij.openapi.util.Comparing; import com.intellij.openapi.util.Pair; import com.intellij.openapi.util.text.StringUtil; import com.intellij.psi.PsiElement; import com.intellij.psi.PsiNamedElement; import com.intellij.psi.codeStyle.NameUtil; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import java.util.*; public class TestFinderHelper { public static PsiElement findSourceElement(@NotNull final PsiElement from) { for (TestFinder each : getFinders()) { PsiElement result = each.findSourceElement(from); if (result != null) return result; } return null; } public static Collection<PsiElement> findTestsForClass(@NotNull final PsiElement element) { Collection<PsiElement> result = new LinkedHashSet<>(); for (TestFinder each : getFinders()) { result.addAll(each.findTestsForClass(element)); } return result; } public static Collection<PsiElement> findClassesForTest(@NotNull final PsiElement element) { Collection<PsiElement> result = new LinkedHashSet<>(); for (TestFinder each : getFinders()) { result.addAll(each.findClassesForTest(element)); } return result; } public static boolean isTest(PsiElement element) { if (element == null) return false; for (TestFinder each : getFinders()) { if (each.isTest(element)) return true; } return false; } public static 
TestFinder[] getFinders() { return Extensions.getExtensions(TestFinder.EP_NAME); } public static Integer calcTestNameProximity(final String className, final String testName) { int posProximity = testName.indexOf(className); int sizeProximity = testName.length() - className.length(); return posProximity + sizeProximity; } public static List<PsiElement> getSortedElements(final List<Pair<? extends PsiNamedElement, Integer>> elementsWithWeights, final boolean weightsAscending) { return getSortedElements(elementsWithWeights, weightsAscending, null); } public static List<PsiElement> getSortedElements(final List<Pair<? extends PsiNamedElement, Integer>> elementsWithWeights, final boolean weightsAscending, @Nullable final Comparator<PsiElement> sameNameComparator) { Collections.sort(elementsWithWeights, (o1, o2) -> { int result = weightsAscending ? o1.second.compareTo(o2.second) : o2.second.compareTo(o1.second); if (result == 0) result = Comparing.compare(o1.first.getName(), o2.first.getName()); if (result == 0 && sameNameComparator != null) result = sameNameComparator.compare(o1.first, o2.first); return result; }); final List<PsiElement> result = new ArrayList<>(elementsWithWeights.size()); for (Pair<? extends PsiNamedElement, Integer> each : elementsWithWeights) { result.add(each.first); } return result; } public static List<Pair<String, Integer>> collectPossibleClassNamesWithWeights(String testName) { String[] words = NameUtil.splitNameIntoWords(testName); List<Pair<String, Integer>> result = new ArrayList<>(); for (int from = 0; from < words.length; from++) { for (int to = from; to < words.length; to++) { result.add(new Pair<>(StringUtil.join(words, from, to + 1, ""), words.length - from + to)); } } return result; } }
apache-2.0
dungbq89/lnews
vendor/sleeping-owl/admin/src/SleepingOwl/Admin/Admin.php
3169
<?php namespace SleepingOwl\Admin; use SleepingOwl\Html\FormBuilder; use SleepingOwl\Html\HtmlBuilder; use SleepingOwl\Admin\Menu\MenuItem; use SleepingOwl\Admin\Models\ModelItem; use SleepingOwl\Admin\Models\Models; use Illuminate\Config\Repository; use Illuminate\Filesystem\Filesystem; use Illuminate\Routing\Router as IlluminateRouter; use Symfony\Component\Finder\Finder; use Illuminate\Routing\UrlGenerator; /** * Class Admin * * @package SleepingOwl\Admin */ class Admin { /** * Bootstrap filename */ const BOOTSRAP_FILE = 'bootstrap.php'; /** * @var Admin */ public static $instance; /** * @var string */ public $title; /** * @var Router */ public $router; /** * @var MenuItem */ public $menu; /** * @var Models */ public $models; /** * @var HtmlBuilder */ public $htmlBuilder; /** * @var FormBuilder */ public $formBuilder; /** * @var Finder */ protected $finder; /** * @var string */ protected $bootstrapDirectory; /** * @var Filesystem */ protected $filesystem; /** * @param HtmlBuilder $htmlBuilder * @param FormBuilder $formBuilder * @param Finder $finder * @param Repository $config * @param IlluminateRouter $illuminateRouter * @param UrlGenerator $urlGenerator * @param Filesystem $filesystem */ function __construct(HtmlBuilder $htmlBuilder, FormBuilder $formBuilder, Finder $finder, Repository $config, IlluminateRouter $illuminateRouter, UrlGenerator $urlGenerator, Filesystem $filesystem) { static::$instance = $this; $this->htmlBuilder = $htmlBuilder; $this->formBuilder = $formBuilder; $this->finder = $finder; $this->filesystem = $filesystem; $this->title = $config->get('admin.title'); $this->bootstrapDirectory = $config->get('admin.bootstrapDirectory'); $this->router = new Router($illuminateRouter, $config, $urlGenerator, $config->get('admin.prefix')); $this->menu = new MenuItem; $this->models = new Models; $this->requireBootstrap(); } /** * @return Admin */ public static function instance() { if (is_null(static::$instance)) { app('\SleepingOwl\Admin\Admin'); } 
return static::$instance; } /** * */ protected function requireBootstrap() { if (! $this->filesystem->isDirectory($this->bootstrapDirectory)) return; $files = $this->finder->create()->files()->name('/^[^_].+\.php$/')->in($this->bootstrapDirectory); $files->sort(function ($a) { return $a->getFilename() !== static::BOOTSRAP_FILE; }); foreach ($files as $file) { $this->filesystem->requireOnce($file); } } /** * @param $class * @return ModelItem */ public static function model($class) { $modelItem = new ModelItem($class); return $modelItem; } /** * @param null $model * @return MenuItem */ public static function menu($model = null) { return new MenuItem($model); } /** * @param string $content * @param $title * @return string */ public static function view($content, $title = null) { $controller = \App::make('SleepingOwl\Admin\Controllers\AdminController', ['disableFilters' => true]); return $controller->renderCustomContent($title, $content); } }
apache-2.0
MarsuperMammal/pw_gce
lib/puppet/type/ec2_scalingpolicy.rb
1573
Puppet::Type.newtype(:ec2_scalingpolicy) do @doc = 'Type representing an EC2 scaling policy.' ensurable newparam(:name, namevar: true) do desc 'The name of the scaling policy.' validate do |value| fail 'Scaling policies must have a name' if value == '' fail 'name should be a String' unless value.is_a?(String) end end newproperty(:scaling_adjustment) do desc 'The amount to adjust the size of the group by.' validate do |value| fail 'scaling adjustment cannot be blank' if value == '' end munge do |value| value.to_i end end newproperty(:region) do desc 'The region in which to launch the policy.' validate do |value| fail 'region should not contain spaces' if value =~ /\s/ fail 'region should not be blank' if value == '' fail 'region should be a String' unless value.is_a?(String) end end newproperty(:adjustment_type) do desc 'The type of policy.' validate do |value| fail 'adjustment_type should not contain spaces' if value =~ /\s/ fail 'adjustment_type should not be blank' if value == '' fail 'adjustment_type should be a String' unless value.is_a?(String) end end newproperty(:auto_scaling_group) do desc 'The auto scaling group to attach the policy to.' validate do |value| fail 'auto_scaling_group cannot be blank' if value == '' fail 'auto_scaling_group should be a String' unless value.is_a?(String) end end autorequire(:ec2_autoscalinggroup) do self[:auto_scaling_group] end end
apache-2.0
pinotlytics/pinot
pinot-core/src/test/java/com/linkedin/pinot/core/realtime/RealtimeIntegrationTest.java
877
/** * Copyright (C) 2014-2015 LinkedIn Corp. ([email protected]) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.linkedin.pinot.core.realtime; public class RealtimeIntegrationTest { public void endToEndTest(){ //start zk server //setup cluster //setup realtime resource //start controller //start participants } }
apache-2.0
NSAmelchev/ignite
modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/oom/IgniteQueryOOMTestSuite.java
1192
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.ignite.internal.processors.query.oom; import org.junit.runner.RunWith; import org.junit.runners.Suite; /** * Test suite for queries produces OOME in some cases. */ @RunWith(Suite.class) @Suite.SuiteClasses({ //Query history. QueryOOMWithoutQueryParallelismTest.class, QueryOOMWithQueryParallelismTest.class, }) public class IgniteQueryOOMTestSuite { }
apache-2.0
samaitra/ignite
modules/core/src/test/java/org/apache/ignite/cache/IgniteCacheEntryProcessorSequentialCallTest.java
11581
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.ignite.cache; import java.util.concurrent.Callable; import java.util.concurrent.CountDownLatch; import javax.cache.processor.EntryProcessorException; import javax.cache.processor.MutableEntry; import org.apache.ignite.Ignite; import org.apache.ignite.IgniteCache; import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.testframework.GridTestUtils; import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; import org.apache.ignite.transactions.Transaction; import org.apache.ignite.transactions.TransactionConcurrency; import org.apache.ignite.transactions.TransactionIsolation; import org.apache.ignite.transactions.TransactionOptimisticException; import org.junit.Test; /** */ public class IgniteCacheEntryProcessorSequentialCallTest extends GridCommonAbstractTest { /** */ private static final String CACHE = "cache"; /** */ private static final String MVCC_CACHE = "mvccCache"; /** */ private String cacheName; /** {@inheritDoc} */ @Override protected void beforeTestsStarted() throws Exception { startGrids(2); } /** {@inheritDoc} */ @Override protected void afterTestsStopped() throws 
Exception { stopAllGrids(); } /** {@inheritDoc} */ @Override protected void beforeTest() throws Exception { super.beforeTest(); cacheName = CACHE; } /** {@inheritDoc} */ @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName); CacheConfiguration ccfg = cacheConfiguration(CACHE); CacheConfiguration mvccCfg = cacheConfiguration(MVCC_CACHE) .setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT); cfg.setCacheConfiguration(ccfg, mvccCfg); return cfg; } /** * * @return Cache configuration. * @param name Cache name. */ private CacheConfiguration cacheConfiguration(String name) { CacheConfiguration cacheCfg = new CacheConfiguration(name); cacheCfg.setCacheMode(CacheMode.PARTITIONED); cacheCfg.setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL); cacheCfg.setRebalanceMode(CacheRebalanceMode.SYNC); cacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC); cacheCfg.setMaxConcurrentAsyncOperations(0); cacheCfg.setBackups(0); return cacheCfg; } /** * */ @Test public void testOptimisticSerializableTxInvokeSequentialCall() throws Exception { transactionInvokeSequentialCallOnPrimaryNode(TransactionConcurrency.OPTIMISTIC, TransactionIsolation.SERIALIZABLE); transactionInvokeSequentialCallOnNearNode(TransactionConcurrency.OPTIMISTIC, TransactionIsolation.SERIALIZABLE); } /** * */ @Test public void testOptimisticRepeatableReadTxInvokeSequentialCall() throws Exception { transactionInvokeSequentialCallOnPrimaryNode(TransactionConcurrency.OPTIMISTIC, TransactionIsolation.REPEATABLE_READ); transactionInvokeSequentialCallOnNearNode(TransactionConcurrency.OPTIMISTIC, TransactionIsolation.REPEATABLE_READ); } /** * */ @Test public void testOptimisticReadCommittedTxInvokeSequentialCall() throws Exception { transactionInvokeSequentialCallOnPrimaryNode(TransactionConcurrency.OPTIMISTIC, TransactionIsolation.READ_COMMITTED); 
transactionInvokeSequentialCallOnNearNode(TransactionConcurrency.OPTIMISTIC, TransactionIsolation.READ_COMMITTED); } /** * */ @Test public void testPessimisticSerializableTxInvokeSequentialCall() throws Exception { transactionInvokeSequentialCallOnPrimaryNode(TransactionConcurrency.PESSIMISTIC, TransactionIsolation.SERIALIZABLE); transactionInvokeSequentialCallOnNearNode(TransactionConcurrency.PESSIMISTIC, TransactionIsolation.SERIALIZABLE); } /** * */ @Test public void testPessimisticRepeatableReadTxInvokeSequentialCall() throws Exception { transactionInvokeSequentialCallOnPrimaryNode(TransactionConcurrency.PESSIMISTIC, TransactionIsolation.REPEATABLE_READ); transactionInvokeSequentialCallOnNearNode(TransactionConcurrency.PESSIMISTIC, TransactionIsolation.REPEATABLE_READ); } /** * */ @Test public void testPessimisticReadCommittedTxInvokeSequentialCall() throws Exception { transactionInvokeSequentialCallOnPrimaryNode(TransactionConcurrency.PESSIMISTIC, TransactionIsolation.READ_COMMITTED); transactionInvokeSequentialCallOnNearNode(TransactionConcurrency.PESSIMISTIC, TransactionIsolation.READ_COMMITTED); } /** * */ @Test public void testMvccTxInvokeSequentialCall() throws Exception { cacheName = MVCC_CACHE; transactionInvokeSequentialCallOnPrimaryNode(TransactionConcurrency.PESSIMISTIC, TransactionIsolation.REPEATABLE_READ); transactionInvokeSequentialCallOnNearNode(TransactionConcurrency.PESSIMISTIC, TransactionIsolation.REPEATABLE_READ); } /** * Test for sequential entry processor invoking not null value on primary cache. * In this test entry processor gets value from local node. * * @param transactionConcurrency Transaction concurrency. * @param transactionIsolation Transaction isolation. 
*/ public void transactionInvokeSequentialCallOnPrimaryNode(TransactionConcurrency transactionConcurrency, TransactionIsolation transactionIsolation) throws Exception { TestKey key = new TestKey(1L); TestValue val = new TestValue(); val.value("1"); Ignite primaryIgnite; if (ignite(0).affinity(cacheName).isPrimary(ignite(0).cluster().localNode(), key)) primaryIgnite = ignite(0); else primaryIgnite = ignite(1); IgniteCache<TestKey, TestValue> cache = primaryIgnite.cache(cacheName); cache.put(key, val); NotNullCacheEntryProcessor cacheEntryProcessor = new NotNullCacheEntryProcessor(); try (Transaction transaction = primaryIgnite.transactions().txStart(transactionConcurrency, transactionIsolation)) { cache.invoke(key, cacheEntryProcessor); cache.invoke(key, cacheEntryProcessor); transaction.commit(); } cache.remove(key); } /** * Test for sequential entry processor invoking not null value on near cache. * In this test entry processor fetches value from remote node. * * @param transactionConcurrency Transaction concurrency. * @param transactionIsolation Transaction isolation. 
*/ public void transactionInvokeSequentialCallOnNearNode(TransactionConcurrency transactionConcurrency, TransactionIsolation transactionIsolation) throws Exception { TestKey key = new TestKey(1L); TestValue val = new TestValue(); val.value("1"); Ignite nearIgnite; Ignite primaryIgnite; if (ignite(0).affinity(cacheName).isPrimary(ignite(0).cluster().localNode(), key)) { primaryIgnite = ignite(0); nearIgnite = ignite(1); } else { primaryIgnite = ignite(1); nearIgnite = ignite(0); } primaryIgnite.cache(cacheName).put(key, val); IgniteCache<TestKey, TestValue> nearCache = nearIgnite.cache(cacheName); NotNullCacheEntryProcessor cacheEntryProcessor = new NotNullCacheEntryProcessor(); try (Transaction transaction = nearIgnite.transactions().txStart(transactionConcurrency, transactionIsolation)) { nearCache.invoke(key, cacheEntryProcessor); nearCache.invoke(key, cacheEntryProcessor); transaction.commit(); } primaryIgnite.cache(cacheName).remove(key); } /** * Test for sequential entry processor invocation. During transaction value is changed externally, which leads to * optimistic conflict exception. 
*/ @Test @SuppressWarnings("ThrowableNotThrown") public void testTxInvokeSequentialOptimisticConflict() throws Exception { TestKey key = new TestKey(1L); IgniteCache<TestKey, TestValue> cache = ignite(0).cache(CACHE); CountDownLatch latch = new CountDownLatch(1); cache.put(key, new TestValue("1")); multithreadedAsync(new Runnable() { @Override public void run() { try { latch.await(); } catch (InterruptedException e) { fail(); } cache.put(key, new TestValue("2")); } }, 1); Transaction tx = ignite(0).transactions().txStart(TransactionConcurrency.OPTIMISTIC, TransactionIsolation.SERIALIZABLE); cache.invoke(key, new NotNullCacheEntryProcessor()); latch.countDown(); Thread.sleep(1_000); cache.invoke(key, new NotNullCacheEntryProcessor()); GridTestUtils.assertThrowsWithCause(new Callable<Object>() { @Override public Object call() throws Exception { tx.commit(); return null; } }, TransactionOptimisticException.class); cache.remove(key); } /** * Cache entry processor checking whether entry has got non-null value. */ public static class NotNullCacheEntryProcessor implements CacheEntryProcessor<TestKey, TestValue, Object> { /** {@inheritDoc} */ @Override public Object process(MutableEntry entry, Object... arguments) throws EntryProcessorException { assertNotNull(entry.getValue()); return null; } } /** * */ public static class TestKey { /** Value. */ private final Long val; /** * @param val Value. */ public TestKey(Long val) { this.val = val; } } /** * */ public static class TestValue { /** Value. */ private String val; /** * Default constructor. */ public TestValue() { } /** * @param val Value. */ public TestValue(String val) { this.val = val; } /** * @return Value. */ public String value() { return val; } /** * @param val New value. */ public void value(String val) { this.val = val; } } }
apache-2.0
Ant-Droid/android_frameworks_base_OLD
docs/html/sdk/support_api_diff/22.2.0/changes/constructors_index_additions.html
2470
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd"> <HTML style="overflow:auto;"> <HEAD> <meta name="generator" content="JDiff v1.1.0"> <!-- Generated by the JDiff Javadoc doclet --> <!-- (http://www.jdiff.org) --> <meta name="description" content="JDiff is a Javadoc doclet which generates an HTML report of all the packages, classes, constructors, methods, and fields which have been removed, added or changed in any way, including their documentation, when two APIs are compared."> <meta name="keywords" content="diff, jdiff, javadiff, java diff, java difference, API difference, difference between two APIs, API diff, Javadoc, doclet"> <TITLE> Constructor Additions Index </TITLE> <link href="../../../../assets/android-developer-docs.css" rel="stylesheet" type="text/css" /> <link href="../stylesheet-jdiff.css" rel="stylesheet" type="text/css" /> <noscript> <style type="text/css"> body{overflow:auto;} #body-content{position:relative; top:0;} #doc-content{overflow:visible;border-left:3px solid #666;} #side-nav{padding:0;} #side-nav .toggle-list ul {display:block;} #resize-packages-nav{border-bottom:3px solid #666;} </style> </noscript> <style type="text/css"> </style> </HEAD> <BODY class="gc-documentation" style="padding:12px;"> <a NAME="topheader"></a> <table summary="Index for Constructors" width="100%" class="jdiffIndex" border="0" cellspacing="0" cellpadding="0" style="padding-bottom:0;margin-bottom:0;"> <tr> <th class="indexHeader"> Filter the Index: </th> </tr> <tr> <td class="indexText" style="line-height:1.3em;padding-left:2em;"> <a href="constructors_index_all.html" class="staysblack">All Constructors</a> <br> <font color="#999999">Removals</font> <br> <font color="#999999">Additions</font> <br> <font color="#999999">Changes</font> </td> </tr> </table> <div id="indexTableCaption" style="background-color:#eee;padding:0 4px 0 4px;font-size:11px;margin-bottom:1em;"> Listed as: <span 
style="color:#069"><strong>Added</strong></span>, <span style="color:#069"><strike>Removed</strike></span>, <span style="color:#069">Changed</span> </div> <script src="http://www.google-analytics.com/ga.js" type="text/javascript"> </script> <script type="text/javascript"> try { var pageTracker = _gat._getTracker("UA-5831155-1"); pageTracker._setAllowAnchor(true); pageTracker._initData(); pageTracker._trackPageview(); } catch(e) {} </script> </BODY> </HTML>
apache-2.0
shahrzadmn/vaadin
server/src/com/vaadin/event/EventRouter.java
8333
/*
 * Copyright 2000-2014 Vaadin Ltd.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

package com.vaadin.event;

import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.Collection;
import java.util.EventObject;
import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.logging.Logger;

import com.vaadin.server.ErrorEvent;
import com.vaadin.server.ErrorHandler;

/**
 * <code>EventRouter</code> class implementing the inheritable event listening
 * model. For more information on the event model see the
 * {@link com.vaadin.event package documentation}.
 *
 * @author Vaadin Ltd.
 * @since 3.0
 */
@SuppressWarnings("serial")
public class EventRouter implements MethodEventSource {

    /**
     * List of registered listeners. Lazily created on first registration and
     * reset to null by {@link #removeAllListeners()}. A LinkedHashSet is used
     * so listeners are dispatched in registration order.
     */
    private LinkedHashSet<ListenerMethod> listenerList = null;

    /*
     * Registers a new listener with the specified activation method to listen
     * events generated by this component. Don't add a JavaDoc comment here, we
     * use the default documentation from implemented interface.
     */
    @Override
    public void addListener(Class<?> eventType, Object object, Method method) {
        if (listenerList == null) {
            listenerList = new LinkedHashSet<ListenerMethod>();
        }
        listenerList.add(new ListenerMethod(eventType, object, method));
    }

    /*
     * Registers a new listener with the specified named activation method to
     * listen events generated by this component. Don't add a JavaDoc comment
     * here, we use the default documentation from implemented interface.
     */
    @Override
    public void addListener(Class<?> eventType, Object object, String methodName) {
        if (listenerList == null) {
            listenerList = new LinkedHashSet<ListenerMethod>();
        }
        listenerList.add(new ListenerMethod(eventType, object, methodName));
    }

    /*
     * Removes all registered listeners matching the given parameters. Don't add
     * a JavaDoc comment here, we use the default documentation from implemented
     * interface.
     *
     * NOTE(review): despite the wording above, only the FIRST matching listener
     * is removed — the loop returns immediately after one removal. Confirm
     * against the MethodEventSource contract before changing.
     */
    @Override
    public void removeListener(Class<?> eventType, Object target) {
        if (listenerList != null) {
            final Iterator<ListenerMethod> i = listenerList.iterator();
            while (i.hasNext()) {
                final ListenerMethod lm = i.next();
                if (lm.matches(eventType, target)) {
                    i.remove();
                    return;
                }
            }
        }
    }

    /*
     * Removes the event listener methods matching the given given paramaters.
     * Don't add a JavaDoc comment here, we use the default documentation from
     * implemented interface.
     *
     * NOTE(review): as above, only the first matching listener is removed.
     */
    @Override
    public void removeListener(Class<?> eventType, Object target, Method method) {
        if (listenerList != null) {
            final Iterator<ListenerMethod> i = listenerList.iterator();
            while (i.hasNext()) {
                final ListenerMethod lm = i.next();
                if (lm.matches(eventType, target, method)) {
                    i.remove();
                    return;
                }
            }
        }
    }

    /*
     * Removes the event listener method matching the given given parameters.
     * Don't add a JavaDoc comment here, we use the default documentation from
     * implemented interface.
     *
     * NOTE(review): the name lookup below scans all public methods without
     * breaking, so when the target class overloads methodName the LAST method
     * returned by getMethods() wins (parameter types are ignored). Also, only
     * the first matching registered listener is removed.
     */
    @Override
    public void removeListener(Class<?> eventType, Object target, String methodName) {
        // Find the correct method
        final Method[] methods = target.getClass().getMethods();
        Method method = null;
        for (int i = 0; i < methods.length; i++) {
            if (methods[i].getName().equals(methodName)) {
                method = methods[i];
            }
        }
        if (method == null) {
            // No public method with the requested name exists on the target.
            throw new IllegalArgumentException();
        }

        // Remove the listeners
        if (listenerList != null) {
            final Iterator<ListenerMethod> i = listenerList.iterator();
            while (i.hasNext()) {
                final ListenerMethod lm = i.next();
                if (lm.matches(eventType, target, method)) {
                    i.remove();
                    return;
                }
            }
        }
    }

    /**
     * Removes all listeners from event router.
     */
    public void removeAllListeners() {
        listenerList = null;
    }

    /**
     * Sends an event to all registered listeners. The listeners will decide if
     * the activation method should be called or not.
     * <p>
     * Equivalent to {@code fireEvent(event, null)}: any exception thrown by a
     * listener propagates to the caller and aborts delivery to the remaining
     * listeners.
     *
     * @param event
     *            the Event to be sent to all listeners.
     */
    public void fireEvent(EventObject event) {
        fireEvent(event, null);
    }

    /**
     * Sends an event to all registered listeners. The listeners will decide if
     * the activation method should be called or not.
     * <p>
     * If an error handler is set, the processing of other listeners will
     * continue after the error handler method call unless the error handler
     * itself throws an exception.
     *
     * @param event
     *            the Event to be sent to all listeners.
     * @param errorHandler
     *            error handler to use to handle any exceptions thrown by
     *            listeners or null to let the exception propagate to the
     *            caller, preventing further listener calls
     */
    public void fireEvent(EventObject event, ErrorHandler errorHandler) {
        // It is not necessary to send any events if there are no listeners
        if (listenerList != null) {
            // Make a copy of the listener list to allow listeners to be added
            // inside listener methods. Fixes #3605.

            // Send the event to all listeners. The listeners themselves
            // will filter out unwanted events.
            final Object[] listeners = listenerList.toArray();
            for (int i = 0; i < listeners.length; i++) {
                ListenerMethod listenerMethod = (ListenerMethod) listeners[i];
                if (null != errorHandler) {
                    try {
                        listenerMethod.receiveEvent(event);
                    } catch (Exception e) {
                        // Route the failure to the handler and keep dispatching
                        // to the remaining listeners.
                        errorHandler.error(new ErrorEvent(e));
                    }
                } else {
                    listenerMethod.receiveEvent(event);
                }
            }
        }
    }

    /**
     * Checks if the given Event type is listened by a listener registered to
     * this router.
     *
     * @param eventType
     *            the event type to be checked
     * @return true if a listener is registered for the given event type
     */
    public boolean hasListeners(Class<?> eventType) {
        if (listenerList != null) {
            for (ListenerMethod lm : listenerList) {
                if (lm.isType(eventType)) {
                    return true;
                }
            }
        }
        return false;
    }

    /**
     * Returns all listeners that match or extend the given event type.
     *
     * @param eventType
     *            The type of event to return listeners for.
     * @return A collection with all registered listeners. Empty if no listeners
     *         are found.
     */
    public Collection<?> getListeners(Class<?> eventType) {
        List<Object> listeners = new ArrayList<Object>();
        if (listenerList != null) {
            for (ListenerMethod lm : listenerList) {
                if (lm.isOrExtendsType(eventType)) {
                    listeners.add(lm.getTarget());
                }
            }
        }
        return listeners;
    }

    // NOTE(review): currently unused private helper; kept for parity with
    // other Vaadin classes that log through a per-class Logger.
    private Logger getLogger() {
        return Logger.getLogger(EventRouter.class.getName());
    }
}
apache-2.0
zhangjunfang/eclipse-dir
nsp/src/main/webapp/scripts/lib/fckeditor/editor/dialog/common/fck_dialog_common.js
8894
/*
 * FCKeditor - The text editor for Internet - http://www.fckeditor.net
 * Copyright (C) 2003-2008 Frederico Caldeira Knabben
 *
 * == BEGIN LICENSE ==
 *
 * Licensed under the terms of any of the following licenses at your choice:
 *  - GNU General Public License Version 2 or later (the "GPL")
 *    http://www.gnu.org/licenses/gpl.html
 *  - GNU Lesser General Public License Version 2.1 or later (the "LGPL")
 *    http://www.gnu.org/licenses/lgpl.html
 *  - Mozilla Public License Version 1.1 or later (the "MPL")
 *    http://www.mozilla.org/MPL/MPL-1.1.html
 *
 * == END LICENSE ==
 *
 * Useful functions used by almost all dialog window pages. Dialogs should
 * link to this file as the very first script on the page.
 */

// Automatically detect the correct document.domain (#123).
// Strips one leading domain label at a time (www.mytest.example.com ->
// mytest.example.com -> example.com ...) and assigns document.domain at each
// step until the parent frame's document becomes reachable.
(function()
{
	var domain = document.domain ;

	for ( ; ; )
	{
		// Probe whether we can already access a parent property.
		try
		{
			var probe = window.parent.document.domain ;
			break ;
		}
		catch ( e )
		{
		}

		// Remove the next leading domain label.
		domain = domain.replace( /.*?(?:\.|$)/, '' ) ;

		// Ran out of labels: unable to detect the domain.
		if ( domain.length == 0 )
			break ;

		try
		{
			document.domain = domain ;
		}
		catch ( e )
		{
			break ;
		}
	}
})() ;

// Attention: FCKConfig must be available in the page.
// Returns the URL of the shared, minified dialog stylesheet.
function GetCommonDialogCss( prefix )
{
	// CSS minified by http://iceyboard.no-ip.org/projects/css_compressor
	// (see _dev/css_compression.txt).
	var minifiedCss = '|.ImagePreviewArea{border:#000 1px solid;overflow:auto;width:100%;height:170px;background-color:#fff}.FlashPreviewArea{border:#000 1px solid;padding:5px;overflow:auto;width:100%;height:170px;background-color:#fff}.BtnReset{float:left;background-position:center center;background-image:url(images/reset.gif);width:16px;height:16px;background-repeat:no-repeat;border:1px none;font-size:1px}.BtnLocked,.BtnUnlocked{float:left;background-position:center center;background-image:url(images/locked.gif);width:16px;height:16px;background-repeat:no-repeat;border:none 1px;font-size:1px}.BtnUnlocked{background-image:url(images/unlocked.gif)}.BtnOver{border:outset 1px;cursor:pointer;cursor:hand}' ;

	return FCKConfig.BasePath + 'dialog/common/' + minifiedCss ;
}

// Shorthand for document.getElementById. Used for shorter coding.
function GetE( elementId )
{
	return document.getElementById( elementId ) ;
}

// Shows or hides an element. Accepts either an element or its id.
function ShowE( element, isVisible )
{
	var target = ( typeof( element ) == 'string' ) ? GetE( element ) : element ;

	if ( isVisible )
		target.style.display = '' ;
	else
		target.style.display = 'none' ;
}

// Sets an attribute; a null or empty value removes the attribute instead.
function SetAttribute( element, attName, attValue )
{
	if ( attValue == null || attValue.length == 0 )
		element.removeAttribute( attName, 0 ) ;	// 0 : Case Insensitive
	else
		element.setAttribute( attName, attValue, 0 ) ;	// 0 : Case Insensitive
}

// Reads an attribute, returning "valueIfNull" (or '') when the attribute is
// missing or not explicitly specified on the element.
function GetAttribute( element, attName, valueIfNull )
{
	var attNode = element.attributes[ attName ] ;

	if ( attNode == null || !attNode.specified )
		return valueIfNull ? valueIfNull : '' ;

	var value = element.getAttribute( attName, 2 ) ;

	if ( value == null )
		value = attNode.nodeValue ;

	return ( value == null ) ? valueIfNull : value ;
}

// Moves the focus to a field and selects its contents when supported.
function SelectField( elementId )
{
	var field = GetE( elementId ) ;
	field.focus() ;

	// element.select may not be available for some fields (like <select>).
	if ( field.select )
		field.select() ;
}
// Key filter used by numeric text fields. Despite the name, it also accepts
// navigation keys (arrows, Home, End), Backspace, Delete and Tab so the field
// stays editable; only the 0-9 range is accepted as actual input.
var IsDigit = (function()
{
	// Maps DOM3 keyIdentifier strings (used by some WebKit versions) to the
	// legacy keyCode values tested below.
	var KeyIdentifierMap =
	{
		End : 35,
		Home : 36,
		Left : 37,
		Right : 39,
		'U+00007F' : 46		// Delete
	} ;

	return function( e )
	{
		if ( !e )
			e = event ;

		var iCode = ( e.keyCode || e.charCode ) ;

		if ( !iCode && e.keyIdentifier && ( e.keyIdentifier in KeyIdentifierMap ) )
			iCode = KeyIdentifierMap[ e.keyIdentifier ] ;

		return (
				( iCode >= 48 && iCode <= 57 )		// Numbers
			||	( iCode >= 35 && iCode <= 40 )		// Arrows, Home, End
			||	iCode == 8		// Backspace
			||	iCode == 46		// Delete
			||	iCode == 9		// Tab
		) ;
	}
})() ;

// Removes leading and trailing whitespace.
String.prototype.Trim = function()
{
	return this.replace( /(^\s*)|(\s*$)/g, '' ) ;
}

// True when the string begins with "value".
String.prototype.StartsWith = function( value )
{
	return ( this.substr( 0, value.length ) == value ) ;
}

// Returns a copy of the string with "length" characters removed starting at
// index "start".
String.prototype.Remove = function( start, length )
{
	var s = '' ;

	if ( start > 0 )
		s = this.substring( 0, start ) ;

	if ( start + length < this.length )
		s += this.substring( start + length , this.length ) ;

	return s ;
}

// Applies searchArray[i] -> replaceArray[i] substitutions in order. Each
// pattern follows String.replace semantics, so a plain string or non-global
// regex replaces only its first occurrence.
String.prototype.ReplaceAll = function( searchArray, replaceArray )
{
	var replaced = this ;

	for ( var i = 0 ; i < searchArray.length ; i++ )
	{
		replaced = replaced.replace( searchArray[i], replaceArray[i] ) ;
	}

	return replaced ;
}

// Opens the configured file-browser in a centered popup window.
function OpenFileBrowser( url, width, height )
{
	// oEditor must be defined.

	var iLeft = ( oEditor.FCKConfig.ScreenWidth  - width ) / 2 ;
	var iTop  = ( oEditor.FCKConfig.ScreenHeight - height ) / 2 ;

	var sOptions = "toolbar=no,status=no,resizable=yes,dependent=yes,scrollbars=yes" ;
	sOptions += ",width=" + width ;
	sOptions += ",height=" + height ;
	sOptions += ",left=" + iLeft ;
	sOptions += ",top=" + iTop ;

	window.open( url, 'FCKBrowseWindow', sOptions ) ;
}

/**
 * Utility function to create/update an element with a name attribute in IE,
 * so it behaves properly when moved around. It also allows to change the name
 * or other special attributes in an existing node.
 *	oEditor		: instance of FCKeditor where the element will be created.
 *	oOriginal	: current element being edited, or null if it has to be created.
 *	nodeName	: string with the name of the element to create.
 *	oAttributes	: hash object with the attributes that must be set at creation
 *				  time in IE. Those attributes will be set also after the
 *				  element has been created for any other browser, to avoid
 *				  redundant code.
 *
 * NOTE(review): the IE branch rebuilds the element from markup because legacy
 * IE cannot change the name/type attribute of an existing node (#676). The
 * statement order around Selection.SelectionData is load-bearing; do not
 * reorder.
 */
function CreateNamedElement( oEditor, oOriginal, nodeName, oAttributes )
{
	var oNewNode ;

	// IE doesn't allow easily to change properties of an existing object,
	// so remove the old and force the creation of a new one.
	var oldNode = null ;
	if ( oOriginal && oEditor.FCKBrowserInfo.IsIE )
	{
		// Force the creation only if some of the special attributes have
		// changed:
		var bChanged = false;
		for( var attName in oAttributes )
			bChanged |= ( oOriginal.getAttribute( attName, 2 ) != oAttributes[attName] ) ;

		if ( bChanged )
		{
			oldNode = oOriginal ;
			oOriginal = null ;
		}
	}

	// If the node existed (and it's not IE), then we just have to update its
	// attributes
	if ( oOriginal )
	{
		oNewNode = oOriginal ;
	}
	else
	{
		// #676, IE doesn't play nice with the name or type attribute
		if ( oEditor.FCKBrowserInfo.IsIE )
		{
			// Build the element's opening markup with all special attributes
			// inlined, since they cannot be set after creation on legacy IE.
			var sbHTML = [] ;
			sbHTML.push( '<' + nodeName ) ;
			for ( var prop in oAttributes )
			{
				sbHTML.push( ' ' + prop + '="' + oAttributes[prop] + '"' ) ;
			}
			sbHTML.push( '>' ) ;
			if ( !oEditor.FCKListsLib.EmptyElements[ nodeName.toLowerCase() ] )
				sbHTML.push( '</' + nodeName + '>' ) ;

			oNewNode = oEditor.FCK.EditorDocument.createElement( sbHTML.join( '' ) ) ;

			// Check if we are just changing the properties of an existing node:
			// copy its properties
			if ( oldNode )
			{
				CopyAttributes( oldNode, oNewNode, oAttributes ) ;
				oEditor.FCKDomTools.MoveChildren( oldNode, oNewNode ) ;
				oldNode.parentNode.removeChild( oldNode ) ;
				oldNode = null ;

				if ( oEditor.FCK.Selection.SelectionData )
				{
					// Trick to refresh the selection object and avoid error in
					// fckdialog.html Selection.EnsureSelection
					var oSel = oEditor.FCK.EditorDocument.selection ;
					oEditor.FCK.Selection.SelectionData = oSel.createRange() ;
					// Now oSel.type will be 'None' reflecting the real
					// situation
				}
			}
			oNewNode = oEditor.FCK.InsertElement( oNewNode ) ;

			// FCK.Selection.SelectionData is broken by now since we've
			// deleted the previously selected element. So we need to reassign
			// it.
			if ( oEditor.FCK.Selection.SelectionData )
			{
				var range = oEditor.FCK.EditorDocument.body.createControlRange() ;
				range.add( oNewNode ) ;
				oEditor.FCK.Selection.SelectionData = range ;
			}
		}
		else
		{
			oNewNode = oEditor.FCK.InsertElement( nodeName ) ;
		}
	}

	// Set the basic attributes
	for ( var attName in oAttributes )
		oNewNode.setAttribute( attName, oAttributes[attName], 0 ) ;	// 0 : Case Insensitive

	return oNewNode ;
}

// Copy all the attributes from one node to the other, kinda like a clone.
// But oSkipAttributes is an object with the attributes that must NOT be
// copied.
function CopyAttributes( oSource, oDest, oSkipAttributes )
{
	var aAttributes = oSource.attributes ;

	for ( var n = 0 ; n < aAttributes.length ; n++ )
	{
		var oAttribute = aAttributes[n] ;

		if ( oAttribute.specified )
		{
			var sAttName = oAttribute.nodeName ;
			// We can set the type only once, so do it with the proper value,
			// not copying it.
			if ( sAttName in oSkipAttributes )
				continue ;

			var sAttValue = oSource.getAttribute( sAttName, 2 ) ;
			if ( sAttValue == null )
				sAttValue = oAttribute.nodeValue ;

			oDest.setAttribute( sAttName, sAttValue, 0 ) ;	// 0 : Case Insensitive
		}
	}
	// The style:
	oDest.style.cssText = oSource.style.cssText ;
}
bsd-2-clause
hoangt/tpzsimul.gem5
src/cpu/pred/bpred_unit.cc
2246
/*
 * Copyright (c) 2004-2006 The Regents of The University of Michigan
 * Copyright (c) 2010 The University of Edinburgh
 * Copyright (c) 2012 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Kevin Lim
 *          Timothy M. Jones
 */

#include "cpu/pred/2bit_local.hh"
#include "cpu/pred/bi_mode.hh"
#include "cpu/pred/bpred_unit_impl.hh"
#include "cpu/pred/tournament.hh"

// Factory for the branch-predictor SimObject: instantiates the concrete
// predictor selected by the "predType" parameter. Recognized values are
// "local" (2-bit local), "tournament" and "bi-mode"; anything else aborts
// the simulation via fatal().
BPredUnit *
BranchPredictorParams::create()
{
    // Setup the selected predictor.
    if (predType == "local") {
        return new LocalBP(this);
    } else if (predType == "tournament") {
        return new TournamentBP(this);
    } else if (predType == "bi-mode") {
        return new BiModeBP(this);
    } else {
        // NOTE(review): no return after this call — assumes gem5's fatal()
        // terminates the run (noreturn); confirm against base/misc.hh.
        fatal("Invalid BP selected!");
    }
}
bsd-3-clause
scheib/chromium
components/minidump_uploader/android/java/src/org/chromium/components/minidump_uploader/util/HttpURLConnectionFactory.java
548
// Copyright 2016 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. package org.chromium.components.minidump_uploader.util; import java.net.HttpURLConnection; /** * A factory class for creating a HttpURLConnection. */ public interface HttpURLConnectionFactory { /** * @param url the url to communicate with * @return a HttpURLConnection to communicate with |url| */ HttpURLConnection createHttpURLConnection(String url); }
bsd-3-clause
catapult-project/catapult
third_party/polymer/components/shadycss/tests/scoping-api.html
5179
<!doctype html>
<!--
@license
Copyright (c) 2018 The Polymer Project Authors. All rights reserved.
This code may only be used under the BSD style license found at http://polymer.github.io/LICENSE.txt
The complete set of authors may be found at http://polymer.github.io/AUTHORS.txt
The complete set of contributors may be found at http://polymer.github.io/CONTRIBUTORS.txt
Code distributed by Google as part of the polymer project is also subject to an
additional IP rights grant found at http://polymer.github.io/PATENTS.txt
-->
<html>
<head>
  <meta charset="utf-8">
  <!-- Stub WCT hook: delay test start until HTML imports have resolved. -->
  <script>
    WCT = {
      waitFor(cb) {
        window.HTMLImports.whenReady(cb)
      }
    }
  </script>
  <script src="./test-flags.js"></script>
  <script src="../node_modules/wct-browser-legacy/browser.js"></script>
  <script src="../node_modules/@webcomponents/webcomponents-platform/webcomponents-platform.js"></script>
  <script src="../node_modules/es6-promise/dist/es6-promise.auto.min.js"></script>
  <script src="../node_modules/@webcomponents/template/template.js"></script>
  <script src="../node_modules/@webcomponents/html-imports/html-imports.min.js"></script>
  <!-- Force the ShadyDOM polyfill even on browsers with native shadow DOM,
       so the synchronous scoping API under test is actually exercised. -->
  <script>
    window.ShadyDOM = {force: true}
  </script>
  <script src="../node_modules/@webcomponents/shadydom/shadydom.min.js"></script>
  <script>
    // disable document watcher
    window.ShadyDOM.handlesDynamicScoping = true;
  </script>
  <script src="../node_modules/@webcomponents/custom-elements/custom-elements.min.js"></script>
  <script src="../scoping-shim.min.js"></script>
  <script src="../apply-shim.min.js"></script>
  <script src="../custom-style-interface.min.js"></script>
  <script src="module/generated/make-element.js"></script>
</head>
<body>
  <!-- Template registered as <sync-element> by makeElement() below. -->
  <template id="sync-element">
    <style>
      div {
        background: rgb(255, 0, 0);
        border: 10px solid black;
      }
    </style>
    <div id="inner">Test</div>
  </template>
  <!-- Scratch container; emptied after every test by teardown(). -->
  <div id="arena"></div>
  <script>
    // Asserts a node's computed style value for "property" (defaults to
    // border-top-width, which the sync-element template sets to 10px).
    function assertComputedStyle(node, expectedValue, property = 'border-top-width') {
      const actualValue = getComputedStyle(node).getPropertyValue(property).trim();
      assert.equal(actualValue, expectedValue, `${property} does not have the expected value`);
    }

    suite('Synchronous Scoping API', function() {
      const arena = document.querySelector('#arena');
      // Thin wrappers over the ScopingShim API under test.
      const csfn = (node) => {
        return window.ShadyCSS.ScopingShim.currentScopeForNode(node);
      };
      const sfn = (node) => {
        return window.ShadyCSS.ScopingShim.scopeForNode(node);
      };
      const scopeNode = (node, scope) => {
        window.ShadyCSS.ScopingShim.scopeNode(node, scope);
      }
      const unscopeNode = (node, scope) => {
        window.ShadyCSS.ScopingShim.unscopeNode(node, scope);
      }
      let el;
      suiteSetup(function() {
        makeElement('sync-element');
      });
      setup(function() {
        el = document.createElement('sync-element');
        arena.appendChild(el);
      });
      teardown(function() {
        arena.innerHTML = '';
      });
      test('mutation observer is disabled', function(done) {
        const inner = el.shadowRoot.querySelector('#inner');
        arena.appendChild(inner);
        // With handlesDynamicScoping set, no observer should re-scope the
        // moved node; the scoped background style must survive the move.
        setTimeout(() => {
          assertComputedStyle(inner, 'rgb(255, 0, 0)', 'background-color');
          done();
        }, 100);
      });
      test('currentScopeForNode', function() {
        assert.equal(csfn(el), '', 'sync-scoping should be document scope');
        const inner = el.shadowRoot.querySelector('#inner');
        assert.equal(csfn(inner), 'sync-element', 'inner div should have sync-element scope');
        const disconnected = document.createElement('sync-element');
        assert.equal(csfn(disconnected), '', 'disconnected element should have a blank scope')
        const dynamic = document.createElement('div');
        el.shadowRoot.appendChild(dynamic);
        assert.equal(csfn(dynamic), '', 'dynamically appended node will not be scoped yet');
      });
      test('scopeForNode', function() {
        assert.equal(sfn(el), '', 'sync-scoping should be document scope');
        const inner = el.shadowRoot.querySelector('#inner');
        assert.equal(sfn(inner), 'sync-element', 'inner div should have sync-element scope');
        const disconnected = document.createElement('sync-element');
        assert.equal(sfn(disconnected), '', 'disconnected element should have a blank scope');
        const dynamic = document.createElement('div');
        el.shadowRoot.appendChild(dynamic);
        assert.equal(sfn(dynamic), 'sync-element', 'dynamically created node should have sync-element scope');
      });
      test('scopeNode', function() {
        const div = document.createElement('div');
        el.shadowRoot.appendChild(div);
        scopeNode(div, sfn(div));
        assertComputedStyle(div, '10px');
        assertComputedStyle(div, 'rgb(255, 0, 0)', 'background-color');
      });
      test('unscopeNode', function() {
        const inner = el.shadowRoot.querySelector('#inner');
        arena.appendChild(inner);
        unscopeNode(inner, csfn(inner));
        assertComputedStyle(inner, '0px');
      });
    });
  </script>
</body>
</html>
bsd-3-clause
mitchellrj/genshi
genshi/filters/tests/i18n.py
89114
# -*- coding: utf-8 -*- # # Copyright (C) 2007-2010 Edgewall Software # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. The terms # are also available at http://genshi.edgewall.org/wiki/License. # # This software consists of voluntary contributions made by many # individuals. For the exact contribution history, see the revision # history and logs, available at http://genshi.edgewall.org/log/. from datetime import datetime import doctest from gettext import NullTranslations import unittest from genshi.core import Attrs from genshi.template import MarkupTemplate, Context from genshi.filters.i18n import Translator, extract from genshi.input import HTML from genshi.compat import IS_PYTHON2, StringIO class DummyTranslations(NullTranslations): _domains = {} def __init__(self, catalog=()): NullTranslations.__init__(self) self._catalog = catalog or {} self.plural = lambda n: n != 1 def add_domain(self, domain, catalog): translation = DummyTranslations(catalog) translation.add_fallback(self) self._domains[domain] = translation def _domain_call(self, func, domain, *args, **kwargs): return getattr(self._domains.get(domain, self), func)(*args, **kwargs) if IS_PYTHON2: def ugettext(self, message): missing = object() tmsg = self._catalog.get(message, missing) if tmsg is missing: if self._fallback: return self._fallback.ugettext(message) return unicode(message) return tmsg else: def gettext(self, message): missing = object() tmsg = self._catalog.get(message, missing) if tmsg is missing: if self._fallback: return self._fallback.gettext(message) return unicode(message) return tmsg if IS_PYTHON2: def dugettext(self, domain, message): return self._domain_call('ugettext', domain, message) else: def dgettext(self, domain, message): return self._domain_call('gettext', domain, message) def ungettext(self, msgid1, msgid2, n): try: return self._catalog[(msgid1, self.plural(n))] except 
KeyError: if self._fallback: return self._fallback.ngettext(msgid1, msgid2, n) if n == 1: return msgid1 else: return msgid2 if not IS_PYTHON2: ngettext = ungettext del ungettext if IS_PYTHON2: def dungettext(self, domain, singular, plural, numeral): return self._domain_call('ungettext', domain, singular, plural, numeral) else: def dngettext(self, domain, singular, plural, numeral): return self._domain_call('ngettext', domain, singular, plural, numeral) class TranslatorTestCase(unittest.TestCase): def test_translate_included_attribute_text(self): """ Verify that translated attributes end up in a proper `Attrs` instance. """ html = HTML(u"""<html> <span title="Foo"></span> </html>""") translator = Translator(lambda s: u"Voh") stream = list(html.filter(translator)) kind, data, pos = stream[2] assert isinstance(data[1], Attrs) def test_extract_without_text(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"> <p title="Bar">Foo</p> ${ngettext("Singular", "Plural", num)} </html>""") translator = Translator(extract_text=False) messages = list(translator.extract(tmpl.stream)) self.assertEqual(1, len(messages)) self.assertEqual((3, 'ngettext', ('Singular', 'Plural', None), []), messages[0]) def test_extract_plural_form(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"> ${ngettext("Singular", "Plural", num)} </html>""") translator = Translator() messages = list(translator.extract(tmpl.stream)) self.assertEqual(1, len(messages)) self.assertEqual((2, 'ngettext', ('Singular', 'Plural', None), []), messages[0]) def test_extract_funky_plural_form(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"> ${ngettext(len(items), *widget.display_names)} </html>""") translator = Translator() messages = list(translator.extract(tmpl.stream)) self.assertEqual(1, len(messages)) self.assertEqual((2, 'ngettext', (None, None), []), messages[0]) def test_extract_gettext_with_unicode_string(self): tmpl = 
MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"> ${gettext("Grüße")} </html>""") translator = Translator() messages = list(translator.extract(tmpl.stream)) self.assertEqual(1, len(messages)) self.assertEqual((2, 'gettext', u'Gr\xfc\xdfe', []), messages[0]) def test_extract_included_attribute_text(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"> <span title="Foo"></span> </html>""") translator = Translator() messages = list(translator.extract(tmpl.stream)) self.assertEqual(1, len(messages)) self.assertEqual((2, None, 'Foo', []), messages[0]) def test_extract_attribute_expr(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"> <input type="submit" value="${_('Save')}" /> </html>""") translator = Translator() messages = list(translator.extract(tmpl.stream)) self.assertEqual(1, len(messages)) self.assertEqual((2, '_', 'Save', []), messages[0]) def test_extract_non_included_attribute_interpolated(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"> <a href="#anchor_${num}">Foo</a> </html>""") translator = Translator() messages = list(translator.extract(tmpl.stream)) self.assertEqual(1, len(messages)) self.assertEqual((2, None, 'Foo', []), messages[0]) def test_extract_text_from_sub(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"> <py:if test="foo">Foo</py:if> </html>""") translator = Translator() messages = list(translator.extract(tmpl.stream)) self.assertEqual(1, len(messages)) self.assertEqual((2, None, 'Foo', []), messages[0]) def test_ignore_tag_with_fixed_xml_lang(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"> <p xml:lang="en">(c) 2007 Edgewall Software</p> </html>""") translator = Translator() messages = list(translator.extract(tmpl.stream)) self.assertEqual(0, len(messages)) def test_extract_tag_with_variable_xml_lang(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"> <p 
xml:lang="${lang}">(c) 2007 Edgewall Software</p> </html>""") translator = Translator() messages = list(translator.extract(tmpl.stream)) self.assertEqual(1, len(messages)) self.assertEqual((2, None, '(c) 2007 Edgewall Software', []), messages[0]) def test_ignore_attribute_with_expression(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"> <input type="submit" value="Reply" title="Reply to comment $num" /> </html>""") translator = Translator() messages = list(translator.extract(tmpl.stream)) self.assertEqual(0, len(messages)) def test_translate_with_translations_object(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <p i18n:msg="" i18n:comment="As in foo bar">Foo</p> </html>""") translator = Translator(DummyTranslations({'Foo': 'Voh'})) translator.setup(tmpl) self.assertEqual("""<html> <p>Voh</p> </html>""", tmpl.generate().render()) class MsgDirectiveTestCase(unittest.TestCase): def test_extract_i18n_msg(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <p i18n:msg=""> Please see <a href="help.html">Help</a> for details. </p> </html>""") translator = Translator() tmpl.add_directives(Translator.NAMESPACE, translator) messages = list(translator.extract(tmpl.stream)) self.assertEqual(1, len(messages)) self.assertEqual('Please see [1:Help] for details.', messages[0][2]) def test_translate_i18n_msg(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <p i18n:msg=""> Please see <a href="help.html">Help</a> for details. </p> </html>""") gettext = lambda s: u"Für Details siehe bitte [1:Hilfe]." 
        translator = Translator(gettext)
        translator.setup(tmpl)
        self.assertEqual(u"""<html>
          <p>Für Details siehe bitte <a href="help.html">Hilfe</a>.</p>
        </html>""".encode('utf-8'), tmpl.generate().render(encoding='utf-8'))

    # Extraction: a one-line i18n:msg body yields a single message with the
    # nested <a> element collapsed to a numbered [1:...] placeholder.
    def test_extract_i18n_msg_nonewline(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <p i18n:msg="">Please see <a href="help.html">Help</a></p>
        </html>""")
        translator = Translator()
        tmpl.add_directives(Translator.NAMESPACE, translator)
        messages = list(translator.extract(tmpl.stream))
        self.assertEqual(1, len(messages))
        self.assertEqual('Please see [1:Help]', messages[0][2])

    # Translation: the [1:...] placeholder in the catalog string is re-expanded
    # to the original <a> markup in the rendered output.
    def test_translate_i18n_msg_nonewline(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <p i18n:msg="">Please see <a href="help.html">Help</a></p>
        </html>""")
        gettext = lambda s: u"Für Details siehe bitte [1:Hilfe]"
        translator = Translator(gettext)
        translator.setup(tmpl)
        self.assertEqual(u"""<html>
          <p>Für Details siehe bitte <a href="help.html">Hilfe</a></p>
        </html>""", tmpl.generate().render())

    # Same as above but using the <i18n:msg> element form instead of the
    # i18n:msg attribute form.
    def test_extract_i18n_msg_elt_nonewline(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <i18n:msg>Please see <a href="help.html">Help</a></i18n:msg>
        </html>""")
        translator = Translator()
        tmpl.add_directives(Translator.NAMESPACE, translator)
        messages = list(translator.extract(tmpl.stream))
        self.assertEqual(1, len(messages))
        self.assertEqual('Please see [1:Help]', messages[0][2])

    # The <i18n:msg> element itself is stripped from the output; only its
    # translated content remains.
    def test_translate_i18n_msg_elt_nonewline(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <i18n:msg>Please see <a href="help.html">Help</a></i18n:msg>
        </html>""")
        gettext = lambda s: u"Für Details siehe bitte [1:Hilfe]"
        translator = Translator(gettext)
        translator.setup(tmpl)
        self.assertEqual(u"""<html>
          Für Details siehe bitte <a href="help.html">Hilfe</a>
        </html>""".encode('utf-8'), tmpl.generate().render(encoding='utf-8'))

    # Translatable attributes (title on <p> and <a>) are extracted as separate
    # messages alongside the element body; each tuple is (lineno, func, msg, comments).
    def test_extract_i18n_msg_with_attributes(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <p i18n:msg="" title="A helpful paragraph">
            Please see <a href="help.html" title="Click for help">Help</a>
          </p>
        </html>""")
        translator = Translator()
        translator.setup(tmpl)
        messages = list(translator.extract(tmpl.stream))
        self.assertEqual(3, len(messages))
        self.assertEqual('A helpful paragraph', messages[0][2])
        self.assertEqual(3, messages[0][0])
        self.assertEqual('Click for help', messages[1][2])
        self.assertEqual(4, messages[1][0])
        self.assertEqual('Please see [1:Help]', messages[2][2])
        self.assertEqual(3, messages[2][0])

    # Both attribute values and the message body are looked up in the catalog;
    # the lambda raises KeyError on any unexpected msgid.
    def test_translate_i18n_msg_with_attributes(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <p i18n:msg="" title="A helpful paragraph">
            Please see <a href="help.html" title="Click for help">Help</a>
          </p>
        </html>""")
        translator = Translator(lambda msgid: {
            'A helpful paragraph': 'Ein hilfreicher Absatz',
            'Click for help': u'Klicken für Hilfe',
            'Please see [1:Help]': u'Siehe bitte [1:Hilfe]'
        }[msgid])
        translator.setup(tmpl)
        self.assertEqual(u"""<html>
          <p title="Ein hilfreicher Absatz">Siehe bitte <a href="help.html" title="Klicken für Hilfe">Hilfe</a></p>
        </html>""", tmpl.generate().render(encoding=None))

    # Attributes computed via ${_('...')} expressions are still picked up by
    # extraction (the _() call is recognized inside attribute expressions).
    def test_extract_i18n_msg_with_dynamic_attributes(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <p i18n:msg="" title="${_('A helpful paragraph')}">
            Please see <a href="help.html" title="${_('Click for help')}">Help</a>
          </p>
        </html>""")
        translator = Translator()
        translator.setup(tmpl)
        messages = list(translator.extract(tmpl.stream))
        self.assertEqual(3, len(messages))
        self.assertEqual('A helpful paragraph', messages[0][2])
        self.assertEqual(3, messages[0][0])
        self.assertEqual('Click for help', messages[1][2])
        self.assertEqual(4, messages[1][0])
        self.assertEqual('Please see [1:Help]', messages[2][2])
        self.assertEqual(3, messages[2][0])

    # At render time the _ callable must be supplied in the template context
    # (here: translator.translate) for the dynamic attributes to resolve.
    def test_translate_i18n_msg_with_dynamic_attributes(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <p i18n:msg="" title="${_('A helpful paragraph')}">
            Please see <a href="help.html" title="${_('Click for help')}">Help</a>
          </p>
        </html>""")
        translator = Translator(lambda msgid: {
            'A helpful paragraph': 'Ein hilfreicher Absatz',
            'Click for help': u'Klicken für Hilfe',
            'Please see [1:Help]': u'Siehe bitte [1:Hilfe]'
        }[msgid])
        translator.setup(tmpl)
        self.assertEqual(u"""<html>
          <p title="Ein hilfreicher Absatz">Siehe bitte <a href="help.html" title="Klicken für Hilfe">Hilfe</a></p>
        </html>""", tmpl.generate(_=translator.translate).render(encoding=None))

    # Element form with attributes on nested children: the title attribute and
    # the body are extracted as two messages.
    def test_extract_i18n_msg_as_element_with_attributes(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <i18n:msg params="">
            Please see <a href="help.html" title="Click for help">Help</a>
          </i18n:msg>
        </html>""")
        translator = Translator()
        translator.setup(tmpl)
        messages = list(translator.extract(tmpl.stream))
        self.assertEqual(2, len(messages))
        self.assertEqual('Click for help', messages[0][2])
        self.assertEqual(4, messages[0][0])
        self.assertEqual('Please see [1:Help]', messages[1][2])
        self.assertEqual(3, messages[1][0])

    def test_translate_i18n_msg_as_element_with_attributes(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <i18n:msg params="">
            Please see <a href="help.html" title="Click for help">Help</a>
          </i18n:msg>
        </html>""")
        translator = Translator(lambda msgid: {
            'Click for help': u'Klicken für Hilfe',
            'Please see [1:Help]': u'Siehe bitte [1:Hilfe]'
        }[msgid])
        translator.setup(tmpl)
        self.assertEqual(u"""<html>
          Siehe bitte <a href="help.html" title="Klicken für Hilfe">Hilfe</a>
        </html>""", tmpl.generate().render(encoding=None))

    # Nested markup produces nested placeholders: [1:[2:Help] page].
    def test_extract_i18n_msg_nested(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <p i18n:msg="">
            Please see <a href="help.html"><em>Help</em> page</a> for details.
          </p>
        </html>""")
        translator = Translator()
        tmpl.add_directives(Translator.NAMESPACE, translator)
        messages = list(translator.extract(tmpl.stream))
        self.assertEqual(1, len(messages))
        self.assertEqual('Please see [1:[2:Help] page] for details.',
                         messages[0][2])

    # The translation may restructure content within the nested placeholders;
    # here the [2:...] text is moved entirely inside the link.
    def test_translate_i18n_msg_nested(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <p i18n:msg="">
            Please see <a href="help.html"><em>Help</em> page</a> for details.
          </p>
        </html>""")
        gettext = lambda s: u"Für Details siehe bitte [1:[2:Hilfeseite]]."
        translator = Translator(gettext)
        translator.setup(tmpl)
        self.assertEqual(u"""<html>
          <p>Für Details siehe bitte <a href="help.html"><em>Hilfeseite</em></a>.</p>
        </html>""", tmpl.generate().render())

    # Empty (childless) elements such as <input/> become empty placeholders [2:].
    def test_extract_i18n_msg_label_with_nested_input(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <div i18n:msg="">
            <label><input type="text" size="3" name="daysback" value="30" /> days back</label>
          </div>
        </html>""")
        translator = Translator()
        tmpl.add_directives(Translator.NAMESPACE, translator)
        messages = list(translator.extract(tmpl.stream))
        self.assertEqual(1, len(messages))
        self.assertEqual('[1:[2:] days back]', messages[0][2])

    def test_translate_i18n_msg_label_with_nested_input(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <div i18n:msg="">
            <label><input type="text" size="3" name="daysback" value="30" /> foo bar</label>
          </div>
        </html>""")
        gettext = lambda s: "[1:[2:] foo bar]"
        translator = Translator(gettext)
        translator.setup(tmpl)
        self.assertEqual("""<html>
          <div><label><input type="text" size="3" name="daysback" value="30"/> foo bar</label></div>
        </html>""", tmpl.generate().render())

    def test_extract_i18n_msg_empty(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <p i18n:msg="">
            Show me <input type="text" name="num" /> entries per page.
          </p>
        </html>""")
        translator = Translator()
        tmpl.add_directives(Translator.NAMESPACE, translator)
        messages = list(translator.extract(tmpl.stream))
        self.assertEqual(1, len(messages))
        self.assertEqual('Show me [1:] entries per page.', messages[0][2])

    # The empty placeholder can be reordered by the translation; the <input>
    # moves to the front of the sentence.
    def test_translate_i18n_msg_empty(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <p i18n:msg="">
            Show me <input type="text" name="num" /> entries per page.
          </p>
        </html>""")
        gettext = lambda s: u"[1:] Einträge pro Seite anzeigen."
        translator = Translator(gettext)
        translator.setup(tmpl)
        self.assertEqual(u"""<html>
          <p><input type="text" name="num"/> Einträge pro Seite anzeigen.</p>
        </html>""", tmpl.generate().render())

    # Two sibling child elements get distinct placeholder indices [1:] and [2:].
    def test_extract_i18n_msg_multiple(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <p i18n:msg="">
            Please see <a href="help.html">Help</a> for <em>details</em>.
          </p>
        </html>""")
        translator = Translator()
        tmpl.add_directives(Translator.NAMESPACE, translator)
        messages = list(translator.extract(tmpl.stream))
        self.assertEqual(1, len(messages))
        self.assertEqual('Please see [1:Help] for [2:details].', messages[0][2])

    # The translation reorders [2:] before [1:]; output markup follows the
    # translated order, not the template order.
    def test_translate_i18n_msg_multiple(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <p i18n:msg="">
            Please see <a href="help.html">Help</a> for <em>details</em>.
          </p>
        </html>""")
        gettext = lambda s: u"Für [2:Details] siehe bitte [1:Hilfe]."
        translator = Translator(gettext)
        translator.setup(tmpl)
        self.assertEqual(u"""<html>
          <p>Für <em>Details</em> siehe bitte <a href="help.html">Hilfe</a>.</p>
        </html>""", tmpl.generate().render())

    def test_extract_i18n_msg_multiple_empty(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <p i18n:msg="">
            Show me <input type="text" name="num" /> entries per page, starting at page <input type="text" name="num" />.
          </p>
        </html>""")
        translator = Translator()
        tmpl.add_directives(Translator.NAMESPACE, translator)
        messages = list(translator.extract(tmpl.stream))
        self.assertEqual(1, len(messages))
        self.assertEqual('Show me [1:] entries per page, starting at page [2:].',
                         messages[0][2])

    # Note the template is parsed with an explicit encoding here; the expected
    # output mixes a literal umlaut escape (\u00E4) with utf-8 rendering.
    def test_translate_i18n_msg_multiple_empty(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <p i18n:msg="">
            Show me <input type="text" name="num" /> entries per page, starting at page <input type="text" name="num" />.
          </p>
        </html>""", encoding='utf-8')
        gettext = lambda s: u"[1:] Einträge pro Seite, beginnend auf Seite [2:]."
        translator = Translator(gettext)
        translator.setup(tmpl)
        self.assertEqual(u"""<html>
          <p><input type="text" name="num"/> Eintr\u00E4ge pro Seite, beginnend auf Seite <input type="text" name="num"/>.</p>
        </html>""".encode('utf-8'), tmpl.generate().render(encoding='utf-8'))

    # A named parameter in i18n:msg turns the ${...} expression into a
    # %(name)s substitution in the extracted msgid.
    def test_extract_i18n_msg_with_param(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <p i18n:msg="name">
            Hello, ${user.name}!
          </p>
        </html>""")
        translator = Translator()
        tmpl.add_directives(Translator.NAMESPACE, translator)
        messages = list(translator.extract(tmpl.stream))
        self.assertEqual(1, len(messages))
        self.assertEqual('Hello, %(name)s!', messages[0][2])

    def test_translate_i18n_msg_with_param(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <p i18n:msg="name">
            Hello, ${user.name}!
          </p>
        </html>""")
        gettext = lambda s: u"Hallo, %(name)s!"
        translator = Translator(gettext)
        translator.setup(tmpl)
        self.assertEqual("""<html>
          <p>Hallo, Jim!</p>
        </html>""", tmpl.generate(user=dict(name='Jim')).render())

    # The %(name)s parameter may be moved anywhere in the translated string.
    def test_translate_i18n_msg_with_param_reordered(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <p i18n:msg="name">
            Hello, ${user.name}!
          </p>
        </html>""")
        gettext = lambda s: u"%(name)s, sei gegrüßt!"
        translator = Translator(gettext)
        translator.setup(tmpl)
        self.assertEqual(u"""<html>
          <p>Jim, sei gegrüßt!</p>
        </html>""", tmpl.generate(user=dict(name='Jim')).render())

    # Expressions inside attributes of nested elements (href="#${anchor}") are
    # still evaluated; only the element body participates in the msgid.
    def test_translate_i18n_msg_with_attribute_param(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <p i18n:msg="">
            Hello, <a href="#${anchor}">dude</a>!
          </p>
        </html>""")
        gettext = lambda s: u"Sei gegrüßt, [1:Alter]!"
        translator = Translator(gettext)
        translator.setup(tmpl)
        self.assertEqual(u"""<html>
          <p>Sei gegrüßt, <a href="#42">Alter</a>!</p>
        </html>""", tmpl.generate(anchor='42').render())

    # Two declared params map the two expressions to %(name)s and %(time)s in
    # declaration order, regardless of the expressions' content.
    def test_extract_i18n_msg_with_two_params(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <p i18n:msg="name, time">
            Posted by ${post.author} at ${entry.time.strftime('%H:%m')}
          </p>
        </html>""")
        translator = Translator()
        tmpl.add_directives(Translator.NAMESPACE, translator)
        messages = list(translator.extract(tmpl.stream))
        self.assertEqual(1, len(messages))
        self.assertEqual('Posted by %(name)s at %(time)s', messages[0][2])

    def test_translate_i18n_msg_with_two_params(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <p i18n:msg="name, time">
            Written by ${entry.author} at ${entry.time.strftime('%H:%M')}
          </p>
        </html>""")
        gettext = lambda s: u"%(name)s schrieb dies um %(time)s"
        translator = Translator(gettext)
        translator.setup(tmpl)
        entry = {
            'author': 'Jim',
            'time': datetime(2008, 4, 1, 14, 30)
        }
        self.assertEqual("""<html>
          <p>Jim schrieb dies um 14:30</p>
        </html>""", tmpl.generate(entry=entry).render())

    # A py:attrs directive on a nested element does not disturb extraction;
    # the element still collapses to an empty [1:] placeholder.
    def test_extract_i18n_msg_with_directive(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <p i18n:msg="">
            Show me <input type="text" name="num" py:attrs="{'value': x}" /> entries per page.
          </p>
        </html>""")
        translator = Translator()
        tmpl.add_directives(Translator.NAMESPACE, translator)
        messages = list(translator.extract(tmpl.stream))
        self.assertEqual(1, len(messages))
        self.assertEqual('Show me [1:] entries per page.', messages[0][2])

    # The py:attrs directive is evaluated at render time (value="x" appears in
    # the output) and the surrounding message is still translated.
    def test_translate_i18n_msg_with_directive(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <p i18n:msg="">
            Show me <input type="text" name="num" py:attrs="{'value': 'x'}" /> entries per page.
          </p>
        </html>""")
        gettext = lambda s: u"[1:] Einträge pro Seite anzeigen."
        translator = Translator(gettext)
        translator.setup(tmpl)
        self.assertEqual(u"""<html>
          <p><input type="text" name="num" value="x"/> Einträge pro Seite anzeigen.</p>
        </html>""", tmpl.generate().render())

    # i18n:comment attaches a translator comment to the message; the attribute
    # order relative to i18n:msg must not matter (both orders checked).
    def test_extract_i18n_msg_with_comment(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <p i18n:comment="As in foo bar" i18n:msg="">Foo</p>
        </html>""")
        translator = Translator()
        tmpl.add_directives(Translator.NAMESPACE, translator)
        messages = list(translator.extract(tmpl.stream))
        self.assertEqual(1, len(messages))
        self.assertEqual((3, None, 'Foo', ['As in foo bar']), messages[0])
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <p i18n:msg="" i18n:comment="As in foo bar">Foo</p>
        </html>""")
        translator = Translator()
        tmpl.add_directives(Translator.NAMESPACE, translator)
        messages = list(translator.extract(tmpl.stream))
        self.assertEqual(1, len(messages))
        self.assertEqual((3, None, 'Foo', ['As in foo bar']), messages[0])

    # The i18n:comment attribute is consumed by the filter and never rendered.
    def test_translate_i18n_msg_with_comment(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <p i18n:msg="" i18n:comment="As in foo bar">Foo</p>
        </html>""")
        gettext = lambda s: u"Voh"
        translator = Translator(gettext)
        translator.setup(tmpl)
        self.assertEqual("""<html>
          <p>Voh</p>
        </html>""", tmpl.generate().render())

    def test_extract_i18n_msg_with_attr(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <p i18n:msg="" title="Foo bar">Foo</p>
        </html>""")
        translator = Translator()
        messages = list(translator.extract(tmpl.stream))
        self.assertEqual(2, len(messages))
        self.assertEqual((3, None, 'Foo bar', []), messages[0])
        self.assertEqual((3, None, 'Foo', []), messages[1])

    # Manual wiring (filters.insert + add_directives) instead of setup();
    # NOTE(review): the local `gettext` lambda here is unused — the
    # DummyTranslations catalog is what drives the lookups.
    def test_translate_i18n_msg_with_attr(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <p i18n:msg="" title="Foo bar">Foo</p>
        </html>""")
        gettext = lambda s: u"Voh"
        translator = Translator(DummyTranslations({
            'Foo': 'Voh',
            'Foo bar': u'Voh bär'
        }))
        tmpl.filters.insert(0, translator)
        tmpl.add_directives(Translator.NAMESPACE, translator)
        self.assertEqual(u"""<html>
          <p title="Voh bär">Voh</p>
        </html>""", tmpl.generate().render())

    # py:strip removes the <p> wrappers but the contents are still translated,
    # for either ordering of the two directives.
    def test_translate_i18n_msg_and_py_strip_directives(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <p i18n:msg="" py:strip="">Foo</p>
          <p py:strip="" i18n:msg="">Foo</p>
        </html>""")
        translator = Translator(DummyTranslations({'Foo': 'Voh'}))
        translator.setup(tmpl)
        self.assertEqual("""<html>
          Voh
          Voh
        </html>""", tmpl.generate().render())

    # Regression for ticket #300: params of an <i18n:msg> element map the
    # ${...} expressions positionally to %(date)s / %(author)s.
    def test_i18n_msg_ticket_300_extract(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <i18n:msg params="date, author">
            Changed ${ '10/12/2008' } ago by ${ 'me, the author' }
          </i18n:msg>
        </html>""")
        translator = Translator()
        tmpl.add_directives(Translator.NAMESPACE, translator)
        messages = list(translator.extract(tmpl.stream))
        self.assertEqual(1, len(messages))
        self.assertEqual(
            (3, None, 'Changed %(date)s ago by %(author)s', []), messages[0]
        )

    def test_i18n_msg_ticket_300_translate(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <i18n:msg params="date, author">
            Changed ${ date } ago by ${ author }
          </i18n:msg>
        </html>""")
        translations = DummyTranslations({
            'Changed %(date)s ago by %(author)s':
                u'Modificado à %(date)s por %(author)s'
        })
        translator = Translator(translations)
        translator.setup(tmpl)
        self.assertEqual(u"""<html>
          Modificado à um dia por Pedro
        </html>""".encode('utf-8'),
            tmpl.generate(date='um dia', author="Pedro").render(encoding='utf-8'))

    # Regression for ticket #251: literal '[' / ']' in message text must be
    # backslash-escaped in the msgid so they do not clash with placeholder
    # syntax; &nbsp; entities become \xa0 characters.
    def test_i18n_msg_ticket_251_extract(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <p i18n:msg=""><tt><b>Translation[&nbsp;0&nbsp;]</b>: <em>One coin</em></tt></p>
        </html>""")
        translator = Translator()
        tmpl.add_directives(Translator.NAMESPACE, translator)
        messages = list(translator.extract(tmpl.stream))
        self.assertEqual(1, len(messages))
        self.assertEqual(
            (3, None, u'[1:[2:Translation\\[\xa00\xa0\\]]: [3:One coin]]', []),
            messages[0]
        )

    # The escaped brackets round-trip through the catalog and render as plain
    # '[' / ']' (with \xa0 non-breaking spaces) in the output.
    def test_i18n_msg_ticket_251_translate(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <p i18n:msg=""><tt><b>Translation[&nbsp;0&nbsp;]</b>: <em>One coin</em></tt></p>
        </html>""")
        translations = DummyTranslations({
            u'[1:[2:Translation\\[\xa00\xa0\\]]: [3:One coin]]':
                u'[1:[2:Trandução\\[\xa00\xa0\\]]: [3:Uma moeda]]'
        })
        translator = Translator(translations)
        translator.setup(tmpl)
        self.assertEqual(u"""<html>
          <p><tt><b>Trandução[\xa00\xa0]</b>: <em>Uma moeda</em></tt></p>
        </html>""".encode('utf-8'), tmpl.generate().render(encoding='utf-8'))

    # A py:with on the same element does not interfere with extraction; the
    # msgid preserves the template's internal newlines and indentation.
    # NOTE(review): exact run of spaces after each \n reconstructed to the
    # template's 12-space continuation indent — confirm against upstream.
    def test_extract_i18n_msg_with_other_directives_nested(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <p i18n:msg="" py:with="q = quote_plus(message[:80])">Before you do
            that, though, please first try
            <strong><a
            href="${trac.homepage}search?ticket=yes&amp;noquickjump=1&amp;q=$q">searching</a>
            for similar issues</strong>, as it is quite likely that this problem
            has been reported before. For questions about installation
            and configuration of Trac, please try the
            <a href="${trac.homepage}wiki/MailingList">mailing list</a>
            instead of filing a ticket.
          </p>
        </html>""")
        translator = Translator()
        translator.setup(tmpl)
        messages = list(translator.extract(tmpl.stream))
        self.assertEqual(1, len(messages))
        self.assertEqual(
            'Before you do that, though, please first try\n            '
            '[1:[2:searching]\n            for similar issues], as it is '
            'quite likely that this problem\n            has been reported '
            'before. For questions about installation\n            and '
            'configuration of Trac, please try the\n            '
            '[3:mailing list]\n            instead of filing a ticket.',
            messages[0][2]
        )

    # Full round-trip of the multi-line message above through a Portuguese
    # catalog; expression substitution (trac.homepage) happens after lookup.
    def test_translate_i18n_msg_with_other_directives_nested(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <p i18n:msg="">Before you do that, though, please first try
            <strong><a
            href="${trac.homepage}search?ticket=yes&amp;noquickjump=1&amp;q=q">searching</a>
            for similar issues</strong>, as it is quite likely that this problem
            has been reported before. For questions about installation
            and configuration of Trac, please try the
            <a href="${trac.homepage}wiki/MailingList">mailing list</a>
            instead of filing a ticket.
          </p>
        </html>""")
        translations = DummyTranslations({
            'Before you do that, though, please first try\n            '
            '[1:[2:searching]\n            for similar issues], as it is '
            'quite likely that this problem\n            has been reported '
            'before. For questions about installation\n            and '
            'configuration of Trac, please try the\n            '
            '[3:mailing list]\n            instead of filing a ticket.':
                u'Antes de o fazer, porém,\n            '
                u'[1:por favor tente [2:procurar]\n            por problemas semelhantes], uma vez que '
                u'é muito provável que este problema\n            já tenha sido reportado '
                u'anteriormente. Para questões relativas à instalação\n            e '
                u'configuração do Trac, por favor tente a\n            '
                u'[3:mailing list]\n            em vez de criar um assunto.'
        })
        translator = Translator(translations)
        translator.setup(tmpl)
        messages = list(translator.extract(tmpl.stream))
        self.assertEqual(1, len(messages))
        ctx = Context()
        ctx.push({'trac': {'homepage': 'http://trac.edgewall.org/'}})
        self.assertEqual(u"""<html>
          <p>Antes de o fazer, porém,
            <strong>por favor tente <a
            href="http://trac.edgewall.org/search?ticket=yes&amp;noquickjump=1&amp;q=q">procurar</a>
            por problemas semelhantes</strong>, uma vez que é muito provável que este problema
            já tenha sido reportado anteriormente. Para questões relativas à instalação
            e configuração do Trac, por favor tente a
            <a href="http://trac.edgewall.org/wiki/MailingList">mailing list</a>
            em vez de criar um assunto.</p>
        </html>""", tmpl.generate(ctx).render())

    # See: http://genshi.edgewall.org/ticket/300#comment:10 — py:if plus
    # i18n:msg on the same element; the directive content may be reordered
    # by the translation without breaking placeholder matching.
    def test_i18n_msg_with_other_nested_directives_with_reordered_content(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <p py:if="not editable" class="hint" i18n:msg="">
            <strong>Note:</strong> This repository is defined in
            <code><a href="${ 'href.wiki(TracIni)' }">trac.ini</a></code>
            and cannot be edited on this page.
          </p>
        </html>""")
        translations = DummyTranslations({
            '[1:Note:] This repository is defined in\n            '
            '[2:[3:trac.ini]]\n            and cannot be edited on this page.':
                u'[1:Nota:] Este repositório está definido em \n            '
                u'[2:[3:trac.ini]]\n            e não pode ser editado nesta página.',
        })
        translator = Translator(translations)
        translator.setup(tmpl)
        messages = list(translator.extract(tmpl.stream))
        self.assertEqual(1, len(messages))
        self.assertEqual(
            '[1:Note:] This repository is defined in\n            '
            '[2:[3:trac.ini]]\n            and cannot be edited on this page.',
            messages[0][2]
        )
        self.assertEqual(u"""<html>
          <p class="hint"><strong>Nota:</strong> Este repositório está definido em
            <code><a href="href.wiki(TracIni)">trac.ini</a></code>
            e não pode ser editado nesta página.</p>
        </html>""".encode('utf-8'),
            tmpl.generate(editable=False).render(encoding='utf-8'))

    # py:strip does not change the extracted msgid — only rendering.
    def test_extract_i18n_msg_with_py_strip(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <p i18n:msg="" py:strip="">
            Please see <a href="help.html">Help</a> for details.
          </p>
        </html>""")
        translator = Translator()
        tmpl.add_directives(Translator.NAMESPACE, translator)
        messages = list(translator.extract(tmpl.stream))
        self.assertEqual(1, len(messages))
        self.assertEqual((3, None, 'Please see [1:Help] for details.', []),
                         messages[0])

    def test_extract_i18n_msg_with_py_strip_and_comment(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <p i18n:msg="" py:strip="" i18n:comment="Foo">
            Please see <a href="help.html">Help</a> for details.
          </p>
        </html>""")
        translator = Translator()
        tmpl.add_directives(Translator.NAMESPACE, translator)
        messages = list(translator.extract(tmpl.stream))
        self.assertEqual(1, len(messages))
        self.assertEqual((3, None, 'Please see [1:Help] for details.', ['Foo']),
                         messages[0])

    def test_translate_i18n_msg_and_comment_with_py_strip_directives(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <p i18n:msg="" i18n:comment="As in foo bar" py:strip="">Foo</p>
          <p py:strip="" i18n:msg="" i18n:comment="As in foo bar">Foo</p>
        </html>""")
        translator = Translator(DummyTranslations({'Foo': 'Voh'}))
        translator.setup(tmpl)
        self.assertEqual("""<html>
          Voh
          Voh
        </html>""", tmpl.generate().render())

    # Regression for ticket #404: text after a placeholder ("tail") must be
    # kept when the catalog has no entry (DummyTranslations passes through).
    def test_translate_i18n_msg_ticket_404(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <p i18n:msg="first,second">
            $first <span>$second</span> KEPT <span>Inside a tag</span> tail
          </p></html>""")
        translator = Translator(DummyTranslations())
        translator.setup(tmpl)
        self.assertEqual("""<html>
          <p>FIRST <span>SECOND</span> KEPT <span>Inside a tag</span> tail"""
                         """</p></html>""",
                         tmpl.generate(first="FIRST", second="SECOND").render())


# Tests for the plural-selection directives: i18n:choose picks the
# i18n:singular or i18n:plural branch based on its numeral expression.
class ChooseDirectiveTestCase(unittest.TestCase):

    # numeral == 1 selects the singular branch; anything else the plural one.
    def test_translate_i18n_choose_as_attribute(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <div i18n:choose="one">
            <p i18n:singular="">FooBar</p>
            <p i18n:plural="">FooBars</p>
          </div>
          <div i18n:choose="two">
            <p i18n:singular="">FooBar</p>
            <p i18n:plural="">FooBars</p>
          </div>
        </html>""")
        translations = DummyTranslations()
        translator = Translator(translations)
        translator.setup(tmpl)
        self.assertEqual("""<html>
          <div>
            <p>FooBar</p>
          </div>
          <div>
            <p>FooBars</p>
          </div>
        </html>""", tmpl.generate(one=1, two=2).render())

    # Element form: the <i18n:choose> wrapper itself is stripped from output.
    def test_translate_i18n_choose_as_directive(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <i18n:choose numeral="two">
            <p i18n:singular="">FooBar</p>
            <p i18n:plural="">FooBars</p>
          </i18n:choose>
          <i18n:choose numeral="one">
            <p i18n:singular="">FooBar</p>
            <p i18n:plural="">FooBars</p>
          </i18n:choose>
        </html>""")
        translations = DummyTranslations()
        translator = Translator(translations)
        translator.setup(tmpl)
        self.assertEqual("""<html>
          <p>FooBars</p>
          <p>FooBar</p>
        </html>""", tmpl.generate(one=1, two=2).render())

    # py:strip on the selected branch removes its wrapper element; py:strip on
    # the unselected branch is irrelevant. All four combinations are covered.
    def test_translate_i18n_choose_as_directive_singular_and_plural_with_strip(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <i18n:choose numeral="two">
            <p i18n:singular="" py:strip="">FooBar Singular with Strip</p>
            <p i18n:plural="">FooBars Plural without Strip</p>
          </i18n:choose>
          <i18n:choose numeral="two">
            <p i18n:singular="">FooBar singular without strip</p>
            <p i18n:plural="" py:strip="">FooBars plural with strip</p>
          </i18n:choose>
          <i18n:choose numeral="one">
            <p i18n:singular="">FooBar singular without strip</p>
            <p i18n:plural="" py:strip="">FooBars plural with strip</p>
          </i18n:choose>
          <i18n:choose numeral="one">
            <p i18n:singular="" py:strip="">FooBar singular with strip</p>
            <p i18n:plural="">FooBars plural without strip</p>
          </i18n:choose>
        </html>""")
        translations = DummyTranslations()
        translator = Translator(translations)
        translator.setup(tmpl)
        self.assertEqual("""<html>
          <p>FooBars Plural without Strip</p>
          FooBars plural with strip
          <p>FooBar singular without strip</p>
          FooBar singular with strip
        </html>""", tmpl.generate(one=1, two=2).render())

    # Ticket 371: <i18n:singular>/<i18n:plural> element form; catalog lookups
    # are by (msgid, n) pair (ungettext-style) with plain-string fallbacks.
    def test_translate_i18n_choose_plural_singular_as_directive(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <i18n:choose numeral="two">
            <i18n:singular>FooBar</i18n:singular>
            <i18n:plural>FooBars</i18n:plural>
          </i18n:choose>
          <i18n:choose numeral="one">
            <i18n:singular>FooBar</i18n:singular>
            <i18n:plural>FooBars</i18n:plural>
          </i18n:choose>
        </html>""")
        translations = DummyTranslations({
            ('FooBar', 0): 'FuBar',
            ('FooBars', 1): 'FuBars',
            'FooBar': 'FuBar',
            'FooBars': 'FuBars',
        })
        translator = Translator(translations)
        translator.setup(tmpl)
        self.assertEqual("""<html>
          FuBars
          FuBar
        </html>""", tmpl.generate(one=1, two=2).render())

    # "numeral; param, param" syntax: params become %(...)s substitutions in
    # the looked-up singular/plural strings.
    def test_translate_i18n_choose_as_attribute_with_params(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <div i18n:choose="two; fname, lname">
            <p i18n:singular="">Foo $fname $lname</p>
            <p i18n:plural="">Foos $fname $lname</p>
          </div>
        </html>""")
        translations = DummyTranslations({
            ('Foo %(fname)s %(lname)s', 0): 'Voh %(fname)s %(lname)s',
            ('Foo %(fname)s %(lname)s', 1): 'Vohs %(fname)s %(lname)s',
            'Foo %(fname)s %(lname)s': 'Voh %(fname)s %(lname)s',
            'Foos %(fname)s %(lname)s': 'Vohs %(fname)s %(lname)s',
        })
        translator = Translator(translations)
        translator.setup(tmpl)
        self.assertEqual("""<html>
          <div>
            <p>Vohs John Doe</p>
          </div>
        </html>""", tmpl.generate(two=2, fname='John', lname='Doe').render())

    # i18n:domain on the root element routes plural lookups to the 'foo'
    # domain catalog registered via add_domain().
    def test_translate_i18n_choose_as_attribute_with_params_and_domain_as_param(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n"
            i18n:domain="foo">
          <div i18n:choose="two; fname, lname">
            <p i18n:singular="">Foo $fname $lname</p>
            <p i18n:plural="">Foos $fname $lname</p>
          </div>
        </html>""")
        translations = DummyTranslations()
        translations.add_domain('foo', {
            ('Foo %(fname)s %(lname)s', 0): 'Voh %(fname)s %(lname)s',
            ('Foo %(fname)s %(lname)s', 1): 'Vohs %(fname)s %(lname)s',
            'Foo %(fname)s %(lname)s': 'Voh %(fname)s %(lname)s',
            'Foos %(fname)s %(lname)s': 'Vohs %(fname)s %(lname)s',
        })
        translator = Translator(translations)
        translator.setup(tmpl)
        self.assertEqual("""<html>
          <div>
            <p>Vohs John Doe</p>
          </div>
        </html>""", tmpl.generate(two=2, fname='John', lname='Doe').render())

    # Element form with a separate params="" attribute instead of the
    # "numeral; params" shorthand.
    def test_translate_i18n_choose_as_directive_with_params(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <i18n:choose numeral="two" params="fname, lname">
            <p i18n:singular="">Foo ${fname} ${lname}</p>
            <p i18n:plural="">Foos ${fname} ${lname}</p>
          </i18n:choose>
          <i18n:choose numeral="one" params="fname, lname">
            <p i18n:singular="">Foo ${fname} ${lname}</p>
            <p i18n:plural="">Foos ${fname} ${lname}</p>
          </i18n:choose>
        </html>""")
        translations = DummyTranslations({
            ('Foo %(fname)s %(lname)s', 0): 'Voh %(fname)s %(lname)s',
            ('Foo %(fname)s %(lname)s', 1): 'Vohs %(fname)s %(lname)s',
            'Foo %(fname)s %(lname)s': 'Voh %(fname)s %(lname)s',
            'Foos %(fname)s %(lname)s': 'Vohs %(fname)s %(lname)s',
        })
        translator = Translator(translations)
        translator.setup(tmpl)
        self.assertEqual("""<html>
          <p>Vohs John Doe</p>
          <p>Voh John Doe</p>
        </html>""", tmpl.generate(one=1, two=2,
                                  fname='John', lname='Doe').render())

    # The <i18n:domain> element scopes domain lookups: the choose inside it
    # uses the 'foo' catalog, while the one outside falls back to the default
    # (empty) catalog and renders the untranslated singular "Foo John Doe".
    def test_translate_i18n_choose_as_directive_with_params_and_domain_as_directive(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <i18n:domain name="foo">
            <i18n:choose numeral="two" params="fname, lname">
              <p i18n:singular="">Foo ${fname} ${lname}</p>
              <p i18n:plural="">Foos ${fname} ${lname}</p>
            </i18n:choose>
          </i18n:domain>
          <i18n:choose numeral="one" params="fname, lname">
            <p i18n:singular="">Foo ${fname} ${lname}</p>
            <p i18n:plural="">Foos ${fname} ${lname}</p>
          </i18n:choose>
        </html>""")
        translations = DummyTranslations()
        translations.add_domain('foo', {
            ('Foo %(fname)s %(lname)s', 0): 'Voh %(fname)s %(lname)s',
            ('Foo %(fname)s %(lname)s', 1): 'Vohs %(fname)s %(lname)s',
            'Foo %(fname)s %(lname)s': 'Voh %(fname)s %(lname)s',
            'Foos %(fname)s %(lname)s': 'Vohs %(fname)s %(lname)s',
        })
        translator = Translator(translations)
        translator.setup(tmpl)
        self.assertEqual("""<html>
          <p>Vohs John Doe</p>
          <p>Foo John Doe</p>
        </html>""", tmpl.generate(one=1, two=2,
                                  fname='John', lname='Doe').render())

    # Extraction of choose blocks yields 'ngettext' entries with a
    # (singular, plural) msgid tuple; lineno points at each i18n:choose.
    def test_extract_i18n_choose_as_attribute(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <div i18n:choose="one">
            <p i18n:singular="">FooBar</p>
            <p i18n:plural="">FooBars</p>
          </div>
          <div i18n:choose="two">
            <p i18n:singular="">FooBar</p>
            <p i18n:plural="">FooBars</p>
          </div>
        </html>""")
        translator = Translator()
        tmpl.add_directives(Translator.NAMESPACE, translator)
        messages = list(translator.extract(tmpl.stream))
        self.assertEqual(2, len(messages))
        self.assertEqual((3, 'ngettext', ('FooBar', 'FooBars'), []),
                         messages[0])
        self.assertEqual((7, 'ngettext', ('FooBar', 'FooBars'), []),
                         messages[1])

    def test_extract_i18n_choose_as_directive(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <i18n:choose numeral="two">
            <p i18n:singular="">FooBar</p>
            <p i18n:plural="">FooBars</p>
          </i18n:choose>
          <i18n:choose numeral="one">
            <p i18n:singular="">FooBar</p>
            <p i18n:plural="">FooBars</p>
          </i18n:choose>
        </html>""")
        translator = Translator()
        tmpl.add_directives(Translator.NAMESPACE, translator)
        messages = list(translator.extract(tmpl.stream))
        self.assertEqual(2, len(messages))
        self.assertEqual((3, 'ngettext', ('FooBar', 'FooBars'), []),
                         messages[0])
        self.assertEqual((7, 'ngettext', ('FooBar', 'FooBars'), []),
                         messages[1])

    def test_extract_i18n_choose_as_attribute_with_params(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <div i18n:choose="two; fname, lname">
            <p i18n:singular="">Foo $fname $lname</p>
            <p i18n:plural="">Foos $fname $lname</p>
          </div>
        </html>""")
        translator = Translator()
        tmpl.add_directives(Translator.NAMESPACE, translator)
        messages = list(translator.extract(tmpl.stream))
        self.assertEqual(1, len(messages))
        self.assertEqual((3, 'ngettext', ('Foo %(fname)s %(lname)s',
                                          'Foos %(fname)s %(lname)s'), []),
                         messages[0])

    # The i18n:domain attribute shifts the root open tag onto two lines, so
    # the reported lineno moves from 3 to 4.
    def test_extract_i18n_choose_as_attribute_with_params_and_domain_as_param(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n"
            i18n:domain="foo">
          <div i18n:choose="two; fname, lname">
            <p i18n:singular="">Foo $fname $lname</p>
            <p i18n:plural="">Foos $fname $lname</p>
          </div>
        </html>""")
        translator = Translator()
        tmpl.add_directives(Translator.NAMESPACE, translator)
        messages = list(translator.extract(tmpl.stream))
        self.assertEqual(1, len(messages))
        self.assertEqual((4, 'ngettext', ('Foo %(fname)s %(lname)s',
                                          'Foos %(fname)s %(lname)s'), []),
                         messages[0])

    def test_extract_i18n_choose_as_directive_with_params(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <i18n:choose numeral="two" params="fname, lname">
            <p i18n:singular="">Foo ${fname} ${lname}</p>
            <p i18n:plural="">Foos ${fname} ${lname}</p>
          </i18n:choose>
          <i18n:choose numeral="one" params="fname, lname">
            <p i18n:singular="">Foo ${fname} ${lname}</p>
            <p i18n:plural="">Foos ${fname} ${lname}</p>
          </i18n:choose>
        </html>""")
        translator = Translator()
        tmpl.add_directives(Translator.NAMESPACE, translator)
        messages = list(translator.extract(tmpl.stream))
        self.assertEqual(2, len(messages))
        self.assertEqual((3, 'ngettext', ('Foo %(fname)s %(lname)s',
                                          'Foos %(fname)s %(lname)s'), []),
                         messages[0])
        self.assertEqual((7, 'ngettext', ('Foo %(fname)s %(lname)s',
                                          'Foos %(fname)s %(lname)s'), []),
                         messages[1])

    # (continues past this chunk — template string intentionally left open)
    def test_extract_i18n_choose_as_directive_with_params_and_domain_as_directive(self):
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
            xmlns:i18n="http://genshi.edgewall.org/i18n">
          <i18n:domain name="foo">
            <i18n:choose numeral="two" params="fname, lname">
              <p i18n:singular="">Foo ${fname} ${lname}</p>
              <p i18n:plural="">Foos ${fname} ${lname}</p>
            </i18n:choose>
          </i18n:domain>
          <i18n:choose numeral="one" params="fname, lname">
            <p 
i18n:singular="">Foo ${fname} ${lname}</p> <p i18n:plural="">Foos ${fname} ${lname}</p> </i18n:choose> </html>""") translator = Translator() tmpl.add_directives(Translator.NAMESPACE, translator) messages = list(translator.extract(tmpl.stream)) self.assertEqual(2, len(messages)) self.assertEqual((4, 'ngettext', ('Foo %(fname)s %(lname)s', 'Foos %(fname)s %(lname)s'), []), messages[0]) self.assertEqual((9, 'ngettext', ('Foo %(fname)s %(lname)s', 'Foos %(fname)s %(lname)s'), []), messages[1]) def test_extract_i18n_choose_as_attribute_with_params_and_comment(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <div i18n:choose="two; fname, lname" i18n:comment="As in Foo Bar"> <p i18n:singular="">Foo $fname $lname</p> <p i18n:plural="">Foos $fname $lname</p> </div> </html>""") translator = Translator() tmpl.add_directives(Translator.NAMESPACE, translator) messages = list(translator.extract(tmpl.stream)) self.assertEqual(1, len(messages)) self.assertEqual((3, 'ngettext', ('Foo %(fname)s %(lname)s', 'Foos %(fname)s %(lname)s'), ['As in Foo Bar']), messages[0]) def test_extract_i18n_choose_as_directive_with_params_and_comment(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <i18n:choose numeral="two" params="fname, lname" i18n:comment="As in Foo Bar"> <p i18n:singular="">Foo ${fname} ${lname}</p> <p i18n:plural="">Foos ${fname} ${lname}</p> </i18n:choose> </html>""") translator = Translator() tmpl.add_directives(Translator.NAMESPACE, translator) messages = list(translator.extract(tmpl.stream)) self.assertEqual(1, len(messages)) self.assertEqual((3, 'ngettext', ('Foo %(fname)s %(lname)s', 'Foos %(fname)s %(lname)s'), ['As in Foo Bar']), messages[0]) def test_extract_i18n_choose_with_attributes(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <p 
i18n:choose="num; num" title="Things"> <i18n:singular> There is <a href="$link" title="View thing">${num} thing</a>. </i18n:singular> <i18n:plural> There are <a href="$link" title="View things">${num} things</a>. </i18n:plural> </p> </html>""") translator = Translator() translator.setup(tmpl) messages = list(translator.extract(tmpl.stream)) self.assertEqual(4, len(messages)) self.assertEqual((3, None, 'Things', []), messages[0]) self.assertEqual((5, None, 'View thing', []), messages[1]) self.assertEqual((8, None, 'View things', []), messages[2]) self.assertEqual( (3, 'ngettext', ('There is [1:%(num)s thing].', 'There are [1:%(num)s things].'), []), messages[3]) def test_translate_i18n_choose_with_attributes(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <p i18n:choose="num; num" title="Things"> <i18n:singular> There is <a href="$link" title="View thing">${num} thing</a>. </i18n:singular> <i18n:plural> There are <a href="$link" title="View things">${num} things</a>. </i18n:plural> </p> </html>""") translations = DummyTranslations({ 'Things': 'Sachen', 'View thing': 'Sache betrachten', 'View things': 'Sachen betrachten', ('There is [1:%(num)s thing].', 0): 'Da ist [1:%(num)s Sache].', ('There is [1:%(num)s thing].', 1): 'Da sind [1:%(num)s Sachen].' }) translator = Translator(translations) translator.setup(tmpl) self.assertEqual(u"""<html> <p title="Sachen"> Da ist <a href="/things" title="Sache betrachten">1 Sache</a>. </p> </html>""", tmpl.generate(link="/things", num=1).render(encoding=None)) self.assertEqual(u"""<html> <p title="Sachen"> Da sind <a href="/things" title="Sachen betrachten">3 Sachen</a>. 
</p> </html>""", tmpl.generate(link="/things", num=3).render(encoding=None)) def test_extract_i18n_choose_as_element_with_attributes(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <i18n:choose numeral="num" params="num"> <p i18n:singular="" title="Things"> There is <a href="$link" title="View thing">${num} thing</a>. </p> <p i18n:plural="" title="Things"> There are <a href="$link" title="View things">${num} things</a>. </p> </i18n:choose> </html>""") translator = Translator() translator.setup(tmpl) messages = list(translator.extract(tmpl.stream)) self.assertEqual(5, len(messages)) self.assertEqual((4, None, 'Things', []), messages[0]) self.assertEqual((5, None, 'View thing', []), messages[1]) self.assertEqual((7, None, 'Things', []), messages[2]) self.assertEqual((8, None, 'View things', []), messages[3]) self.assertEqual( (3, 'ngettext', ('There is [1:%(num)s thing].', 'There are [1:%(num)s things].'), []), messages[4]) def test_translate_i18n_choose_as_element_with_attributes(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <i18n:choose numeral="num" params="num"> <p i18n:singular="" title="Things"> There is <a href="$link" title="View thing">${num} thing</a>. </p> <p i18n:plural="" title="Things"> There are <a href="$link" title="View things">${num} things</a>. </p> </i18n:choose> </html>""") translations = DummyTranslations({ 'Things': 'Sachen', 'View thing': 'Sache betrachten', 'View things': 'Sachen betrachten', ('There is [1:%(num)s thing].', 0): 'Da ist [1:%(num)s Sache].', ('There is [1:%(num)s thing].', 1): 'Da sind [1:%(num)s Sachen].' 
}) translator = Translator(translations) translator.setup(tmpl) self.assertEqual(u"""<html> <p title="Sachen">Da ist <a href="/things" title="Sache betrachten">1 Sache</a>.</p> </html>""", tmpl.generate(link="/things", num=1).render(encoding=None)) self.assertEqual(u"""<html> <p title="Sachen">Da sind <a href="/things" title="Sachen betrachten">3 Sachen</a>.</p> </html>""", tmpl.generate(link="/things", num=3).render(encoding=None)) def test_translate_i18n_choose_and_py_strip(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <div i18n:choose="two; fname, lname"> <p i18n:singular="">Foo $fname $lname</p> <p i18n:plural="">Foos $fname $lname</p> </div> </html>""") translations = DummyTranslations({ ('Foo %(fname)s %(lname)s', 0): 'Voh %(fname)s %(lname)s', ('Foo %(fname)s %(lname)s', 1): 'Vohs %(fname)s %(lname)s', 'Foo %(fname)s %(lname)s': 'Voh %(fname)s %(lname)s', 'Foos %(fname)s %(lname)s': 'Vohs %(fname)s %(lname)s', }) translator = Translator(translations) translator.setup(tmpl) self.assertEqual("""<html> <div> <p>Vohs John Doe</p> </div> </html>""", tmpl.generate(two=2, fname='John', lname='Doe').render()) def test_translate_i18n_choose_and_domain_and_py_strip(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n" i18n:domain="foo"> <div i18n:choose="two; fname, lname"> <p i18n:singular="">Foo $fname $lname</p> <p i18n:plural="">Foos $fname $lname</p> </div> </html>""") translations = DummyTranslations() translations.add_domain('foo', { ('Foo %(fname)s %(lname)s', 0): 'Voh %(fname)s %(lname)s', ('Foo %(fname)s %(lname)s', 1): 'Vohs %(fname)s %(lname)s', 'Foo %(fname)s %(lname)s': 'Voh %(fname)s %(lname)s', 'Foos %(fname)s %(lname)s': 'Vohs %(fname)s %(lname)s', }) translator = Translator(translations) translator.setup(tmpl) self.assertEqual("""<html> <div> <p>Vohs John Doe</p> </div> </html>""", tmpl.generate(two=2, 
fname='John', lname='Doe').render()) def test_translate_i18n_choose_and_singular_with_py_strip(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <div i18n:choose="two; fname, lname"> <p i18n:singular="" py:strip="">Foo $fname $lname</p> <p i18n:plural="">Foos $fname $lname</p> </div> <div i18n:choose="one; fname, lname"> <p i18n:singular="" py:strip="">Foo $fname $lname</p> <p i18n:plural="">Foos $fname $lname</p> </div> </html>""") translations = DummyTranslations({ ('Foo %(fname)s %(lname)s', 0): 'Voh %(fname)s %(lname)s', ('Foo %(fname)s %(lname)s', 1): 'Vohs %(fname)s %(lname)s', 'Foo %(fname)s %(lname)s': 'Voh %(fname)s %(lname)s', 'Foos %(fname)s %(lname)s': 'Vohs %(fname)s %(lname)s', }) translator = Translator(translations) translator.setup(tmpl) self.assertEqual("""<html> <div> <p>Vohs John Doe</p> </div> <div> Voh John Doe </div> </html>""", tmpl.generate( one=1, two=2, fname='John',lname='Doe').render()) def test_translate_i18n_choose_and_plural_with_py_strip(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <div i18n:choose="two; fname, lname"> <p i18n:singular="" py:strip="">Foo $fname $lname</p> <p i18n:plural="">Foos $fname $lname</p> </div> </html>""") translations = DummyTranslations({ ('Foo %(fname)s %(lname)s', 0): 'Voh %(fname)s %(lname)s', ('Foo %(fname)s %(lname)s', 1): 'Vohs %(fname)s %(lname)s', 'Foo %(fname)s %(lname)s': 'Voh %(fname)s %(lname)s', 'Foos %(fname)s %(lname)s': 'Vohs %(fname)s %(lname)s', }) translator = Translator(translations) translator.setup(tmpl) self.assertEqual("""<html> <div> Voh John Doe </div> </html>""", tmpl.generate(two=1, fname='John', lname='Doe').render()) def test_extract_i18n_choose_as_attribute_and_py_strip(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <div i18n:choose="one" py:strip=""> 
<p i18n:singular="" py:strip="">FooBar</p> <p i18n:plural="" py:strip="">FooBars</p> </div> </html>""") translator = Translator() tmpl.add_directives(Translator.NAMESPACE, translator) messages = list(translator.extract(tmpl.stream)) self.assertEqual(1, len(messages)) self.assertEqual((3, 'ngettext', ('FooBar', 'FooBars'), []), messages[0]) class DomainDirectiveTestCase(unittest.TestCase): def test_translate_i18n_domain_with_msg_directives(self): #"""translate with i18n:domain and nested i18n:msg directives """ tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <div i18n:domain="foo"> <p i18n:msg="">FooBar</p> <p i18n:msg="">Bar</p> </div> </html>""") translations = DummyTranslations({'Bar': 'Voh'}) translations.add_domain('foo', {'FooBar': 'BarFoo', 'Bar': 'PT_Foo'}) translator = Translator(translations) translator.setup(tmpl) self.assertEqual("""<html> <div> <p>BarFoo</p> <p>PT_Foo</p> </div> </html>""", tmpl.generate().render()) def test_translate_i18n_domain_with_inline_directives(self): #"""translate with inlined i18n:domain and i18n:msg directives""" tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <p i18n:msg="" i18n:domain="foo">FooBar</p> </html>""") translations = DummyTranslations({'Bar': 'Voh'}) translations.add_domain('foo', {'FooBar': 'BarFoo'}) translator = Translator(translations) translator.setup(tmpl) self.assertEqual("""<html> <p>BarFoo</p> </html>""", tmpl.generate().render()) def test_translate_i18n_domain_without_msg_directives(self): #"""translate domain call without i18n:msg directives still uses current domain""" tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <p i18n:msg="">Bar</p> <div i18n:domain="foo"> <p i18n:msg="">FooBar</p> <p i18n:msg="">Bar</p> <p>Bar</p> </div> <p>Bar</p> </html>""") translations = DummyTranslations({'Bar': 'Voh'}) 
translations.add_domain('foo', {'FooBar': 'BarFoo', 'Bar': 'PT_Foo'}) translator = Translator(translations) translator.setup(tmpl) self.assertEqual("""<html> <p>Voh</p> <div> <p>BarFoo</p> <p>PT_Foo</p> <p>PT_Foo</p> </div> <p>Voh</p> </html>""", tmpl.generate().render()) def test_translate_i18n_domain_as_directive_not_attribute(self): #"""translate with domain as directive""" tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <i18n:domain name="foo"> <p i18n:msg="">FooBar</p> <p i18n:msg="">Bar</p> <p>Bar</p> </i18n:domain> <p>Bar</p> </html>""") translations = DummyTranslations({'Bar': 'Voh'}) translations.add_domain('foo', {'FooBar': 'BarFoo', 'Bar': 'PT_Foo'}) translator = Translator(translations) translator.setup(tmpl) self.assertEqual("""<html> <p>BarFoo</p> <p>PT_Foo</p> <p>PT_Foo</p> <p>Voh</p> </html>""", tmpl.generate().render()) def test_translate_i18n_domain_nested_directives(self): #"""translate with nested i18n:domain directives""" tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <p i18n:msg="">Bar</p> <div i18n:domain="foo"> <p i18n:msg="">FooBar</p> <p i18n:domain="bar" i18n:msg="">Bar</p> <p>Bar</p> </div> <p>Bar</p> </html>""") translations = DummyTranslations({'Bar': 'Voh'}) translations.add_domain('foo', {'FooBar': 'BarFoo', 'Bar': 'foo_Bar'}) translations.add_domain('bar', {'Bar': 'bar_Bar'}) translator = Translator(translations) translator.setup(tmpl) self.assertEqual("""<html> <p>Voh</p> <div> <p>BarFoo</p> <p>bar_Bar</p> <p>foo_Bar</p> </div> <p>Voh</p> </html>""", tmpl.generate().render()) def test_translate_i18n_domain_with_empty_nested_domain_directive(self): #"""translate with empty nested i18n:domain directive does not use dngettext""" tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n"> <p i18n:msg="">Bar</p> <div i18n:domain="foo"> <p 
i18n:msg="">FooBar</p> <p i18n:domain="" i18n:msg="">Bar</p> <p>Bar</p> </div> <p>Bar</p> </html>""") translations = DummyTranslations({'Bar': 'Voh'}) translations.add_domain('foo', {'FooBar': 'BarFoo', 'Bar': 'foo_Bar'}) translations.add_domain('bar', {'Bar': 'bar_Bar'}) translator = Translator(translations) translator.setup(tmpl) self.assertEqual("""<html> <p>Voh</p> <div> <p>BarFoo</p> <p>Voh</p> <p>foo_Bar</p> </div> <p>Voh</p> </html>""", tmpl.generate().render()) def test_translate_i18n_domain_with_inline_directive_on_START_NS(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n" i18n:domain="foo"> <p i18n:msg="">FooBar</p> </html>""") translations = DummyTranslations({'Bar': 'Voh'}) translations.add_domain('foo', {'FooBar': 'BarFoo'}) translator = Translator(translations) translator.setup(tmpl) self.assertEqual("""<html> <p>BarFoo</p> </html>""", tmpl.generate().render()) def test_translate_i18n_domain_with_inline_directive_on_START_NS_with_py_strip(self): tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n" i18n:domain="foo" py:strip=""> <p i18n:msg="">FooBar</p> </html>""") translations = DummyTranslations({'Bar': 'Voh'}) translations.add_domain('foo', {'FooBar': 'BarFoo'}) translator = Translator(translations) translator.setup(tmpl) self.assertEqual(""" <p>BarFoo</p> """, tmpl.generate().render()) def test_translate_i18n_domain_with_nested_includes(self): import os, shutil, tempfile from genshi.template.loader import TemplateLoader dirname = tempfile.mkdtemp(suffix='genshi_test') try: for idx in range(7): file1 = open(os.path.join(dirname, 'tmpl%d.html' % idx), 'w') try: file1.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude" xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n" py:strip=""> <div>Included tmpl$idx</div> <p i18n:msg="idx">Bar $idx</p> <p i18n:domain="bar">Bar</p> <p 
i18n:msg="idx" i18n:domain="">Bar $idx</p> <p i18n:domain="" i18n:msg="idx">Bar $idx</p> <py:if test="idx &lt; 6"> <xi:include href="tmpl${idx}.html" py:with="idx = idx+1"/> </py:if> </html>""") finally: file1.close() file2 = open(os.path.join(dirname, 'tmpl10.html'), 'w') try: file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude" xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n" i18n:domain="foo"> <xi:include href="tmpl${idx}.html" py:with="idx = idx+1"/> </html>""") finally: file2.close() def callback(template): translations = DummyTranslations({'Bar %(idx)s': 'Voh %(idx)s'}) translations.add_domain('foo', {'Bar %(idx)s': 'foo_Bar %(idx)s'}) translations.add_domain('bar', {'Bar': 'bar_Bar'}) translator = Translator(translations) translator.setup(template) loader = TemplateLoader([dirname], callback=callback) tmpl = loader.load('tmpl10.html') self.assertEqual("""<html> <div>Included tmpl0</div> <p>foo_Bar 0</p> <p>bar_Bar</p> <p>Voh 0</p> <p>Voh 0</p> <div>Included tmpl1</div> <p>foo_Bar 1</p> <p>bar_Bar</p> <p>Voh 1</p> <p>Voh 1</p> <div>Included tmpl2</div> <p>foo_Bar 2</p> <p>bar_Bar</p> <p>Voh 2</p> <p>Voh 2</p> <div>Included tmpl3</div> <p>foo_Bar 3</p> <p>bar_Bar</p> <p>Voh 3</p> <p>Voh 3</p> <div>Included tmpl4</div> <p>foo_Bar 4</p> <p>bar_Bar</p> <p>Voh 4</p> <p>Voh 4</p> <div>Included tmpl5</div> <p>foo_Bar 5</p> <p>bar_Bar</p> <p>Voh 5</p> <p>Voh 5</p> <div>Included tmpl6</div> <p>foo_Bar 6</p> <p>bar_Bar</p> <p>Voh 6</p> <p>Voh 6</p> </html>""", tmpl.generate(idx=-1).render()) finally: shutil.rmtree(dirname) def test_translate_i18n_domain_with_nested_includes_with_translatable_attrs(self): import os, shutil, tempfile from genshi.template.loader import TemplateLoader dirname = tempfile.mkdtemp(suffix='genshi_test') try: for idx in range(4): file1 = open(os.path.join(dirname, 'tmpl%d.html' % idx), 'w') try: file1.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude" xmlns:py="http://genshi.edgewall.org/" 
xmlns:i18n="http://genshi.edgewall.org/i18n" py:strip=""> <div>Included tmpl$idx</div> <p title="${dg('foo', 'Bar %(idx)s') % dict(idx=idx)}" i18n:msg="idx">Bar $idx</p> <p title="Bar" i18n:domain="bar">Bar</p> <p title="Bar" i18n:msg="idx" i18n:domain="">Bar $idx</p> <p i18n:msg="idx" i18n:domain="" title="Bar">Bar $idx</p> <p i18n:domain="" i18n:msg="idx" title="Bar">Bar $idx</p> <py:if test="idx &lt; 3"> <xi:include href="tmpl${idx}.html" py:with="idx = idx+1"/> </py:if> </html>""") finally: file1.close() file2 = open(os.path.join(dirname, 'tmpl10.html'), 'w') try: file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude" xmlns:py="http://genshi.edgewall.org/" xmlns:i18n="http://genshi.edgewall.org/i18n" i18n:domain="foo"> <xi:include href="tmpl${idx}.html" py:with="idx = idx+1"/> </html>""") finally: file2.close() translations = DummyTranslations({'Bar %(idx)s': 'Voh %(idx)s', 'Bar': 'Voh'}) translations.add_domain('foo', {'Bar %(idx)s': 'foo_Bar %(idx)s'}) translations.add_domain('bar', {'Bar': 'bar_Bar'}) translator = Translator(translations) def callback(template): translator.setup(template) loader = TemplateLoader([dirname], callback=callback) tmpl = loader.load('tmpl10.html') if IS_PYTHON2: dgettext = translations.dugettext else: dgettext = translations.dgettext self.assertEqual("""<html> <div>Included tmpl0</div> <p title="foo_Bar 0">foo_Bar 0</p> <p title="bar_Bar">bar_Bar</p> <p title="Voh">Voh 0</p> <p title="Voh">Voh 0</p> <p title="Voh">Voh 0</p> <div>Included tmpl1</div> <p title="foo_Bar 1">foo_Bar 1</p> <p title="bar_Bar">bar_Bar</p> <p title="Voh">Voh 1</p> <p title="Voh">Voh 1</p> <p title="Voh">Voh 1</p> <div>Included tmpl2</div> <p title="foo_Bar 2">foo_Bar 2</p> <p title="bar_Bar">bar_Bar</p> <p title="Voh">Voh 2</p> <p title="Voh">Voh 2</p> <p title="Voh">Voh 2</p> <div>Included tmpl3</div> <p title="foo_Bar 3">foo_Bar 3</p> <p title="bar_Bar">bar_Bar</p> <p title="Voh">Voh 3</p> <p title="Voh">Voh 3</p> <p title="Voh">Voh 3</p> 
</html>""", tmpl.generate(idx=-1, dg=dgettext).render()) finally: shutil.rmtree(dirname) class ExtractTestCase(unittest.TestCase): def test_markup_template_extraction(self): buf = StringIO("""<html xmlns:py="http://genshi.edgewall.org/"> <head> <title>Example</title> </head> <body> <h1>Example</h1> <p>${_("Hello, %(name)s") % dict(name=username)}</p> <p>${ngettext("You have %d item", "You have %d items", num)}</p> </body> </html>""") results = list(extract(buf, ['_', 'ngettext'], [], {})) self.assertEqual([ (3, None, 'Example', []), (6, None, 'Example', []), (7, '_', 'Hello, %(name)s', []), (8, 'ngettext', ('You have %d item', 'You have %d items', None), []), ], results) def test_extraction_without_text(self): buf = StringIO("""<html xmlns:py="http://genshi.edgewall.org/"> <p title="Bar">Foo</p> ${ngettext("Singular", "Plural", num)} </html>""") results = list(extract(buf, ['_', 'ngettext'], [], { 'extract_text': 'no' })) self.assertEqual([ (3, 'ngettext', ('Singular', 'Plural', None), []), ], results) def test_text_template_extraction(self): buf = StringIO("""${_("Dear %(name)s") % {'name': name}}, ${ngettext("Your item:", "Your items", len(items))} #for item in items * $item #end All the best, Foobar""") results = list(extract(buf, ['_', 'ngettext'], [], { 'template_class': 'genshi.template:TextTemplate' })) self.assertEqual([ (1, '_', 'Dear %(name)s', []), (3, 'ngettext', ('Your item:', 'Your items', None), []), (7, None, 'All the best,\n Foobar', []) ], results) def test_extraction_with_keyword_arg(self): buf = StringIO("""<html xmlns:py="http://genshi.edgewall.org/"> ${gettext('Foobar', foo='bar')} </html>""") results = list(extract(buf, ['gettext'], [], {})) self.assertEqual([ (2, 'gettext', ('Foobar'), []), ], results) def test_extraction_with_nonstring_arg(self): buf = StringIO("""<html xmlns:py="http://genshi.edgewall.org/"> ${dgettext(curdomain, 'Foobar')} </html>""") results = list(extract(buf, ['dgettext'], [], {})) self.assertEqual([ (2, 'dgettext', 
(None, 'Foobar'), []), ], results) def test_extraction_inside_ignored_tags(self): buf = StringIO("""<html xmlns:py="http://genshi.edgewall.org/"> <script type="text/javascript"> $('#llist').tabs({ remote: true, spinner: "${_('Please wait...')}" }); </script> </html>""") results = list(extract(buf, ['_'], [], {})) self.assertEqual([ (5, '_', 'Please wait...', []), ], results) def test_extraction_inside_ignored_tags_with_directives(self): buf = StringIO("""<html xmlns:py="http://genshi.edgewall.org/"> <script type="text/javascript"> <py:if test="foobar"> alert("This shouldn't be extracted"); </py:if> </script> </html>""") self.assertEqual([], list(extract(buf, ['_'], [], {}))) def test_extract_py_def_directive_with_py_strip(self): # Failed extraction from Trac tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" py:strip=""> <py:def function="diff_options_fields(diff)"> <label for="style">View differences</label> <select id="style" name="style"> <option selected="${diff.style == 'inline' or None}" value="inline">inline</option> <option selected="${diff.style == 'sidebyside' or None}" value="sidebyside">side by side</option> </select> <div class="field"> Show <input type="text" name="contextlines" id="contextlines" size="2" maxlength="3" value="${diff.options.contextlines &lt; 0 and 'all' or diff.options.contextlines}" /> <label for="contextlines">lines around each change</label> </div> <fieldset id="ignore" py:with="options = diff.options"> <legend>Ignore:</legend> <div class="field"> <input type="checkbox" id="ignoreblanklines" name="ignoreblanklines" checked="${options.ignoreblanklines or None}" /> <label for="ignoreblanklines">Blank lines</label> </div> <div class="field"> <input type="checkbox" id="ignorecase" name="ignorecase" checked="${options.ignorecase or None}" /> <label for="ignorecase">Case changes</label> </div> <div class="field"> <input type="checkbox" id="ignorewhitespace" name="ignorewhitespace" checked="${options.ignorewhitespace or 
None}" /> <label for="ignorewhitespace">White space changes</label> </div> </fieldset> <div class="buttons"> <input type="submit" name="update" value="${_('Update')}" /> </div> </py:def></html>""") translator = Translator() tmpl.add_directives(Translator.NAMESPACE, translator) messages = list(translator.extract(tmpl.stream)) self.assertEqual(10, len(messages)) self.assertEqual([ (3, None, 'View differences', []), (6, None, 'inline', []), (8, None, 'side by side', []), (10, None, 'Show', []), (13, None, 'lines around each change', []), (16, None, 'Ignore:', []), (20, None, 'Blank lines', []), (25, None, 'Case changes',[]), (30, None, 'White space changes', []), (34, '_', 'Update', [])], messages) def suite(): suite = unittest.TestSuite() suite.addTest(doctest.DocTestSuite(Translator.__module__)) suite.addTest(unittest.makeSuite(TranslatorTestCase, 'test')) suite.addTest(unittest.makeSuite(MsgDirectiveTestCase, 'test')) suite.addTest(unittest.makeSuite(ChooseDirectiveTestCase, 'test')) suite.addTest(unittest.makeSuite(DomainDirectiveTestCase, 'test')) suite.addTest(unittest.makeSuite(ExtractTestCase, 'test')) return suite if __name__ == '__main__': unittest.main(defaultTest='suite')
bsd-3-clause
mohamed--abdel-maksoud/chromium.src
chrome/browser/chromeos/login/version_info_updater.cc
5419
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/browser/chromeos/login/version_info_updater.h" #include <vector> #include "base/bind.h" #include "base/bind_helpers.h" #include "base/strings/string_util.h" #include "base/strings/stringprintf.h" #include "base/strings/utf_string_conversions.h" #include "base/sys_info.h" #include "base/task_runner_util.h" #include "chrome/browser/browser_process.h" #include "chrome/browser/chromeos/policy/browser_policy_connector_chromeos.h" #include "chrome/browser/chromeos/policy/device_cloud_policy_manager_chromeos.h" #include "chrome/browser/chromeos/settings/cros_settings.h" #include "chrome/common/chrome_version_info.h" #include "chrome/grit/chromium_strings.h" #include "chrome/grit/generated_resources.h" #include "chromeos/settings/cros_settings_names.h" #include "content/public/browser/browser_thread.h" #include "ui/base/l10n/l10n_util.h" namespace chromeos { namespace { const char* const kReportingFlags[] = { chromeos::kReportDeviceVersionInfo, chromeos::kReportDeviceActivityTimes, chromeos::kReportDeviceBootMode, chromeos::kReportDeviceLocation, }; // Strings used to generate the serial number part of the version string. 
const char kSerialNumberPrefix[] = "SN:"; } // namespace /////////////////////////////////////////////////////////////////////////////// // VersionInfoUpdater public: VersionInfoUpdater::VersionInfoUpdater(Delegate* delegate) : cros_settings_(chromeos::CrosSettings::Get()), delegate_(delegate), weak_pointer_factory_(this) { } VersionInfoUpdater::~VersionInfoUpdater() { policy::BrowserPolicyConnectorChromeOS* connector = g_browser_process->platform_part()->browser_policy_connector_chromeos(); policy::DeviceCloudPolicyManagerChromeOS* policy_manager = connector->GetDeviceCloudPolicyManager(); if (policy_manager) policy_manager->core()->store()->RemoveObserver(this); } void VersionInfoUpdater::StartUpdate(bool is_official_build) { if (base::SysInfo::IsRunningOnChromeOS()) { base::PostTaskAndReplyWithResult( content::BrowserThread::GetBlockingPool(), FROM_HERE, base::Bind(&version_loader::GetVersion, is_official_build ? version_loader::VERSION_SHORT_WITH_DATE : version_loader::VERSION_FULL), base::Bind(&VersionInfoUpdater::OnVersion, weak_pointer_factory_.GetWeakPtr())); } else { UpdateVersionLabel(); } policy::BrowserPolicyConnectorChromeOS* connector = g_browser_process->platform_part()->browser_policy_connector_chromeos(); policy::DeviceCloudPolicyManagerChromeOS* policy_manager = connector->GetDeviceCloudPolicyManager(); if (policy_manager) { policy_manager->core()->store()->AddObserver(this); // Ensure that we have up-to-date enterprise info in case enterprise policy // is already fetched and has finished initialization. UpdateEnterpriseInfo(); } // Watch for changes to the reporting flags. 
base::Closure callback = base::Bind(&VersionInfoUpdater::UpdateEnterpriseInfo, base::Unretained(this)); for (unsigned int i = 0; i < arraysize(kReportingFlags); ++i) { subscriptions_.push_back( cros_settings_->AddSettingsObserver(kReportingFlags[i], callback).release()); } } void VersionInfoUpdater::UpdateVersionLabel() { if (version_text_.empty()) return; UpdateSerialNumberInfo(); chrome::VersionInfo version_info; std::string label_text = l10n_util::GetStringFUTF8( IDS_LOGIN_VERSION_LABEL_FORMAT, l10n_util::GetStringUTF16(IDS_PRODUCT_NAME), base::UTF8ToUTF16(version_info.Version()), base::UTF8ToUTF16(version_text_), base::UTF8ToUTF16(serial_number_text_)); // Workaround over incorrect width calculation in old fonts. // TODO(glotov): remove the following line when new fonts are used. label_text += ' '; if (delegate_) delegate_->OnOSVersionLabelTextUpdated(label_text); } void VersionInfoUpdater::UpdateEnterpriseInfo() { policy::BrowserPolicyConnectorChromeOS* connector = g_browser_process->platform_part()->browser_policy_connector_chromeos(); SetEnterpriseInfo(connector->GetEnterpriseDomain()); } void VersionInfoUpdater::SetEnterpriseInfo(const std::string& domain_name) { // Update the notification about device status reporting. 
if (delegate_ && !domain_name.empty()) { std::string enterprise_info; enterprise_info = l10n_util::GetStringFUTF8( IDS_DEVICE_OWNED_BY_NOTICE, base::UTF8ToUTF16(domain_name)); delegate_->OnEnterpriseInfoUpdated(enterprise_info); } } void VersionInfoUpdater::UpdateSerialNumberInfo() { std::string sn = policy::DeviceCloudPolicyManagerChromeOS::GetMachineID(); if (!sn.empty()) { serial_number_text_ = kSerialNumberPrefix; serial_number_text_.append(sn); } } void VersionInfoUpdater::OnVersion(const std::string& version) { version_text_ = version; UpdateVersionLabel(); } void VersionInfoUpdater::OnStoreLoaded(policy::CloudPolicyStore* store) { UpdateEnterpriseInfo(); } void VersionInfoUpdater::OnStoreError(policy::CloudPolicyStore* store) { UpdateEnterpriseInfo(); } } // namespace chromeos
bsd-3-clause
annulen/premake-dev-rgeary
src/host/luajit-2.0/src/lj_jit.h
15241
/* ** Common definitions for the JIT compiler. ** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h */ #ifndef _LJ_JIT_H #define _LJ_JIT_H #include "lj_obj.h" #include "lj_ir.h" /* JIT engine flags. */ #define JIT_F_ON 0x00000001 /* CPU-specific JIT engine flags. */ #if LJ_TARGET_X86ORX64 #define JIT_F_CMOV 0x00000010 #define JIT_F_SSE2 0x00000020 #define JIT_F_SSE3 0x00000040 #define JIT_F_SSE4_1 0x00000080 #define JIT_F_P4 0x00000100 #define JIT_F_PREFER_IMUL 0x00000200 #define JIT_F_SPLIT_XMM 0x00000400 #define JIT_F_LEA_AGU 0x00000800 /* Names for the CPU-specific flags. Must match the order above. */ #define JIT_F_CPU_FIRST JIT_F_CMOV #define JIT_F_CPUSTRING "\4CMOV\4SSE2\4SSE3\6SSE4.1\2P4\3AMD\2K8\4ATOM" #elif LJ_TARGET_ARM #define JIT_F_ARMV6_ 0x00000010 #define JIT_F_ARMV6T2_ 0x00000020 #define JIT_F_ARMV7 0x00000040 #define JIT_F_VFPV2 0x00000080 #define JIT_F_VFPV3 0x00000100 #define JIT_F_ARMV6 (JIT_F_ARMV6_|JIT_F_ARMV6T2_|JIT_F_ARMV7) #define JIT_F_ARMV6T2 (JIT_F_ARMV6T2_|JIT_F_ARMV7) #define JIT_F_VFP (JIT_F_VFPV2|JIT_F_VFPV3) /* Names for the CPU-specific flags. Must match the order above. */ #define JIT_F_CPU_FIRST JIT_F_ARMV6_ #define JIT_F_CPUSTRING "\5ARMv6\7ARMv6T2\5ARMv7\5VFPv2\5VFPv3" #elif LJ_TARGET_PPC #define JIT_F_SQRT 0x00000010 #define JIT_F_ROUND 0x00000020 /* Names for the CPU-specific flags. Must match the order above. */ #define JIT_F_CPU_FIRST JIT_F_SQRT #define JIT_F_CPUSTRING "\4SQRT\5ROUND" #elif LJ_TARGET_MIPS #define JIT_F_MIPS32R2 0x00000010 /* Names for the CPU-specific flags. Must match the order above. */ #define JIT_F_CPU_FIRST JIT_F_MIPS32R2 #define JIT_F_CPUSTRING "\010MIPS32R2" #else #define JIT_F_CPU_FIRST 0 #define JIT_F_CPUSTRING "" #endif /* Optimization flags. 
*/ #define JIT_F_OPT_MASK 0x0fff0000 #define JIT_F_OPT_FOLD 0x00010000 #define JIT_F_OPT_CSE 0x00020000 #define JIT_F_OPT_DCE 0x00040000 #define JIT_F_OPT_FWD 0x00080000 #define JIT_F_OPT_DSE 0x00100000 #define JIT_F_OPT_NARROW 0x00200000 #define JIT_F_OPT_LOOP 0x00400000 #define JIT_F_OPT_ABC 0x00800000 #define JIT_F_OPT_SINK 0x01000000 #define JIT_F_OPT_FUSE 0x02000000 /* Optimizations names for -O. Must match the order above. */ #define JIT_F_OPT_FIRST JIT_F_OPT_FOLD #define JIT_F_OPTSTRING \ "\4fold\3cse\3dce\3fwd\3dse\6narrow\4loop\3abc\4sink\4fuse" /* Optimization levels set a fixed combination of flags. */ #define JIT_F_OPT_0 0 #define JIT_F_OPT_1 (JIT_F_OPT_FOLD|JIT_F_OPT_CSE|JIT_F_OPT_DCE) #define JIT_F_OPT_2 (JIT_F_OPT_1|JIT_F_OPT_NARROW|JIT_F_OPT_LOOP) #define JIT_F_OPT_3 (JIT_F_OPT_2|\ JIT_F_OPT_FWD|JIT_F_OPT_DSE|JIT_F_OPT_ABC|JIT_F_OPT_SINK|JIT_F_OPT_FUSE) #define JIT_F_OPT_DEFAULT JIT_F_OPT_3 #if LJ_TARGET_WINDOWS || LJ_64 /* See: http://blogs.msdn.com/oldnewthing/archive/2003/10/08/55239.aspx */ #define JIT_P_sizemcode_DEFAULT 64 #else /* Could go as low as 4K, but the mmap() overhead would be rather high. */ #define JIT_P_sizemcode_DEFAULT 32 #endif /* Optimization parameters and their defaults. Length is a char in octal! */ #define JIT_PARAMDEF(_) \ _(\010, maxtrace, 1000) /* Max. # of traces in cache. */ \ _(\011, maxrecord, 4000) /* Max. # of recorded IR instructions. */ \ _(\012, maxirconst, 500) /* Max. # of IR constants of a trace. */ \ _(\007, maxside, 100) /* Max. # of side traces of a root trace. */ \ _(\007, maxsnap, 500) /* Max. # of snapshots for a trace. */ \ \ _(\007, hotloop, 56) /* # of iter. to detect a hot loop/call. */ \ _(\007, hotexit, 10) /* # of taken exits to start a side trace. */ \ _(\007, tryside, 4) /* # of attempts to compile a side trace. */ \ \ _(\012, instunroll, 4) /* Max. unroll for instable loops. */ \ _(\012, loopunroll, 15) /* Max. unroll for loop ops in side traces. */ \ _(\012, callunroll, 3) /* Max. 
unroll for recursive calls. */ \ _(\011, recunroll, 2) /* Min. unroll for true recursion. */ \ \ /* Size of each machine code area (in KBytes). */ \ _(\011, sizemcode, JIT_P_sizemcode_DEFAULT) \ /* Max. total size of all machine code areas (in KBytes). */ \ _(\010, maxmcode, 512) \ /* End of list. */ enum { #define JIT_PARAMENUM(len, name, value) JIT_P_##name, JIT_PARAMDEF(JIT_PARAMENUM) #undef JIT_PARAMENUM JIT_P__MAX }; #define JIT_PARAMSTR(len, name, value) #len #name #define JIT_P_STRING JIT_PARAMDEF(JIT_PARAMSTR) /* Trace compiler state. */ typedef enum { LJ_TRACE_IDLE, /* Trace compiler idle. */ LJ_TRACE_ACTIVE = 0x10, LJ_TRACE_RECORD, /* Bytecode recording active. */ LJ_TRACE_START, /* New trace started. */ LJ_TRACE_END, /* End of trace. */ LJ_TRACE_ASM, /* Assemble trace. */ LJ_TRACE_ERR /* Trace aborted with error. */ } TraceState; /* Post-processing action. */ typedef enum { LJ_POST_NONE, /* No action. */ LJ_POST_FIXCOMP, /* Fixup comparison and emit pending guard. */ LJ_POST_FIXGUARD, /* Fixup and emit pending guard. */ LJ_POST_FIXGUARDSNAP, /* Fixup and emit pending guard and snapshot. */ LJ_POST_FIXBOOL, /* Fixup boolean result. */ LJ_POST_FIXCONST, /* Fixup constant results. */ LJ_POST_FFRETRY /* Suppress recording of retried fast functions. */ } PostProc; /* Machine code type. */ #if LJ_TARGET_X86ORX64 typedef uint8_t MCode; #else typedef uint32_t MCode; #endif /* Stack snapshot header. */ typedef struct SnapShot { uint16_t mapofs; /* Offset into snapshot map. */ IRRef1 ref; /* First IR ref for this snapshot. */ uint8_t nslots; /* Number of valid slots. */ uint8_t topslot; /* Maximum frame extent. */ uint8_t nent; /* Number of compressed entries. */ uint8_t count; /* Count of taken exits for this snapshot. */ } SnapShot; #define SNAPCOUNT_DONE 255 /* Already compiled and linked a side trace. */ /* Compressed snapshot entry. */ typedef uint32_t SnapEntry; #define SNAP_FRAME 0x010000 /* Frame slot. */ #define SNAP_CONT 0x020000 /* Continuation slot. 
*/ #define SNAP_NORESTORE 0x040000 /* No need to restore slot. */ #define SNAP_SOFTFPNUM 0x080000 /* Soft-float number. */ LJ_STATIC_ASSERT(SNAP_FRAME == TREF_FRAME); LJ_STATIC_ASSERT(SNAP_CONT == TREF_CONT); #define SNAP(slot, flags, ref) (((SnapEntry)(slot) << 24) + (flags) + (ref)) #define SNAP_TR(slot, tr) \ (((SnapEntry)(slot) << 24) + ((tr) & (TREF_CONT|TREF_FRAME|TREF_REFMASK))) #define SNAP_MKPC(pc) ((SnapEntry)u32ptr(pc)) #define SNAP_MKFTSZ(ftsz) ((SnapEntry)(ftsz)) #define snap_ref(sn) ((sn) & 0xffff) #define snap_slot(sn) ((BCReg)((sn) >> 24)) #define snap_isframe(sn) ((sn) & SNAP_FRAME) #define snap_pc(sn) ((const BCIns *)(uintptr_t)(sn)) #define snap_setref(sn, ref) (((sn) & (0xffff0000&~SNAP_NORESTORE)) | (ref)) /* Snapshot and exit numbers. */ typedef uint32_t SnapNo; typedef uint32_t ExitNo; /* Trace number. */ typedef uint32_t TraceNo; /* Used to pass around trace numbers. */ typedef uint16_t TraceNo1; /* Stored trace number. */ /* Type of link. ORDER LJ_TRLINK */ typedef enum { LJ_TRLINK_NONE, /* Incomplete trace. No link, yet. */ LJ_TRLINK_ROOT, /* Link to other root trace. */ LJ_TRLINK_LOOP, /* Loop to same trace. */ LJ_TRLINK_TAILREC, /* Tail-recursion. */ LJ_TRLINK_UPREC, /* Up-recursion. */ LJ_TRLINK_DOWNREC, /* Down-recursion. */ LJ_TRLINK_INTERP, /* Fallback to interpreter. */ LJ_TRLINK_RETURN /* Return to interpreter. */ } TraceLink; /* Trace object. */ typedef struct GCtrace { GCHeader; uint8_t topslot; /* Top stack slot already checked to be allocated. */ uint8_t linktype; /* Type of link. */ IRRef nins; /* Next IR instruction. Biased with REF_BIAS. */ GCRef gclist; IRIns *ir; /* IR instructions/constants. Biased with REF_BIAS. */ IRRef nk; /* Lowest IR constant. Biased with REF_BIAS. */ uint16_t nsnap; /* Number of snapshots. */ uint16_t nsnapmap; /* Number of snapshot map elements. */ SnapShot *snap; /* Snapshot array. */ SnapEntry *snapmap; /* Snapshot map. */ GCRef startpt; /* Starting prototype. 
*/ MRef startpc; /* Bytecode PC of starting instruction. */ BCIns startins; /* Original bytecode of starting instruction. */ MSize szmcode; /* Size of machine code. */ MCode *mcode; /* Start of machine code. */ MSize mcloop; /* Offset of loop start in machine code. */ uint16_t nchild; /* Number of child traces (root trace only). */ uint16_t spadjust; /* Stack pointer adjustment (offset in bytes). */ TraceNo1 traceno; /* Trace number. */ TraceNo1 link; /* Linked trace (or self for loops). */ TraceNo1 root; /* Root trace of side trace (or 0 for root traces). */ TraceNo1 nextroot; /* Next root trace for same prototype. */ TraceNo1 nextside; /* Next side trace of same root trace. */ uint8_t sinktags; /* Trace has SINK tags. */ uint8_t unused1; #ifdef LUAJIT_USE_GDBJIT void *gdbjit_entry; /* GDB JIT entry. */ #endif } GCtrace; #define gco2trace(o) check_exp((o)->gch.gct == ~LJ_TTRACE, (GCtrace *)(o)) #define traceref(J, n) \ check_exp((n)>0 && (MSize)(n)<J->sizetrace, (GCtrace *)gcref(J->trace[(n)])) LJ_STATIC_ASSERT(offsetof(GChead, gclist) == offsetof(GCtrace, gclist)); static LJ_AINLINE MSize snap_nextofs(GCtrace *T, SnapShot *snap) { if (snap+1 == &T->snap[T->nsnap]) return T->nsnapmap; else return (snap+1)->mapofs; } /* Round-robin penalty cache for bytecodes leading to aborted traces. */ typedef struct HotPenalty { MRef pc; /* Starting bytecode PC. */ uint16_t val; /* Penalty value, i.e. hotcount start. */ uint16_t reason; /* Abort reason (really TraceErr). */ } HotPenalty; #define PENALTY_SLOTS 64 /* Penalty cache slot. Must be a power of 2. */ #define PENALTY_MIN (36*2) /* Minimum penalty value. */ #define PENALTY_MAX 60000 /* Maximum penalty value. */ #define PENALTY_RNDBITS 4 /* # of random bits to add to penalty value. */ /* Round-robin backpropagation cache for narrowing conversions. */ typedef struct BPropEntry { IRRef1 key; /* Key: original reference. */ IRRef1 val; /* Value: reference after conversion. 
*/ IRRef mode; /* Mode for this entry (currently IRCONV_*). */ } BPropEntry; /* Number of slots for the backpropagation cache. Must be a power of 2. */ #define BPROP_SLOTS 16 /* Scalar evolution analysis cache. */ typedef struct ScEvEntry { IRRef1 idx; /* Index reference. */ IRRef1 start; /* Constant start reference. */ IRRef1 stop; /* Constant stop reference. */ IRRef1 step; /* Constant step reference. */ IRType1 t; /* Scalar type. */ uint8_t dir; /* Direction. 1: +, 0: -. */ } ScEvEntry; /* 128 bit SIMD constants. */ enum { LJ_KSIMD_ABS, LJ_KSIMD_NEG, LJ_KSIMD__MAX }; /* Get 16 byte aligned pointer to SIMD constant. */ #define LJ_KSIMD(J, n) \ ((TValue *)(((intptr_t)&J->ksimd[2*(n)] + 15) & ~(intptr_t)15)) /* Set/reset flag to activate the SPLIT pass for the current trace. */ #if LJ_SOFTFP || (LJ_32 && LJ_HASFFI) #define lj_needsplit(J) (J->needsplit = 1) #define lj_resetsplit(J) (J->needsplit = 0) #else #define lj_needsplit(J) UNUSED(J) #define lj_resetsplit(J) UNUSED(J) #endif /* Fold state is used to fold instructions on-the-fly. */ typedef struct FoldState { IRIns ins; /* Currently emitted instruction. */ IRIns left; /* Instruction referenced by left operand. */ IRIns right; /* Instruction referenced by right operand. */ } FoldState; /* JIT compiler state. */ typedef struct jit_State { GCtrace cur; /* Current trace. */ lua_State *L; /* Current Lua state. */ const BCIns *pc; /* Current PC. */ GCfunc *fn; /* Current function. */ GCproto *pt; /* Current prototype. */ TRef *base; /* Current frame base, points into J->slots. */ uint32_t flags; /* JIT engine flags. */ BCReg maxslot; /* Relative to baseslot. */ BCReg baseslot; /* Current frame base, offset into J->slots. */ uint8_t mergesnap; /* Allowed to merge with next snapshot. */ uint8_t needsnap; /* Need snapshot before recording next bytecode. */ IRType1 guardemit; /* Accumulated IRT_GUARD for emitted instructions. */ uint8_t bcskip; /* Number of bytecode instructions to skip. 
*/ FoldState fold; /* Fold state. */ const BCIns *bc_min; /* Start of allowed bytecode range for root trace. */ MSize bc_extent; /* Extent of the range. */ TraceState state; /* Trace compiler state. */ int32_t instunroll; /* Unroll counter for instable loops. */ int32_t loopunroll; /* Unroll counter for loop ops in side traces. */ int32_t tailcalled; /* Number of successive tailcalls. */ int32_t framedepth; /* Current frame depth. */ int32_t retdepth; /* Return frame depth (count of RETF). */ MRef k64; /* Pointer to chained array of 64 bit constants. */ TValue ksimd[LJ_KSIMD__MAX*2+1]; /* 16 byte aligned SIMD constants. */ IRIns *irbuf; /* Temp. IR instruction buffer. Biased with REF_BIAS. */ IRRef irtoplim; /* Upper limit of instuction buffer (biased). */ IRRef irbotlim; /* Lower limit of instuction buffer (biased). */ IRRef loopref; /* Last loop reference or ref of final LOOP (or 0). */ MSize sizesnap; /* Size of temp. snapshot buffer. */ SnapShot *snapbuf; /* Temp. snapshot buffer. */ SnapEntry *snapmapbuf; /* Temp. snapshot map buffer. */ MSize sizesnapmap; /* Size of temp. snapshot map buffer. */ PostProc postproc; /* Required post-processing after execution. */ #if LJ_SOFTFP || (LJ_32 && LJ_HASFFI) int needsplit; /* Need SPLIT pass. */ #endif GCRef *trace; /* Array of traces. */ TraceNo freetrace; /* Start of scan for next free trace. */ MSize sizetrace; /* Size of trace array. */ IRRef1 chain[IR__MAX]; /* IR instruction skip-list chain anchors. */ TRef slot[LJ_MAX_JSLOTS+LJ_STACK_EXTRA]; /* Stack slot map. */ int32_t param[JIT_P__MAX]; /* JIT engine parameters. */ MCode *exitstubgroup[LJ_MAX_EXITSTUBGR]; /* Exit stub group addresses. */ HotPenalty penalty[PENALTY_SLOTS]; /* Penalty slots. */ uint32_t penaltyslot; /* Round-robin index into penalty slots. */ uint32_t prngstate; /* PRNG state. */ BPropEntry bpropcache[BPROP_SLOTS]; /* Backpropagation cache slots. */ uint32_t bpropslot; /* Round-robin index into bpropcache slots. 
*/ ScEvEntry scev; /* Scalar evolution analysis cache slots. */ const BCIns *startpc; /* Bytecode PC of starting instruction. */ TraceNo parent; /* Parent of current side trace (0 for root traces). */ ExitNo exitno; /* Exit number in parent of current side trace. */ BCIns *patchpc; /* PC for pending re-patch. */ BCIns patchins; /* Instruction for pending re-patch. */ int mcprot; /* Protection of current mcode area. */ MCode *mcarea; /* Base of current mcode area. */ MCode *mctop; /* Top of current mcode area. */ MCode *mcbot; /* Bottom of current mcode area. */ size_t szmcarea; /* Size of current mcode area. */ size_t szallmcarea; /* Total size of all allocated mcode areas. */ TValue errinfo; /* Additional info element for trace errors. */ } #if LJ_TARGET_ARM LJ_ALIGN(16) /* For DISPATCH-relative addresses in assembler part. */ #endif jit_State; /* Trivial PRNG e.g. used for penalty randomization. */ static LJ_AINLINE uint32_t LJ_PRNG_BITS(jit_State *J, int bits) { /* Yes, this LCG is very weak, but that doesn't matter for our use case. */ J->prngstate = J->prngstate * 1103515245 + 12345; return J->prngstate >> (32-bits); } #endif
bsd-3-clause
Windows-Readiness/WinDevWorkshop
RU/!RU 01. Introduction/01. Lab B. Solution/Exercise 2/SimpleNavigation/Properties/AssemblyInfo.cs
1052
using System.Reflection; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; // General Information about an assembly is controlled through the following // set of attributes. Change these attribute values to modify the information // associated with an assembly. [assembly: AssemblyTitle("SimpleNavigation")] [assembly: AssemblyDescription("")] [assembly: AssemblyConfiguration("")] [assembly: AssemblyCompany("")] [assembly: AssemblyProduct("SimpleNavigation")] [assembly: AssemblyCopyright("Copyright © 2015")] [assembly: AssemblyTrademark("")] [assembly: AssemblyCulture("")] // Version information for an assembly consists of the following four values: // // Major Version // Minor Version // Build Number // Revision // // You can specify all the values or you can default the Build and Revision Numbers // by using the '*' as shown below: // [assembly: AssemblyVersion("1.0.*")] [assembly: AssemblyVersion("1.0.0.0")] [assembly: AssemblyFileVersion("1.0.0.0")] [assembly: ComVisible(false)]
mit
kumakoko/KumaGL
third_lib/boost/1.75.0/boost/gil/extension/io/png/detail/writer_backend.hpp
16790
// // Copyright 2012 Christian Henning // // Distributed under the Boost Software License, Version 1.0 // See accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt // #ifndef BOOST_GIL_EXTENSION_IO_PNG_DETAIL_WRITER_BACKEND_HPP #define BOOST_GIL_EXTENSION_IO_PNG_DETAIL_WRITER_BACKEND_HPP #include <boost/gil/extension/io/png/tags.hpp> #include <boost/gil/extension/io/png/detail/base.hpp> #include <boost/gil/extension/io/png/detail/supported_types.hpp> #include <boost/gil/io/base.hpp> #include <boost/gil/io/typedefs.hpp> namespace boost { namespace gil { #if BOOST_WORKAROUND(BOOST_MSVC, >= 1400) #pragma warning(push) #pragma warning(disable:4512) //assignment operator could not be generated #pragma warning(disable:4611) //interaction between '_setjmp' and C++ object destruction is non-portable #endif /// /// PNG Writer Backend /// template< typename Device > struct writer_backend< Device , png_tag > : public detail::png_struct_info_wrapper { private: using this_t = writer_backend<Device, png_tag>; public: using format_tag_t = png_tag; /// /// Constructor /// writer_backend( const Device& io_dev , const image_write_info< png_tag >& info ) : png_struct_info_wrapper( false ) , _io_dev( io_dev ) , _info( info ) { // Create and initialize the png_struct with the desired error handler // functions. If you want to use the default stderr and longjump method, // you can supply NULL for the last three parameters. We also check that // the library version is compatible with the one used at compile time, // in case we are using dynamically linked libraries. REQUIRED. get()->_struct = png_create_write_struct( PNG_LIBPNG_VER_STRING , nullptr // user_error_ptr , nullptr // user_error_fn , nullptr // user_warning_fn ); io_error_if( get_struct() == nullptr , "png_writer: fail to call png_create_write_struct()" ); // Allocate/initialize the image information data. 
REQUIRED get()->_info = png_create_info_struct( get_struct() ); if( get_info() == nullptr ) { png_destroy_write_struct( &get()->_struct , nullptr ); io_error( "png_writer: fail to call png_create_info_struct()" ); } // Set error handling. REQUIRED if you aren't supplying your own // error handling functions in the png_create_write_struct() call. if( setjmp( png_jmpbuf( get_struct() ))) { //free all of the memory associated with the png_ptr and info_ptr png_destroy_write_struct( &get()->_struct , &get()->_info ); io_error( "png_writer: fail to call setjmp()" ); } init_io( get_struct() ); } protected: template< typename View > void write_header( const View& view ) { using png_rw_info_t = detail::png_write_support < typename channel_type<typename get_pixel_type<View>::type>::type, typename color_space_type<View>::type >; // Set the image information here. Width and height are up to 2^31, // bit_depth is one of 1, 2, 4, 8, or 16, but valid values also depend on // the color_type selected. color_type is one of PNG_COLOR_TYPE_GRAY, // PNG_COLOR_TYPE_GRAY_ALPHA, PNG_COLOR_TYPE_PALETTE, PNG_COLOR_TYPE_RGB, // or PNG_COLOR_TYPE_RGB_ALPHA. interlace is either PNG_INTERLACE_NONE or // PNG_INTERLACE_ADAM7, and the compression_type and filter_type MUST // currently be PNG_COMPRESSION_TYPE_BASE and PNG_FILTER_TYPE_BASE. 
REQUIRED png_set_IHDR( get_struct() , get_info() , static_cast< png_image_width::type >( view.width() ) , static_cast< png_image_height::type >( view.height() ) , static_cast< png_bitdepth::type >( png_rw_info_t::_bit_depth ) , static_cast< png_color_type::type >( png_rw_info_t::_color_type ) , _info._interlace_method , _info._compression_type , _info._filter_method ); #ifdef BOOST_GIL_IO_PNG_FLOATING_POINT_SUPPORTED if( _info._valid_cie_colors ) { png_set_cHRM( get_struct() , get_info() , _info._white_x , _info._white_y , _info._red_x , _info._red_y , _info._green_x , _info._green_y , _info._blue_x , _info._blue_y ); } if( _info._valid_file_gamma ) { png_set_gAMA( get_struct() , get_info() , _info._file_gamma ); } #else if( _info._valid_cie_colors ) { png_set_cHRM_fixed( get_struct() , get_info() , _info._white_x , _info._white_y , _info._red_x , _info._red_y , _info._green_x , _info._green_y , _info._blue_x , _info._blue_y ); } if( _info._valid_file_gamma ) { png_set_gAMA_fixed( get_struct() , get_info() , _info._file_gamma ); } #endif // BOOST_GIL_IO_PNG_FLOATING_POINT_SUPPORTED if( _info._valid_icc_profile ) { #if PNG_LIBPNG_VER_MINOR >= 5 png_set_iCCP( get_struct() , get_info() , const_cast< png_charp >( _info._icc_name.c_str() ) , _info._iccp_compression_type , reinterpret_cast< png_const_bytep >( & (_info._profile.front ()) ) , _info._profile_length ); #else png_set_iCCP( get_struct() , get_info() , const_cast< png_charp >( _info._icc_name.c_str() ) , _info._iccp_compression_type , const_cast< png_charp >( & (_info._profile.front()) ) , _info._profile_length ); #endif } if( _info._valid_intent ) { png_set_sRGB( get_struct() , get_info() , _info._intent ); } if( _info._valid_palette ) { png_set_PLTE( get_struct() , get_info() , const_cast< png_colorp >( &_info._palette.front() ) , _info._num_palette ); } if( _info._valid_background ) { png_set_bKGD( get_struct() , get_info() , const_cast< png_color_16p >( &_info._background ) ); } if( _info._valid_histogram ) 
{ png_set_hIST( get_struct() , get_info() , const_cast< png_uint_16p >( &_info._histogram.front() ) ); } if( _info._valid_offset ) { png_set_oFFs( get_struct() , get_info() , _info._offset_x , _info._offset_y , _info._off_unit_type ); } if( _info._valid_pixel_calibration ) { std::vector< const char* > params( _info._num_params ); for( std::size_t i = 0; i < params.size(); ++i ) { params[i] = _info._params[ i ].c_str(); } png_set_pCAL( get_struct() , get_info() , const_cast< png_charp >( _info._purpose.c_str() ) , _info._X0 , _info._X1 , _info._cal_type , _info._num_params , const_cast< png_charp >( _info._units.c_str() ) , const_cast< png_charpp >( &params.front() ) ); } if( _info._valid_resolution ) { png_set_pHYs( get_struct() , get_info() , _info._res_x , _info._res_y , _info._phy_unit_type ); } if( _info._valid_significant_bits ) { png_set_sBIT( get_struct() , get_info() , const_cast< png_color_8p >( &_info._sig_bits ) ); } #ifndef BOOST_GIL_IO_PNG_1_4_OR_LOWER #ifdef BOOST_GIL_IO_PNG_FLOATING_POINT_SUPPORTED if( _info._valid_scale_factors ) { png_set_sCAL( get_struct() , get_info() , this->_info._scale_unit , this->_info._scale_width , this->_info._scale_height ); } #else #ifdef BOOST_GIL_IO_PNG_FIXED_POINT_SUPPORTED if( _info._valid_scale_factors ) { png_set_sCAL_fixed( get_struct() , get_info() , this->_info._scale_unit , this->_info._scale_width , this->_info._scale_height ); } #else if( _info._valid_scale_factors ) { png_set_sCAL_s( get_struct() , get_info() , this->_info._scale_unit , const_cast< png_charp >( this->_info._scale_width.c_str() ) , const_cast< png_charp >( this->_info._scale_height.c_str() ) ); } #endif // BOOST_GIL_IO_PNG_FIXED_POINT_SUPPORTED #endif // BOOST_GIL_IO_PNG_FLOATING_POINT_SUPPORTED #endif // BOOST_GIL_IO_PNG_1_4_OR_LOWER if( _info._valid_text ) { std::vector< png_text > texts( _info._num_text ); for( std::size_t i = 0; i < texts.size(); ++i ) { png_text pt; pt.compression = _info._text[i]._compression; pt.key = const_cast< 
png_charp >( this->_info._text[i]._key.c_str() ); pt.text = const_cast< png_charp >( this->_info._text[i]._text.c_str() ); pt.text_length = _info._text[i]._text.length(); texts[i] = pt; } png_set_text( get_struct() , get_info() , &texts.front() , _info._num_text ); } if( _info._valid_modification_time ) { png_set_tIME( get_struct() , get_info() , const_cast< png_timep >( &_info._mod_time ) ); } if( _info._valid_transparency_factors ) { int sample_max = ( 1u << _info._bit_depth ); /* libpng doesn't reject a tRNS chunk with out-of-range samples */ if( !( ( _info._color_type == PNG_COLOR_TYPE_GRAY && (int) _info._trans_values[0].gray > sample_max ) || ( _info._color_type == PNG_COLOR_TYPE_RGB &&( (int) _info._trans_values[0].red > sample_max || (int) _info._trans_values[0].green > sample_max || (int) _info._trans_values[0].blue > sample_max ) ) ) ) { //@todo Fix that once reading transparency values works /* png_set_tRNS( get_struct() , get_info() , trans , num_trans , trans_values ); */ } } // Compression Levels - valid values are [0,9] png_set_compression_level( get_struct() , _info._compression_level ); png_set_compression_mem_level( get_struct() , _info._compression_mem_level ); png_set_compression_strategy( get_struct() , _info._compression_strategy ); png_set_compression_window_bits( get_struct() , _info._compression_window_bits ); png_set_compression_method( get_struct() , _info._compression_method ); png_set_compression_buffer_size( get_struct() , _info._compression_buffer_size ); #ifdef BOOST_GIL_IO_PNG_DITHERING_SUPPORTED // Dithering if( _info._set_dithering ) { png_set_dither( get_struct() , &_info._dithering_palette.front() , _info._dithering_num_palette , _info._dithering_maximum_colors , &_info._dithering_histogram.front() , _info._full_dither ); } #endif // BOOST_GIL_IO_PNG_DITHERING_SUPPORTED // Filter if( _info._set_filter ) { png_set_filter( get_struct() , 0 , _info._filter ); } // Invert Mono if( _info._invert_mono ) { png_set_invert_mono( 
get_struct() ); } // True Bits if( _info._set_true_bits ) { png_set_sBIT( get_struct() , get_info() , &_info._true_bits.front() ); } // sRGB Intent if( _info._set_srgb_intent ) { png_set_sRGB( get_struct() , get_info() , _info._srgb_intent ); } // Strip Alpha if( _info._strip_alpha ) { png_set_strip_alpha( get_struct() ); } // Swap Alpha if( _info._swap_alpha ) { png_set_swap_alpha( get_struct() ); } png_write_info( get_struct() , get_info() ); } protected: static void write_data( png_structp png_ptr , png_bytep data , png_size_t length ) { static_cast< Device* >( png_get_io_ptr( png_ptr ))->write( data , length ); } static void flush( png_structp png_ptr ) { static_cast< Device* >(png_get_io_ptr(png_ptr) )->flush(); } private: void init_io( png_structp png_ptr ) { png_set_write_fn( png_ptr , static_cast< void* > ( &this->_io_dev ) , static_cast< png_rw_ptr > ( &this_t::write_data ) , static_cast< png_flush_ptr >( &this_t::flush ) ); } public: Device _io_dev; image_write_info< png_tag > _info; }; #if BOOST_WORKAROUND(BOOST_MSVC, >= 1400) #pragma warning(pop) #endif } // namespace gil } // namespace boost #endif
mit
kjg/mail
spec/mail/attachments_list_spec.rb
13083
# encoding: utf-8 require 'spec_helper' def encode_base64(str) Mail::Encodings::Base64.encode(str) end def check_decoded(actual, expected) if RUBY_VERSION >= '1.9' expect(actual.encoding).to eq Encoding::BINARY expect(actual).to eq expected.force_encoding(Encoding::BINARY) else expect(actual).to eq expected end end describe "Attachments" do before(:each) do @mail = Mail.new @test_png = File.open(fixture('attachments', 'test.png'), 'rb', &:read) end describe "from direct content" do it "should work" do @mail.attachments['test.png'] = @test_png expect(@mail.attachments['test.png'].filename).to eq 'test.png' check_decoded(@mail.attachments[0].decoded, @test_png) end it "should work out magically the mime_type" do @mail.attachments['test.png'] = @test_png expect(@mail.attachments[0].mime_type).to eq 'image/png' end it "should assign the filename" do @mail.attachments['test.png'] = @test_png expect(@mail.attachments[0].filename).to eq 'test.png' end it "should assign mime-encoded multibyte filename" do @mail.attachments['てすと.txt'] = File.open(fixture('attachments', 'てすと.txt'), 'rb', &:read) expect(@mail.attachments).not_to be_blank expect(Mail::Encodings.decode_encode(@mail.attachments[0].filename, :decode)).to eq 'てすと.txt' end end describe "from a supplied Hash" do it "should work" do @mail.attachments['test.png'] = { :content => @test_png } expect(@mail.attachments[0].filename).to eq 'test.png' check_decoded(@mail.attachments[0].decoded, @test_png) end it "should allow you to override the content_type" do @mail.attachments['test.png'] = { :content => @test_png, :content_type => "application/x-gzip" } expect(@mail.attachments[0].content_type).to eq 'application/x-gzip' end it "should allow you to override the mime_type" do @mail.attachments['test.png'] = { :content => @test_png, :mime_type => "application/x-gzip" } expect(@mail.attachments[0].mime_type).to eq 'application/x-gzip' end it "should allow you to override the mime_type" do @mail.attachments['invoice.jpg'] = 
{ :data => "you smiling", :mime_type => "image/x-jpg", :transfer_encoding => "base64" } expect(@mail.attachments[0].mime_type).to eq 'image/x-jpg' end end describe "decoding and encoding" do it "should set its content_transfer_encoding" do @mail.attachments['test.png'] = { :content => @test_png } @mail.ready_to_send! expect(@mail.attachments[0].content_transfer_encoding).to eq 'base64' end it "should encode its body to base64" do @mail.attachments['test.png'] = { :content => @test_png } @mail.ready_to_send! expect(@mail.attachments[0].encoded).to include(encode_base64(@test_png)) end it "should allow you to pass in an encoded attachment with an encoding" do encoded_data = encode_base64(@test_png) @mail.attachments['test.png'] = { :content => encoded_data, :encoding => 'base64' } check_decoded(@mail.attachments[0].decoded, @test_png) end it "should allow you set a mime type and encoding without overriding the encoding" do encoded = encode_base64('<foo/>') @mail.attachments['test.png'] = { :mime_type => 'text/xml', :content => encoded, :encoding => 'base64' } expect(@mail.attachments[0].content_transfer_encoding).to eq 'base64' check_decoded(@mail.attachments[0].decoded, '<foo/>') end it "should not allow you to pass in an encoded attachment with an unknown encoding" do base64_encoded_data = encode_base64(@test_png) expect {@mail.attachments['test.png'] = { :content => base64_encoded_data, :encoding => 'weird_encoding' }}.to raise_error end it "should be able to call read on the attachment to return the decoded data" do @mail.attachments['test.png'] = { :content => @test_png } if RUBY_VERSION >= '1.9' expected = @mail.attachments[0].read.force_encoding(@test_png.encoding) else expected = @mail.attachments[0].read end expect(expected).to eq @test_png end it "should only add one newline between attachment body and boundary" do contents = "I have\ntwo lines with trailing newlines\n\n" @mail.attachments['text.txt'] = { :content => contents} encoded = @mail.encoded regex 
= /\r\n#{Regexp.escape(contents.gsub(/\n/, "\r\n"))}\r\n--#{@mail.boundary}--\r\n\Z/ expect(encoded).to match regex end end describe "multiple attachments" do it "should allow you to pass in more than one attachment" do mail = Mail.new mail.attachments['test.pdf'] = File.open(fixture('attachments', 'test.pdf'), 'rb', &:read) mail.attachments['test.gif'] = File.open(fixture('attachments', 'test.gif'), 'rb', &:read) mail.attachments['test.jpg'] = File.open(fixture('attachments', 'test.jpg'), 'rb', &:read) mail.attachments['test.zip'] = File.open(fixture('attachments', 'test.zip'), 'rb', &:read) expect(mail.attachments[0].filename).to eq 'test.pdf' expect(mail.attachments[1].filename).to eq 'test.gif' expect(mail.attachments[2].filename).to eq 'test.jpg' expect(mail.attachments[3].filename).to eq 'test.zip' end end describe "inline attachments" do it "should set the content_disposition to inline or attachment as appropriate" do mail = Mail.new mail.attachments['test.pdf'] = File.open(fixture('attachments', 'test.pdf'), 'rb', &:read) expect(mail.attachments['test.pdf'].content_disposition).to eq 'attachment; filename=test.pdf' mail.attachments.inline['test.png'] = File.open(fixture('attachments', 'test.png'), 'rb', &:read) expect(mail.attachments.inline['test.png'].content_disposition).to eq 'inline; filename=test.png' end it "should return a cid" do mail = Mail.new mail.attachments.inline['test.png'] = @test_png expect(mail.attachments['test.png'].url).to eq "cid:#{mail.attachments['test.png'].cid}" end it "should respond true to inline?" 
do mail = Mail.new mail.attachments.inline['test.png'] = @test_png expect(mail.attachments['test.png']).to be_inline end end describe "getting the content ID from an attachment" do before(:each) do @mail = Mail.new @mail.attachments['test.gif'] = File.open(fixture('attachments', 'test.gif'), 'rb', &:read) @cid = @mail.attachments['test.gif'].content_id end it "should return a content-id for the attachment on creation if passed inline => true" do expect(@cid).not_to be_nil end it "should return a valid content-id on inline attachments" do expect(Mail::ContentIdField.new(@cid).errors).to be_empty end it "should provide a URL escaped content_id (without brackets) for use inside an email" do @inline = @mail.attachments['test.gif'].cid uri_parser = URI.const_defined?(:Parser) ? URI::Parser.new : URI expect(@inline).to eq uri_parser.escape(@cid.gsub(/^</, '').gsub(/>$/, '')) end end describe "setting the content type correctly" do it "should set the content type to multipart/mixed if none given and you add an attachment" do mail = Mail.new mail.attachments['test.pdf'] = File.open(fixture('attachments', 'test.pdf'), 'rb', &:read) mail.encoded expect(mail.mime_type).to eq 'multipart/mixed' end it "allows you to set the attachment before the content type" do mail = Mail.new mail.attachments["test.png"] = File.open(fixture('attachments', 'test.png'), 'rb', &:read) mail.body = "Lots of HTML" mail.mime_version = '1.0' mail.content_type = 'text/html; charset=UTF-8' end end describe "should handle filenames with non-7bit characters correctly" do it "should not raise an exception with a filename that contains a non-7bit-character" do filename = "f\u00f6\u00f6.b\u00e4r" if RUBY_VERSION >= '1.9' expect(filename.encoding).to eq Encoding::UTF_8 end mail = Mail.new expect { mail.attachments[filename] = File.open(fixture('attachments', 'test.pdf'), 'rb', &:read) }.not_to raise_error end end end describe "reading emails with attachments" do describe "test emails" do it "should find the 
attachment using content location" do mail = Mail.read(fixture(File.join('emails', 'attachment_emails', 'attachment_content_location.eml'))) expect(mail.attachments.length).to eq 1 end it "should find an attachment defined with 'name' and Content-Disposition: attachment" do mail = Mail.read(fixture(File.join('emails', 'attachment_emails', 'attachment_content_disposition.eml'))) expect(mail.attachments.length).to eq 1 end it "should use the content-type filename or name over the content-disposition filename" do mail = Mail.read(fixture(File.join('emails', 'attachment_emails', 'attachment_content_disposition.eml'))) expect(mail.attachments[0].filename).to eq 'hello.rb' end it "should decode an attachment" do mail = Mail.read(fixture(File.join('emails', 'attachment_emails', 'attachment_pdf.eml'))) expect(mail.attachments[0].decoded.length).to eq 1026 end it "should find an attachment that has an encoded name value" do mail = Mail.read(fixture(File.join('emails', 'attachment_emails', 'attachment_with_encoded_name.eml'))) expect(mail.attachments.length).to eq 1 result = mail.attachments[0].filename if RUBY_VERSION >= '1.9' expected = "01 Quien Te Dij\212at. Pitbull.mp3".force_encoding(result.encoding) else expected = "01 Quien Te Dij\212at. 
Pitbull.mp3" end expect(result).to eq expected end it "should find an attachment that has a name not surrounded by quotes" do mail = Mail.read(fixture(File.join('emails', 'attachment_emails', "attachment_with_unquoted_name.eml"))) expect(mail.attachments.length).to eq 1 expect(mail.attachments.first.filename).to eq "This is a test.txt" end it "should find attachments inside parts with content-type message/rfc822" do mail = Mail.read(fixture(File.join("emails", "attachment_emails", "attachment_message_rfc822.eml"))) expect(mail.attachments.length).to eq 1 expect(mail.attachments[0].decoded.length).to eq 1026 end it "attach filename decoding (issue 83)" do data = <<-limitMAIL Subject: aaa From: [email protected] To: [email protected] Content-Type: multipart/mixed; boundary=0016e64c0af257c3a7048b69e1ac --0016e64c0af257c3a7048b69e1ac Content-Type: multipart/alternative; boundary=0016e64c0af257c3a1048b69e1aa --0016e64c0af257c3a1048b69e1aa Content-Type: text/plain; charset=ISO-8859-1 aaa --0016e64c0af257c3a1048b69e1aa Content-Type: text/html; charset=ISO-8859-1 aaa<br> --0016e64c0af257c3a1048b69e1aa-- --0016e64c0af257c3a7048b69e1ac Content-Type: text/plain; charset=US-ASCII; name="=?utf-8?b?Rm90bzAwMDkuanBn?=" Content-Disposition: attachment; filename="=?utf-8?b?Rm90bzAwMDkuanBn?=" Content-Transfer-Encoding: base64 X-Attachment-Id: f_gbneqxxy0 YWFhCg== --0016e64c0af257c3a7048b69e1ac-- limitMAIL mail = Mail.new(data) #~ puts Mail::Encodings.decode_encode(mail.attachments[0].filename, :decode) expect(mail.attachments[0].filename).to eq "Foto0009.jpg" end end end describe "attachment order" do it "should be preserved instead when content type exists" do mail = Mail.new do to "[email protected]" from "[email protected]" subject "a subject" date Time.now text_part do content_type 'text/plain; charset=UTF-8' body "a \nsimplebody\n" end end mail.attachments['test.zip'] = File.open(fixture('attachments', 'test.zip'), 'rb', &:read) mail.attachments['test.pdf'] = 
File.open(fixture('attachments', 'test.pdf'), 'rb', &:read) mail.attachments['test.gif'] = File.open(fixture('attachments', 'test.gif'), 'rb', &:read) mail.attachments['test.jpg'] = File.open(fixture('attachments', 'test.jpg'), 'rb', &:read) expect(mail.attachments[0].filename).to eq 'test.zip' expect(mail.attachments[1].filename).to eq 'test.pdf' expect(mail.attachments[2].filename).to eq 'test.gif' expect(mail.attachments[3].filename).to eq 'test.jpg' mail2 = Mail.new(mail.encoded) expect(mail2.attachments[0].filename).to eq 'test.zip' expect(mail2.attachments[1].filename).to eq 'test.pdf' expect(mail2.attachments[2].filename).to eq 'test.gif' expect(mail2.attachments[3].filename).to eq 'test.jpg' end end
mit
tarquasso/softroboticfish6
fish/pi/ros/catkin_ws/src/rosserial/devel/share/gennodejs/ros/rosserial_msgs/find.js
1464
/* * Copyright 2016 Rethink Robotics * * Copyright 2016 Chris Smith * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ 'use strict'; let fs = require('fs'); let path = require('path'); let cmakePath = process.env.CMAKE_PREFIX_PATH; let cmakePaths = cmakePath.split(':'); let jsMsgPath = 'share/gennodejs/ros'; let packagePaths = {}; module.exports = function (messagePackage) { if (packagePaths.hasOwnProperty(messagePackage)) { return packagePaths[messagePackage]; } // else const found = cmakePaths.some((cmakePath) => { let path_ = path.join(cmakePath, jsMsgPath, messagePackage, '_index.js'); if (fs.existsSync(path_)) { packagePaths[messagePackage] = require(path_); return true; } return false; }); if (found) { return packagePaths[messagePackage]; } // else throw new Error('Unable to find message package ' + messagePackage + ' from CMAKE_PREFIX_PATH'); };
mit
EdwardStudy/myghostblog
versions/1.25.7/node_modules/eslint/lib/rules/accessor-pairs.js
5257
/** * @fileoverview Rule to flag wrapping non-iife in parens * @author Gyandeep Singh */ "use strict"; //------------------------------------------------------------------------------ // Helpers //------------------------------------------------------------------------------ /** * Checks whether or not a given node is an `Identifier` node which was named a given name. * @param {ASTNode} node - A node to check. * @param {string} name - An expected name of the node. * @returns {boolean} `true` if the node is an `Identifier` node which was named as expected. */ function isIdentifier(node, name) { return node.type === "Identifier" && node.name === name; } /** * Checks whether or not a given node is an argument of a specified method call. * @param {ASTNode} node - A node to check. * @param {number} index - An expected index of the node in arguments. * @param {string} object - An expected name of the object of the method. * @param {string} property - An expected name of the method. * @returns {boolean} `true` if the node is an argument of the specified method call. */ function isArgumentOfMethodCall(node, index, object, property) { const parent = node.parent; return ( parent.type === "CallExpression" && parent.callee.type === "MemberExpression" && parent.callee.computed === false && isIdentifier(parent.callee.object, object) && isIdentifier(parent.callee.property, property) && parent.arguments[index] === node ); } /** * Checks whether or not a given node is a property descriptor. * @param {ASTNode} node - A node to check. * @returns {boolean} `true` if the node is a property descriptor. 
*/ function isPropertyDescriptor(node) { // Object.defineProperty(obj, "foo", {set: ...}) if (isArgumentOfMethodCall(node, 2, "Object", "defineProperty") || isArgumentOfMethodCall(node, 2, "Reflect", "defineProperty") ) { return true; } /* * Object.defineProperties(obj, {foo: {set: ...}}) * Object.create(proto, {foo: {set: ...}}) */ const grandparent = node.parent.parent; return grandparent.type === "ObjectExpression" && ( isArgumentOfMethodCall(grandparent, 1, "Object", "create") || isArgumentOfMethodCall(grandparent, 1, "Object", "defineProperties") ); } //------------------------------------------------------------------------------ // Rule Definition //------------------------------------------------------------------------------ module.exports = { meta: { docs: { description: "enforce getter and setter pairs in objects", category: "Best Practices", recommended: false, url: "https://eslint.org/docs/rules/accessor-pairs" }, schema: [{ type: "object", properties: { getWithoutSet: { type: "boolean" }, setWithoutGet: { type: "boolean" } }, additionalProperties: false }], messages: { getter: "Getter is not present.", setter: "Setter is not present." } }, create(context) { const config = context.options[0] || {}; const checkGetWithoutSet = config.getWithoutSet === true; const checkSetWithoutGet = config.setWithoutGet !== false; /** * Checks a object expression to see if it has setter and getter both present or none. * @param {ASTNode} node The node to check. 
* @returns {void} * @private */ function checkLonelySetGet(node) { let isSetPresent = false; let isGetPresent = false; const isDescriptor = isPropertyDescriptor(node); for (let i = 0, end = node.properties.length; i < end; i++) { const property = node.properties[i]; let propToCheck = ""; if (property.kind === "init") { if (isDescriptor && !property.computed) { propToCheck = property.key.name; } } else { propToCheck = property.kind; } switch (propToCheck) { case "set": isSetPresent = true; break; case "get": isGetPresent = true; break; default: // Do nothing } if (isSetPresent && isGetPresent) { break; } } if (checkSetWithoutGet && isSetPresent && !isGetPresent) { context.report({ node, messageId: "getter" }); } else if (checkGetWithoutSet && isGetPresent && !isSetPresent) { context.report({ node, messageId: "setter" }); } } return { ObjectExpression(node) { if (checkSetWithoutGet || checkGetWithoutSet) { checkLonelySetGet(node); } } }; } };
mit
junhuac/MQUIC
src/base/android/linker/legacy_linker_jni.cc
16030
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // This is the version of the Android-specific Chromium linker that uses // the crazy linker to load libraries. // This source code *cannot* depend on anything from base/ or the C++ // STL, to keep the final library small, and avoid ugly dependency issues. #include "legacy_linker_jni.h" #include <crazy_linker.h> #include <fcntl.h> #include <jni.h> #include <limits.h> #include <stddef.h> #include <stdlib.h> #include <unistd.h> #include "linker_jni.h" namespace chromium_android_linker { namespace { // Retrieve the SDK build version and pass it into the crazy linker. This // needs to be done early in initialization, before any other crazy linker // code is run. // |env| is the current JNI environment handle. // On success, return true. bool InitSDKVersionInfo(JNIEnv* env) { jint value = 0; if (!InitStaticInt(env, "android/os/Build$VERSION", "SDK_INT", &value)) return false; crazy_set_sdk_build_version(static_cast<int>(value)); LOG_INFO("Set SDK build version to %d", static_cast<int>(value)); return true; } // The linker uses a single crazy_context_t object created on demand. // There is no need to protect this against concurrent access, locking // is already handled on the Java side. crazy_context_t* GetCrazyContext() { static crazy_context_t* s_crazy_context = nullptr; if (!s_crazy_context) { // Create new context. s_crazy_context = crazy_context_create(); // Ensure libraries located in the same directory as the linker // can be loaded before system ones. crazy_context_add_search_path_for_address( s_crazy_context, reinterpret_cast<void*>(&s_crazy_context)); } return s_crazy_context; } // A scoped crazy_library_t that automatically closes the handle // on scope exit, unless Release() has been called. 
class ScopedLibrary { public: ScopedLibrary() : lib_(nullptr) {} ~ScopedLibrary() { if (lib_) crazy_library_close_with_context(lib_, GetCrazyContext()); } crazy_library_t* Get() { return lib_; } crazy_library_t** GetPtr() { return &lib_; } crazy_library_t* Release() { crazy_library_t* ret = lib_; lib_ = nullptr; return ret; } private: crazy_library_t* lib_; }; template <class LibraryOpener> bool GenericLoadLibrary(JNIEnv* env, const char* library_name, jlong load_address, jobject lib_info_obj, const LibraryOpener& opener) { LOG_INFO("Called for %s, at address 0x%llx", library_name, load_address); crazy_context_t* context = GetCrazyContext(); if (!IsValidAddress(load_address)) { LOG_ERROR("Invalid address 0x%llx", load_address); return false; } // Set the desired load address (0 means randomize it). crazy_context_set_load_address(context, static_cast<size_t>(load_address)); ScopedLibrary library; if (!opener.Open(library.GetPtr(), library_name, context)) { return false; } crazy_library_info_t info; if (!crazy_library_get_info(library.Get(), context, &info)) { LOG_ERROR("Could not get library information for %s: %s", library_name, crazy_context_get_error(context)); return false; } // Release library object to keep it alive after the function returns. library.Release(); s_lib_info_fields.SetLoadInfo(env, lib_info_obj, info.load_address, info.load_size); LOG_INFO("Success loading library %s", library_name); return true; } // Used for opening the library in a regular file. class FileLibraryOpener { public: bool Open(crazy_library_t** library, const char* library_name, crazy_context_t* context) const; }; bool FileLibraryOpener::Open(crazy_library_t** library, const char* library_name, crazy_context_t* context) const { if (!crazy_library_open(library, library_name, context)) { LOG_ERROR("Could not open %s: %s", library_name, crazy_context_get_error(context)); return false; } return true; } // Used for opening the library in a zip file. 
class ZipLibraryOpener { public: explicit ZipLibraryOpener(const char* zip_file) : zip_file_(zip_file) { } bool Open(crazy_library_t** library, const char* library_name, crazy_context_t* context) const; private: const char* zip_file_; }; bool ZipLibraryOpener::Open(crazy_library_t** library, const char* library_name, crazy_context_t* context) const { if (!crazy_library_open_in_zip_file(library, zip_file_, library_name, context)) { LOG_ERROR("Could not open %s in zip file %s: %s", library_name, zip_file_, crazy_context_get_error(context)); return false; } return true; } // Load a library with the chromium linker. This will also call its // JNI_OnLoad() method, which shall register its methods. Note that // lazy native method resolution will _not_ work after this, because // Dalvik uses the system's dlsym() which won't see the new library, // so explicit registration is mandatory. // // |env| is the current JNI environment handle. // |clazz| is the static class handle for org.chromium.base.Linker, // and is ignored here. // |library_name| is the library name (e.g. libfoo.so). // |load_address| is an explicit load address. // |library_info| is a LibInfo handle used to communicate information // with the Java side. // Return true on success. jboolean LoadLibrary(JNIEnv* env, jclass clazz, jstring library_name, jlong load_address, jobject lib_info_obj) { String lib_name(env, library_name); FileLibraryOpener opener; return GenericLoadLibrary(env, lib_name.c_str(), static_cast<size_t>(load_address), lib_info_obj, opener); } // Load a library from a zipfile with the chromium linker. The // library in the zipfile must be uncompressed and page aligned. // The basename of the library is given. The library is expected // to be lib/<abi_tag>/crazy.<basename>. The <abi_tag> used will be the // same as the abi for this linker. The "crazy." prefix is included // so that the Android Package Manager doesn't extract the library into // /data/app-lib. 
// // Loading the library will also call its JNI_OnLoad() method, which // shall register its methods. Note that lazy native method resolution // will _not_ work after this, because Dalvik uses the system's dlsym() // which won't see the new library, so explicit registration is mandatory. // // |env| is the current JNI environment handle. // |clazz| is the static class handle for org.chromium.base.Linker, // and is ignored here. // |zipfile_name| is the filename of the zipfile containing the library. // |library_name| is the library base name (e.g. libfoo.so). // |load_address| is an explicit load address. // |library_info| is a LibInfo handle used to communicate information // with the Java side. // Returns true on success. jboolean LoadLibraryInZipFile(JNIEnv* env, jclass clazz, jstring zipfile_name, jstring library_name, jlong load_address, jobject lib_info_obj) { String zipfile_name_str(env, zipfile_name); String lib_name(env, library_name); ZipLibraryOpener opener(zipfile_name_str.c_str()); return GenericLoadLibrary(env, lib_name.c_str(), static_cast<size_t>(load_address), lib_info_obj, opener); } // Class holding the Java class and method ID for the Java side Linker // postCallbackOnMainThread method. struct JavaCallbackBindings_class { jclass clazz; jmethodID method_id; // Initialize an instance. bool Init(JNIEnv* env, jclass linker_class) { clazz = reinterpret_cast<jclass>(env->NewGlobalRef(linker_class)); return InitStaticMethodId(env, linker_class, "postCallbackOnMainThread", "(J)V", &method_id); } }; static JavaCallbackBindings_class s_java_callback_bindings; // Designated receiver function for callbacks from Java. Its name is known // to the Java side. // |env| is the current JNI environment handle and is ignored here. // |clazz| is the static class handle for org.chromium.base.Linker, // and is ignored here. // |arg| is a pointer to an allocated crazy_callback_t, deleted after use. 
void RunCallbackOnUiThread(JNIEnv* env, jclass clazz, jlong arg) { crazy_callback_t* callback = reinterpret_cast<crazy_callback_t*>(arg); LOG_INFO("Called back from java with handler %p, opaque %p", callback->handler, callback->opaque); crazy_callback_run(callback); delete callback; } // Request a callback from Java. The supplied crazy_callback_t is valid only // for the duration of this call, so we copy it to a newly allocated // crazy_callback_t and then call the Java side's postCallbackOnMainThread. // This will call back to to our RunCallbackOnUiThread some time // later on the UI thread. // |callback_request| is a crazy_callback_t. // |poster_opaque| is unused. // Returns true if the callback request succeeds. static bool PostForLaterExecution(crazy_callback_t* callback_request, void* poster_opaque UNUSED) { crazy_context_t* context = GetCrazyContext(); JavaVM* vm; int minimum_jni_version; crazy_context_get_java_vm(context, reinterpret_cast<void**>(&vm), &minimum_jni_version); // Do not reuse JNIEnv from JNI_OnLoad, but retrieve our own. JNIEnv* env; if (JNI_OK != vm->GetEnv( reinterpret_cast<void**>(&env), minimum_jni_version)) { LOG_ERROR("Could not create JNIEnv"); return false; } // Copy the callback; the one passed as an argument may be temporary. crazy_callback_t* callback = new crazy_callback_t(); *callback = *callback_request; LOG_INFO("Calling back to java with handler %p, opaque %p", callback->handler, callback->opaque); jlong arg = static_cast<jlong>(reinterpret_cast<uintptr_t>(callback)); env->CallStaticVoidMethod( s_java_callback_bindings.clazz, s_java_callback_bindings.method_id, arg); // Back out and return false if we encounter a JNI exception. 
if (env->ExceptionCheck() == JNI_TRUE) { env->ExceptionDescribe(); env->ExceptionClear(); delete callback; return false; } return true; } jboolean CreateSharedRelro(JNIEnv* env, jclass clazz, jstring library_name, jlong load_address, jobject lib_info_obj) { String lib_name(env, library_name); LOG_INFO("Called for %s", lib_name.c_str()); if (!IsValidAddress(load_address)) { LOG_ERROR("Invalid address 0x%llx", load_address); return false; } ScopedLibrary library; if (!crazy_library_find_by_name(lib_name.c_str(), library.GetPtr())) { LOG_ERROR("Could not find %s", lib_name.c_str()); return false; } crazy_context_t* context = GetCrazyContext(); size_t relro_start = 0; size_t relro_size = 0; int relro_fd = -1; if (!crazy_library_create_shared_relro(library.Get(), context, static_cast<size_t>(load_address), &relro_start, &relro_size, &relro_fd)) { LOG_ERROR("Could not create shared RELRO sharing for %s: %s\n", lib_name.c_str(), crazy_context_get_error(context)); return false; } s_lib_info_fields.SetRelroInfo(env, lib_info_obj, relro_start, relro_size, relro_fd); return true; } jboolean UseSharedRelro(JNIEnv* env, jclass clazz, jstring library_name, jobject lib_info_obj) { String lib_name(env, library_name); LOG_INFO("Called for %s, lib_info_ref=%p", lib_name.c_str(), lib_info_obj); ScopedLibrary library; if (!crazy_library_find_by_name(lib_name.c_str(), library.GetPtr())) { LOG_ERROR("Could not find %s", lib_name.c_str()); return false; } crazy_context_t* context = GetCrazyContext(); size_t relro_start = 0; size_t relro_size = 0; int relro_fd = -1; s_lib_info_fields.GetRelroInfo(env, lib_info_obj, &relro_start, &relro_size, &relro_fd); LOG_INFO("library=%s relro start=%p size=%p fd=%d", lib_name.c_str(), (void*)relro_start, (void*)relro_size, relro_fd); if (!crazy_library_use_shared_relro(library.Get(), context, relro_start, relro_size, relro_fd)) { LOG_ERROR("Could not use shared RELRO for %s: %s", lib_name.c_str(), crazy_context_get_error(context)); return false; } 
LOG_INFO("Library %s using shared RELRO section!", lib_name.c_str()); return true; } const JNINativeMethod kNativeMethods[] = { {"nativeLoadLibrary", "(" "Ljava/lang/String;" "J" "Lorg/chromium/base/library_loader/Linker$LibInfo;" ")" "Z", reinterpret_cast<void*>(&LoadLibrary)}, {"nativeLoadLibraryInZipFile", "(" "Ljava/lang/String;" "Ljava/lang/String;" "J" "Lorg/chromium/base/library_loader/Linker$LibInfo;" ")" "Z", reinterpret_cast<void*>(&LoadLibraryInZipFile)}, {"nativeRunCallbackOnUiThread", "(" "J" ")" "V", reinterpret_cast<void*>(&RunCallbackOnUiThread)}, {"nativeCreateSharedRelro", "(" "Ljava/lang/String;" "J" "Lorg/chromium/base/library_loader/Linker$LibInfo;" ")" "Z", reinterpret_cast<void*>(&CreateSharedRelro)}, {"nativeUseSharedRelro", "(" "Ljava/lang/String;" "Lorg/chromium/base/library_loader/Linker$LibInfo;" ")" "Z", reinterpret_cast<void*>(&UseSharedRelro)}, }; const size_t kNumNativeMethods = sizeof(kNativeMethods) / sizeof(kNativeMethods[0]); } // namespace bool LegacyLinkerJNIInit(JavaVM* vm, JNIEnv* env) { LOG_INFO("Entering"); // Initialize SDK version info. LOG_INFO("Retrieving SDK version info"); if (!InitSDKVersionInfo(env)) return false; // Register native methods. jclass linker_class; if (!InitClassReference(env, "org/chromium/base/library_loader/LegacyLinker", &linker_class)) return false; LOG_INFO("Registering native methods"); if (env->RegisterNatives(linker_class, kNativeMethods, kNumNativeMethods) < 0) return false; // Resolve and save the Java side Linker callback class and method. LOG_INFO("Resolving callback bindings"); if (!s_java_callback_bindings.Init(env, linker_class)) { return false; } // Save JavaVM* handle into context. crazy_context_t* context = GetCrazyContext(); crazy_context_set_java_vm(context, vm, JNI_VERSION_1_4); // Register the function that the crazy linker can call to post code // for later execution. 
crazy_context_set_callback_poster(context, &PostForLaterExecution, nullptr); return true; } } // namespace chromium_android_linker
mit
snehasen/Agilesite
source/docs/asset-folders.md
657
title: Asset Folders --- Assets are non-post files in `source` folder, such as images, CSS or JavaScript files. Hexo provides a more convenient way to manage assets. To enable this, modify `post_asset_folder` setting. ``` yaml post_asset_folder: true ``` Once `post_asset_folder` setting is enabled, Hexo will create a folder with the same name as the new post. You can put all assets related to the post into the folder. So that you can use them more easily. ## Tag Plugins Several tag plugins are added in Hexo 3 for you to include assets in posts more easily. ``` {% asset_path slug %} {% asset_img slug [title] %} {% asset_link slug [title] %} ```
mit
nbarbettini/corefx
src/Common/src/System/Net/SafeCloseSocket.cs
9141
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // See the LICENSE file in the project root for more information. using Microsoft.Win32.SafeHandles; using System.Diagnostics; using System.Threading; namespace System.Net.Sockets { // This class implements a safe socket handle. // It uses an inner and outer SafeHandle to do so. The inner // SafeHandle holds the actual socket, but only ever has one // reference to it. The outer SafeHandle guards the inner // SafeHandle with real ref counting. When the outer SafeHandle // is cleaned up, it releases the inner SafeHandle - since // its ref is the only ref to the inner SafeHandle, it deterministically // gets closed at that point - no races with concurrent IO calls. // This allows Close() on the outer SafeHandle to deterministically // close the inner SafeHandle, in turn allowing the inner SafeHandle // to block the user thread in case a graceful close has been // requested. (It's not legal to block any other thread - such closes // are always abortive.) internal partial class SafeCloseSocket : #if DEBUG DebugSafeHandleMinusOneIsInvalid #else SafeHandleMinusOneIsInvalid #endif { protected SafeCloseSocket() : base(true) { } private InnerSafeCloseSocket _innerSocket; private volatile bool _released; #if DEBUG private InnerSafeCloseSocket _innerSocketCopy; #endif public override bool IsInvalid { get { return IsClosed || base.IsInvalid; } } #if DEBUG public void AddRef() { try { // The inner socket can be closed by CloseAsIs and when SafeHandle runs ReleaseHandle. InnerSafeCloseSocket innerSocket = Volatile.Read(ref _innerSocket); if (innerSocket != null) { innerSocket.AddRef(); } } catch (Exception e) { Debug.Fail("SafeCloseSocket.AddRef after inner socket disposed." + e); } } public void Release() { try { // The inner socket can be closed by CloseAsIs and when SafeHandle runs ReleaseHandle. 
InnerSafeCloseSocket innerSocket = Volatile.Read(ref _innerSocket); if (innerSocket != null) { innerSocket.Release(); } } catch (Exception e) { Debug.Fail("SafeCloseSocket.Release after inner socket disposed." + e); } } #endif private void SetInnerSocket(InnerSafeCloseSocket socket) { _innerSocket = socket; SetHandle(socket.DangerousGetHandle()); #if DEBUG _innerSocketCopy = socket; #endif } private static SafeCloseSocket CreateSocket(InnerSafeCloseSocket socket) { SafeCloseSocket ret = new SafeCloseSocket(); CreateSocket(socket, ret); if (NetEventSource.IsEnabled) NetEventSource.Info(null, ret); return ret; } protected static void CreateSocket(InnerSafeCloseSocket socket, SafeCloseSocket target) { if (socket != null && socket.IsInvalid) { target.SetHandleAsInvalid(); return; } bool b = false; try { socket.DangerousAddRef(ref b); } catch { if (b) { socket.DangerousRelease(); b = false; } } finally { if (b) { target.SetInnerSocket(socket); socket.Dispose(); } else { target.SetHandleAsInvalid(); } } } protected override bool ReleaseHandle() { if (NetEventSource.IsEnabled) NetEventSource.Info(this, $"_innerSocket={_innerSocket}"); _released = true; InnerSafeCloseSocket innerSocket = _innerSocket == null ? null : Interlocked.Exchange<InnerSafeCloseSocket>(ref _innerSocket, null); #if DEBUG // On AppDomain unload we may still have pending Overlapped operations. // ThreadPoolBoundHandle should handle this scenario by canceling them. innerSocket?.LogRemainingOperations(); #endif InnerReleaseHandle(); innerSocket?.DangerousRelease(); return true; } internal void CloseAsIs() { if (NetEventSource.IsEnabled) NetEventSource.Info(this, $"_innerSocket={_innerSocket}"); #if DEBUG // If this throws it could be very bad. try { #endif InnerSafeCloseSocket innerSocket = _innerSocket == null ? null : Interlocked.Exchange<InnerSafeCloseSocket>(ref _innerSocket, null); Dispose(); if (innerSocket != null) { // Wait until it's safe. 
SpinWait sw = new SpinWait(); while (!_released) { sw.SpinOnce(); } // Now free it with blocking. innerSocket.BlockingRelease(); } InnerReleaseHandle(); #if DEBUG } catch (Exception exception) when (!ExceptionCheck.IsFatal(exception)) { NetEventSource.Fail(this, $"handle:{handle}, error:{exception}"); throw; } #endif } internal sealed partial class InnerSafeCloseSocket : SafeHandleMinusOneIsInvalid { private InnerSafeCloseSocket() : base(true) { } private bool _blockable; public override bool IsInvalid { get { return IsClosed || base.IsInvalid; } } // This method is implicitly reliable and called from a CER. protected override bool ReleaseHandle() { bool ret = false; #if DEBUG try { #endif if (NetEventSource.IsEnabled) NetEventSource.Info(this, $"handle:{handle}"); SocketError errorCode = InnerReleaseHandle(); return ret = errorCode == SocketError.Success; #if DEBUG } catch (Exception exception) { if (!ExceptionCheck.IsFatal(exception)) { NetEventSource.Fail(this, $"handle:{handle}, error:{exception}"); } ret = true; // Avoid a second assert. throw; } finally { _closeSocketThread = Environment.CurrentManagedThreadId; _closeSocketTick = Environment.TickCount; if (!ret) { NetEventSource.Fail(this, $"ReleaseHandle failed. 
handle:{handle}"); } } #endif } #if DEBUG private IntPtr _closeSocketHandle; private SocketError _closeSocketResult = unchecked((SocketError)0xdeadbeef); private SocketError _closeSocketLinger = unchecked((SocketError)0xdeadbeef); private int _closeSocketThread; private int _closeSocketTick; private int _refCount = 0; public void AddRef() { Interlocked.Increment(ref _refCount); } public void Release() { Interlocked.MemoryBarrier(); Debug.Assert(_refCount > 0, "InnerSafeCloseSocket: Release() called more times than AddRef"); Interlocked.Decrement(ref _refCount); } public void LogRemainingOperations() { Interlocked.MemoryBarrier(); if (NetEventSource.IsEnabled) NetEventSource.Info(this, $"Releasing with pending operations: {_refCount}"); } #endif // Use this method to close the socket handle using the linger options specified on the socket. // Guaranteed to only be called once, under a CER, and not if regular DangerousRelease is called. internal void BlockingRelease() { #if DEBUG // Expected to have outstanding operations such as Accept. LogRemainingOperations(); #endif _blockable = true; DangerousRelease(); } } } }
mit
johanbrandhorst/protobuf
vendor/github.com/sclevine/agouti/api/api.go
418
// Package api provides a generic, low-level WebDriver API client for Go. // All methods map directly to endpoints of the WebDriver Wire Protocol: // https://code.google.com/p/selenium/wiki/JsonWireProtocol // // This package was previously internal to the agouti package. It currently // does not have a fixed API, but this will change in the near future // (with the addition of adequate documentation). package api
mit
afuerstenau/daily-notes
vendor/cache/ruby/2.5.0/gems/activerecord-5.0.6/lib/active_record/attribute_methods/serialization.rb
2841
module ActiveRecord module AttributeMethods module Serialization extend ActiveSupport::Concern module ClassMethods # If you have an attribute that needs to be saved to the database as an # object, and retrieved as the same object, then specify the name of that # attribute using this method and it will be handled automatically. The # serialization is done through YAML. If +class_name+ is specified, the # serialized object must be of that class on assignment and retrieval. # Otherwise SerializationTypeMismatch will be raised. # # Empty objects as <tt>{}</tt>, in the case of +Hash+, or <tt>[]</tt>, in the case of # +Array+, will always be persisted as null. # # Keep in mind that database adapters handle certain serialization tasks # for you. For instance: +json+ and +jsonb+ types in PostgreSQL will be # converted between JSON object/array syntax and Ruby +Hash+ or +Array+ # objects transparently. There is no need to use #serialize in this # case. # # For more complex cases, such as conversion to or from your application # domain objects, consider using the ActiveRecord::Attributes API. # # ==== Parameters # # * +attr_name+ - The field name that should be serialized. # * +class_name_or_coder+ - Optional, a coder object, which responds to `.load` / `.dump` # or a class name that the object type should be equal to. # # ==== Example # # # Serialize a preferences attribute. # class User < ActiveRecord::Base # serialize :preferences # end # # # Serialize preferences using JSON as coder. # class User < ActiveRecord::Base # serialize :preferences, JSON # end # # # Serialize preferences as Hash using YAML coder. # class User < ActiveRecord::Base # serialize :preferences, Hash # end def serialize(attr_name, class_name_or_coder = Object) # When ::JSON is used, force it to go through the Active Support JSON encoder # to ensure special objects (e.g. Active Record models) are dumped correctly # using the #as_json hook. 
coder = if class_name_or_coder == ::JSON Coders::JSON elsif [:load, :dump].all? { |x| class_name_or_coder.respond_to?(x) } class_name_or_coder else Coders::YAMLColumn.new(class_name_or_coder) end decorate_attribute_type(attr_name, :serialize) do |type| Type::Serialized.new(type, coder) end end end end end end
mit
sekcheong/referencesource
System/net/System/Net/Configuration/PerformanceCountersElement.cs
1389
//------------------------------------------------------------------------------ // <copyright file="PerformanceCountersElement.cs" company="Microsoft Corporation"> // Copyright (c) Microsoft Corporation. All rights reserved. // </copyright> //------------------------------------------------------------------------------ namespace System.Net.Configuration { using System; using System.Configuration; using System.Reflection; using System.Security.Permissions; public sealed class PerformanceCountersElement : ConfigurationElement { public PerformanceCountersElement() { this.properties.Add(this.enabled); } [ConfigurationProperty(ConfigurationStrings.Enabled, DefaultValue=false)] public bool Enabled { get { return (bool) this[this.enabled]; } set { this[this.enabled] = value; } } protected override ConfigurationPropertyCollection Properties { get { return this.properties; } } ConfigurationPropertyCollection properties = new ConfigurationPropertyCollection(); readonly ConfigurationProperty enabled = new ConfigurationProperty(ConfigurationStrings.Enabled, typeof(bool), false, ConfigurationPropertyOptions.None); } }
mit
gregoriusxu/Umbraco-CMS
src/Umbraco.Web.UI.Client/src/views/common/dialogs/mediapicker.controller.js
6762
//used for the media picker dialog angular.module("umbraco") .controller("Umbraco.Dialogs.MediaPickerController", function ($scope, mediaResource, umbRequestHelper, entityResource, $log, mediaHelper, eventsService, treeService, $cookies, $element, $timeout, notificationsService) { var dialogOptions = $scope.dialogOptions; $scope.onlyImages = dialogOptions.onlyImages; $scope.showDetails = dialogOptions.showDetails; $scope.multiPicker = (dialogOptions.multiPicker && dialogOptions.multiPicker !== "0") ? true : false; $scope.startNodeId = dialogOptions.startNodeId ? dialogOptions.startNodeId : -1; $scope.cropSize = dialogOptions.cropSize; $scope.filesUploading = 0; $scope.dropping = false; $scope.progress = 0; $scope.options = { url: umbRequestHelper.getApiUrl("mediaApiBaseUrl", "PostAddFile") + "?origin=blueimp", autoUpload: true, dropZone: $element.find(".umb-dialogs-mediapicker.browser"), fileInput: $element.find("input.uploader"), formData: { currentFolder: -1 } }; //preload selected item $scope.target = undefined; if(dialogOptions.currentTarget){ $scope.target = dialogOptions.currentTarget; } $scope.submitFolder = function(e) { if (e.keyCode === 13) { e.preventDefault(); $scope.showFolderInput = false; mediaResource .addFolder($scope.newFolderName, $scope.options.formData.currentFolder) .then(function(data) { //we've added a new folder so lets clear the tree cache for that specific item treeService.clearCache({ cacheKey: "__media", //this is the main media tree cache key childrenOf: data.parentId //clear the children of the parent }); $scope.gotoFolder(data); }); } }; $scope.gotoFolder = function(folder) { if(!folder){ folder = {id: -1, name: "Media", icon: "icon-folder"}; } if (folder.id > 0) { entityResource.getAncestors(folder.id, "media") .then(function(anc) { // anc.splice(0,1); $scope.path = _.filter(anc, function (f) { return f.path.indexOf($scope.startNodeId) !== -1; }); }); } else { $scope.path = []; } //mediaResource.rootMedia() 
mediaResource.getChildren(folder.id) .then(function(data) { $scope.searchTerm = ""; $scope.images = data.items ? data.items : []; }); $scope.options.formData.currentFolder = folder.id; $scope.currentFolder = folder; }; //This executes prior to the whole processing which we can use to get the UI going faster, //this also gives us the start callback to invoke to kick of the whole thing $scope.$on('fileuploadadd', function (e, data) { $scope.$apply(function () { $scope.filesUploading++; }); }); //when one is finished $scope.$on('fileuploaddone', function (e, data) { $scope.filesUploading--; if ($scope.filesUploading == 0) { $scope.$apply(function () { $scope.progress = 0; $scope.gotoFolder($scope.currentFolder); }); } //Show notifications!!!! if (data.result && data.result.notifications && angular.isArray(data.result.notifications)) { for (var n = 0; n < data.result.notifications.length; n++) { notificationsService.showNotification(data.result.notifications[n]); } } }); // All these sit-ups are to add dropzone area and make sure it gets removed if dragging is aborted! $scope.$on('fileuploaddragover', function (e, data) { if (!$scope.dragClearTimeout) { $scope.$apply(function () { $scope.dropping = true; }); } else { $timeout.cancel($scope.dragClearTimeout); } $scope.dragClearTimeout = $timeout(function () { $scope.dropping = null; $scope.dragClearTimeout = null; }, 300); }); $scope.clickHandler = function(image, ev, select) { ev.preventDefault(); if (image.isFolder && !select) { $scope.gotoFolder(image); }else{ eventsService.emit("dialogs.mediaPicker.select", image); //we have 3 options add to collection (if multi) show details, or submit it right back to the callback if ($scope.multiPicker) { $scope.select(image); image.cssclass = ($scope.dialogData.selection.indexOf(image) > -1) ? 
"selected" : ""; }else if($scope.showDetails) { $scope.target= image; $scope.target.url = mediaHelper.resolveFile(image); }else{ $scope.submit(image); } } }; $scope.exitDetails = function(){ if(!$scope.currentFolder){ $scope.gotoFolder(); } $scope.target = undefined; }; //default root item if(!$scope.target){ $scope.gotoFolder({ id: $scope.startNodeId, name: "Media", icon: "icon-folder" }); } });
mit
Originate/exosphere
vendor/github.com/moby/moby/cli/command/volume/inspect_test.go
4211
package volume import ( "bytes" "fmt" "io/ioutil" "testing" "github.com/docker/docker/api/types" "github.com/docker/docker/cli/internal/test" "github.com/pkg/errors" // Import builders to get the builder function as package function . "github.com/docker/docker/cli/internal/test/builders" "github.com/docker/docker/pkg/testutil/assert" "github.com/docker/docker/pkg/testutil/golden" ) func TestVolumeInspectErrors(t *testing.T) { testCases := []struct { args []string flags map[string]string volumeInspectFunc func(volumeID string) (types.Volume, error) expectedError string }{ { expectedError: "requires at least 1 argument", }, { args: []string{"foo"}, volumeInspectFunc: func(volumeID string) (types.Volume, error) { return types.Volume{}, errors.Errorf("error while inspecting the volume") }, expectedError: "error while inspecting the volume", }, { args: []string{"foo"}, flags: map[string]string{ "format": "{{invalid format}}", }, expectedError: "Template parsing error", }, { args: []string{"foo", "bar"}, volumeInspectFunc: func(volumeID string) (types.Volume, error) { if volumeID == "foo" { return types.Volume{ Name: "foo", }, nil } return types.Volume{}, errors.Errorf("error while inspecting the volume") }, expectedError: "error while inspecting the volume", }, } for _, tc := range testCases { buf := new(bytes.Buffer) cmd := newInspectCommand( test.NewFakeCli(&fakeClient{ volumeInspectFunc: tc.volumeInspectFunc, }, buf), ) cmd.SetArgs(tc.args) for key, value := range tc.flags { cmd.Flags().Set(key, value) } cmd.SetOutput(ioutil.Discard) assert.Error(t, cmd.Execute(), tc.expectedError) } } func TestVolumeInspectWithoutFormat(t *testing.T) { testCases := []struct { name string args []string volumeInspectFunc func(volumeID string) (types.Volume, error) }{ { name: "single-volume", args: []string{"foo"}, volumeInspectFunc: func(volumeID string) (types.Volume, error) { if volumeID != "foo" { return types.Volume{}, errors.Errorf("Invalid volumeID, expected %s, got %s", "foo", 
volumeID) } return *Volume(), nil }, }, { name: "multiple-volume-with-labels", args: []string{"foo", "bar"}, volumeInspectFunc: func(volumeID string) (types.Volume, error) { return *Volume(VolumeName(volumeID), VolumeLabels(map[string]string{ "foo": "bar", })), nil }, }, } for _, tc := range testCases { buf := new(bytes.Buffer) cmd := newInspectCommand( test.NewFakeCli(&fakeClient{ volumeInspectFunc: tc.volumeInspectFunc, }, buf), ) cmd.SetArgs(tc.args) assert.NilError(t, cmd.Execute()) actual := buf.String() expected := golden.Get(t, []byte(actual), fmt.Sprintf("volume-inspect-without-format.%s.golden", tc.name)) assert.EqualNormalizedString(t, assert.RemoveSpace, actual, string(expected)) } } func TestVolumeInspectWithFormat(t *testing.T) { volumeInspectFunc := func(volumeID string) (types.Volume, error) { return *Volume(VolumeLabels(map[string]string{ "foo": "bar", })), nil } testCases := []struct { name string format string args []string volumeInspectFunc func(volumeID string) (types.Volume, error) }{ { name: "simple-template", format: "{{.Name}}", args: []string{"foo"}, volumeInspectFunc: volumeInspectFunc, }, { name: "json-template", format: "{{json .Labels}}", args: []string{"foo"}, volumeInspectFunc: volumeInspectFunc, }, } for _, tc := range testCases { buf := new(bytes.Buffer) cmd := newInspectCommand( test.NewFakeCli(&fakeClient{ volumeInspectFunc: tc.volumeInspectFunc, }, buf), ) cmd.SetArgs(tc.args) cmd.Flags().Set("format", tc.format) assert.NilError(t, cmd.Execute()) actual := buf.String() expected := golden.Get(t, []byte(actual), fmt.Sprintf("volume-inspect-with-format.%s.golden", tc.name)) assert.EqualNormalizedString(t, assert.RemoveSpace, actual, string(expected)) } }
mit
nfl/es6-module-loader
src/system.js
1825
/* ********************************************************************************************* System Loader Implementation - Implemented to https://github.com/jorendorff/js-loaders/blob/master/browser-loader.js - <script type="module"> supported ********************************************************************************************* */ var System; function SystemLoader() { Loader.call(this); this.paths = {}; } // NB no specification provided for System.paths, used ideas discussed in https://github.com/jorendorff/js-loaders/issues/25 function applyPaths(paths, name) { // most specific (most number of slashes in path) match wins var pathMatch = '', wildcard, maxSlashCount = 0; // check to see if we have a paths entry for (var p in paths) { var pathParts = p.split('*'); if (pathParts.length > 2) throw new TypeError('Only one wildcard in a path is permitted'); // exact path match if (pathParts.length == 1) { if (name == p) { pathMatch = p; break; } } // wildcard path match else { var slashCount = p.split('/').length; if (slashCount >= maxSlashCount && name.substr(0, pathParts[0].length) == pathParts[0] && name.substr(name.length - pathParts[1].length) == pathParts[1]) { maxSlashCount = slashCount; pathMatch = p; wildcard = name.substr(pathParts[0].length, name.length - pathParts[1].length - pathParts[0].length); } } } var outPath = paths[pathMatch] || name; if (typeof wildcard == 'string') outPath = outPath.replace('*', wildcard); return outPath; } // inline Object.create-style class extension function LoaderProto() {} LoaderProto.prototype = Loader.prototype; SystemLoader.prototype = new LoaderProto();
mit
HackerOO7/android_kernel_huawei_u8951
fs/ext4/ialloc.c
32786
/* * linux/fs/ext4/ialloc.c * * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card ([email protected]) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * * BSD ufs-inspired inode and directory allocation by * Stephen Tweedie ([email protected]), 1993 * Big-endian to little-endian byte-swapping/bitmaps by * David S. Miller ([email protected]), 1995 */ #include <linux/time.h> #include <linux/fs.h> #include <linux/jbd2.h> #include <linux/stat.h> #include <linux/string.h> #include <linux/quotaops.h> #include <linux/buffer_head.h> #include <linux/random.h> #include <linux/bitops.h> #include <linux/blkdev.h> #include <asm/byteorder.h> #include "ext4.h" #include "ext4_jbd2.h" #include "xattr.h" #include "acl.h" #include <trace/events/ext4.h> /* * ialloc.c contains the inodes allocation and deallocation routines */ /* * The free inodes are managed by bitmaps. A file system contains several * blocks groups. Each group contains 1 bitmap block for blocks, 1 bitmap * block for inodes, N blocks for the inode table and data blocks. * * The file system contains group descriptors which are located after the * super block. Each descriptor contains the number of the bitmap block and * the free blocks count in the block. */ /* * To avoid calling the atomic setbit hundreds or thousands of times, we only * need to use it within a single byte (to ensure we get endianness right). * We can use memset for the rest of the bitmap as there are no other users. 
*/ void ext4_mark_bitmap_end(int start_bit, int end_bit, char *bitmap) { int i; if (start_bit >= end_bit) return; ext4_debug("mark end bits +%d through +%d used\n", start_bit, end_bit); for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++) ext4_set_bit(i, bitmap); if (i < end_bit) memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3); } /* Initializes an uninitialized inode bitmap */ static unsigned ext4_init_inode_bitmap(struct super_block *sb, struct buffer_head *bh, ext4_group_t block_group, struct ext4_group_desc *gdp) { struct ext4_sb_info *sbi = EXT4_SB(sb); J_ASSERT_BH(bh, buffer_locked(bh)); /* If checksum is bad mark all blocks and inodes use to prevent * allocation, essentially implementing a per-group read-only flag. */ if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) { ext4_error(sb, "Checksum bad for group %u", block_group); ext4_free_group_clusters_set(sb, gdp, 0); ext4_free_inodes_set(sb, gdp, 0); ext4_itable_unused_set(sb, gdp, 0); memset(bh->b_data, 0xff, sb->s_blocksize); return 0; } memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8); ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8, bh->b_data); return EXT4_INODES_PER_GROUP(sb); } void ext4_end_bitmap_read(struct buffer_head *bh, int uptodate) { if (uptodate) { set_buffer_uptodate(bh); set_bitmap_uptodate(bh); } unlock_buffer(bh); put_bh(bh); } /* * Read the inode allocation bitmap for a given block_group, reading * into the specified slot in the superblock's bitmap cache. * * Return buffer_head of bitmap on success or NULL. 
*/ static struct buffer_head * ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group) { struct ext4_group_desc *desc; struct buffer_head *bh = NULL; ext4_fsblk_t bitmap_blk; desc = ext4_get_group_desc(sb, block_group, NULL); if (!desc) return NULL; bitmap_blk = ext4_inode_bitmap(sb, desc); bh = sb_getblk(sb, bitmap_blk); if (unlikely(!bh)) { ext4_error(sb, "Cannot read inode bitmap - " "block_group = %u, inode_bitmap = %llu", block_group, bitmap_blk); return NULL; } if (bitmap_uptodate(bh)) return bh; lock_buffer(bh); if (bitmap_uptodate(bh)) { unlock_buffer(bh); return bh; } ext4_lock_group(sb, block_group); if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) { ext4_init_inode_bitmap(sb, bh, block_group, desc); set_bitmap_uptodate(bh); set_buffer_uptodate(bh); ext4_unlock_group(sb, block_group); unlock_buffer(bh); return bh; } ext4_unlock_group(sb, block_group); if (buffer_uptodate(bh)) { /* * if not uninit if bh is uptodate, * bitmap is also uptodate */ set_bitmap_uptodate(bh); unlock_buffer(bh); return bh; } /* * submit the buffer_head for reading */ trace_ext4_load_inode_bitmap(sb, block_group); bh->b_end_io = ext4_end_bitmap_read; get_bh(bh); submit_bh(READ, bh); wait_on_buffer(bh); if (!buffer_uptodate(bh)) { put_bh(bh); ext4_error(sb, "Cannot read inode bitmap - " "block_group = %u, inode_bitmap = %llu", block_group, bitmap_blk); return NULL; } return bh; } /* * NOTE! When we get the inode, we're the only people * that have access to it, and as such there are no * race conditions we have to worry about. The inode * is not on the hash-lists, and it cannot be reached * through the filesystem because the directory entry * has been deleted earlier. * * HOWEVER: we must make sure that we get no aliases, * which means that we have to call "clear_inode()" * _before_ we mark the inode not in use in the inode * bitmaps. 
Otherwise a newly created file might use * the same inode number (not actually the same pointer * though), and then we'd have two inodes sharing the * same inode number and space on the harddisk. */ void ext4_free_inode(handle_t *handle, struct inode *inode) { struct super_block *sb = inode->i_sb; int is_directory; unsigned long ino; struct buffer_head *bitmap_bh = NULL; struct buffer_head *bh2; ext4_group_t block_group; unsigned long bit; struct ext4_group_desc *gdp; struct ext4_super_block *es; struct ext4_sb_info *sbi; int fatal = 0, err, count, cleared; if (!sb) { printk(KERN_ERR "EXT4-fs: %s:%d: inode on " "nonexistent device\n", __func__, __LINE__); return; } if (atomic_read(&inode->i_count) > 1) { ext4_msg(sb, KERN_ERR, "%s:%d: inode #%lu: count=%d", __func__, __LINE__, inode->i_ino, atomic_read(&inode->i_count)); return; } if (inode->i_nlink) { ext4_msg(sb, KERN_ERR, "%s:%d: inode #%lu: nlink=%d\n", __func__, __LINE__, inode->i_ino, inode->i_nlink); return; } sbi = EXT4_SB(sb); ino = inode->i_ino; ext4_debug("freeing inode %lu\n", ino); trace_ext4_free_inode(inode); /* * Note: we must free any quota before locking the superblock, * as writing the quota to disk may need the lock as well. 
*/ dquot_initialize(inode); ext4_xattr_delete_inode(handle, inode); dquot_free_inode(inode); dquot_drop(inode); is_directory = S_ISDIR(inode->i_mode); /* Do this BEFORE marking the inode not in use or returning an error */ ext4_clear_inode(inode); es = EXT4_SB(sb)->s_es; if (ino < EXT4_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) { ext4_error(sb, "reserved or nonexistent inode %lu", ino); goto error_return; } block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb); bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb); bitmap_bh = ext4_read_inode_bitmap(sb, block_group); if (!bitmap_bh) goto error_return; BUFFER_TRACE(bitmap_bh, "get_write_access"); fatal = ext4_journal_get_write_access(handle, bitmap_bh); if (fatal) goto error_return; fatal = -ESRCH; gdp = ext4_get_group_desc(sb, block_group, &bh2); if (gdp) { BUFFER_TRACE(bh2, "get_write_access"); fatal = ext4_journal_get_write_access(handle, bh2); } ext4_lock_group(sb, block_group); cleared = ext4_test_and_clear_bit(bit, bitmap_bh->b_data); if (fatal || !cleared) { ext4_unlock_group(sb, block_group); goto out; } count = ext4_free_inodes_count(sb, gdp) + 1; ext4_free_inodes_set(sb, gdp, count); if (is_directory) { count = ext4_used_dirs_count(sb, gdp) - 1; ext4_used_dirs_set(sb, gdp, count); percpu_counter_dec(&sbi->s_dirs_counter); } gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp); ext4_unlock_group(sb, block_group); percpu_counter_inc(&sbi->s_freeinodes_counter); if (sbi->s_log_groups_per_flex) { ext4_group_t f = ext4_flex_group(sbi, block_group); atomic_inc(&sbi->s_flex_groups[f].free_inodes); if (is_directory) atomic_dec(&sbi->s_flex_groups[f].used_dirs); } BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata"); fatal = ext4_handle_dirty_metadata(handle, NULL, bh2); out: if (cleared) { BUFFER_TRACE(bitmap_bh, "call ext4_handle_dirty_metadata"); err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); if (!fatal) fatal = err; ext4_mark_super_dirty(sb); } else ext4_error(sb, "bit already cleared for 
inode %lu", ino); error_return: brelse(bitmap_bh); ext4_std_error(sb, fatal); } struct orlov_stats { __u32 free_inodes; __u32 free_clusters; __u32 used_dirs; }; /* * Helper function for Orlov's allocator; returns critical information * for a particular block group or flex_bg. If flex_size is 1, then g * is a block group number; otherwise it is flex_bg number. */ static void get_orlov_stats(struct super_block *sb, ext4_group_t g, int flex_size, struct orlov_stats *stats) { struct ext4_group_desc *desc; struct flex_groups *flex_group = EXT4_SB(sb)->s_flex_groups; if (flex_size > 1) { stats->free_inodes = atomic_read(&flex_group[g].free_inodes); stats->free_clusters = atomic_read(&flex_group[g].free_clusters); stats->used_dirs = atomic_read(&flex_group[g].used_dirs); return; } desc = ext4_get_group_desc(sb, g, NULL); if (desc) { stats->free_inodes = ext4_free_inodes_count(sb, desc); stats->free_clusters = ext4_free_group_clusters(sb, desc); stats->used_dirs = ext4_used_dirs_count(sb, desc); } else { stats->free_inodes = 0; stats->free_clusters = 0; stats->used_dirs = 0; } } /* * Orlov's allocator for directories. * * We always try to spread first-level directories. * * If there are blockgroups with both free inodes and free blocks counts * not worse than average we return one with smallest directory count. * Otherwise we simply return a random group. * * For the rest rules look so: * * It's OK to put directory into a group unless * it has too many directories already (max_dirs) or * it has too few free inodes left (min_inodes) or * it has too few free blocks left (min_blocks) or * Parent's group is preferred, if it doesn't satisfy these * conditions we search cyclically through the rest. If none * of the groups look good we just look for a group with more * free inodes than average (starting at parent's group). 
*/ static int find_group_orlov(struct super_block *sb, struct inode *parent, ext4_group_t *group, umode_t mode, const struct qstr *qstr) { ext4_group_t parent_group = EXT4_I(parent)->i_block_group; struct ext4_sb_info *sbi = EXT4_SB(sb); ext4_group_t real_ngroups = ext4_get_groups_count(sb); int inodes_per_group = EXT4_INODES_PER_GROUP(sb); unsigned int freei, avefreei, grp_free; ext4_fsblk_t freeb, avefreec; unsigned int ndirs; int max_dirs, min_inodes; ext4_grpblk_t min_clusters; ext4_group_t i, grp, g, ngroups; struct ext4_group_desc *desc; struct orlov_stats stats; int flex_size = ext4_flex_bg_size(sbi); struct dx_hash_info hinfo; ngroups = real_ngroups; if (flex_size > 1) { ngroups = (real_ngroups + flex_size - 1) >> sbi->s_log_groups_per_flex; parent_group >>= sbi->s_log_groups_per_flex; } freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter); avefreei = freei / ngroups; freeb = EXT4_C2B(sbi, percpu_counter_read_positive(&sbi->s_freeclusters_counter)); avefreec = freeb; do_div(avefreec, ngroups); ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter); if (S_ISDIR(mode) && ((parent == sb->s_root->d_inode) || (ext4_test_inode_flag(parent, EXT4_INODE_TOPDIR)))) { int best_ndir = inodes_per_group; int ret = -1; if (qstr) { hinfo.hash_version = DX_HASH_HALF_MD4; hinfo.seed = sbi->s_hash_seed; ext4fs_dirhash(qstr->name, qstr->len, &hinfo); grp = hinfo.hash; } else get_random_bytes(&grp, sizeof(grp)); parent_group = (unsigned)grp % ngroups; for (i = 0; i < ngroups; i++) { g = (parent_group + i) % ngroups; get_orlov_stats(sb, g, flex_size, &stats); if (!stats.free_inodes) continue; if (stats.used_dirs >= best_ndir) continue; if (stats.free_inodes < avefreei) continue; if (stats.free_clusters < avefreec) continue; grp = g; ret = 0; best_ndir = stats.used_dirs; } if (ret) goto fallback; found_flex_bg: if (flex_size == 1) { *group = grp; return 0; } /* * We pack inodes at the beginning of the flexgroup's * inode tables. 
Block allocation decisions will do * something similar, although regular files will * start at 2nd block group of the flexgroup. See * ext4_ext_find_goal() and ext4_find_near(). */ grp *= flex_size; for (i = 0; i < flex_size; i++) { if (grp+i >= real_ngroups) break; desc = ext4_get_group_desc(sb, grp+i, NULL); if (desc && ext4_free_inodes_count(sb, desc)) { *group = grp+i; return 0; } } goto fallback; } max_dirs = ndirs / ngroups + inodes_per_group / 16; min_inodes = avefreei - inodes_per_group*flex_size / 4; if (min_inodes < 1) min_inodes = 1; min_clusters = avefreec - EXT4_CLUSTERS_PER_GROUP(sb)*flex_size / 4; /* * Start looking in the flex group where we last allocated an * inode for this parent directory */ if (EXT4_I(parent)->i_last_alloc_group != ~0) { parent_group = EXT4_I(parent)->i_last_alloc_group; if (flex_size > 1) parent_group >>= sbi->s_log_groups_per_flex; } for (i = 0; i < ngroups; i++) { grp = (parent_group + i) % ngroups; get_orlov_stats(sb, grp, flex_size, &stats); if (stats.used_dirs >= max_dirs) continue; if (stats.free_inodes < min_inodes) continue; if (stats.free_clusters < min_clusters) continue; goto found_flex_bg; } fallback: ngroups = real_ngroups; avefreei = freei / ngroups; fallback_retry: parent_group = EXT4_I(parent)->i_block_group; for (i = 0; i < ngroups; i++) { grp = (parent_group + i) % ngroups; desc = ext4_get_group_desc(sb, grp, NULL); grp_free = ext4_free_inodes_count(sb, desc); if (desc && grp_free && grp_free >= avefreei) { *group = grp; return 0; } } if (avefreei) { /* * The free-inodes counter is approximate, and for really small * filesystems the above test can fail to find any blockgroups */ avefreei = 0; goto fallback_retry; } return -1; } static int find_group_other(struct super_block *sb, struct inode *parent, ext4_group_t *group, umode_t mode) { ext4_group_t parent_group = EXT4_I(parent)->i_block_group; ext4_group_t i, last, ngroups = ext4_get_groups_count(sb); struct ext4_group_desc *desc; int flex_size = 
ext4_flex_bg_size(EXT4_SB(sb)); /* * Try to place the inode is the same flex group as its * parent. If we can't find space, use the Orlov algorithm to * find another flex group, and store that information in the * parent directory's inode information so that use that flex * group for future allocations. */ if (flex_size > 1) { int retry = 0; try_again: parent_group &= ~(flex_size-1); last = parent_group + flex_size; if (last > ngroups) last = ngroups; for (i = parent_group; i < last; i++) { desc = ext4_get_group_desc(sb, i, NULL); if (desc && ext4_free_inodes_count(sb, desc)) { *group = i; return 0; } } if (!retry && EXT4_I(parent)->i_last_alloc_group != ~0) { retry = 1; parent_group = EXT4_I(parent)->i_last_alloc_group; goto try_again; } /* * If this didn't work, use the Orlov search algorithm * to find a new flex group; we pass in the mode to * avoid the topdir algorithms. */ *group = parent_group + flex_size; if (*group > ngroups) *group = 0; return find_group_orlov(sb, parent, group, mode, NULL); } /* * Try to place the inode in its parent directory */ *group = parent_group; desc = ext4_get_group_desc(sb, *group, NULL); if (desc && ext4_free_inodes_count(sb, desc) && ext4_free_group_clusters(sb, desc)) return 0; /* * We're going to place this inode in a different blockgroup from its * parent. We want to cause files in a common directory to all land in * the same blockgroup. But we want files which are in a different * directory which shares a blockgroup with our parent to land in a * different blockgroup. * * So add our directory's i_ino into the starting point for the hash. */ *group = (*group + parent->i_ino) % ngroups; /* * Use a quadratic hash to find a group with a free inode and some free * blocks. 
*/ for (i = 1; i < ngroups; i <<= 1) { *group += i; if (*group >= ngroups) *group -= ngroups; desc = ext4_get_group_desc(sb, *group, NULL); if (desc && ext4_free_inodes_count(sb, desc) && ext4_free_group_clusters(sb, desc)) return 0; } /* * That failed: try linear search for a free inode, even if that group * has no free blocks. */ *group = parent_group; for (i = 0; i < ngroups; i++) { if (++*group >= ngroups) *group = 0; desc = ext4_get_group_desc(sb, *group, NULL); if (desc && ext4_free_inodes_count(sb, desc)) return 0; } return -1; } /* * There are two policies for allocating an inode. If the new inode is * a directory, then a forward search is made for a block group with both * free space and a low directory-to-inode ratio; if that fails, then of * the groups with above-average free space, that group with the fewest * directories already is chosen. * * For other inodes, search forward from the parent directory's block * group to find a free inode. */ struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, umode_t mode, const struct qstr *qstr, __u32 goal, uid_t *owner) { struct super_block *sb; struct buffer_head *inode_bitmap_bh = NULL; struct buffer_head *group_desc_bh; ext4_group_t ngroups, group = 0; unsigned long ino = 0; struct inode *inode; struct ext4_group_desc *gdp = NULL; struct ext4_inode_info *ei; struct ext4_sb_info *sbi; int ret2, err = 0; struct inode *ret; ext4_group_t i; ext4_group_t flex_group; /* Cannot create files in a deleted directory */ if (!dir || !dir->i_nlink) return ERR_PTR(-EPERM); sb = dir->i_sb; ngroups = ext4_get_groups_count(sb); trace_ext4_request_inode(dir, mode); inode = new_inode(sb); if (!inode) return ERR_PTR(-ENOMEM); ei = EXT4_I(inode); sbi = EXT4_SB(sb); if (!goal) goal = sbi->s_inode_goal; if (goal && goal <= le32_to_cpu(sbi->s_es->s_inodes_count)) { group = (goal - 1) / EXT4_INODES_PER_GROUP(sb); ino = (goal - 1) % EXT4_INODES_PER_GROUP(sb); ret2 = 0; goto got_group; } if (S_ISDIR(mode)) ret2 = 
find_group_orlov(sb, dir, &group, mode, qstr); else ret2 = find_group_other(sb, dir, &group, mode); got_group: EXT4_I(dir)->i_last_alloc_group = group; err = -ENOSPC; if (ret2 == -1) goto out; /* * Normally we will only go through one pass of this loop, * unless we get unlucky and it turns out the group we selected * had its last inode grabbed by someone else. */ for (i = 0; i < ngroups; i++, ino = 0) { err = -EIO; gdp = ext4_get_group_desc(sb, group, &group_desc_bh); if (!gdp) goto fail; brelse(inode_bitmap_bh); inode_bitmap_bh = ext4_read_inode_bitmap(sb, group); if (!inode_bitmap_bh) goto fail; repeat_in_this_group: ino = ext4_find_next_zero_bit((unsigned long *) inode_bitmap_bh->b_data, EXT4_INODES_PER_GROUP(sb), ino); if (ino >= EXT4_INODES_PER_GROUP(sb)) { if (++group == ngroups) group = 0; continue; } if (group == 0 && (ino+1) < EXT4_FIRST_INO(sb)) { ext4_error(sb, "reserved inode found cleared - " "inode=%lu", ino + 1); continue; } ext4_lock_group(sb, group); ret2 = ext4_test_and_set_bit(ino, inode_bitmap_bh->b_data); ext4_unlock_group(sb, group); ino++; /* the inode bitmap is zero-based */ if (!ret2) goto got; /* we grabbed the inode! 
*/ if (ino < EXT4_INODES_PER_GROUP(sb)) goto repeat_in_this_group; } err = -ENOSPC; goto out; got: /* We may have to initialize the block bitmap if it isn't already */ if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM) && gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { struct buffer_head *block_bitmap_bh; block_bitmap_bh = ext4_read_block_bitmap(sb, group); BUFFER_TRACE(block_bitmap_bh, "get block bitmap access"); err = ext4_journal_get_write_access(handle, block_bitmap_bh); if (err) { brelse(block_bitmap_bh); goto fail; } BUFFER_TRACE(block_bitmap_bh, "dirty block bitmap"); err = ext4_handle_dirty_metadata(handle, NULL, block_bitmap_bh); brelse(block_bitmap_bh); /* recheck and clear flag under lock if we still need to */ ext4_lock_group(sb, group); if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); ext4_free_group_clusters_set(sb, gdp, ext4_free_clusters_after_init(sb, group, gdp)); gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp); } ext4_unlock_group(sb, group); if (err) goto fail; } BUFFER_TRACE(inode_bitmap_bh, "get_write_access"); err = ext4_journal_get_write_access(handle, inode_bitmap_bh); if (err) goto fail; BUFFER_TRACE(group_desc_bh, "get_write_access"); err = ext4_journal_get_write_access(handle, group_desc_bh); if (err) goto fail; /* Update the relevant bg descriptor fields */ if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) { int free; struct ext4_group_info *grp = ext4_get_group_info(sb, group); down_read(&grp->alloc_sem); /* protect vs itable lazyinit */ ext4_lock_group(sb, group); /* while we modify the bg desc */ free = EXT4_INODES_PER_GROUP(sb) - ext4_itable_unused_count(sb, gdp); if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) { gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT); free = 0; } /* * Check the relative inode number against the last used * relative inode number in this group. 
if it is greater * we need to update the bg_itable_unused count */ if (ino > free) ext4_itable_unused_set(sb, gdp, (EXT4_INODES_PER_GROUP(sb) - ino)); up_read(&grp->alloc_sem); } ext4_free_inodes_set(sb, gdp, ext4_free_inodes_count(sb, gdp) - 1); if (S_ISDIR(mode)) { ext4_used_dirs_set(sb, gdp, ext4_used_dirs_count(sb, gdp) + 1); if (sbi->s_log_groups_per_flex) { ext4_group_t f = ext4_flex_group(sbi, group); atomic_inc(&sbi->s_flex_groups[f].used_dirs); } } if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) { gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp); ext4_unlock_group(sb, group); } BUFFER_TRACE(inode_bitmap_bh, "call ext4_handle_dirty_metadata"); err = ext4_handle_dirty_metadata(handle, NULL, inode_bitmap_bh); if (err) goto fail; BUFFER_TRACE(group_desc_bh, "call ext4_handle_dirty_metadata"); err = ext4_handle_dirty_metadata(handle, NULL, group_desc_bh); if (err) goto fail; percpu_counter_dec(&sbi->s_freeinodes_counter); if (S_ISDIR(mode)) percpu_counter_inc(&sbi->s_dirs_counter); ext4_mark_super_dirty(sb); if (sbi->s_log_groups_per_flex) { flex_group = ext4_flex_group(sbi, group); atomic_dec(&sbi->s_flex_groups[flex_group].free_inodes); } if (owner) { inode->i_mode = mode; inode->i_uid = owner[0]; inode->i_gid = owner[1]; } else if (test_opt(sb, GRPID)) { inode->i_mode = mode; inode->i_uid = current_fsuid(); inode->i_gid = dir->i_gid; } else inode_init_owner(inode, dir, mode); inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb); /* This is the optimal IO size (for stat), not the fs block size */ inode->i_blocks = 0; inode->i_mtime = inode->i_atime = inode->i_ctime = ei->i_crtime = ext4_current_time(inode); memset(ei->i_data, 0, sizeof(ei->i_data)); ei->i_dir_start_lookup = 0; ei->i_disksize = 0; /* Don't inherit extent flag from directory, amongst others. 
*/ ei->i_flags = ext4_mask_flags(mode, EXT4_I(dir)->i_flags & EXT4_FL_INHERITED); ei->i_file_acl = 0; ei->i_dtime = 0; ei->i_block_group = group; ei->i_last_alloc_group = ~0; ext4_set_inode_flags(inode); if (IS_DIRSYNC(inode)) ext4_handle_sync(handle); if (insert_inode_locked(inode) < 0) { /* * Likely a bitmap corruption causing inode to be allocated * twice. */ err = -EIO; goto fail; } spin_lock(&sbi->s_next_gen_lock); inode->i_generation = sbi->s_next_generation++; spin_unlock(&sbi->s_next_gen_lock); ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */ ext4_set_inode_state(inode, EXT4_STATE_NEW); ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize; ret = inode; dquot_initialize(inode); err = dquot_alloc_inode(inode); if (err) goto fail_drop; err = ext4_init_acl(handle, inode, dir); if (err) goto fail_free_drop; err = ext4_init_security(handle, inode, dir, qstr); if (err) goto fail_free_drop; if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) { /* set extent flag only for directory, file and normal symlink*/ if (S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) { ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS); ext4_ext_tree_init(handle, inode); } } if (ext4_handle_valid(handle)) { ei->i_sync_tid = handle->h_transaction->t_tid; ei->i_datasync_tid = handle->h_transaction->t_tid; } err = ext4_mark_inode_dirty(handle, inode); if (err) { ext4_std_error(sb, err); goto fail_free_drop; } ext4_debug("allocating inode %lu\n", inode->i_ino); trace_ext4_allocate_inode(inode, dir, mode); goto really_out; fail: ext4_std_error(sb, err); out: iput(inode); ret = ERR_PTR(err); really_out: brelse(inode_bitmap_bh); return ret; fail_free_drop: dquot_free_inode(inode); fail_drop: dquot_drop(inode); inode->i_flags |= S_NOQUOTA; clear_nlink(inode); unlock_new_inode(inode); iput(inode); brelse(inode_bitmap_bh); return ERR_PTR(err); } /* Verify that we are loading a valid orphan from disk */ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino) { 
unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count); ext4_group_t block_group; int bit; struct buffer_head *bitmap_bh; struct inode *inode = NULL; long err = -EIO; /* Error cases - e2fsck has already cleaned up for us */ if (ino > max_ino) { ext4_warning(sb, "bad orphan ino %lu! e2fsck was run?", ino); goto error; } block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb); bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb); bitmap_bh = ext4_read_inode_bitmap(sb, block_group); if (!bitmap_bh) { ext4_warning(sb, "inode bitmap error for orphan %lu", ino); goto error; } /* Having the inode bit set should be a 100% indicator that this * is a valid orphan (no e2fsck run on fs). Orphans also include * inodes that were being truncated, so we can't check i_nlink==0. */ if (!ext4_test_bit(bit, bitmap_bh->b_data)) goto bad_orphan; inode = ext4_iget(sb, ino); if (IS_ERR(inode)) goto iget_failed; /* * If the orphans has i_nlinks > 0 then it should be able to be * truncated, otherwise it won't be removed from the orphan list * during processing and an infinite loop will result. */ if (inode->i_nlink && !ext4_can_truncate(inode)) goto bad_orphan; if (NEXT_ORPHAN(inode) > max_ino) goto bad_orphan; brelse(bitmap_bh); return inode; iget_failed: err = PTR_ERR(inode); inode = NULL; bad_orphan: ext4_warning(sb, "bad orphan inode %lu! 
e2fsck was run?", ino); printk(KERN_NOTICE "ext4_test_bit(bit=%d, block=%llu) = %d\n", bit, (unsigned long long)bitmap_bh->b_blocknr, ext4_test_bit(bit, bitmap_bh->b_data)); printk(KERN_NOTICE "inode=%p\n", inode); if (inode) { printk(KERN_NOTICE "is_bad_inode(inode)=%d\n", is_bad_inode(inode)); printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%u\n", NEXT_ORPHAN(inode)); printk(KERN_NOTICE "max_ino=%lu\n", max_ino); printk(KERN_NOTICE "i_nlink=%u\n", inode->i_nlink); /* Avoid freeing blocks if we got a bad deleted inode */ if (inode->i_nlink == 0) inode->i_blocks = 0; iput(inode); } brelse(bitmap_bh); error: return ERR_PTR(err); } unsigned long ext4_count_free_inodes(struct super_block *sb) { unsigned long desc_count; struct ext4_group_desc *gdp; ext4_group_t i, ngroups = ext4_get_groups_count(sb); #ifdef EXT4FS_DEBUG struct ext4_super_block *es; unsigned long bitmap_count, x; struct buffer_head *bitmap_bh = NULL; es = EXT4_SB(sb)->s_es; desc_count = 0; bitmap_count = 0; gdp = NULL; for (i = 0; i < ngroups; i++) { gdp = ext4_get_group_desc(sb, i, NULL); if (!gdp) continue; desc_count += ext4_free_inodes_count(sb, gdp); brelse(bitmap_bh); bitmap_bh = ext4_read_inode_bitmap(sb, i); if (!bitmap_bh) continue; x = ext4_count_free(bitmap_bh, EXT4_INODES_PER_GROUP(sb) / 8); printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n", (unsigned long) i, ext4_free_inodes_count(sb, gdp), x); bitmap_count += x; } brelse(bitmap_bh); printk(KERN_DEBUG "ext4_count_free_inodes: " "stored = %u, computed = %lu, %lu\n", le32_to_cpu(es->s_free_inodes_count), desc_count, bitmap_count); return desc_count; #else desc_count = 0; for (i = 0; i < ngroups; i++) { gdp = ext4_get_group_desc(sb, i, NULL); if (!gdp) continue; desc_count += ext4_free_inodes_count(sb, gdp); cond_resched(); } return desc_count; #endif } /* Called at mount-time, super-block is locked */ unsigned long ext4_count_dirs(struct super_block * sb) { unsigned long count = 0; ext4_group_t i, ngroups = ext4_get_groups_count(sb); for 
(i = 0; i < ngroups; i++) { struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL); if (!gdp) continue; count += ext4_used_dirs_count(sb, gdp); } return count; } /* * Zeroes not yet zeroed inode table - just write zeroes through the whole * inode table. Must be called without any spinlock held. The only place * where it is called from on active part of filesystem is ext4lazyinit * thread, so we do not need any special locks, however we have to prevent * inode allocation from the current group, so we take alloc_sem lock, to * block ext4_new_inode() until we are finished. */ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group, int barrier) { struct ext4_group_info *grp = ext4_get_group_info(sb, group); struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_group_desc *gdp = NULL; struct buffer_head *group_desc_bh; handle_t *handle; ext4_fsblk_t blk; int num, ret = 0, used_blks = 0; /* This should not happen, but just to be sure check this */ if (sb->s_flags & MS_RDONLY) { ret = 1; goto out; } gdp = ext4_get_group_desc(sb, group, &group_desc_bh); if (!gdp) goto out; /* * We do not need to lock this, because we are the only one * handling this flag. */ if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)) goto out; handle = ext4_journal_start_sb(sb, 1); if (IS_ERR(handle)) { ret = PTR_ERR(handle); goto out; } down_write(&grp->alloc_sem); /* * If inode bitmap was already initialized there may be some * used inodes so we need to skip blocks with used inodes in * inode table. 
*/ /* yanzhijun for remount system */ if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT))) used_blks = (EXT4_INODES_PER_GROUP(sb) - ext4_itable_unused_count(sb, gdp))/sbi->s_inodes_per_block; if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group)) { ext4_error(sb, "Something is wrong with group %u: " "used itable blocks: %d; " "itable unused count: %u", group, used_blks, ext4_itable_unused_count(sb, gdp)); ret = 1; goto err_out; } blk = ext4_inode_table(sb, gdp) + used_blks; num = sbi->s_itb_per_group - used_blks; BUFFER_TRACE(group_desc_bh, "get_write_access"); ret = ext4_journal_get_write_access(handle, group_desc_bh); if (ret) goto err_out; /* * Skip zeroout if the inode table is full. But we set the ZEROED * flag anyway, because obviously, when it is full it does not need * further zeroing. */ if (unlikely(num == 0)) goto skip_zeroout; ext4_debug("going to zero out inode table in group %d\n", group); ret = sb_issue_zeroout(sb, blk, num, GFP_NOFS); if (ret < 0) goto err_out; if (barrier) blkdev_issue_flush(sb->s_bdev, GFP_NOFS, NULL); skip_zeroout: ext4_lock_group(sb, group); gdp->bg_flags |= cpu_to_le16(EXT4_BG_INODE_ZEROED); gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp); ext4_unlock_group(sb, group); BUFFER_TRACE(group_desc_bh, "call ext4_handle_dirty_metadata"); ret = ext4_handle_dirty_metadata(handle, NULL, group_desc_bh); err_out: up_write(&grp->alloc_sem); ext4_journal_stop(handle); out: return ret; }
gpl-2.0
leon196/Lines
src/bower_components/pixi.js/examples/example 8 - Dragging/index.html
3218
<!DOCTYPE HTML> <html> <head> <title>pixi.js example 8 Dragging</title> <style> body { margin: 0; padding: 0; background-color: #FFFFFF; } </style> <script src="../../bin/pixi.dev.js"></script> </head> <body> <script> // create an new instance of a pixi stage var stage = new PIXI.Stage(0x97C56E); // make it interactive stage.interactive = true; // create a renderer instance var renderer = PIXI.autoDetectRenderer(window.innerWidth, window.innerHeight, null); // add the renderer view element to the DOM document.body.appendChild(renderer.view); renderer.view.style.position = "absolute"; renderer.view.style.top = "0px"; renderer.view.style.left = "0px"; requestAnimFrame( animate ); // create a texture from an image path var texture = PIXI.Texture.fromImage("bunny.png"); for (var i = 0; i < 10; i++) { createBunny(Math.random() * window.innerWidth, Math.random() * window.innerHeight) }; function createBunny(x, y) { // create our little bunny friend.. var bunny = new PIXI.Sprite(texture); // enable the bunny to be interactive.. this will allow it to respond to mouse and touch events bunny.interactive = true; // this button mode will mean the hand cursor appears when you rollover the bunny with your mouse bunny.buttonMode = true; // center the bunnys anchor point bunny.anchor.x = 0.5; bunny.anchor.y = 0.5; // make it a bit bigger, so its easier to touch bunny.scale.x = bunny.scale.y = 3; // use the mousedown and touchstart bunny.mousedown = bunny.touchstart = function(data) { // stop the default event... 
data.originalEvent.preventDefault(); // store a reference to the data // The reason for this is because of multitouch // we want to track the movement of this particular touch this.data = data; this.alpha = 0.9; this.dragging = true; }; // set the events for when the mouse is released or a touch is released bunny.mouseup = bunny.mouseupoutside = bunny.touchend = bunny.touchendoutside = function(data) { this.alpha = 1 this.dragging = false; // set the interaction data to null this.data = null; }; // set the callbacks for when the mouse or a touch moves bunny.mousemove = bunny.touchmove = function(data) { if(this.dragging) { var newPosition = this.data.getLocalPosition(this.parent); this.position.x = newPosition.x; this.position.y = newPosition.y; } } // move the sprite to its designated position bunny.position.x = x; bunny.position.y = y; // add it to the stage stage.addChild(bunny); } function animate() { requestAnimFrame(animate); // render the stage renderer.render(stage); } </script> </body> </html>
gpl-2.0
codeaurora-unoffical/linux-msm
drivers/gpu/drm/i915/Makefile
5069
# SPDX-License-Identifier: GPL-2.0 # # Makefile for the drm device driver. This driver provides support for the # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. # Add a set of useful warning flags and enable -Werror for CI to prevent # trivial mistakes from creeping in. We have to do this piecemeal as we reject # any patch that isn't warning clean, so turning on -Wall -Wextra (or W=1) we # need to filter out dubious warnings. Still it is our interest # to keep running locally with W=1 C=1 until we are completely clean. # # Note the danger in using -Wall -Wextra is that when CI updates gcc we # will most likely get a sudden build breakage... Hopefully we will fix # new warnings before CI updates! subdir-ccflags-y := -Wall -Wextra subdir-ccflags-y += $(call cc-disable-warning, unused-parameter) subdir-ccflags-y += $(call cc-disable-warning, type-limits) subdir-ccflags-y += $(call cc-disable-warning, missing-field-initializers) subdir-ccflags-y += $(call cc-disable-warning, implicit-fallthrough) subdir-ccflags-y += $(call cc-disable-warning, unused-but-set-variable) # clang warnings subdir-ccflags-y += $(call cc-disable-warning, sign-compare) subdir-ccflags-y += $(call cc-disable-warning, sometimes-uninitialized) subdir-ccflags-y += $(call cc-disable-warning, initializer-overrides) subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror # Fine grained warnings disable CFLAGS_i915_pci.o = $(call cc-disable-warning, override-init) CFLAGS_intel_fbdev.o = $(call cc-disable-warning, override-init) subdir-ccflags-y += \ $(call as-instr,movntdqa (%eax)$(comma)%xmm0,-DCONFIG_AS_MOVNTDQA) # Please keep these build lists sorted! 
# core driver code i915-y := i915_drv.o \ i915_irq.o \ i915_memcpy.o \ i915_mm.o \ i915_params.o \ i915_pci.o \ i915_suspend.o \ i915_syncmap.o \ i915_sw_fence.o \ i915_sysfs.o \ intel_csr.o \ intel_device_info.o \ intel_pm.o \ intel_runtime_pm.o \ intel_workarounds.o i915-$(CONFIG_COMPAT) += i915_ioc32.o i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o intel_pipe_crc.o i915-$(CONFIG_PERF_EVENTS) += i915_pmu.o # GEM code i915-y += i915_cmd_parser.o \ i915_gem_batch_pool.o \ i915_gem_clflush.o \ i915_gem_context.o \ i915_gem_dmabuf.o \ i915_gem_evict.o \ i915_gem_execbuffer.o \ i915_gem_fence_reg.o \ i915_gem_gtt.o \ i915_gem_internal.o \ i915_gem.o \ i915_gem_object.o \ i915_gem_render_state.o \ i915_gem_shrinker.o \ i915_gem_stolen.o \ i915_gem_tiling.o \ i915_gem_userptr.o \ i915_gemfs.o \ i915_query.o \ i915_request.o \ i915_timeline.o \ i915_trace_points.o \ i915_vma.o \ intel_breadcrumbs.o \ intel_engine_cs.o \ intel_hangcheck.o \ intel_lrc.o \ intel_mocs.o \ intel_ringbuffer.o \ intel_uncore.o \ intel_wopcm.o # general-purpose microcontroller (GuC) support i915-y += intel_uc.o \ intel_uc_fw.o \ intel_guc.o \ intel_guc_ads.o \ intel_guc_ct.o \ intel_guc_fw.o \ intel_guc_log.o \ intel_guc_submission.o \ intel_huc.o \ intel_huc_fw.o # autogenerated null render state i915-y += intel_renderstate_gen6.o \ intel_renderstate_gen7.o \ intel_renderstate_gen8.o \ intel_renderstate_gen9.o # modesetting core code i915-y += intel_audio.o \ intel_atomic.o \ intel_atomic_plane.o \ intel_bios.o \ intel_cdclk.o \ intel_color.o \ intel_display.o \ intel_dpio_phy.o \ intel_dpll_mgr.o \ intel_fbc.o \ intel_fifo_underrun.o \ intel_frontbuffer.o \ intel_hdcp.o \ intel_hotplug.o \ intel_modes.o \ intel_overlay.o \ intel_psr.o \ intel_sideband.o \ intel_sprite.o i915-$(CONFIG_ACPI) += intel_acpi.o intel_opregion.o i915-$(CONFIG_DRM_FBDEV_EMULATION) += intel_fbdev.o # modesetting output/encoder code i915-y += dvo_ch7017.o \ dvo_ch7xxx.o \ dvo_ivch.o \ dvo_ns2501.o \ dvo_sil164.o \ 
dvo_tfp410.o \ icl_dsi.o \ intel_crt.o \ intel_ddi.o \ intel_dp_aux_backlight.o \ intel_dp_link_training.o \ intel_dp_mst.o \ intel_dp.o \ intel_dsi_dcs_backlight.o \ intel_dsi_vbt.o \ intel_dvo.o \ intel_hdmi.o \ intel_i2c.o \ intel_lspcon.o \ intel_lvds.o \ intel_panel.o \ intel_sdvo.o \ intel_tv.o \ vlv_dsi.o \ vlv_dsi_pll.o # Post-mortem debug and GPU hang state capture i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o i915-$(CONFIG_DRM_I915_SELFTEST) += \ selftests/i915_random.o \ selftests/i915_selftest.o \ selftests/igt_flush_test.o # virtual gpu code i915-y += i915_vgpu.o # perf code i915-y += i915_perf.o \ i915_oa_hsw.o \ i915_oa_bdw.o \ i915_oa_chv.o \ i915_oa_sklgt2.o \ i915_oa_sklgt3.o \ i915_oa_sklgt4.o \ i915_oa_bxt.o \ i915_oa_kblgt2.o \ i915_oa_kblgt3.o \ i915_oa_glk.o \ i915_oa_cflgt2.o \ i915_oa_cflgt3.o \ i915_oa_cnl.o \ i915_oa_icl.o ifeq ($(CONFIG_DRM_I915_GVT),y) i915-y += intel_gvt.o include $(src)/gvt/Makefile endif # LPE Audio for VLV and CHT i915-y += intel_lpe_audio.o obj-$(CONFIG_DRM_I915) += i915.o
gpl-2.0
ysleu/RTL8685
uClinux-dist/lib/libg/gtermcap/termcap.c
17787
/* Work-alike for termcap, plus extra features. Copyright (C) 1985, 1986, 1993, 2002 Free Software Foundation, Inc. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; see the file COPYING. If not, write to the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Emacs config.h may rename various library functions such as malloc. */ #ifdef HAVE_CONFIG_H #include "config.h" #else /* not HAVE_CONFIG_H */ #ifdef __linux__ #undef STDC_HEADERS #define STDC_HEADERS #define HAVE_UNISTD_H #define HAVE_SYS_IOCTL_H #define HAVE_TERMIOS_H #else #if defined(HAVE_STRING_H) || defined(STDC_HEADERS) #define bcopy(s, d, n) memcpy ((d), (s), (n)) #endif #endif #ifdef STDC_HEADERS #include <stdlib.h> #include <string.h> #else char *getenv (); char *malloc (); char *realloc (); #endif #ifdef HAVE_UNISTD_H #include <unistd.h> #endif #ifdef _POSIX_VERSION #include <fcntl.h> #endif #ifdef HAVE_SYS_IOCTL_H #include <stdio.h> #include <sys/ioctl.h> #endif #ifdef HAVE_TERMIOS_H #include <termios.h> #endif #endif /* not HAVE_CONFIG_H */ #ifndef NULL #define NULL (char *) 0 #endif /* BUFSIZE is the initial size allocated for the buffer for reading the termcap file. It is not a limit. Make it large normally for speed. Make it variable when debugging, so can exercise increasing the space dynamically. 
*/ #ifndef BUFSIZE #ifdef DEBUG #define BUFSIZE bufsize int bufsize = 128; #else #define BUFSIZE 2048 #endif #endif #ifdef TIOCGWINSZ #define ADJUST_WIN_EXTENT #endif #ifndef emacs static void memory_out () { write (2, "virtual memory exhausted\n", 25); exit (1); } static char * xmalloc (size) unsigned size; { register char *tem = malloc (size); if (!tem) memory_out (); return tem; } static char * xrealloc (ptr, size) char *ptr; unsigned size; { register char *tem = realloc (ptr, size); if (!tem) memory_out (); return tem; } #endif /* not emacs */ /* Looking up capabilities in the entry already found. */ /* The pointer to the data made by tgetent is left here for tgetnum, tgetflag and tgetstr to find. */ static char *term_entry; static char *tgetst1 (); /* Search entry BP for capability CAP. Return a pointer to the capability (in BP) if found, 0 if not found. */ static char * find_capability (bp, cap) register char *bp, *cap; { if (bp) for (; *bp; bp++) if (bp[0] == ':' && bp[1] == cap[0] && bp[2] == cap[1]) return &bp[4]; return NULL; } int tgetnum (cap) char *cap; { register char *ptr = find_capability (term_entry, cap); if (!ptr || ptr[-1] != '#') return -1; return atoi (ptr); } int tgetflag (cap) char *cap; { register char *ptr = find_capability (term_entry, cap); return ptr && ptr[-1] == ':'; } /* Look up a string-valued capability CAP. If AREA is non-null, it points to a pointer to a block in which to store the string. That pointer is advanced over the space used. If AREA is null, space is allocated with `malloc'. */ char * tgetstr (cap, area) char *cap; char **area; { register char *ptr = find_capability (term_entry, cap); if (!ptr || (ptr[-1] != '=' && ptr[-1] != '~')) return NULL; return tgetst1 (ptr, area); } /* Table, indexed by a character in range 0100 to 0140 with 0100 subtracted, gives meaning of character following \, or a space if no special meaning. Eight characters per line within the string. 
*/ static char esctab[] = " \007\010 \033\014 \ \012 \ \015 \011 \013 \ "; /* PTR points to a string value inside a termcap entry. Copy that value, processing \ and ^ abbreviations, into the block that *AREA points to, or to newly allocated storage if AREA is NULL. Return the address to which we copied the value, or NULL if PTR is NULL. */ static char * tgetst1 (ptr, area) char *ptr; char **area; { register char *p, *r; register int c; register int size; char *ret; register int c1; if (!ptr) return NULL; /* `ret' gets address of where to store the string. */ if (!area) { /* Compute size of block needed (may overestimate). */ p = ptr; while ((c = *p++) && c != ':' && c != '\n') ; ret = (char *) xmalloc (p - ptr + 1); } else ret = *area; /* Copy the string value, stopping at null or colon. Also process ^ and \ abbreviations. */ p = ptr; r = ret; while ((c = *p++) && c != ':' && c != '\n') { if (c == '^') c = *p++ & 037; else if (c == '\\') { c = *p++; if (c >= '0' && c <= '7') { c -= '0'; size = 0; while (++size < 3 && (c1 = *p) >= '0' && c1 <= '7') { c *= 8; c += c1 - '0'; p++; } } else if (c >= 0100 && c < 0200) { c1 = esctab[(c & ~040) - 0100]; if (c1 != ' ') c = c1; } } *r++ = c; } *r = '\0'; /* Update *AREA. */ if (area) *area = r + 1; return ret; } /* Outputting a string with padding. */ #ifdef __linux__ #include <termios.h> speed_t ospeed; #else short ospeed; #endif /* If OSPEED is 0, we use this as the actual baud rate. */ int tputs_baud_rate; char PC; /* Actual baud rate if positive; - baud rate / 100 if negative. 
*/ static short speeds[] = { #ifdef VMS 0, 50, 75, 110, 134, 150, -3, -6, -12, -18, -20, -24, -36, -48, -72, -96, -192 #else /* not VMS */ 0, 50, 75, 110, 135, 150, -2, -3, -6, -12, -18, -24, -48, -96, -192, -384 #endif /* not VMS */ }; void tputs (str, nlines, outfun) register char *str; int nlines; register int (*outfun) (); { register int padcount = 0; register int speed; #ifdef emacs extern baud_rate; speed = baud_rate; #else if (ospeed == 0) speed = tputs_baud_rate; else speed = speeds[ospeed]; #endif if (!str) return; while (*str >= '0' && *str <= '9') { padcount += *str++ - '0'; padcount *= 10; } if (*str == '.') { str++; padcount += *str++ - '0'; } if (*str == '*') { str++; padcount *= nlines; } while (*str) (*outfun) (*str++); /* padcount is now in units of tenths of msec. */ padcount *= speeds[ospeed]; padcount += 500; padcount /= 1000; if (speeds[ospeed] < 0) padcount = -padcount; else { padcount += 50; padcount /= 100; } while (padcount-- > 0) (*outfun) (PC); } /* Finding the termcap entry in the termcap data base. */ struct buffer { char *beg; int size; char *ptr; int ateof; int full; }; /* Forward declarations of static functions. */ static int scan_file (); static char *gobble_line (); static int compare_contin (); static int name_match (); #ifdef ADJUST_WIN_EXTENT #ifdef TIOCGWINSZ static int get_win_extent(li, co) int *li, *co; { struct winsize ws; /* Some TIOCGWINSZ may be broken. Make sure ws.ws_row and * ws.ws_col are not zero. 
*/ if (ioctl(0, TIOCGWINSZ, &ws) != 0 || !ws.ws_row || !ws.ws_col) return -1; *li = ws.ws_row; *co = ws.ws_col; return 0; } #endif /* TIOCGWINSZ */ static int adjust_win_extent(bpp, howalloc, li, co) char **bpp; int howalloc; /* 0 must do in place, 1 must use malloc, 2 must use realloc */ int li, co; { int licolen, o_len, t, colon; char *licobuf, *s; if (li < 0 || co < 0) return 0; for (s = *bpp, colon = -1; *s; ++s) if (*s == ':' && colon < 0) colon = s - *bpp; o_len = s - *bpp; licolen = 11; for (t = li; (t /= 10) > 0; ++licolen); for (t = co; (t /= 10) > 0; ++licolen); licobuf = xmalloc(licolen + 1); sprintf(licobuf, ":li#%d:co#%d:", li, co); if (howalloc == 0) { bcopy(*bpp + colon, *bpp + colon + licolen, o_len - colon + 1); bcopy(licobuf, *bpp + colon, licolen); } else if (howalloc == 1) { char *newbp; newbp = xmalloc(o_len + licolen + 1); bcopy(*bpp, newbp, colon); bcopy(licobuf, newbp + colon, licolen); strcpy(newbp + colon + licolen, *bpp + colon); *bpp = newbp; } else /* (howalloc == 2) */ { char *newbp; newbp = xrealloc(*bpp, o_len + licolen + 1); bcopy(newbp + colon, newbp + colon + licolen, o_len - colon + 1); bcopy(licobuf, newbp + colon, licolen); *bpp = newbp; } free(licobuf); return 1; } #endif /* ADJUST_WIN_EXTENT */ #ifdef VMS #include <rmsdef.h> #include <fab.h> #include <nam.h> static int valid_filename_p (fn) char *fn; { struct FAB fab = cc$rms_fab; struct NAM nam = cc$rms_nam; char esa[NAM$C_MAXRSS]; fab.fab$l_fna = fn; fab.fab$b_fns = strlen(fn); fab.fab$l_nam = &nam; fab.fab$l_fop = FAB$M_NAM; nam.nam$l_esa = esa; nam.nam$b_ess = sizeof esa; return SYS$PARSE(&fab, 0, 0) == RMS$_NORMAL; } #else /* !VMS */ #define valid_filename_p(fn) (*(fn) == '/') #endif /* !VMS */ /* Find the termcap entry data for terminal type NAME and store it in the block that BP points to. Record its address for future use. If BP is null, space is dynamically allocated. 
Return -1 if there is some difficulty accessing the data base of terminal types, 0 if the data base is accessible but the type NAME is not defined in it, and some other value otherwise. */ int tgetent (bp, name) char *bp, *name; { register char *termcap_name; register int fd; struct buffer buf; register char *bp1; char *bp2; char *term; int malloc_size = 0; register int c; char *tcenv; /* TERMCAP value, if it contains :tc=. */ char *indirect = NULL; /* Terminal type in :tc= in TERMCAP value. */ int filep; #ifdef ADJUST_WIN_EXTENT int li, co; /* #lines and columns on this tty */ if (get_win_extent(&li, &co) != 0) li = co = -1; #endif /* ADJUST_WIN_EXTENT */ termcap_name = getenv ("TERMCAP"); if (termcap_name && *termcap_name == '\0') termcap_name = NULL; filep = termcap_name && valid_filename_p (termcap_name); /* If termcap_name is non-null and starts with / (in the un*x case, that is), it is a file name to use instead of /etc/termcap. If it is non-null and does not start with /, it is the entry itself, but only if the name the caller requested matches the TERM variable. */ if (termcap_name && !filep && !strcmp (name, getenv ("TERM"))) { indirect = tgetst1 (find_capability (termcap_name, "tc"), (char **) 0); if (!indirect) { if (!bp) { bp = termcap_name; #ifdef ADJUST_WIN_EXTENT if (adjust_win_extent(&bp, 1, li, co)) malloc_size = 1; /* force return of bp */ #endif /* ADJUST_WIN_EXTENT */ } else { strcpy (bp, termcap_name); #ifdef ADJUST_WIN_EXTENT adjust_win_extent(&bp, 0, li, co); #endif /* ADJUST_WIN_EXTENT */ } goto ret; } else { /* It has tc=. Need to read /etc/termcap. */ tcenv = termcap_name; termcap_name = NULL; } } if (!termcap_name || !filep) #ifdef VMS termcap_name = "emacs_library:[etc]termcap.dat"; #else termcap_name = "/etc/termcap"; #endif /* Here we know we must search a file and termcap_name has its name. */ fd = open (termcap_name, 0, 0); if (fd < 0) return -1; buf.size = BUFSIZE; /* Add 1 to size to ensure room for terminating null. 
*/ buf.beg = (char *) xmalloc (buf.size + 1); term = indirect ? indirect : name; if (!bp) { malloc_size = indirect ? strlen (tcenv) + 1 : buf.size; bp = (char *) xmalloc (malloc_size); } bp1 = bp; if (indirect) /* Copy the data from the environment variable. */ { strcpy (bp, tcenv); bp1 += strlen (tcenv); } while (term) { /* Scan the file, reading it via buf, till find start of main entry. */ if (scan_file (term, fd, &buf) == 0) { close (fd); free (buf.beg); if (malloc_size) free (bp); return 0; } /* Free old `term' if appropriate. */ if (term != name) free (term); /* If BP is malloc'd by us, make sure it is big enough. */ if (malloc_size) { malloc_size = bp1 - bp + buf.size; termcap_name = (char *) xrealloc (bp, malloc_size); bp1 += termcap_name - bp; bp = termcap_name; } bp2 = bp1; /* Copy the line of the entry from buf into bp. */ termcap_name = buf.ptr; while ((*bp1++ = c = *termcap_name++) && c != '\n') /* Drop out any \ newline sequence. */ if (c == '\\' && *termcap_name == '\n') { bp1--; termcap_name++; } *bp1 = '\0'; /* Does this entry refer to another terminal type's entry? If something is found, copy it into heap and null-terminate it. */ term = tgetst1 (find_capability (bp2, "tc"), (char **) 0); } close (fd); free (buf.beg); if (malloc_size) bp = (char *) xrealloc (bp, bp1 - bp + 1); #ifdef ADJUST_WIN_EXTENT adjust_win_extent(&bp, malloc_size ? 2 : 0, li, co); #endif /* ADJUST_WIN_EXTENT */ ret: term_entry = bp; if (malloc_size) return (int) bp; return 1; } /* Given file open on FD and buffer BUFP, scan the file from the beginning until a line is found that starts the entry for terminal type STR. Return 1 if successful, with that line in BUFP, or 0 if no entry is found in the file. */ static int scan_file (str, fd, bufp) char *str; int fd; register struct buffer *bufp; { register char *end; bufp->ptr = bufp->beg; bufp->full = 0; bufp->ateof = 0; *bufp->ptr = '\0'; lseek (fd, 0L, 0); while (!bufp->ateof) { /* Read a line into the buffer. 
*/ end = NULL; do { /* if it is continued, append another line to it, until a non-continued line ends. */ end = gobble_line (fd, bufp, end); } while (!bufp->ateof && end[-2] == '\\'); if (*bufp->ptr != '#' && name_match (bufp->ptr, str)) return 1; /* Discard the line just processed. */ bufp->ptr = end; } return 0; } /* Return nonzero if NAME is one of the names specified by termcap entry LINE. */ static int name_match (line, name) char *line, *name; { register char *tem; if (!compare_contin (line, name)) return 1; /* This line starts an entry. Is it the right one? */ for (tem = line; *tem && *tem != '\n' && *tem != ':'; tem++) if (*tem == '|' && !compare_contin (tem + 1, name)) return 1; return 0; } static int compare_contin (str1, str2) register char *str1, *str2; { register int c1, c2; while (1) { c1 = *str1++; c2 = *str2++; while (c1 == '\\' && *str1 == '\n') { str1++; while ((c1 = *str1++) == ' ' || c1 == '\t'); } if (c2 == '\0') { /* End of type being looked up. */ if (c1 == '|' || c1 == ':') /* If end of name in data base, we win. */ return 0; else return 1; } else if (c1 != c2) return 1; } } /* Make sure that the buffer <- BUFP contains a full line of the file open on FD, starting at the place BUFP->ptr points to. Can read more of the file, discard stuff before BUFP->ptr, or make the buffer bigger. Return the pointer to after the newline ending the line, or to the end of the file, if there is no newline to end it. Can also merge on continuation lines. If APPEND_END is non-null, it points past the newline of a line that is continued; we add another line onto it and regard the whole thing as one line. The caller decides when a line is continued. 
*/ static char * gobble_line (fd, bufp, append_end) int fd; register struct buffer *bufp; char *append_end; { register char *end; register int nread; register char *buf = bufp->beg; register char *tem; if (!append_end) append_end = bufp->ptr; while (1) { end = append_end; while (*end && *end != '\n') end++; if (*end) break; if (bufp->ateof) return buf + bufp->full; if (bufp->ptr == buf) { if (bufp->full == bufp->size) { bufp->size *= 2; /* Add 1 to size to ensure room for terminating null. */ tem = (char *) xrealloc (buf, bufp->size + 1); bufp->ptr = (bufp->ptr - buf) + tem; append_end = (append_end - buf) + tem; bufp->beg = buf = tem; } } else { append_end -= bufp->ptr - buf; bcopy (bufp->ptr, buf, bufp->full -= bufp->ptr - buf); bufp->ptr = buf; } if (!(nread = read (fd, buf + bufp->full, bufp->size - bufp->full))) bufp->ateof = 1; bufp->full += nread; buf[bufp->full] = '\0'; } return end + 1; } #ifdef TEST #ifdef NULL #undef NULL #endif #include <stdio.h> main (argc, argv) int argc; char **argv; { char *term; char *buf; term = argv[1]; printf ("TERM: %s\n", term); buf = (char *) tgetent (0, term); if ((int) buf <= 0) { printf ("No entry.\n"); return 0; } printf ("Entry: %s\n", buf); tprint ("cm"); tprint ("AL"); printf ("co: %d\n", tgetnum ("co")); printf ("am: %d\n", tgetflag ("am")); } tprint (cap) char *cap; { char *x = tgetstr (cap, 0); register char *y; printf ("%s: ", cap); if (x) { for (y = x; *y; y++) if (*y <= ' ' || *y == 0177) printf ("\\%0o", *y); else putchar (*y); free (x); } else printf ("none"); putchar ('\n'); } #endif /* TEST */
gpl-2.0
totalspectrum/binutils-propeller
gold/testsuite/common_test_3.c
1137
/* common_test_3.c -- test common symbol name conflicts Copyright (C) 2009-2017 Free Software Foundation, Inc. Written by Ian Lance Taylor <[email protected]> This file is part of gold. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */ /* Define a function with a default version whose name is the same as a common symbol. This file will wind up in a shared library. */ void c1_v1 (void); void c1_v1 (void) { } __asm__ (".symver c1_v1,c1@@VER1");
gpl-2.0
foxsat-hdr/linux-kernel
drivers/s390/net/qeth_proc.c
13354
/* * * linux/drivers/s390/net/qeth_fs.c ($Revision: 1.13 $) * * Linux on zSeries OSA Express and HiperSockets support * This file contains code related to procfs. * * Copyright 2000,2003 IBM Corporation * * Author(s): Thomas Spatzier <[email protected]> * */ #include <linux/module.h> #include <linux/init.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/list.h> #include <linux/rwsem.h> #include "qeth.h" #include "qeth_mpc.h" #include "qeth_fs.h" const char *VERSION_QETH_PROC_C = "$Revision: 1.13 $"; /***** /proc/qeth *****/ #define QETH_PROCFILE_NAME "qeth" static struct proc_dir_entry *qeth_procfile; static void * qeth_procfile_seq_start(struct seq_file *s, loff_t *offset) { struct list_head *next_card = NULL; int i = 0; down_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem); if (*offset == 0) return SEQ_START_TOKEN; /* get card at pos *offset */ list_for_each(next_card, &qeth_ccwgroup_driver.driver.devices) if (++i == *offset) return next_card; return NULL; } static void qeth_procfile_seq_stop(struct seq_file *s, void* it) { up_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem); } static void * qeth_procfile_seq_next(struct seq_file *s, void *it, loff_t *offset) { struct list_head *next_card = NULL; struct list_head *current_card; if (it == SEQ_START_TOKEN) { next_card = qeth_ccwgroup_driver.driver.devices.next; if (next_card->next == next_card) /* list empty */ return NULL; (*offset)++; } else { current_card = (struct list_head *)it; if (current_card->next == &qeth_ccwgroup_driver.driver.devices) return NULL; /* end of list reached */ next_card = current_card->next; (*offset)++; } return next_card; } static inline const char * qeth_get_router_str(struct qeth_card *card, int ipv) { int routing_type = 0; if (ipv == 4){ routing_type = card->options.route4.type; } else { #ifdef CONFIG_QETH_IPV6 routing_type = card->options.route6.type; #else return "n/a"; #endif /* CONFIG_QETH_IPV6 */ } if (routing_type == PRIMARY_ROUTER) return "pri"; 
else if (routing_type == SECONDARY_ROUTER) return "sec"; else if (routing_type == MULTICAST_ROUTER) { if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO) return "mc+"; return "mc"; } else if (routing_type == PRIMARY_CONNECTOR) { if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO) return "p+c"; return "p.c"; } else if (routing_type == SECONDARY_CONNECTOR) { if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO) return "s+c"; return "s.c"; } else if (routing_type == NO_ROUTER) return "no"; else return "unk"; } static int qeth_procfile_seq_show(struct seq_file *s, void *it) { struct device *device; struct qeth_card *card; char tmp[12]; /* for qeth_get_prioq_str */ if (it == SEQ_START_TOKEN){ seq_printf(s, "devices CHPID interface " "cardtype port chksum prio-q'ing rtr4 " "rtr6 fsz cnt\n"); seq_printf(s, "-------------------------- ----- ---------- " "-------------- ---- ------ ---------- ---- " "---- ----- -----\n"); } else { device = list_entry(it, struct device, driver_list); card = device->driver_data; seq_printf(s, "%s/%s/%s x%02X %-10s %-14s %-4i ", CARD_RDEV_ID(card), CARD_WDEV_ID(card), CARD_DDEV_ID(card), card->info.chpid, QETH_CARD_IFNAME(card), qeth_get_cardname_short(card), card->info.portno); if (card->lan_online) seq_printf(s, "%-6s %-10s %-4s %-4s %-5s %-5i\n", qeth_get_checksum_str(card), qeth_get_prioq_str(card, tmp), qeth_get_router_str(card, 4), qeth_get_router_str(card, 6), qeth_get_bufsize_str(card), card->qdio.in_buf_pool.buf_count); else seq_printf(s, " +++ LAN OFFLINE +++\n"); } return 0; } static struct seq_operations qeth_procfile_seq_ops = { .start = qeth_procfile_seq_start, .stop = qeth_procfile_seq_stop, .next = qeth_procfile_seq_next, .show = qeth_procfile_seq_show, }; static int qeth_procfile_open(struct inode *inode, struct file *file) { return seq_open(file, &qeth_procfile_seq_ops); } static struct file_operations qeth_procfile_fops = { .owner = THIS_MODULE, .open = qeth_procfile_open, .read = 
seq_read, .llseek = seq_lseek, .release = seq_release, }; /***** /proc/qeth_perf *****/ #define QETH_PERF_PROCFILE_NAME "qeth_perf" static struct proc_dir_entry *qeth_perf_procfile; #ifdef CONFIG_QETH_PERF_STATS static void * qeth_perf_procfile_seq_start(struct seq_file *s, loff_t *offset) { struct list_head *next_card = NULL; int i = 0; down_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem); /* get card at pos *offset */ list_for_each(next_card, &qeth_ccwgroup_driver.driver.devices){ if (i == *offset) return next_card; i++; } return NULL; } static void qeth_perf_procfile_seq_stop(struct seq_file *s, void* it) { up_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem); } static void * qeth_perf_procfile_seq_next(struct seq_file *s, void *it, loff_t *offset) { struct list_head *current_card = (struct list_head *)it; if (current_card->next == &qeth_ccwgroup_driver.driver.devices) return NULL; /* end of list reached */ (*offset)++; return current_card->next; } static int qeth_perf_procfile_seq_show(struct seq_file *s, void *it) { struct device *device; struct qeth_card *card; device = list_entry(it, struct device, driver_list); card = device->driver_data; seq_printf(s, "For card with devnos %s/%s/%s (%s):\n", CARD_RDEV_ID(card), CARD_WDEV_ID(card), CARD_DDEV_ID(card), QETH_CARD_IFNAME(card) ); seq_printf(s, " Skb's/buffers received : %li/%i\n" " Skb's/buffers sent : %li/%i\n\n", card->stats.rx_packets, card->perf_stats.bufs_rec, card->stats.tx_packets, card->perf_stats.bufs_sent ); seq_printf(s, " Skb's/buffers sent without packing : %li/%i\n" " Skb's/buffers sent with packing : %i/%i\n\n", card->stats.tx_packets - card->perf_stats.skbs_sent_pack, card->perf_stats.bufs_sent - card->perf_stats.bufs_sent_pack, card->perf_stats.skbs_sent_pack, card->perf_stats.bufs_sent_pack ); seq_printf(s, " Skbs sent in SG mode : %i\n" " Skb fragments sent in SG mode : %i\n\n", card->perf_stats.sg_skbs_sent, card->perf_stats.sg_frags_sent); seq_printf(s, " large_send tx (in Kbytes) 
: %i\n" " large_send count : %i\n\n", card->perf_stats.large_send_bytes >> 10, card->perf_stats.large_send_cnt); seq_printf(s, " Packing state changes no pkg.->packing : %i/%i\n" " Watermarks L/H : %i/%i\n" " Current buffer usage (outbound q's) : " "%i/%i/%i/%i\n\n", card->perf_stats.sc_dp_p, card->perf_stats.sc_p_dp, QETH_LOW_WATERMARK_PACK, QETH_HIGH_WATERMARK_PACK, atomic_read(&card->qdio.out_qs[0]->used_buffers), (card->qdio.no_out_queues > 1)? atomic_read(&card->qdio.out_qs[1]->used_buffers) : 0, (card->qdio.no_out_queues > 2)? atomic_read(&card->qdio.out_qs[2]->used_buffers) : 0, (card->qdio.no_out_queues > 3)? atomic_read(&card->qdio.out_qs[3]->used_buffers) : 0 ); seq_printf(s, " Inbound handler time (in us) : %i\n" " Inbound handler count : %i\n" " Inbound do_QDIO time (in us) : %i\n" " Inbound do_QDIO count : %i\n\n" " Outbound handler time (in us) : %i\n" " Outbound handler count : %i\n\n" " Outbound time (in us, incl QDIO) : %i\n" " Outbound count : %i\n" " Outbound do_QDIO time (in us) : %i\n" " Outbound do_QDIO count : %i\n\n", card->perf_stats.inbound_time, card->perf_stats.inbound_cnt, card->perf_stats.inbound_do_qdio_time, card->perf_stats.inbound_do_qdio_cnt, card->perf_stats.outbound_handler_time, card->perf_stats.outbound_handler_cnt, card->perf_stats.outbound_time, card->perf_stats.outbound_cnt, card->perf_stats.outbound_do_qdio_time, card->perf_stats.outbound_do_qdio_cnt ); return 0; } static struct seq_operations qeth_perf_procfile_seq_ops = { .start = qeth_perf_procfile_seq_start, .stop = qeth_perf_procfile_seq_stop, .next = qeth_perf_procfile_seq_next, .show = qeth_perf_procfile_seq_show, }; static int qeth_perf_procfile_open(struct inode *inode, struct file *file) { return seq_open(file, &qeth_perf_procfile_seq_ops); } static struct file_operations qeth_perf_procfile_fops = { .owner = THIS_MODULE, .open = qeth_perf_procfile_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; #define qeth_perf_procfile_created 
qeth_perf_procfile #else #define qeth_perf_procfile_created 1 #endif /* CONFIG_QETH_PERF_STATS */ /***** /proc/qeth_ipa_takeover *****/ #define QETH_IPATO_PROCFILE_NAME "qeth_ipa_takeover" static struct proc_dir_entry *qeth_ipato_procfile; static void * qeth_ipato_procfile_seq_start(struct seq_file *s, loff_t *offset) { struct list_head *next_card = NULL; int i = 0; down_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem); /* TODO: finish this */ /* * maybe SEQ_SATRT_TOKEN can be returned for offset 0 * output driver settings then; * else output setting for respective card */ /* get card at pos *offset */ list_for_each(next_card, &qeth_ccwgroup_driver.driver.devices){ if (i == *offset) return next_card; i++; } return NULL; } static void qeth_ipato_procfile_seq_stop(struct seq_file *s, void* it) { up_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem); } static void * qeth_ipato_procfile_seq_next(struct seq_file *s, void *it, loff_t *offset) { struct list_head *current_card = (struct list_head *)it; /* TODO: finish this */ /* * maybe SEQ_SATRT_TOKEN can be returned for offset 0 * output driver settings then; * else output setting for respective card */ if (current_card->next == &qeth_ccwgroup_driver.driver.devices) return NULL; /* end of list reached */ (*offset)++; return current_card->next; } static int qeth_ipato_procfile_seq_show(struct seq_file *s, void *it) { struct device *device; struct qeth_card *card; /* TODO: finish this */ /* * maybe SEQ_SATRT_TOKEN can be returned for offset 0 * output driver settings then; * else output setting for respective card */ device = list_entry(it, struct device, driver_list); card = device->driver_data; return 0; } static struct seq_operations qeth_ipato_procfile_seq_ops = { .start = qeth_ipato_procfile_seq_start, .stop = qeth_ipato_procfile_seq_stop, .next = qeth_ipato_procfile_seq_next, .show = qeth_ipato_procfile_seq_show, }; static int qeth_ipato_procfile_open(struct inode *inode, struct file *file) { return 
seq_open(file, &qeth_ipato_procfile_seq_ops); } static struct file_operations qeth_ipato_procfile_fops = { .owner = THIS_MODULE, .open = qeth_ipato_procfile_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; int __init qeth_create_procfs_entries(void) { qeth_procfile = create_proc_entry(QETH_PROCFILE_NAME, S_IFREG | 0444, NULL); if (qeth_procfile) qeth_procfile->proc_fops = &qeth_procfile_fops; #ifdef CONFIG_QETH_PERF_STATS qeth_perf_procfile = create_proc_entry(QETH_PERF_PROCFILE_NAME, S_IFREG | 0444, NULL); if (qeth_perf_procfile) qeth_perf_procfile->proc_fops = &qeth_perf_procfile_fops; #endif /* CONFIG_QETH_PERF_STATS */ qeth_ipato_procfile = create_proc_entry(QETH_IPATO_PROCFILE_NAME, S_IFREG | 0444, NULL); if (qeth_ipato_procfile) qeth_ipato_procfile->proc_fops = &qeth_ipato_procfile_fops; if (qeth_procfile && qeth_ipato_procfile && qeth_perf_procfile_created) return 0; else return -ENOMEM; } void __exit qeth_remove_procfs_entries(void) { if (qeth_procfile) remove_proc_entry(QETH_PROCFILE_NAME, NULL); if (qeth_perf_procfile) remove_proc_entry(QETH_PERF_PROCFILE_NAME, NULL); if (qeth_ipato_procfile) remove_proc_entry(QETH_IPATO_PROCFILE_NAME, NULL); } /* ONLY FOR DEVELOPMENT! 
-> make it as module */ /* static void qeth_create_sysfs_entries(void) { struct device *dev; down_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem); list_for_each_entry(dev, &qeth_ccwgroup_driver.driver.devices, driver_list) qeth_create_device_attributes(dev); up_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem); } static void qeth_remove_sysfs_entries(void) { struct device *dev; down_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem); list_for_each_entry(dev, &qeth_ccwgroup_driver.driver.devices, driver_list) qeth_remove_device_attributes(dev); up_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem); } static int __init qeth_fs_init(void) { printk(KERN_INFO "qeth_fs_init\n"); qeth_create_procfs_entries(); qeth_create_sysfs_entries(); return 0; } static void __exit qeth_fs_exit(void) { printk(KERN_INFO "qeth_fs_exit\n"); qeth_remove_procfs_entries(); qeth_remove_sysfs_entries(); } module_init(qeth_fs_init); module_exit(qeth_fs_exit); MODULE_LICENSE("GPL"); */
gpl-2.0