#include "../lib/memory.h" Chip8Emu_Memory::Chip8Emu_Memory() { memory.reset(); } Chip8Emu_Memory::~Chip8Emu_Memory() { memory.reset(); } void Chip8Emu_Memory::setBit(uint16_t addr) { memory.set(addr, true); } void Chip8Emu_Memory::setBit(uint16_t addr, bool value) { memory.set(addr, value); } void Chip8Emu_Memory::clearBit(uint16_t addr) { setBit(addr, false); } bool Chip8Emu_Memory::getBit(uint16_t addr) { return (bool) memory[addr]; } void Chip8Emu_Memory::setBits(uint16_t addr1, uint16_t addr2) { setBits(addr1, addr2, true); } void Chip8Emu_Memory::setBits(uint16_t addr1, uint16_t addr2, bool value) { for (int i = addr1, i <= addr2; i++) { setBit(i, value); } } void Chip8Emu_Memory::clearBits(uint16_t addr1, uint16_t addr2) { setBits(addr1, addr2, false); } void Chip8Emu_Memory::setAllBits() { memory.set(); } void Chip8Emu_Memory::setAllBits(bool value) { setBits(0x00, CHIP_8_MEM_SIZE -1, value); } void Chip8Emu_Memory::clearAllBits() { memory.reset(); } // Chip8Emu_Memory::getBits(uint16_t addr1, uint16_t addr2) { // }
// Copyright (c) 2019, the R8 project authors. Please see the AUTHORS file // for details. All rights reserved. Use of this source code is governed by a // BSD-style license that can be found in the LICENSE file. package com.android.tools.r8.desugar.desugaredlibrary.desugaredlibraryjdktests; import static com.android.tools.r8.ToolHelper.JDK_TESTS_BUILD_DIR; import static com.android.tools.r8.utils.FileUtils.CLASS_EXTENSION; import static com.android.tools.r8.utils.FileUtils.JAVA_EXTENSION; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import com.android.tools.r8.D8TestCompileResult; import com.android.tools.r8.D8TestRunResult; import com.android.tools.r8.TestParameters; import com.android.tools.r8.TestRuntime; import com.android.tools.r8.ToolHelper; import com.android.tools.r8.ToolHelper.DexVm.Version; import com.android.tools.r8.utils.AndroidApiLevel; import com.android.tools.r8.utils.BooleanUtils; import com.android.tools.r8.utils.StringUtils; import java.io.File; import java.nio.file.Path; import java.nio.file.Paths; import java.util.Arrays; import java.util.Collections; import java.util.IdentityHashMap; import java.util.List; import java.util.Map; import java.util.stream.Collectors; import org.junit.Assume; import org.junit.BeforeClass; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; import org.junit.runners.Parameterized.Parameters; @RunWith(Parameterized.class) public class Jdk11StreamTests extends Jdk11CoreLibTestBase { private final TestParameters parameters; private final boolean shrinkDesugaredLibrary; @Parameters(name = "{1}, shrinkDesugaredLibrary: {0}") public static List<Object[]> data() { // TODO(134732760): Support Dalvik VMs, currently fails because libjavacrypto is required and // present only in ART runtimes. return buildParameters( BooleanUtils.values(), getTestParameters() .withDexRuntimesStartingFromIncluding(Version.V5_1_1) .withAllApiLevels() .build()); } public Jdk11StreamTests(boolean shrinkDesugaredLibrary, TestParameters parameters) { this.shrinkDesugaredLibrary = shrinkDesugaredLibrary; this.parameters = parameters; } private static final Path JDK_11_STREAM_TEST_CLASSES_DIR = Paths.get(ToolHelper.JDK_11_TESTS_CLASSES_DIR + "Stream"); private static final Path JDK_11_STREAM_TEST_FILES_DIR = Paths.get("third_party/openjdk/jdk-11-test/java/util/stream/test"); private static Path[] JDK_11_STREAM_TEST_COMPILED_FILES; private static Path[] getJdk11StreamTestFiles() throws Exception { Path[] files = getAllFilesWithSuffixInDirectory(JDK_11_STREAM_TEST_FILES_DIR, JAVA_EXTENSION); assert files.length > 0; return files; } private static String[] FAILING_RUNNABLE_TESTS = new String[] { // J9 failure. "org/openjdk/tests/java/util/stream/SpliteratorTest.java", "org/openjdk/tests/java/util/stream/WhileOpStatefulTest.java", "org/openjdk/tests/java/util/stream/IterateTest.java", "org/openjdk/tests/java/util/stream/WhileOpTest.java", // forEach failure "org/openjdk/tests/java/util/stream/FindFirstOpTest.java", "org/openjdk/tests/java/util/stream/MapOpTest.java", // Disabled because explicit cast done on a wrapped value. 
// "org/openjdk/tests/java/util/SplittableRandomTest.java", // Assertion error "org/openjdk/tests/java/util/stream/StreamCloseTest.java", "org/openjdk/tests/java/util/stream/CollectAndSummaryStatisticsTest.java", "org/openjdk/tests/java/util/stream/CountTest.java", // J9 Random problem "org/openjdk/tests/java/util/stream/LongPrimitiveOpsTests.java", "org/openjdk/tests/java/util/stream/IntPrimitiveOpsTests.java", "org/openjdk/tests/java/util/stream/DistinctOpTest.java", "org/openjdk/tests/java/util/stream/DoublePrimitiveOpsTests.java" }; // Disabled because time to run > 1 min for each test. // Can be used for experimentation/testing purposes. private static String[] LONG_RUNNING_TESTS = new String[] { "org/openjdk/tests/java/util/stream/InfiniteStreamWithLimitOpTest.java", "org/openjdk/tests/java/util/stream/CountLargeTest.java", "org/openjdk/tests/java/util/stream/RangeTest.java", "org/openjdk/tests/java/util/stream/CollectorsTest.java", "org/openjdk/tests/java/util/stream/FlatMapOpTest.java", "org/openjdk/tests/java/util/stream/StreamSpliteratorTest.java", "org/openjdk/tests/java/util/stream/StreamLinkTest.java", "org/openjdk/tests/java/util/stream/StreamBuilderTest.java", "org/openjdk/tests/java/util/stream/SliceOpTest.java", "org/openjdk/tests/java/util/stream/ToArrayOpTest.java" }; private static String[] SUCCESSFUL_RUNNABLE_TESTS = new String[] { "org/openjdk/tests/java/util/MapTest.java", "org/openjdk/tests/java/util/FillableStringTest.java", "org/openjdk/tests/java/util/stream/ForEachOpTest.java", "org/openjdk/tests/java/util/stream/CollectionAndMapModifyStreamTest.java", "org/openjdk/tests/java/util/stream/GroupByOpTest.java", "org/openjdk/tests/java/util/stream/PrimitiveAverageOpTest.java", "org/openjdk/tests/java/util/stream/TeeOpTest.java", "org/openjdk/tests/java/util/stream/MinMaxTest.java", "org/openjdk/tests/java/util/stream/ConcatTest.java", "org/openjdk/tests/java/util/stream/StreamParSeqTest.java", "org/openjdk/tests/java/util/stream/ReduceByOpTest.java", "org/openjdk/tests/java/util/stream/ConcatOpTest.java", "org/openjdk/tests/java/util/stream/IntReduceTest.java", "org/openjdk/tests/java/util/stream/SortedOpTest.java", "org/openjdk/tests/java/util/stream/MatchOpTest.java", "org/openjdk/tests/java/util/stream/IntSliceOpTest.java", "org/openjdk/tests/java/util/stream/SequentialOpTest.java", "org/openjdk/tests/java/util/stream/PrimitiveSumTest.java", "org/openjdk/tests/java/util/stream/ReduceTest.java", "org/openjdk/tests/java/util/stream/IntUniqOpTest.java", "org/openjdk/tests/java/util/stream/FindAnyOpTest.java" }; private static Map<String, String> getRunnableTests(String[] tests) { IdentityHashMap<String, String> pathToName = new IdentityHashMap<>(); int javaExtSize = JAVA_EXTENSION.length(); for (String runnableTest : tests) { String nameWithoutJavaExt = runnableTest.substring(0, runnableTest.length() - javaExtSize); pathToName.put( JDK_11_STREAM_TEST_CLASSES_DIR + "/" + nameWithoutJavaExt + CLASS_EXTENSION, nameWithoutJavaExt.replace("/", ".")); } return pathToName; } private static String[] missingDesugaredMethods() { // These methods are from Java 9 and not supported in the current desugared libraries. 
return new String[]{ // Stream "takeWhile(", "dropWhile(", "iterate(", "ofNullable(", "range(", "doubles(", // Collectors "filtering(", "flatMapping(", }; } @BeforeClass public static void compileJdk11StreamTests() throws Exception { File streamClassesDir = new File(JDK_11_STREAM_TEST_CLASSES_DIR.toString()); assert streamClassesDir.exists() || streamClassesDir.mkdirs(); List<String> options = Arrays.asList( "--add-reads", "java.base=ALL-UNNAMED", "--patch-module", "java.base=" + JDK_11_JAVA_BASE_EXTENSION_CLASSES_DIR); javac(TestRuntime.getCheckedInJdk11(), getStaticTemp()) .addOptions(options) .addClasspathFiles( Collections.singletonList(Paths.get(JDK_TESTS_BUILD_DIR + "testng-6.10.jar"))) .addSourceFiles(getJdk11StreamTestFiles()) .setOutputPath(JDK_11_STREAM_TEST_CLASSES_DIR) .compile(); JDK_11_STREAM_TEST_COMPILED_FILES = getAllFilesWithSuffixInDirectory(JDK_11_STREAM_TEST_CLASSES_DIR, CLASS_EXTENSION); assert JDK_11_STREAM_TEST_COMPILED_FILES.length > 0; } @Test public void testStream() throws Exception { Assume.assumeFalse( "getAllFilesWithSuffixInDirectory() seems to find different files on Windows", ToolHelper.isWindows()); Assume.assumeTrue( "Requires Java base extensions, should add it when not desugaring", parameters.getApiLevel().getLevel() < AndroidApiLevel.N.getLevel()); D8TestCompileResult compileResult = compileStreamTestsToDex(); runSuccessfulTests(compileResult); runFailingTests(compileResult); } private D8TestCompileResult compileStreamTestsToDex() throws Exception { KeepRuleConsumer keepRuleConsumer = createKeepRuleConsumer(parameters); List<Path> filesToCompile = Arrays.stream(JDK_11_STREAM_TEST_COMPILED_FILES) .filter(file -> !file.toString().contains("lang/invoke")) .collect(Collectors.toList()); return testForD8() .addProgramFiles(filesToCompile) .addProgramFiles(getPathsFiles()) .addProgramFiles(getSafeVarArgsFile()) .addProgramFiles(testNGSupportProgramFiles()) .addOptionsModification(opt -> opt.testing.trackDesugaredAPIConversions = true) .addLibraryFiles(ToolHelper.getAndroidJar(AndroidApiLevel.P)) .setMinApi(parameters.getApiLevel()) .enableCoreLibraryDesugaring(parameters.getApiLevel(), keepRuleConsumer) .compile() .addDesugaredCoreLibraryRunClassPath( this::buildDesugaredLibraryWithJavaBaseExtension, parameters.getApiLevel(), keepRuleConsumer.get(), shrinkDesugaredLibrary) .withArtFrameworks() .withArt6Plus64BitsLib(); } private void runSuccessfulTests(D8TestCompileResult compileResult) throws Exception { String verbosity = "2"; // Increase verbosity for debugging. Map<String, String> runnableTests = getRunnableTests(SUCCESSFUL_RUNNABLE_TESTS); for (String path : runnableTests.keySet()) { assert runnableTests.get(path) != null; D8TestRunResult result = compileResult.run( parameters.getRuntime(), "TestNGMainRunner", verbosity, runnableTests.get(path)); assertTrue( result .getStdOut() .endsWith( StringUtils.lines("Tests result in " + runnableTests.get(path) + ": SUCCESS"))); } } private void runFailingTests(D8TestCompileResult compileResult) throws Exception { // For failing runnable tests, we just ensure that they do not fail due to desugaring, but // due to an expected failure (missing API, etc.). String verbosity = "2"; // Increase verbosity for debugging. 
Map<String, String> runnableTests = getRunnableTests(FAILING_RUNNABLE_TESTS); for (String path : runnableTests.keySet()) { assert runnableTests.get(path) != null; D8TestRunResult result = compileResult.run( parameters.getRuntime(), "TestNGMainRunner", verbosity, runnableTests.get(path)); if (result .getStdOut() .endsWith( StringUtils.lines("Tests result in " + runnableTests.get(path) + ": SUCCESS"))) { // The test succeeds, this can happen on tests succeeding only on high API levels. assertTrue( parameters.getRuntime().asDex().getMinApiLevel().getLevel() >= AndroidApiLevel.N.getLevel()); } else if (result.getStdOut().contains("java.lang.NoSuchMethodError") && Arrays.stream(missingDesugaredMethods()) .anyMatch(method -> result.getStdOut().contains(method))) { // TODO(b/134732760): support Java 9 APIs. } else if (result .getStdOut() .contains("java.lang.NoSuchMethodError: No interface method forEach")) { // TODO(b/134732760): fix tests no to use Iterable#forEach } else if (result.getStdOut().contains("in class Ljava/util/Random") && result.getStdOut().contains("java.lang.NoSuchMethodError")) { // TODO(b/134732760): Random Java 9 Apis, support or do not use them. } else if (result.getStdOut().contains("java.lang.AssertionError")) { // TODO(b/134732760): Investigate and fix these issues. } else if (result.getStdErr().contains("$r8$wrapper$")) { // Use of high library API on low API device, cannot do anything about this. assertTrue( parameters.getRuntime().asDex().getMinApiLevel().getLevel() < AndroidApiLevel.N.getLevel()); } else { String errorMessage = "STDOUT:\n" + result.getStdOut() + "STDERR:\n" + result.getStdErr(); fail(errorMessage); } } } }
<reponame>TheDiveO/lxkns<filename>discover/usernames_test.go // Copyright 2020 <NAME>. // // Licensed under the Apache License, Version 2.0 (the "License"); you may not // use this file except in compliance with the License. You may obtain a copy // of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, WITHOUT // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the // License for the specific language governing permissions and limitations // under the License. package discover import ( "os" "os/user" "strconv" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" "github.com/thediveo/lxkns/model" "github.com/thediveo/lxkns/nstest" "github.com/thediveo/testbasher" ) var _ = Describe("maps UIDs", func() { It("returns same information as library queries", func() { myuid := os.Getuid() u, err := user.LookupId(strconv.FormatUint(uint64(myuid), 10)) Expect(err).To(Succeed()) Expect(u).NotTo(BeNil()) myusername := u.Username u, err = user.LookupId("0") Expect(err).To(Succeed()) Expect(u).NotTo(BeNil()) rootname := u.Username unames := userNamesFromPasswd(etcpasswd) Expect(unames).To(HaveKeyWithValue(uint32(0), rootname)) Expect(unames).To(HaveKeyWithValue(uint32(myuid), myusername)) }) It("switches into initial namespace and reads user names", func() { // This test is unusual, as we can carry it out only when we're inside // a separate mount namespace, so we can't immediately see the users // on the host system itself. We need some checks to ensure that we're // going to test things in the correct setup. if os.Geteuid() != 0 { Skip("needs root") } allns := Namespaces(WithStandardDiscovery()) if _, ok := allns.Processes[1]; !ok { Skip("needs root capabilities and PID=host") } mymntns := allns.Processes[1].Namespaces[model.MountNS] initialmntns := allns.Processes[model.PIDType(os.Getpid())].Namespaces[model.MountNS] if mymntns == initialmntns { Skip("needs container with different mount namespace") } if initialmntns == nil { Skip("needs PID=host") } // In order to check the data we want to discover, we need an // independent second view. Now, that's a job for safety, not for // reliability. scripts := testbasher.Basher{} scripts.Common(nstest.NamespaceUtilsScript) // Remember: we're here now in a container with root privileges. And // this needs awk in the host. And then there are probably differences // between nsenter made by Alpine(hmpf) and nsenter on the host system // in terms of their CLI flags, so we need to detect the CLI flag // variant to use... scripts.Script("main", ` ENTERMNT=$(nsenter -h 2>&1 | grep -q -e "--mnt" && echo "--mnt" || echo "-m") nsenter -t 1 ${ENTERMNT} -- /usr/bin/awk -F: 'BEGIN{printf "{"}{printf "\"%s\":%s,",$1,$3}END{printf "\"guardian-fooobar\":666}\n"}' /etc/passwd read `) scriptscmd := scripts.Start("main") var useruidmap map[string]uint32 scriptscmd.Decode(&useruidmap) Expect(useruidmap).To(HaveKeyWithValue("guardian-fooobar", uint32(666))) hostuidusermap := UidUsernameMap{} for user, uid := range useruidmap { if uid != 666 { hostuidusermap[uint32(uid)] = user } } scriptscmd.Close() scripts.Done() usernames := DiscoverUserNames(allns.Namespaces) Expect(len(usernames)).To(Equal(len(useruidmap) - 1)) for uid, username := range hostuidusermap { Expect(usernames[uid]).To(Equal(username), "missing uid %d: %q", uid, username) } }) })
<reponame>vbogatyrov/tdi-studio-se<filename>main/plugins/org.talend.designer.components.libs/libs_src/talend-spark/src/main/java/org/talend/spark/operation/HBaseLoad.java // ============================================================================ // // Copyright (C) 2006-2018 Talend Inc. - www.talend.com // // This source code is available under agreement available at // %InstallDIR%\features\org.talend.rcp.branding.%PRODUCTNAME%\%PRODUCTNAME%license.txt // // You should have received a copy of the agreement // along with this program; if not, write to Talend SA // 9 rue Pages 92150 Suresnes, France // // ============================================================================ package org.talend.spark.operation; import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.Map.Entry; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.spark.api.java.JavaPairRDD; import org.apache.spark.api.java.JavaRDD; import org.apache.spark.api.java.JavaSparkContext; import org.apache.spark.api.java.function.Function; import scala.Tuple2; public class HBaseLoad<T> { @SuppressWarnings("deprecation") public static JavaRDD<List<Object>> hbaseRDD(JavaSparkContext ctx, String zookeeperHost, String zookeeperPort, String table, final String columns, Map<String, String> properties) { Configuration conf = HBaseConfiguration.create(); conf.set("hbase.zookeeper.quorum", zookeeperHost); conf.set("hbase.zookeeper.property.clientPort", zookeeperPort); conf.set("mapred.input.dir", table); conf.set("hbase.mapred.tablecolumns", columns); for(Entry<String, String> e:properties.entrySet()) { conf.set(e.getKey(), e.getValue()); } JavaPairRDD<ImmutableBytesWritable, Result> hbaseRDD = ctx.hadoopRDD( new org.apache.hadoop.mapred.JobConf(conf), org.apache.hadoop.hbase.mapred.TableInputFormat.class, org.apache.hadoop.hbase.io.ImmutableBytesWritable.class, org.apache.hadoop.hbase.client.Result.class); JavaRDD<List<Object>> rdd = hbaseRDD .map(new Function<Tuple2<ImmutableBytesWritable, Result>, List<Object>>() { private static final long serialVersionUID = 1L; public List<Object> call( Tuple2<ImmutableBytesWritable, Result> in) throws Exception { Result res = in._2; List<Object> ra = new ArrayList<Object>(); for (String s : columns.split(" ")) { byte[] rowResult = res.getValue( org.apache.hadoop.hbase.util.Bytes .toBytes(s.split(":")[0]), org.apache.hadoop.hbase.util.Bytes .toBytes(s.split(":")[1])); ra.add(org.apache.hadoop.hbase.util.Bytes .toString(rowResult)); } return ra; } }); return rdd; } }
{-# LANGUAGE TypeFamilies #-} module ShouldFail where class C3 a where data S3 a :: * data S3n a :: * foo3 :: a -> S3 a foo3n :: a -> S3n a bar3 :: S3 a -> a bar3n :: S3n a -> a instance C3 Int where data S3 Int = D3Int newtype S3n Int = D3Intn () foo3 _ = D3Int foo3n _ = D3Intn () bar3 D3Int = 1 bar3n (D3Intn _) = 1 instance C3 Char where data S3 Char = D3Char foo3 _ = D3Char bar3 D3Char = 'c' bar3' :: S3 Char -> Char bar3' D3Char = 'a' -- must fail: Can't match Int against Char bar3wrong' D3Int = 1 bar3wrong' D3Char = 'a'
// the tool I used to extract the text from .xml added header and trailer that had to be removed. protected static Map<Character, Double> createCorpus(boolean writeFile) { Map<Character, Double> map = new HashMap<>(); String line = null; String str = null; DictionaryNormalizer.initMap(); for (int i = 1; i <= 85; i++) { try (final FileReader fr = new FileReader("corpus/" + i + ".txt"); final BufferedReader reader = new BufferedReader(fr)) { reader.readLine(); while ((line = reader.readLine()) != null && !line.trim().equals("TAPoRware Tool Parameter Summary")) { line = DictionaryNormalizer.normalize(line); line = line.toUpperCase(); for (Character c : line.toCharArray()) { if (map.containsKey(c)) { map.put(c, map.get(c) + 1.0); } else { map.put(c, 1.0); } } } } catch (IOException e) { } } Double sum = 0.0; for (Character c : map.keySet()) { sum += map.get(c); } for (Character c : map.keySet()) { map.put(c, map.get(c) / sum); } if (writeFile) { writeMapToFile(map, "csv/corpus.csv"); try (FileOutputStream fos = new FileOutputStream("corpus.ser"); ObjectOutputStream oos = new ObjectOutputStream(fos);) { oos.writeObject(map); } catch (IOException ioe) { } } return map; }
//***************************************************************************** // Copyright (C) 2014 Texas Instruments Incorporated // // All rights reserved. Property of Texas Instruments Incorporated. // Restricted rights to use, duplicate or disclose this code are // granted through contract. // The program may not be used without the written permission of // Texas Instruments Incorporated or against the terms and conditions // stipulated in the agreement under which this program has been supplied, // and under no circumstances can it be used with non-TI connectivity device. // //***************************************************************************** #include <string.h> #include <stdlib.h> #include "serial_wifi.h" unsigned char atoc(char data); unsigned char atod(char data); long atolong(char *data, unsigned long *retLong); unsigned char ascii_to_char(char b1, char b2); //***************************************************************************** // //! atoc //! //! @param none //! //! @return hexadecimal equivalent //! //! @brief Convert nibble to hexdecimal from ASCII // //***************************************************************************** unsigned char atoc(char data) { unsigned char ucRes; if ((data >= 0x30) && (data <= 0x39)) { ucRes = data - 0x30; } else { if (data == 'a') { ucRes = 0x0a; ; } else if (data == 'b') { ucRes = 0x0b; } else if (data == 'c') { ucRes = 0x0c; } else if (data == 'd') { ucRes = 0x0d; } else if (data == 'e') { ucRes = 0x0e; } else if (data == 'f') { ucRes = 0x0f; } } return ucRes; } //***************************************************************************** // //! atod //! //! \param none //! //! \return Decimal value of ASCII char //! //! \brief Convert ASCII char to decimal // //***************************************************************************** unsigned char atod(char data) { unsigned char retVal = 0xff; if ((data >= 0x30) && (data <= 0x39)) { retVal = data - 0x30; } return retVal; } //***************************************************************************** // //! atolong //! //! \param none //! //! \return Return long value else -1 as error //! //! \brief Convert ASCII string to long // //***************************************************************************** long atolong(char *data, unsigned long *retLong) { unsigned char cycleCount = 0; unsigned char digit; if ((data == NULL) || (retLong == NULL)) { return (-1); } *retLong = 0; while ((digit = atod(*data)) != 0xff) { *retLong *= 10; *retLong += digit; data++; cycleCount++; } return cycleCount; } //***************************************************************************** // //! ascii_to_char //! //! @param b1 first byte //! @param b2 second byte //! //! @return The converted character //! //! @brief Convert 2 bytes in ASCII into one character // //***************************************************************************** unsigned char ascii_to_char(char b1, char b2) { unsigned char ucRes; ucRes = (atoc(b1)) << 4 | (atoc(b2)); return ucRes; } //***************************************************************************** // //! htoa //! //! @param none //! //! @return status //! //! 
@brief Converts hexa string to ascii // //***************************************************************************** int htoa() { int i, len; char Byte[8]; len = strlen(uBufRx.g_RecvBuf); //check data validity for (i = 0; i < len; i++) { if (((uBufRx.g_RecvBuf[i] >= 0x30) && (uBufRx.g_RecvBuf[i] <= 0x39)) || \ ((uBufRx.g_RecvBuf[i] >= 0x41) && (uBufRx.g_RecvBuf[i] <= 0x46)) || \ ((uBufRx.g_RecvBuf[i] >= 0x61) && (uBufRx.g_RecvBuf[i] <= 0x66))) { continue; } else { return -1; } } for (i = 0; i < (len / 2); i++) { strncpy(Byte, &uBufRx.g_RecvBuf[i * 2], 2); uBufRx.g_RecvBuf[i] = strtol(Byte, NULL, 16); } return 0; } //***************************************************************************** // //! btoa //! //! @param none //! //! @return status //! //! @brief Converts binary string to ascii // //***************************************************************************** int btoa() { int i, len; char Byte[8]; len = strlen(uBufRx.g_RecvBuf); //check data validity for (i = 0; i < len; i++) { if ((uBufRx.g_RecvBuf[i] != 0x30) && (uBufRx.g_RecvBuf[i] != 0x31)) { return -1; } } for (i = 0; i < (len / 8); i++) { strncpy(Byte, &uBufRx.g_RecvBuf[i * 8], 8); uBufRx.g_RecvBuf[i] = strtol(Byte, NULL, 2); } return 0; }
def add_mandatory_optional_obj(obj, label, thread): layout = QtWidgets.QVBoxLayout() layout.addWidget(QtWidgets.QLabel("Add attribute:")) if type(obj) == GlobalConstr: l_cls = L_ITEM_CLASS + L_LOOP_CLASS + L_DATA_CLASS l_flag_mandatory = len(l_cls)*[False] elif type(obj) == DataConstr: l_cls = L_ITEM_CLASS + L_LOOP_CLASS l_flag_mandatory = len(l_cls) * [False] else: l_cls = obj.mandatory_classes+obj.optional_classes l_flag_mandatory = len(obj.mandatory_classes) * [True] + len(obj.optional_classes) * [False] l_cls_name_module_bases = [(_, _.__name__, _.__module__, _.__bases__[0].__name__, str(int(not(_f)))) for _, _f in zip(l_cls, l_flag_mandatory)] l_cls_name_module_bases.sort(key=lambda x: x[4]+x[3]+x[2]+x[1]) l_cls = [_[0] for _ in l_cls_name_module_bases] n_row, n_col = len(l_cls)+1, 3 table_widget_1 = QtWidgets.QTableWidget(n_row, n_col) table_widget_1.setHorizontalHeaderLabels(["class_name", "type", "module"]) header = table_widget_1.horizontalHeader() header.setSectionResizeMode(0, QtWidgets.QHeaderView.Stretch) header.setSectionResizeMode(1, QtWidgets.QHeaderView.ResizeToContents) header.setSectionResizeMode(2, QtWidgets.QHeaderView.ResizeToContents) for i_row, _cls_name_module_bases in enumerate(l_cls_name_module_bases): s_name = _cls_name_module_bases[1] s_module = ".".join(_cls_name_module_bases[2].split(".")[:-1]) s_bases = _cls_name_module_bases[3] s_flag_m = _cls_name_module_bases[4] if s_bases == "GlobalConstr": s_bases = "global block " elif s_bases == "DataConstr": s_bases = "data block " elif s_bases == "LoopConstr": s_bases = "loop " elif s_bases == "ItemConstr": s_bases = "items " s_doc = _cls_name_module_bases[0].__doc__ table_widget_1_item = QtWidgets.QTableWidgetItem(s_name) table_widget_1_item.setCheckState(0) table_widget_1_item.setToolTip(s_doc) table_widget_1.setItem(i_row, 0, table_widget_1_item) table_widget_2_item = QtWidgets.QTableWidgetItem(s_bases) table_widget_2_item.setToolTip(s_doc) table_widget_1.setItem(i_row, 1, table_widget_2_item) table_widget_3_item = QtWidgets.QTableWidgetItem(s_module) table_widget_3_item.setToolTip(s_doc) table_widget_1.setItem(i_row, 2, table_widget_3_item) if s_flag_m=="0": table_widget_1_item.setBackground(QtGui.QColor(255, 255, 240)) table_widget_2_item.setBackground(QtGui.QColor(255, 255, 240)) table_widget_3_item.setBackground(QtGui.QColor(255, 255, 240)) else: table_widget_1_item.setBackground(QtGui.QColor(255, 255, 255)) table_widget_2_item.setBackground(QtGui.QColor(255, 255, 255)) table_widget_3_item.setBackground(QtGui.QColor(255, 255, 255)) pb_add = QtWidgets.QPushButton("Add objects") table_widget_1.setCellWidget(n_row-1, 0, pb_add) table_widget_1.setVerticalHeaderLabels(n_row*[""]) pb_add.clicked.connect(lambda : add_obj_by_table(table_widget_1, l_cls, obj, thread)) layout.addWidget(table_widget_1) return layout
#pragma once #include "test.hpp" static Test* s_current_test;
Last week, Pastor Lance Wallnau told the world that he knew of a man who was “cured” of his homosexuality because he ate a special piece of cake. Wallnau even streamed his anecdote on Periscope and Right Wing Watch later amplified it. As Wallnau told it, there were some “hookers” who used to hang out at a bar who were saved by a fellow patron who had found Christianity. Together, Wallnau said, these individuals “baked a cake for the owner of the bar, who was gay and very adamantly anti-Christian” and prayed over it that he would leave homosexuality. “It was an anointed cake,” Wallnau said, “and they made the cake and gave it as a gift. And when he ate the cake … the power of God hit him.” The “presence of God” fell upon the bar owner, Wallnau recounted, and he then got baptized, at which point “the spirit that was working him got broken off,” thus freeing him from his life of homosexuality. Wallnau didn’t mention the name of the bar, the name of the bar owner, or show us the Gay Card the man had to turn back in when he changed his sexual orientation. But he’s a Christian, so we’re supposed to believe him. That’s how it works. Now, however, Wallnau is furious that people are quoting him verbatim, making him look like a damn fool. The actual story, which appears in Transformation: Change The Marketplace and You Change the World by Ed Silvoso — and which should still be taken with a huge grain of salt — is that a gay bar manager was “saved” by a Christian. To celebrate, he made a cake for the bar’s owner… who ate it, got “saved” himself, and then converted the bar into a church. It’s still a ridiculous story… but nothing in there suggests the gay person stopped being gay because he ate cake, which is precisely what Wallnau said last week. That’s why, in his latest video, he apologized for spreading the misinformation and explained how cake will not change someone’s sexual orientation. … I’m just kidding. Wallnau criticized the publications that quoted him directly and called them all “fake news.” Despite the fact that it was Wallnau who got the story wrong and that the media was simply reporting what he had originally said, Wallnau is now bizarrely claiming that he was misquoted. “So you see, they were not quoting me correctly in The Huffington Puffington Post,” Wallnau declared, “because the guy didn’t get delivered … Ex-gay guy ends up getting delivered by water baptism, not by the cake. This is what the liberals do, they always misquote you. Get it straight!” “They got hung up on the cake,” he stated, “which was actually [the owner], not a homosexual. Once again, fake news from The Huffington Puffington Post and the Dallas News and one thousand Twitter feeds!” I’m struggling to understand how they misquoted him when the original video is right there for everyone to see… But that’s religious people for you: When the evidence isn’t there, they insist we accept their beliefs on faith. When the evidence is there, they deny it. (via Right Wing Watch. Image via Shutterstock. Portions of this article were published earlier)
# Adapted from https://github.com/huggingface/transformers/blob/v2.3.0/transformers/data/processors/glue.py # # Additional license and copyright information for this source code are available at: # https://github.com/huggingface/transformers/blob/master/LICENSE """ FEVER processors and helpers """ import csv import logging import os import re import numpy as np from transformers.data.processors.utils import DataProcessor, InputExample, InputFeatures from transformers.file_utils import is_tf_available if is_tf_available(): import tensorflow as tf logger = logging.getLogger(__name__) def fever_convert_examples_to_features( examples, tokenizer, max_length=512, task=None, label_list=None, output_mode=None, pad_on_left=False, pad_token=0, pad_token_segment_id=0, mask_padding_with_zero=True, ): """ Loads a data file into a list of ``InputFeatures`` Args: examples: List of ``InputExamples`` containing the examples. tokenizer: Instance of a tokenizer that will tokenize the examples max_length: Maximum example length task: FEVER task label_list: List of labels. Can be obtained from the processor using the ``processor.get_labels()`` method output_mode: String indicating the output mode. Either ``regression`` or ``classification`` pad_on_left: If set to ``True``, the examples will be padded on the left rather than on the right (default) pad_token: Padding token pad_token_segment_id: The segment ID for the padding token (It is usually 0, but can vary such as for XLNet where it is 4) mask_padding_with_zero: If set to ``True``, the attention mask will be filled by ``1`` for actual values and by ``0`` for padded values. If set to ``False``, inverts it (``1`` for padded values, ``0`` for actual values) Returns: A list of task-specific ``InputFeatures`` which can be fed to the model. """ if task is not None: processor = fever_processors[task]() if label_list is None: label_list = processor.get_labels() logger.info("Using label list %s for task %s" % (label_list, task)) if output_mode is None: output_mode = fever_output_modes[task] logger.info("Using output mode %s for task %s" % (output_mode, task)) for (ex_index, example) in enumerate(examples): inputs = tokenizer.encode_plus( example.text_a, example.text_b, add_special_tokens=True, max_length=max_length, ) input_ids, token_type_ids = inputs["input_ids"], inputs["token_type_ids"] # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids) # Zero-pad up to the sequence length. 
padding_length = max_length - len(input_ids) if pad_on_left: input_ids = ([pad_token] * padding_length) + input_ids attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids else: input_ids = input_ids + ([pad_token] * padding_length) attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length) token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length) assert len(input_ids) == max_length, "Error with input length {} vs {}".format(len(input_ids), max_length) assert len(attention_mask) == max_length, "Error with input length {} vs {}".format(len(attention_mask), max_length) assert len(token_type_ids) == max_length, "Error with input length {} vs {}".format(len(token_type_ids), max_length) if output_mode == "classification": label_map = {label: i for i, label in enumerate(label_list)} label = label_map[example.label] elif output_mode == "regression": label = float(example.label) else: raise KeyError(output_mode) if ex_index < 5: logger.info("*** Example ***") logger.info("guid: %s" % (example.guid)) logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) logger.info("attention_mask: %s" % " ".join([str(x) for x in attention_mask])) logger.info("token_type_ids: %s" % " ".join([str(x) for x in token_type_ids])) logger.info("label: %s (id = %d)" % (example.label, label)) yield InputFeatures( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, label=label, ) def fever_compute_metrics(task_name, preds, labels): def mse(preds, labels): return np.mean((labels - preds) ** 2) def accuracy(preds, labels): return (preds == labels).mean() assert len(preds) == len(labels) if task_name == "sentence_retrieval": return {"mse": mse(preds, labels)} if task_name == "claim_verification": return {"acc": accuracy(preds, labels)} else: raise KeyError(task_name) def convert_to_unicode(text): """Converts `text` to Unicode (if it's not already), assuming utf-8 input.""" if isinstance(text, str): return text elif isinstance(text, bytes): return text.decode("utf-8", "ignore") else: raise ValueError("Unsupported string type: %s" % (type(text))) def process_sent(sentence): sentence = convert_to_unicode(sentence) sentence = re.sub(" \-LSB\-.*?\-RSB\-", "", sentence) sentence = re.sub("\-LRB\- \-RRB\- ", "", sentence) sentence = re.sub(" -LRB-", " ( ", sentence) sentence = re.sub("-RRB-", " )", sentence) sentence = re.sub("--", "-", sentence) sentence = re.sub("``", '"', sentence) sentence = re.sub("''", '"', sentence) return sentence def process_title(title): title = convert_to_unicode(title) title = re.sub("_", " ", title) title = re.sub(" -LRB-", " ( ", title) title = re.sub("-RRB-", " )", title) title = re.sub("-COLON-", ":", title) return title def process_evid(sentence): sentence = convert_to_unicode(sentence) sentence = re.sub(" -LSB-.*-RSB-", " ", sentence) sentence = re.sub(" -LRB- -RRB- ", " ", sentence) sentence = re.sub("-LRB-", "(", sentence) sentence = re.sub("-RRB-", ")", sentence) sentence = re.sub("-COLON-", ":", sentence) sentence = re.sub("_", " ", sentence) sentence = re.sub("\( *\,? 
*\)", "", sentence) sentence = re.sub("\( *[;,]", "(", sentence) sentence = re.sub("--", "-", sentence) sentence = re.sub("``", '"', sentence) sentence = re.sub("''", '"', sentence) return sentence def process_label(label): label = convert_to_unicode(label) return label class SentenceRetrievalProcessor(DataProcessor): """Processor for the sentence retrieval data set.""" def get_examples(self, file_path, purpose): """See base class.""" with open(file_path, "r", encoding="utf-8-sig") as f: lines = csv.reader(f, delimiter="\t") for (i, line) in enumerate(lines): guid = "%s-%d" % (purpose, i) title = process_title(line[2]) text_a = process_sent(line[1]) text_b = process_evid(line[4]) text_b = title + " : " + text_b label = process_label(line[5]) if purpose != "predict" else self.get_dummy_label() yield InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label) def get_length(self, file_path): """Return the number of examples.""" return sum(1 for line in open(file_path, "r", encoding="utf-8-sig")) def get_labels(self): """See base class.""" return [None] def get_dummy_label(self): return "-1" class ClaimVerificationProcessor(SentenceRetrievalProcessor): """Processor for the claim verification data set.""" def get_labels(self): """See base class.""" return ["R", "S", "N"] # REFUTES, SUPPORTS, NOT ENOUGH INFO def get_dummy_label(self): return "N" fever_processors = { "sentence_retrieval": SentenceRetrievalProcessor, "claim_verification": ClaimVerificationProcessor, } fever_tasks_num_labels = { "sentence_retrieval": 1, "claim_verification": 3, } fever_output_modes = { "sentence_retrieval": "regression", "claim_verification": "classification", }
<reponame>sersoluciones/ngx-leaflet import { Injectable } from '@angular/core'; import { Observable } from 'rxjs'; import { LatLng2LatLngOptions } from './types'; @Injectable({ providedIn: 'root' }) export class MapService { /** * Metodo para obtener la posisión GPS actual usando Observable * @example * this.mapService.getCurrentPosition().pipe(take(1)).subscribe({ * next(position) { * console.log('Current Position: ', position); * }, * error(msg) { * console.log('Error Getting Location: ', msg); * } * }); */ getCurrentPosition(): Observable<GeolocationPosition> { return new Observable((observer) => { // Simple geolocation API check provides values to publish if ('geolocation' in navigator) { navigator.geolocation.getCurrentPosition((position: GeolocationPosition) => { observer.next(position); }, (error: GeolocationPositionError) => { observer.error(error); }); } else { observer.error('Geolocation not available'); } }); } /** * Verifica si la latitud y longitud son válidas * @param lat Latitud * @param lng Longitud */ checkLatLog(lat: number, lng: number) { return (-90 <= lat) && (90 >= lat) && (-180 <= lng) && (180 >= lng); } /** * Obtiene la distancia en km entre dos puntos LatLng * @param lon1 Latitud */ distancePoints(options: LatLng2LatLngOptions) { // tslint:disable-next-line: max-line-length const a = Math.sin(((options.to.lat - options.from.lat) * Math.PI / 180) / 2) * Math.sin(((options.to.lat - options.from.lat) * Math.PI / 180) / 2) + Math.cos(options.from.lat * Math.PI / 180) * Math.cos(options.to.lat * Math.PI / 180) * Math.sin(((options.to.lng - options.from.lng) * Math.PI / 180) / 2) * Math.sin(((options.to.lng - options.from.lng) * Math.PI / 180) / 2); return (6371 * (2 * Math.asin(Math.sqrt(a)))) * 1.60934; } cutPrecision(obj: any, precision: number) { if ('number' === typeof obj[0]) { for (let i = 0; i < obj.length; i++) { obj[i] = Math.round(obj[i] * precision) / precision; } } else { const arr = obj.features || obj.geometries || obj.coordinates || obj; for (let i = 0; i < arr.length; i++) { this.cutPrecision(arr[i], precision); } } } middlePoint(options: LatLng2LatLngOptions) { if ((options.from.lng !== options.to.lng) || (options.from.lat !== options.to.lat)) { const lat1 = options.from.lat * Math.PI / 180; const lat2 = options.to.lat * Math.PI / 180; const lon1 = options.from.lng * Math.PI / 180; const lon2 = options.to.lng * Math.PI / 180; const dLon = lon2 - lon1; const x = Math.cos(lat2) * Math.cos(dLon); const y = Math.cos(lat2) * Math.sin(dLon); let lat3 = Math.atan2(Math.sin(lat1) + Math.sin(lat2), Math.sqrt((Math.cos(lat1) + x) * (Math.cos(lat1) + x) + y * y)); let lon3 = lon1 + Math.atan2(y, Math.cos(lat1) + x); lat3 *= 180 / Math.PI; lon3 *= 180 / Math.PI; const deltaY = options.to.lng - options.from.lng; const deltaX = options.to.lat - options.from.lat; const angleInDegrees = Math.atan2(deltaY, deltaX) * 180 / Math.PI; return { longitude: lon3, Latitude: lat3, angle: angleInDegrees, distance: this.distancePoints(options) }; } else { return false; } } }
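The distancePoints method above packs the haversine great-circle formula into a single expression. For readability, here is the same computation spelled out in Python; the 6371 constant is the mean Earth radius in kilometers, and the original's trailing *1.60934 factor is left out here, since it is an extra scaling step applied in the source rather than part of the standard formula. The coordinates in the example are illustrative.

import math

def haversine_km(lat1, lng1, lat2, lng2):
    # Convert to radians and apply the haversine formula.
    phi1, phi2 = math.radians(lat1), math.radians(lat2)
    d_phi = math.radians(lat2 - lat1)
    d_lmb = math.radians(lng2 - lng1)
    a = math.sin(d_phi / 2) ** 2 + math.cos(phi1) * math.cos(phi2) * math.sin(d_lmb / 2) ** 2
    return 6371 * 2 * math.asin(math.sqrt(a))  # 6371 km: mean Earth radius

# Example: Bogota to Medellin, roughly 240 km apart.
print(round(haversine_km(4.711, -74.072, 6.244, -75.581), 1))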
use super::error::AmplifierError; use crate::common::permutations::*; use crate::computer::{Code, ListInput, VirtualMachine}; use std::collections::VecDeque; pub struct Amplifier<'a> { computers: VecDeque<(ListInput, VirtualMachine<'a>)>, } impl Amplifier<'_> { pub fn get_best(template: Code, setting: &Vec<i64>) -> Result<i64, AmplifierError> { let mut result = i64::MIN; for perm in setting.permutate() { let mut amplifier = Amplifier::new(template.clone(), &perm); result = result.max(amplifier.run_once(0)?); } Ok(result) } pub fn get_best_continously(template: Code, setting: &Vec<i64>) -> Result<i64, AmplifierError> { let mut result = i64::MIN; for perm in setting.permutate() { let mut amplifier = Amplifier::new(template.clone(), &perm); result = result.max(amplifier.run_continously(0)?); } Ok(result) } fn new<'a>(code: Code, setting: &[&i64]) -> Amplifier<'a> { let computers = setting .iter() .map(|&value| { let mut input = ListInput::new(); input.provide_single(*value); let vm = VirtualMachine::new(code.clone(), input.clone()); (input, vm) }) .collect(); Amplifier { computers } } fn run_once(&mut self, initial_value: i64) -> Result<i64, AmplifierError> { let mut value = initial_value; for (input, vm) in self.computers.iter_mut() { input.provide_single(value); let result = vm.get_all()?; if result.len() != 1 { return Err(AmplifierError::NotExactlyOne); } value = result[0]; } Ok(value) } fn run_continously(&mut self, initial_value: i64) -> Result<i64, AmplifierError> { let mut end_value = initial_value; loop { if let Some((mut input, mut vm)) = self.computers.pop_front() { input.provide_single(end_value); if let Some(step_result) = vm.next()? { end_value = step_result; self.computers.push_back((input, vm)); } else { break; } } } Ok(end_value) } } #[cfg(test)] mod tests { use super::*; #[test] fn expected_outcome() -> Result<(), AmplifierError> { let input = vec![ 3, 15, 3, 16, 1002, 16, 10, 16, 1, 16, 15, 15, 4, 15, 99, 0, 0, ]; let code = input.into(); let expected = 43210; let mut amplifier = Amplifier::new(code, &vec![&4, &3, &2, &1, &0]); let result = amplifier.run_once(0)?; assert_eq!(result, expected); Ok(()) } #[test] fn find_best_outcome_once() -> Result<(), AmplifierError> { let input = vec![ 3, 15, 3, 16, 1002, 16, 10, 16, 1, 16, 15, 15, 4, 15, 99, 0, 0, ]; let code = input.into(); let expected = 43210; let result = Amplifier::get_best(code, &vec![0, 1, 2, 3, 4])?; assert_eq!(result, expected); Ok(()) } #[test] fn find_best_outcome_once2() -> Result<(), AmplifierError> { let input = vec![ 3, 31, 3, 32, 1002, 32, 10, 32, 1001, 31, -2, 31, 1007, 31, 0, 33, 1002, 33, 7, 33, 1, 33, 31, 31, 1, 32, 31, 31, 4, 31, 99, 0, 0, 0, ]; let code = input.into(); let expected = 65210; let result = Amplifier::get_best(code, &vec![0, 1, 2, 3, 4])?; assert_eq!(result, expected); Ok(()) } #[test] fn expected_outcome_continously() -> Result<(), AmplifierError> { let input = vec![ 3, 26, 1001, 26, -4, 26, 3, 27, 1002, 27, 2, 27, 1, 27, 26, 27, 4, 27, 1001, 28, -1, 28, 1005, 28, 6, 99, 0, 0, 5, ]; let code = input.into(); let expected = 139629729; let mut amplifier = Amplifier::new(code, &vec![&9, &8, &7, &6, &5]); let result = amplifier.run_continously(0)?; assert_eq!(result, expected); Ok(()) } #[test] fn find_best_outcome_continously1() -> Result<(), AmplifierError> { let input = vec![ 3, 26, 1001, 26, -4, 26, 3, 27, 1002, 27, 2, 27, 1, 27, 26, 27, 4, 27, 1001, 28, -1, 28, 1005, 28, 6, 99, 0, 0, 5, ]; let code = input.into(); let expected = 139629729; let result = 
Amplifier::get_best_continously(code, &vec![5, 6, 7, 8, 9])?; assert_eq!(result, expected); Ok(()) } }
def fan_speed_name(self) -> str: register = self._registers.get_register( 'HR_USER_CONFIG_CURRENT_SYSTEM_MODE') return register.state_name
<reponame>Koswu/TinyBoardBack<gh_stars>0 package logging import ( "boarderbackend/pkgs/setting" "fmt" "log" "os" "path" "time" ) func getLogFilePath() string{ return fmt.Sprintf("%s", setting.Logging.LogFilePath) } func getLogFileFullPath() string { prefixPath := getLogFilePath() suffixPath := fmt.Sprintf("%s%v.%s", setting.Logging.LogFileName, time.Now().Format(setting.Logging.TimeFormat), setting.Logging.LogFileExt) return path.Join(prefixPath, suffixPath) } func openLogFile(filePath string) *os.File { _, err :=os.Stat(filePath) switch { case os.IsNotExist(err): mkDir() case os.IsPermission(err): log.Fatalf("Permission Error: %v", err) } handle, err :=os.OpenFile(filePath, os.O_APPEND | os.O_CREATE | os.O_WRONLY, 0644) if err != nil { log.Fatalf("Failed to Open File: %v", handle) } return handle } func mkDir() { err :=os.MkdirAll(getLogFilePath(), os.ModePerm) if err != nil { panic(err) } }
New records and confirmation of the presence of three species of primates (Mammalia, Primates) in southwestern Colombia Cebus albifrons (Humboldt, 1812); Sapajus apella (Linnaeus, 1758) and Aotus lemurinus I. Geoffroy, 1843 are widely distributed primates in Colombia. Despite this, there are gaps in the occurrence of these species in the southwestern part of the country. Through the collection of specimen remains, molecular analyses and review of museum specimens, we reported new records for these species in the Department of Nariño, expanding their distribution range. Finally, we highlight some important notes for the conservation of these species.
/** * Implements a hierarchical {@link ResourceRegistryPart} by maintaining a list of child ResourceRegistryPart. */ public class HierarchicalResourceRegistryPart extends ResourceRegistryPartBase { private static final String PATH_SEPARATOR = "/"; private Map<String, ResourceRegistryPart> partMap = new HashMap<>(); private List<ResourceRegistryPart> partList = new ArrayList<>(); private ResourceRegistryPartListener childListener = new ResourceRegistryPartListener() { @Override public void onChanged(ResourceRegistryPartEvent event) { notifyChange(); } }; public void putPart(String prefix, ResourceRegistryPart part) { if (partMap.containsKey(prefix)) { throw new IllegalStateException("part with prefx " + prefix + " already exists"); } partMap.put(prefix, part); partList.add(part); part.addListener(childListener); } @Override public RegistryEntry addEntry(RegistryEntry entry) { String resourceType = entry.getResourceInformation().getResourceType(); ResourceRegistryPart part = getPart(resourceType); if (part == null) { throw new IllegalStateException("cannot add " + resourceType + ", no part available in hierarchy"); } return part.addEntry(entry); } @Override public boolean hasEntry(Class<?> clazz) { for (ResourceRegistryPart part : partList) { if (part.hasEntry(clazz)) { return true; } } return false; } @Override public boolean hasEntry(String resourceType) { for (ResourceRegistryPart part : partList) { if (part.hasEntry(resourceType)) { return true; } } return false; } @Override public RegistryEntry getEntry(String resourceType) { ResourceRegistryPart part = getPart(resourceType); if (part == null) { return null; } return part.getEntry(resourceType); } @Override public RegistryEntry getEntryByPath(String resourcePath) { ResourceRegistryPart part = getPart(resourcePath); if (part == null) { return null; } return part.getEntryByPath(resourcePath); } private ResourceRegistryPart getPart(String resourceType) { int sep = resourceType.indexOf(PATH_SEPARATOR); String prefix; if (sep == -1) { prefix = ""; } else { prefix = resourceType.substring(0, sep); } return partMap.get(prefix); } @Override public Collection<RegistryEntry> getResources() { List<RegistryEntry> list = new ArrayList<>(); for (ResourceRegistryPart part : partList) { list.addAll(part.getResources()); } return list; } @Override public RegistryEntry getEntry(Class<?> clazz) { for (ResourceRegistryPart part : partList) { if (part.hasEntry(clazz)) { return part.getEntry(clazz); } } return null; } }
<filename>src/Lib.hs<gh_stars>0 {-# LANGUAGE RecordWildCards #-} module Lib ( someFunc ) where import Control.Monad.IO.Class import Control.Monad.Trans.State import Control.Monad import Data.Foldable import Data.List import qualified Data.Map.Strict as M import Data.Maybe import qualified Data.Set as S import System.Environment keywords :: S.Set String keywords = S.fromList [ "module" , "import" , "where" , "data" , "type" , "newtype" , "class" , "instance" , "case" , "if" , "then" , "else" , "qualified" , "deriving" ] dropSingleLineComments :: [String] -> [String] dropSingleLineComments [] = [] dropSingleLineComments (('-':'-':_):_) = [] dropSingleLineComments (w:ws) = w:(dropSingleLineComments ws) takeUntil :: Eq a => [a] -> [a] -> [a] takeUntil [] as = as takeUntil sentinel as | sentinel `isPrefixOf` as = [] | otherwise = case as of [] -> [] (x:xs) -> x : takeUntil sentinel xs dropUntil :: Eq a => [a] -> [a] -> [a] dropUntil [] as = as dropUntil sentinel as | sentinel `isPrefixOf` as = drop (length sentinel) as | otherwise = case as of [] -> [] (_:xs) -> dropUntil sentinel xs splitOn :: Eq a => [a] -> [a] -> ([a],[a]) splitOn [] as = (as,[]) splitOn sentinel as = (takeUntil sentinel as, dropUntil sentinel as) dropBetween :: Eq a => [a] -> [a] -> Bool -> [[a]] -> (Bool, [[a]]) dropBetween begin end = go where go inComment [] = (inComment,[]) go False (w:ws) | begin `isInfixOf` w = let (pre,post) = splitOn begin w in (pre:) <$> go True (post:ws) | otherwise = (w:) <$> go False ws go True (w:ws) | end `isInfixOf` w = let post = snd $ splitOn end w in go False (post:ws) | otherwise = go True ws dropBetweenWithEscape :: Eq a => [a] -> [a] -> [a] -> Bool -> [[a]] -> (Bool, [[a]]) dropBetweenWithEscape begin end esc = go where go inComment [] = (inComment,[]) go False (w:ws) | (not ((esc ++ begin) `isInfixOf` w)) && begin `isInfixOf` w = let (pre,post) = splitOn begin w in (pre:) <$> go True (post:ws) | otherwise = (w:) <$> go False ws go True (w:ws) | (not ((esc ++ end) `isInfixOf` w)) && end `isInfixOf` w = let post = snd $ splitOn end w in go False (post:ws) | otherwise = go True ws dropMultiLineComments :: Bool -> [String] -> (Bool, [String]) dropMultiLineComments = dropBetween "{-" "-}" dropSqlTH :: Bool -> [String] -> (Bool, [String]) dropSqlTH = dropBetween "[sql|" "|]" dropStringLiterals :: [String] -> [String] dropStringLiterals = snd . dropBetweenWithEscape "\"" "\"" "\\" False dropComments :: Bool -> [String] -> (Bool, [String]) dropComments inComment = fmap dropSingleLineComments . dropMultiLineComments inComment wrapQuotes :: String -> String wrapQuotes = (++"\"") . ("\"" ++) escape :: Char -> String escape '\'' = "\\\'" escape c = [c] isValidOpeningChar :: Char -> Bool isValidOpeningChar c | (c >= 'a' && c <= 'z') || (c == '_') = True | otherwise = False isValidChar :: Char -> Bool isValidChar c | isValidOpeningChar c || (c >= 'A' && c <='Z') || (c >= '0' && c <= '9') || (c == '\'') = True | otherwise = False toMaybe :: Eq a => a -> a -> Maybe a toMaybe a b | a == b = Nothing | otherwise = Just b validateFunDef :: String -> Maybe String validateFunDef word = fmap (concat . map escape) $ case span isValidChar word of ("",_) -> Nothing (vWord,[]) -> Just vWord (vWord, (x:_)) | (x == ':' || x == ' ') -> Just vWord | otherwise -> Nothing validateFunAp :: String -> Maybe String validateFunAp = toMaybe "" . concat . map escape . 
filter isValidChar data ParseContext = ParseContext { inComment :: Bool , inSqlTH :: Bool , currentFunc :: String , defFuncs :: S.Set String , funcMap :: M.Map String (S.Set String) } defaultParseContext :: ParseContext defaultParseContext = ParseContext False False "" S.empty M.empty someFunc :: IO () someFunc = do files <- getArgs ParseContext{defFuncs=funcs, funcMap=links} <- flip execStateT defaultParseContext $ do forM_ files $ \file -> do contents <- liftIO $ readFile file forM_ (lines contents) $ \line -> do let firstWord = listToMaybe $ words line possibleFunctionDefinition = maybe False isValidOpeningChar $ listToMaybe line pc1 <- get let (inComment', ws'') = dropComments (inComment pc1) $ words line (inSqlTH', ws) = if inComment' then (inSqlTH pc1, ws'') else let (sql,ws') = dropSqlTH (inSqlTH pc1) ws'' in (sql, if sql then ws' else dropStringLiterals ws') put pc1{inComment = inComment', inSqlTH = inSqlTH'} if possibleFunctionDefinition then for_ (listToMaybe ws) $ \w -> case validateFunDef w of Just funcName | not (funcName `S.member` keywords) && (Just w == firstWord) -> do pc@ParseContext{..} <- get let funcCalls = tail ws s = S.fromList . catMaybes . map validateFunAp $ funcCalls funcMap' = case M.lookup currentFunc funcMap of Nothing -> M.insert currentFunc s funcMap Just s' -> M.insert currentFunc (S.union s' s) funcMap put pc{currentFunc = funcName, defFuncs = S.insert funcName defFuncs, funcMap = funcMap'} _ -> return () else do pc@ParseContext{..} <- get let s = S.fromList . catMaybes . map validateFunAp $ ws funcMap' = case M.lookup currentFunc funcMap of Nothing -> M.insert currentFunc s funcMap Just s' -> M.insert currentFunc (S.union s' s) funcMap put pc{funcMap = funcMap'} let links' = M.map (S.intersection funcs) links header = "strict digraph deps {" footer = "}" defs = map ((++ " [style=solid];") . wrapQuotes) $ toList funcs maps = M.foldMapWithKey (\k a -> map (\y -> wrapQuotes k ++ " -> " ++ wrapQuotes y ++ ";") $ toList a) links' mapM_ putStrLn $ header : (defs ++ maps ++ [footer])
/* * Copyright 2015 <NAME> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.digitalpetri.opcua.stack.core.types.structured; import com.digitalpetri.opcua.stack.core.Identifiers; import com.digitalpetri.opcua.stack.core.serialization.DelegateRegistry; import com.digitalpetri.opcua.stack.core.serialization.UaDecoder; import com.digitalpetri.opcua.stack.core.serialization.UaEncoder; import com.digitalpetri.opcua.stack.core.types.UaDataType; import com.digitalpetri.opcua.stack.core.types.builtin.LocalizedText; import com.digitalpetri.opcua.stack.core.types.builtin.NodeId; import com.digitalpetri.opcua.stack.core.types.builtin.QualifiedName; import com.digitalpetri.opcua.stack.core.types.builtin.unsigned.UInteger; import com.digitalpetri.opcua.stack.core.types.enumerated.NodeClass; @UaDataType("MethodNode") public class MethodNode extends InstanceNode { public static final NodeId TypeId = Identifiers.MethodNode; public static final NodeId BinaryEncodingId = Identifiers.MethodNode_Encoding_DefaultBinary; public static final NodeId XmlEncodingId = Identifiers.MethodNode_Encoding_DefaultXml; protected final Boolean _executable; protected final Boolean _userExecutable; public MethodNode() { super(null, null, null, null, null, null, null, null); this._executable = null; this._userExecutable = null; } public MethodNode(NodeId _nodeId, NodeClass _nodeClass, QualifiedName _browseName, LocalizedText _displayName, LocalizedText _description, UInteger _writeMask, UInteger _userWriteMask, ReferenceNode[] _references, Boolean _executable, Boolean _userExecutable) { super(_nodeId, _nodeClass, _browseName, _displayName, _description, _writeMask, _userWriteMask, _references); this._executable = _executable; this._userExecutable = _userExecutable; } public Boolean getExecutable() { return _executable; } public Boolean getUserExecutable() { return _userExecutable; } @Override public NodeId getTypeId() { return TypeId; } @Override public NodeId getBinaryEncodingId() { return BinaryEncodingId; } @Override public NodeId getXmlEncodingId() { return XmlEncodingId; } public static void encode(MethodNode methodNode, UaEncoder encoder) { encoder.encodeNodeId("NodeId", methodNode._nodeId); encoder.encodeEnumeration("NodeClass", methodNode._nodeClass); encoder.encodeQualifiedName("BrowseName", methodNode._browseName); encoder.encodeLocalizedText("DisplayName", methodNode._displayName); encoder.encodeLocalizedText("Description", methodNode._description); encoder.encodeUInt32("WriteMask", methodNode._writeMask); encoder.encodeUInt32("UserWriteMask", methodNode._userWriteMask); encoder.encodeArray("References", methodNode._references, encoder::encodeSerializable); encoder.encodeBoolean("Executable", methodNode._executable); encoder.encodeBoolean("UserExecutable", methodNode._userExecutable); } public static MethodNode decode(UaDecoder decoder) { NodeId _nodeId = decoder.decodeNodeId("NodeId"); NodeClass _nodeClass = decoder.decodeEnumeration("NodeClass", NodeClass.class); QualifiedName _browseName = 
decoder.decodeQualifiedName("BrowseName"); LocalizedText _displayName = decoder.decodeLocalizedText("DisplayName"); LocalizedText _description = decoder.decodeLocalizedText("Description"); UInteger _writeMask = decoder.decodeUInt32("WriteMask"); UInteger _userWriteMask = decoder.decodeUInt32("UserWriteMask"); ReferenceNode[] _references = decoder.decodeArray("References", decoder::decodeSerializable, ReferenceNode.class); Boolean _executable = decoder.decodeBoolean("Executable"); Boolean _userExecutable = decoder.decodeBoolean("UserExecutable"); return new MethodNode(_nodeId, _nodeClass, _browseName, _displayName, _description, _writeMask, _userWriteMask, _references, _executable, _userExecutable); } static { DelegateRegistry.registerEncoder(MethodNode::encode, MethodNode.class, BinaryEncodingId, XmlEncodingId); DelegateRegistry.registerDecoder(MethodNode::decode, MethodNode.class, BinaryEncodingId, XmlEncodingId); } }
An analysis of errors in Korean-Chinese public sector translation - A case study on COVID-19 information materials - Journal of the International Network for Korean Language and Culture 19-2, 203-228. This study aims to analyze the translation errors in Chinese COVID-19 information materials published by Korean public institutions and to explore ways to improve the translation quality of disaster information materials. First, this study discusses the factors to be considered for the quality assessment of public sector translation, as well as the criteria of translation evaluation in previous studies. Based on this, a framework for analyzing disaster information materials is identified. Then, this study analyzes the translation errors in Chinese COVID-19 information materials from January 2020 to February 2022. The analysis finds eight error types in the materials, with the most significant problem being low acceptability, as well as several mistranslations and grammatical errors. These findings suggest the need to select qualified translators and revisers, provide standard terminologies, and strengthen translation policies to convey disaster information to foreigners more effectively. (Shandong University)
package com.nepxion.discovery.platform.server.mapper; /** * <p>Title: Nepxion Discovery</p> * <p>Description: Nepxion Discovery</p> * <p>Copyright: Copyright (c) 2017-2050</p> * <p>Company: Nepxion</p> * * @author <NAME> * @version 1.0 */ import org.apache.ibatis.annotations.Mapper; import org.apache.ibatis.annotations.Param; import com.baomidou.mybatisplus.core.mapper.BaseMapper; import com.baomidou.mybatisplus.core.metadata.IPage; import com.nepxion.discovery.platform.server.entity.dto.SysAdminDto; import com.nepxion.discovery.platform.server.entity.vo.AdminVo; @Mapper public interface AdminMapper extends BaseMapper<SysAdminDto> { IPage<AdminVo> list(IPage<AdminVo> page, @Param("mode") int mode, @Param("name") String name); }
The Multi-Scale Network Landscape of Collaboration Propelled by the increasing availability of large-scale high-quality data, advanced data modeling and analysis techniques are enabling many novel and significant scientific understanding of a wide range of complex social, natural, and technological systems. These developments also provide opportunities for studying cultural systems and phenomena—which can be said to refer to all products of human creativity and way of life. An important characteristic of a cultural product is that it does not exist in isolation from others, but forms an intricate web of connections on many levels. In the creation and dissemination of cultural products and artworks in particular, collaboration and communication of ideas play an essential role, which can be captured in the heterogeneous network of the creators and practitioners of art. In this paper we propose novel methods to analyze and uncover meaningful patterns from such a network using the network of western classical musicians constructed from a large-scale comprehensive Compact Disc recordings data. We characterize the complex patterns in the network landscape of collaboration between musicians across multiple scales ranging from the macroscopic to the mesoscopic and microscopic that represent the diversity of cultural styles and the individuality of the artists. Introduction Advances in information science and technology have enabled us to amass large-scale data from a wide range of social and cultural phenomena, stimulating the development of advanced data modeling and analysis methods for extracting useful information. This type of large-scale data collection and analysis is not limited to traditional scientific and engineering fields but is reaching into a wider range of fields such as social science and humanities, calling for deep and serious transdisciplinary effort to make full use of its universal impact . Recently, various mathematical and computational techniques have been applied to cultural data sets including recipes, music, paintings, etc. to gain new insights and understanding, further expanding the application in their fields . Of many new data modeling frameworks, networks in particular have gained popularity for analyzing systems whose structure and function depend critically on the connections or correlations between their components . There are several essential connections between networks and culture that render such network framework necessary for a scientific understanding of culture. First, a cultural product invariably cites existing products or ideas either explicitly or implicitly. Second, most cultural products are borne out of collaborations between artists and practitioners that act as a conduit for ideas and inspirations. Accordingly there have been notable scientific studies of culture and cultural phenomena from the network perspective focusing on the relationships among cultural products, creators, consumers, etc. . In this paper we study the network of the creators and practitioners of culture to understand the patterns of collaborations and associations, and what they tell us about the nature of cultural prominence and diversity. Specifically we analyze the network of western classical musicians by leveraging one of the most comprehensive Compact Disc (CD) recordings databases in a rigorous fashion using established and new methods. 
While this paper focuses solely on music as the area of application, the analytical framework we propose should be generally applicable to any similar type of network. Music, one of the most significant and oldest cultural traditions, boasts a rich history of cross-pollination of ideas and practices through time . There have been a number of studies on musician networks: Silva et al. studied the Brazilian popular musician network, finding basic properties such as the small-world effect and the power-law degree distribution ; Park et al. considered two distinct relationship types between contemporary pop musicians (musical similarity and collaboration) and showed that they exhibit vastly different network patterns ; Gleiser and Danon studied the social network of jazz musicians and found communities of musicians that correspond to regional differences and racial segregation ; and Park et al. studied the network of classical composers who formed communities that corresponded to a modern musicological understanding of the history of music . Those works, while having pioneered in the application of network framework to musical data, show two apparent shortcomings. First, many deal with a relatively narrow period in the history of music, mainly the latter half of the 20th century and beyond. Prominently missing is the entire body of western classical music, one of the richest musical traditions . Second, they all ignore that a musical composition is distinct from many other art forms such as paintings or sculptures in that it requires a collaboration or combination between people with differing roles, i.e. individuals or group performers, composers, conductors, etc. Therefore this is a heterogeneous network where the meaning of an edge depends on which node types it connects. By leveraging one of the largest databases on western classical music performance recordings that incorporates the network heterogeneity, this paper aims to shed light on the complex and heterogeneous nature of the network of collaborations in music and, more broadly, culture. We study the network of western classical musicians to find significant global patterns, to uncover principles that drive the connections between musicians, and to identify local network structures that allow us to represent the rich diversity within culture. Our network is constructed from ArkivMusic (http:/www.arkivmusic.com) database, an online vendor of classical music CDs. For each CD it provides its title, release date, label, and four classes of musicians (composer, performer, conductor, and ensemble) whose compositions or performances were featured on it. After removing the so-called compilation albums that are repackaged collections of previously released recordings, we are left with 67 277 CDs and 75 604 musicians, which can be represented as a bipartite network with 428 728 edges as shown schematically in Fig 1(A). Fig 1(B) shows a small backbone of the network of composers as an example (subsequent analyses are performed on the original bipartite network to minimize loss of information). Specifically, we focus on the network patterns on three scales which we label the macroscopic, the mesoscopic, and the microscopic (see Fig 2). On the macroscopic scale we study the global, bird's-eye view of the network, which allows us to identify those musicians with universal prominence. 
On the mesoscopic scale we study the modular structure (node subgroups) of the network to find the strength of correlation between node characteristics and connection. Some results from the macroscopic and mesoscopic scales have been previously reported by us, with some updates here that reflect the most up-to-date data, although the present account stands on its own for completeness and consistency, leading to our new and rigorous analysis on the microscopic scale and to a unified view of the various scales. On the microscopic scale we present how to quantify the relevance of all other musicians to a specific musician, which allows us to identify the smallest, local network landscape. Finally, we conclude by discussing how these multiscale patterns relate to one another, letting us establish a coherent relationship between universality and diversity, two essential yet seemingly contradictory characteristics of culture. Macroscopic Network Patterns: Global Characteristics On the macroscopic scale, our network shows many common characteristics of large-scale complex networks. For instance, the network possesses a giant component comprising 98.8% of all nodes, meaning that most musicians are connected by a path, regardless of their active era or specialties. The average geodesic (the shortest path between two nodes) length is 5.6 while the diameter (longest geodesic length) is 18 in the giant component, showing the small-world property. These quantities are summarized in Table 1. The mean degree of musicians, i.e. the average number of CDs on which a musician's composition or performance is featured, is 5.7. Compared with the total number of CDs (67 277), this tells us that the network is very sparse. The distribution of the degree is very skewed, approximately a power law (Fig 3(A)). In Table 2 we show, for each musician class, the ten highest-degree musicians. For instance, Wolfgang Amadeus Mozart (1756-1791) is the most popular composer, featured on 5 288 CDs. The tenor Plácido Domingo (1941-) is the most popular performer, featured on 354 CDs. Herbert von Karajan (1908-1989) and the London Symphony Orchestra (founded in 1904) are the most prolific recording conductor and ensemble, respectively. If we accept the degree as a simple measure of the importance of a musician, this appears to suggest that these few musicians dominate the rest in terms of musical importance. But this may be the very reason why a macroscopic characteristic such as the degree distribution is insufficient in properly capturing the nuances in the role or importance of a musician; it would be absurd to assert, for instance, that vocalists are more important than organists simply because their degrees are larger. This problem is found again when we look at groups of musicians. Fig 2. The multiscale views of the network landscape of the classical music network. On the macroscopic level (top), we take a bird's-eye view of the global characteristics of the network. On the mesoscopic scale (middle), we investigate the community structure of the network that reveals the homophily based on musician characteristics such as period and nationality. On the microscopic scale (bottom), we find the local network landscape around a specific musician by quantifying the relevance of others to the musician. This type of multiscale view allows us to correctly characterize the relationships between musicians and their roles in the cultural collaboration network, where a simple global prominence (top) can easily eclipse the rich local structures that represent diverse styles (middle) and individuality of artists (bottom).
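To make the macroscopic quantities above concrete, the following sketch shows how they could be computed from a CD-musician edge list. This is an illustrative reconstruction, not the authors' code: the edge-list format, the node labelling, and the use of networkx are our own assumptions.

# Illustrative sketch: macroscopic statistics of the bipartite CD-musician network.
# Assumes `edges` is an iterable of (cd_id, musician_id) pairs.
from collections import Counter
import networkx as nx

def macroscopic_stats(edges):
    G = nx.Graph()
    for cd, musician in edges:
        G.add_node(("cd", cd), kind="cd")
        G.add_node(("mus", musician), kind="musician")
        G.add_edge(("cd", cd), ("mus", musician))

    # Fraction of nodes in the giant component (98.8% in the paper).
    giant = max(nx.connected_components(G), key=len)
    giant_fraction = len(giant) / G.number_of_nodes()

    # Mean degree of musician nodes (5.7 in the paper) and the degree
    # histogram whose tail is reported to be roughly a power law.
    musicians = [n for n, d in G.nodes(data=True) if d["kind"] == "musician"]
    degrees = [G.degree(n) for n in musicians]
    mean_degree = sum(degrees) / len(degrees)
    degree_hist = Counter(degrees)
    return giant_fraction, mean_degree, degree_hist

Plotting degree_hist on log-log axes would reproduce the skewed, approximately power-law shape reported in Fig 3(A).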
Mesoscopic Network Structures: Communities The previous analysis lets us see the global, system-wide characteristics of the network such as the existence of a giant component and the small-world property. The most notable shortcoming was that a few individuals and groups appeared to be dominating the network, masking other important players in music (Fig 3). According to a modern understanding of networks, in fact, the small-world property by no means rules out interesting local structures in a network that represent groups of nodes called "modules" or "communities." A common definition of a network community is a group of nodes that are more densely connected between themselves than to the rest of the network. Communities are therefore a way of partitioning a network into meaningful mesoscopic substructures. Of the many useful algorithms for identifying communities, we apply the Louvain algorithm to our network, which yields 614 communities (see S2 Fig for more information). In Fig 4 we show the four largest communities, along with their notable musicians' names. An examination of these suggests a positive correlation between musician characteristics and community structures, a sign of homophily or assortative mixing. For instance, community A contains many Austrian-German Romantic composers such as Ludwig van Beethoven (1770-1827), Franz Schubert (1797-1828), and Johannes Brahms (1833-1897). Community B, on the other hand, contains Aaron Copland (1900-1990), Samuel Barber (1910-1981), and John Cage (1912-1992), all prominent US-born Modern composers. To properly characterize a community in terms of such musician attributes as nationality and period, we use the following Z-score to quantify the degree of overrepresentation of a musician attribute a in community s: Z_a^s = (n_a^s − n p_a) / √(n p_a (1 − p_a)), where n_a^s is the number of musicians with attribute a inside community s, n is the number of all musicians in the network, and p_a is the fraction of musicians who have attribute a in the network. It also shows the dominance of Spain and Latin America, known for boasting a strong guitar tradition in modern times. Community D also clearly demonstrates the importance of local structures in understanding how diversity is represented in a cultural network: while undoubtedly a significant component of music, musicians specializing in the guitar are absent or severely underrepresented in Table 2.
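As a sketch of how this overrepresentation analysis could be carried out, the snippet below computes the Z-score defined above for every (community, attribute) pair. It assumes the community partition has already been obtained (for instance with a Louvain-style algorithm) and is passed in as a mapping, and it uses the binomial standard deviation in the denominator exactly as reconstructed above; neither detail should be read as the authors' implementation.

# Illustrative sketch: overrepresentation Z-scores per (community, attribute).
# `communities` maps community id -> set of musicians; `attr` maps musician ->
# attribute value (e.g. period or nationality).
from math import sqrt

def overrepresentation_z(communities, attr):
    musicians = set().union(*communities.values())
    n = len(musicians)
    z = {}
    for a in set(attr.values()):
        p_a = sum(1 for m in musicians if attr.get(m) == a) / n
        for s, members in communities.items():
            n_sa = sum(1 for m in members if attr.get(m) == a)
            std = sqrt(n * p_a * (1 - p_a))
            if std > 0:
                z[(s, a)] = (n_sa - n * p_a) / std
    return z

Large positive values of z[(s, a)] then flag attributes, such as US-born Modern composers, that are overrepresented in a given community.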
Microscopic Network Structures: Egocentric Relevance That we had to look into smaller-scale network structures to uncover important aspects of a cultural network prompts us to delve further into an even smaller scale. As mesoscopic refers to the network structure of groups of nodes, we take microscopic to mean the network structure centered on an individual node of the network. Traditionally, the network arranged around a specific node at the center is called the "egocentric network" and the central node the "ego." Here we focus on determining the significance or relevance of network nodes to the specific ego, and what it can tell us about the nature of musical combinations. Perhaps the simplest sensible measure of the relevance of a node to another is the geodesic distance between the two. But geodesic distance is well known to be of limited use for the following reasons: First, since the geodesic distance is an integer and tends to be small due to the small-world property, very many nodes tend to be at the same distance from the ego. This results in a poor resolution, and not many interesting findings can be made. Second, geodesic distance does not consider the existence of multiple paths between two nodes that could also indicate a varying level of relevance between the nodes. Here we overcome both limitations via two straightforward modifications to the widely used PageRank of Google, which adopts the concept of the random walk. We present the detailed steps for clarity. In PageRank, one assumes a random walker who visits the nodes in the network according to the following rule: at each time step, with probability α the walker follows a randomly chosen edge from the currently occupied node (the "walk" dynamic), or with probability 1−α it jumps to a randomly chosen node in the network (the "jump" dynamic, no edge necessary). After a very large number of movements, the PageRank of a node is equal to its occupation probability. PageRank in this original form is still a global measure (its Pearson correlation coefficient with the degree is 0.99 in our network), necessitating modifications to measure a node's relevance to the ego. This is achieved by modifying the jump dynamic so that the walker jumps to the ego only. This functions to reposition the walker onto the ego so that a node close to the ego, as well as one having more paths leading to it, will be visited more often, overcoming the aforementioned shortcomings of geodesic distance. The resulting occupation probability we call the Egocentric PageRank (EP) P_i^e, defined for node i and ego e, which can be mathematically represented as P_i^e = α Σ_j (A_ij / k_j) P_j^e + (1 − α) δ_i^e, (1) where A_ij is the adjacency matrix, k_j is the degree, and δ_i^e is the Kronecker delta. This is a specific application of personalized PageRank, also known as Random Walk with Restart (RWR). Other examples of random walk-based relevance between two nodes include one based on the counting of short-length random walks, which was used for community detection. Now we represent EP in vector and matrix form: P^e = (1 − α)(I − α A K^(−1))^(−1) δ^e, (2) where K is the diagonal matrix of node degrees. The Pearson correlation coefficient between EP (averaged over all egos) and the degree is 0.19, a much smaller value, but it shows that the degree is still influential; it is the nature of the walk dynamics, where a high degree generally increases the chance of the node being occupied by the surfer. With this in mind, we try the following modification to the walk dynamic: the walker now chooses a target node with a probability inversely proportional to its degree. We call the resulting occupation probability the Degree-Neutralized Egocentric PageRank (DNEP), given in vector and matrix form as P̃^e = (1 − α)(I − α K^(−1) A H)^(−1) δ^e, where H is a diagonal matrix with H_j = 1 / Σ_{l∈N(j)} (1/k_l), the reciprocal of the sum of the inverses of the degrees of node j's neighbors. This degree-neutralized pairwise quantity is reminiscent of similarity measures such as SimRank proposed by Jeh and Widom, or the regular equivalence discussed in the literature. Note, however, that our quantity is designed to find the relevance (one could also say generalized closeness) between two nodes by refining PageRank, rather than their similarity.
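A minimal power-iteration sketch of the two measures, written directly from the walk rules described above rather than from the authors' implementation, is given below. The damping value α = 0.85 is an assumed default (the text does not fix it here), and the graph is assumed to have no isolated nodes.

# Illustrative sketch of EP and DNEP via power iteration.
# `adj` is an adjacency list: node -> set of neighbours (every node has >= 1 neighbour).

def egocentric_pagerank(adj, ego, alpha=0.85, iters=200):
    """EP: standard random walk, but every 'jump' returns to the ego (Eq 1)."""
    p = {v: 0.0 for v in adj}
    p[ego] = 1.0
    for _ in range(iters):
        nxt = {v: 0.0 for v in adj}
        nxt[ego] += 1.0 - alpha                     # jump always lands on the ego
        for j in adj:
            share = alpha * p[j] / len(adj[j])      # uniform step to neighbours
            for i in adj[j]:
                nxt[i] += share
        p = nxt
    return p

def degree_neutralized_ep(adj, ego, alpha=0.85, iters=200):
    """DNEP: the walker picks a neighbour with probability proportional to 1/degree."""
    p = {v: 0.0 for v in adj}
    p[ego] = 1.0
    for _ in range(iters):
        nxt = {v: 0.0 for v in adj}
        nxt[ego] += 1.0 - alpha
        for j in adj:
            norm = sum(1.0 / len(adj[i]) for i in adj[j])   # this is 1 / H_j
            for i in adj[j]:
                nxt[i] += alpha * p[j] * (1.0 / len(adj[i])) / norm
        p = nxt
    return p

Both functions return an occupation-probability dictionary over all nodes, so the EP and DNEP rankings for a given ego are obtained simply by sorting the returned values.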
Now the correlation between degree and DNEP of the nodes is 0.003, showing that the degree effect has been almost eradicated. To see the difference between EP and DNEP, we define the Egocentric Relevance (ER) R_i^e to be the linear combination of the two, R_i^e(β) = (1 − β) P_i^e + β P̃_i^e, with β ∈ [0, 1]. ER thus changes continuously from EP to DNEP as β is tuned from 0 to 1. We now apply this method to the prominent violinist Kyung-Wha Chung (1948-) as an example, which is presented in Fig 5 (see S3 Fig for examples of other musicians). Fig 5(A) shows how Chung's relevant musicians change as β is tuned from 0 to 1. When β = 0 (EP), although the top-ten list shares seven musicians with Table 1 (Tchaikovsky, London Symphony Orchestra, J. S. Bach, Beethoven, Royal Philharmonic Orchestra, W. A. Mozart, and Brahms), it also features those associated with her signature debut album (André Previn and the London Symphony Orchestra). EP therefore already succeeds, to some degree, in bringing forth those more intimately relevant to the ego. The top list of relevant musicians according to DNEP is even more drastically different: first of all, it shares no name with Table 1, and only one name (Charles Dutoit) with the EP list. At the top of the list is the Chung Trio, composed of Chung and her siblings, cellist Myung-Wha Chung (1944-) and pianist-conductor Myung-Whun Chung (1953-), ranked only 35th according to EP. This shows that DNEP performs even better than EP at identifying those intimately relevant to the ego: pianist Krystian Zimerman (1956-) at #4 is very well known for his Gramophone award-winning recording with Chung; conductor Sir Simon Rattle (1955-) at #10 is famous for his work with Chung and the Vienna Philharmonic. Fig 5(B) shows a more extensive egocentric network landscape around Chung according to DNEP for different musician classes (with degree 30 or larger). The distance from Chung is proportional to the log of the reciprocal of DNEP. Among ensembles, the most relevant is the Montreal Symphony Orchestra (founded in 1935), whose violin concerto recording with conductor Charles Dutoit (1936-) and Chung is very famous in the classical music community. Among composers it is Max Bruch (1838-1920) who is the most relevant to Chung. This is due to her recording of Bruch's concertos, considered to be her signature achievement, pushing out better-known names including Beethoven and Bach. Chung's recordings of Jean Sibelius (1865-1957) and Béla Bartók (1881-1945) are also famous, bringing them close to the center. With the success of EP and DNEP in identifying the egocentric network landscape, we now ask if we can use these measures with a group of nodes as the ego. For instance, one may be interested in those relevant to a specific instrument, not merely to one individual. One possibility is to add up a musician i's relevance to all nodes e in the given group of interest G, i.e. Σ_{e∈G} R_i^e. Yet we would also like to identify those broadly relevant to the members of G. We therefore propose the group-level egocentric relevance as follows: a product of two terms, the sum of relevance and an entropy-like term that rewards those that are more uniformly relevant to the members of the group. As an example application, we have calculated the relevance of the composers with respect to the five largest performer groups (violinists, cellists, pianists, tenors, and sopranos). Then we took the top-100 composers in DNEP for each group, and counted how many times (one to five) each is included in the lists as a measure of the composer's versatility.
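The interpolation and the versatility count can be sketched as follows. The group-level measure with the entropy-like factor is only described qualitatively above, so it is deliberately not reproduced here; `dnep_rank` is a hypothetical mapping from each performer group to its composers sorted by decreasing DNEP.

# Illustrative sketch: Egocentric Relevance interpolation and the Fig 6 versatility count.
from collections import Counter

def egocentric_relevance(ep, dnep, beta):
    """R^e(beta) = (1 - beta) * EP + beta * DNEP, for beta in [0, 1]."""
    return {v: (1.0 - beta) * ep[v] + beta * dnep[v] for v in ep}

def versatility_counts(dnep_rank, top_n=100):
    """Count in how many performer groups each composer appears among the top N."""
    counts = Counter()
    for group, ranked_composers in dnep_rank.items():
        for composer in ranked_composers[:top_n]:
            counts[composer] += 1
    return counts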
The number of composers and some notable names are given in Fig 6. We see that, perhaps surprisingly, it is only Schubert that is intimately relevant to all five groups, showing his versatility and virtuosity in both instrumental and vocal music. Mozart, Beethoven, J. S. Bach, and Haydn are intimately relevant to four of the five groups. The significance of Fig 6 lies in the fact that it shows an explicit connection between the macroscopic and the microscopic network landscape patterns. Since the figure was based on DNEP, a measure that had nearly eradicated the degree effect, it is a representation of the local structures in the classical music network. Interestingly, however, it reproduces many names that were prominent on the macroscopic scale as those who are versatile and relevant to many classes of musicians. Fig 6 therefore suggests that universality in culture stems from versatility on the microscopic level, which appears as prominence on the macroscopic scale, while diversity represents the existence of many virtuosi in different subfields. Fig 6. Versatility of composers based on relevance to instrument groups. The number of composers highly relevant (ranked 100th or higher) to any of the five largest instrument groups (violin, cello, piano, tenor, and soprano) is shown in the circles. Composers relevant to multiple instrument groups in the absence of the degree effect tend to be the universally recognized composers, revealing the connection between macroscopic and microscopic network patterns. Discussion and Conclusion Our work shows how we can utilize the network framework to understand the landscape of cultural collaboration and combination based on large-scale databases. In order to properly understand diversity and universality, two of the most significant aspects of cultural creativity, we needed to take a multi-scale view of the network, incrementally revealing the finer and more complex patterns from the network. On the macroscopic scale we retrieve some common features of social-type networks such as the power-law degree distribution and the small-world property. The inadequacy of a single-scale analysis becomes immediately clear with the macroscopic analysis; the power-law degree distribution, for instance, suggests a strict ordering of the importance of musicians across the entire network. This is, of course, a problematic view of culture, where diversity and heterogeneity are treasured. On the mesoscopic scale we presented quantitatively the correlation between the modular structure of the network and various attribute data (periods, instruments, and nationalities), demonstrating a way to establish a connection between information mined from massive digital data and a common musicological understanding of the history of music. We then conducted an investigation on an even smaller scale to see how one can characterize the network properties centered on individuals. We developed two versions of egocentric relevance measures to achieve this, enabling us to discover the very musicians uniquely relevant to the ego. This allowed us to finally understand how universality and diversity, two seemingly paradoxical aspects of culture, could coexist and be represented in a coherent fashion. We believe that our work here represents a starting point for exploring the multiscale patterns of cultural networks.
With a vast array of crucial questions still to be explored therein, the potential for advances in the scientific study of cultural and humanities subjects utilizing large-scale data must be significant. Supporting Information. S2 Fig. In the boxes we show which musician attributes (composer periods, performer positions, and nationalities) are overrepresented in each community: for instance, community E is the Early music and Pre-Baroque community; F is the vocal-centric Post-Baroque community; G is the instrument-centric Post-Baroque and Classical community; H is the Romantic Italian opera community; I is the Romantic organ community; and finally, J is the Europe-centric nationalist community. S3 Fig. Highly relevant musicians tend to have lower degrees but to be more relevant to the ego. More widely related high-degree figures such as Johann Sebastian Bach are pushed outwards, exhibiting the ability of DNEP to distinguish between ego-specific and universally relevant musicians. (EPS) S1 Dataset. CD-Musician network data and Musician metadata. Two data sets were used for this research, provided in a zipped file "S1 Dataset.zip" containing two files in tab-separated plain text format. "NetworkList.dat" is the CD-musician network edge list, and "artist_metadata.dat" is the musician metadata list. (ZIP)
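For completeness, a hypothetical loader for the two tab-separated files described in S1 Dataset is sketched below. The column ordering and the metadata layout are assumptions made purely for illustration, since they are not spelled out here.

# Illustrative loader for the S1 Dataset files (column layout assumed).
import csv

def load_edge_list(path="NetworkList.dat"):
    with open(path, newline="", encoding="utf-8") as f:
        for row in csv.reader(f, delimiter="\t"):
            if len(row) >= 2:
                yield row[0], row[1]          # assumed order: (cd_id, musician_id)

def load_musician_metadata(path="artist_metadata.dat"):
    metadata = {}
    with open(path, newline="", encoding="utf-8") as f:
        for row in csv.reader(f, delimiter="\t"):
            if row:
                metadata[row[0]] = row[1:]    # assumed: musician_id, then metadata fields
    return metadata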
#include <iostream> #include <algorithm> using namespace std; long long n, d, m; long long a[105]; int main(){ cin >> n >> d; for( int i=1; i<=n ;++i ) cin >> a[i]; cin >> m; long long se = 0; if( m > n ){ se -= ((m-n)*d); } sort( a+1, a+n+1 ); for( int i=1, I=min(n, m); i<=I; ++i ) se += a[i]; cout << se << endl; }
def wrap_itk_index(x): idx = itk.Index[2]() idx.SetElement(0, int(x[0])) idx.SetElement(1, int(x[1])) return idx
The latest season of RuPaul’s Drag Race does not feature the phrase ‘You’ve Got She-Mail’ – after a row over transgender slurs. The drag competition reality show last year attracted criticism for a segment in which contestants were asked to guess from a photo whether someone was a ‘female or she-male’. Logo TV later pulled the segment from all future broadcasts – and RuPaul has also vented about being banned from using the word “tranny”. The show has made further changes for the seventh season of the drag competition, which premières in the US this week. The drag star’s catchphrase ‘Ooh, girl, you’ve got she-mail’ – which played near the start of each episode when the contestants received a challenge – has been axed from the series. In its place, RuPaul now has a new catchphrase, “She done already done had herses”. Speaking alongside RuPaul’s Drag Race judge Michelle Visage on their podcast recently, RuPaul said: “Did you call me granny girl? That is your new thing, because we can’t say it with a T anymore.” Michelle Visage said: “We’re just going to say it for everything that’s T-related. I checked the granny in my car…” The seventh season of RuPaul’s Drag Race started on LogoTV today in the US. Netflix are yet to confirm a UK broadcast date.
The Cross, and Necessity: A Trinitarian Perspective I argue that the understanding of the necessity of the cross for divine reconciliation needs to be re-evaluated in light of two components of a classical trinitarian metaphysic: the doctrine of inseparable operations and the doctrine of trinitarian missions. Drawing from Thomas Aquinas and Bernard Lonergan, I suggest that the economic actions of the incarnate Son are not antecedent conditions, but consequent conditions of God’s ultimate salvific ends. After sharpening this proposal in conversation with Nicholas Lombardo’s recent work, I further clarify the particular kind of necessity that attaches to the work of Christ, before responding to several objections.
import * as bip32 from 'bip32'; import * as bip39 from 'bip39'; import { networks, payments } from 'ldk'; import * as bitcoinMessage from 'bitcoinjs-message'; import { SignedMessage } from 'marina-provider'; export async function signMessageWithMnemonic( message: string, mnemonic: string, network: networks.Network ): Promise<SignedMessage> { const seed = await bip39.mnemonicToSeed(mnemonic); const node = bip32.fromSeed(seed, network); const child = node.derivePath("m/84'/0'/0'/0/0"); const signature = await bitcoinMessage.signAsync(message, child.privateKey!, true, { segwitType: 'p2wpkh', }); const pay = payments.p2wpkh({ pubkey: child.publicKey, network }); return { signature: signature.toString('base64'), address: pay.address!, publicKey: child.publicKey.toString('hex'), }; }
<filename>src/Parser/AST/Nodes.h<gh_stars>0 #pragma once #include <vector> #include <memory> #include <Parser/ParserVal.h> #include <functional> #include <json.hpp> #include <boost/assert.hpp> #include <RecordManage/TableManager.h> #include <string> #include <Parser/ParsingError.h> namespace tinydbpp { namespace ast { using json = nlohmann::json; using std::string; class Node { public: std::shared_ptr<ParserVal> ch[3]; Node():ch{nullptr}{} virtual ~Node() {} }; class Statement :public Node { public: enum Type { SysManagementBegins, ShowDbs, CreateDb, DropDb, UseDb, ShowTables, SysManagementEnds, TableManagementBegins, CreateTable, DropTable, DesribeTable, InsertItem, DeleteItem, UpdateItem, SelectItem, TableManagementEnds, CreateIdx, DropIdx }; public: virtual json exec(); Statement(Type type); virtual ~Statement(); Type getType() const { return type; } private: Type type; }; class SysManagement :public Statement { public: SysManagement(Statement::Type type) :Statement(type) { } SysManagement(const ParserVal& val, Statement::Type type) :Statement(type), target(std::make_shared<ParserVal>(val)) { } virtual ~SysManagement(); std::shared_ptr<const ParserVal> getTarget() const; private: std::shared_ptr<ParserVal> target; }; class TableManagement :public Statement { public: TableManagement(Statement::Type type); virtual ~TableManagement(); private: }; class Statements :public Node { public: Statements() { } virtual ~Statements(); const std::vector<std::shared_ptr<Statement>> get() const { return statements; } void addStatementToFront(std::shared_ptr<Statement> stmt); private: std::vector<std::shared_ptr<Statement>> statements; }; class Field : public Node{ public: virtual ~Field(); Field(){ can_null = true; is_key = false; is_primary_key_stmt = false; size = 0; } Field(std::string _n, std::string _t, int s, bool _null, bool _key, bool _is_p_stmt = false):name(_n),type(_t),size(s),can_null(_null), is_key(_key) ,is_primary_key_stmt(_is_p_stmt){ } std::string name; std::string type; int size; bool can_null; bool is_key; bool is_primary_key_stmt; }; class FieldList : public Node{ public: std::vector<Field> vec; void checkPrimaryKey(){ for(auto &f : vec) if(f.is_primary_key_stmt) for(auto &t : vec) if(t.name == f.name && !t.is_primary_key_stmt) t.is_key = true; } }; class Value : public Node{ public: std::string type; std::string strVal; int iVal; Value(std::string _t):type(_t){} Value(std::string _t, std::string colv):type(_t){ if(*(colv.end() - 1) == 1) type = "NULL"; else if(_t == "int") iVal = *(int*)colv.c_str(); else if(_t == "varchar") strVal = std::string(colv.begin(), colv.end() - 1); } std::string toString(std::string req_type) { if(type != "NULL" && req_type != type) throw TypeError("TypeError", type, req_type); if(type == "NULL"){ if(req_type == "varchar") return std::string(1, (char)1); else if(req_type == "varchar") return std::string(4, '\0') + std::string(1, (char)1); else BOOST_ASSERT(0); }else if(type == "int"){ std::string v_str(5, '\0'); v_str.replace(v_str.begin(), v_str.begin() + 4, std::string((char*)&(iVal), (char*)&(iVal)+ 4)); return v_str; }else if(type == "varchar"){ return strVal + std::string(1, '\0'); }else BOOST_ASSERT(0); return ""; } //col int string }; class ValueList : public Node{ public: std::vector<std::shared_ptr<Value>> vec; void push_back(std::shared_ptr<Value> p){ vec.push_back(p); } }; class ValueLists : public Node{ public: std::vector<std::shared_ptr<ValueList>> vec; void push_back(std::shared_ptr<ValueList> p){ vec.push_back(p); } }; typedef 
std::function< std::vector<Value> (const Item &, const std::string&) > Selector; class WhereClause : public Node{ public: std::vector<std::string> names, ops; std::vector<Value> exprs; void becomeCompare(const std::string& colname, const std::string& op, Value v); void becomeIsNull(const std::string& colname); void becomeIsNotNull(const std::string& colname); void becomeAnd(std::shared_ptr<WhereClause> w1, std::shared_ptr<WhereClause> w2); void becomeLike(const std::string &colname, const std::string &regex); /* * @param actually two return value, whether can be optimized by index and which col * @return which table to reduce * find the best table to reduce */ std::string getNextAssignTableName(bool & can_index, int & col_index, std::string& , const std::vector<std::string>& tables); /* * @param table_name * @return checker function * get a checker function to check a item in table "table_name" whether meet the condition * must guarantee that if column has no prefix "table_", it belongs to table "table_name" */ Checker getChecker(std::string table_name); void dfs(std::vector< std::vector<Value> > &ans, const std::vector<std::string>& table_names, const std::vector<Value>& prefix, const Selector & selector, std::vector<std::string>& assigned_table); WhereClause assign(const std::string& table_name, const Item & item); }; class SetClause : public Node{ public: std::vector<std::string> cols; std::vector<Value> values; void push_back(std::string colname, Value v){ cols.push_back(colname); values.push_back(v); } Changer getChanger(const std::string& table_name) { auto td = TableManager::getInstance()->getTableDescription(table_name); std::vector<int> offsets; std::vector<std::string> embed_values; for(int i = 0;i < cols.size();i++) { int offset = td->getOffset(cols[i]); if(offset == -1) throw NoThisColumnError(td->name, cols[i]); offsets.push_back(offset); embed_values.push_back(values[i].toString(td->col_type[offset])); } return ([offsets, embed_values](Item & item){ for(int i = 0;i < offsets.size();i++) item[offsets[i]] = embed_values[i]; return; }); } }; class ColList : public Node{ public: std::vector<std::string> cols; void push_back(std::string colname){ cols.push_back(colname);} static void split(const std::string& full_name, const std::string& default_table, std::string & table, std::string & col){ size_t pos = full_name.find("."); table = pos == std::string::npos? default_table : string(full_name.begin(), full_name.begin() + pos); col = pos == std::string::npos? 
full_name : string(full_name.begin() + pos + 1, full_name.end()); } }; class SelectCols : public Node{ public: bool isAll; std::shared_ptr<ColList> col_list; SelectCols():isAll(false),col_list(nullptr){} void setAll(){ isAll = true;} void setColList(std::shared_ptr<ColList> _c){ col_list = _c;} Selector getSelector(const std::vector<std::string>& table_names) { std::vector< std::vector<int> > offsets; for(auto & table_name : table_names) { std::vector<int> this_offsets; auto td = TableManager::getInstance()->getTableDescription(table_name); if(isAll) for(int i = 0;i < td->col_name.size();i++) this_offsets.push_back(i); else for(std::string & full_name : col_list->cols){ std::string this_table, this_col; ColList::split(full_name, table_name, this_table, this_col); if(this_table == table_name) this_offsets.push_back(td->getOffset(this_col)); } offsets.push_back(this_offsets); } return [table_names, offsets](const Item & item, const std::string& table_name)-> std::vector<Value>{ std::vector<Value> ret; auto td = TableManager::getInstance()->getTableDescription(table_name); for(int i = 0;i < table_names.size();i++) if(table_name == table_names[i]) for(int j = 0;j < offsets[i].size();j++) ret.push_back(Value(td->col_type[j], item[j])); return ret; }; } }; class TableList : public Node{ public: std::vector<std::string> tables; void push_back(std::string tablename){ tables.push_back(tablename);} }; } }
/** * CubeView is the class that extend GLSurfaceView and it will acts as a View class to * display a rolling dice cube on the application. * */ public class CubeView extends GLSurfaceView implements Callback { /** * An openGL renderer object that will render the cube on GLSurfaceView. */ private OpenGLRenderer renderer; /** * A view constructor that will initialize the view. * @param context Desired activity context. * @param attributeSet Layout Attribute set to layout configuration of the view. */ public CubeView(Context context, AttributeSet attributeSet) { super(context, attributeSet); renderer = new OpenGLRenderer(context); setRenderer(renderer); getHolder().addCallback(this); } @Override public void surfaceCreated(SurfaceHolder holder) { // TODO Auto-generated method stub super.surfaceCreated(holder); } @Override public void surfaceChanged(SurfaceHolder holder, int format, int w, int h) { // TODO Auto-generated method stub super.surfaceChanged(holder, format, w, h); } }
def _retrieve_metadata_from_uri(self, uri: str) -> Dict[Any, Any]: if uri and uri.startswith('ipfs://'): uri = urljoin(self.IPFS_GATEWAY, uri.replace('ipfs://', '')) if not uri or not uri.startswith('http'): raise MetadataRetrievalException(uri) try: logger.debug('Getting metadata for uri=%s', uri) response = requests.get(uri) if not response.ok: logger.debug('Cannot get metadata for uri=%s', uri) raise MetadataRetrievalException(uri) else: logger.debug('Got metadata for uri=%s', uri) return response.json() except (requests.RequestException, ValueError) as e: raise MetadataRetrievalException(uri) from e
export { default as LocalTestInfoNotFoundError } from './LocalTestInfoNotFoundError' export { default as TestCaseParseError } from './TestCaseParseError'
/** * Session Bean implementation class ConcepService * * Provides management and persistent services for proven concepts. By default, the proven context * and base uri are used. * */ @Stateless @LocalBean public class ConceptService { private final Logger log = LoggerFactory.getLogger(ConceptService.class); private ProvenConfig pg; @EJB private StoreManager sm; private ObjectConnection oCon = null; private boolean useIdb; private String idbDB; private String idbRP; private String idbUrl; private String idbUsername; private String idbPassword; // ////////////////////////////////////////////////////////// // Named queries // @formatter:off private static final String findSingleByName = "SELECT ?s WHERE {?s " + toIri(HAS_NAME_PROP) + " ?name }"; private static final String findNativeSourcesByDomainName = "SELECT ?s WHERE { ?s " + toIri(HAS_DOMAIN_MODEL_PROP) + " ?domain . " + " ?domain " + toIri(HAS_NAME_PROP) + " ?name " + " } "; private static final String findNativeSourcesByDomainName2 = "SELECT ?s WHERE {?s ?p ?o }"; // @formatter:on // ////////////////////////////////////////////////////////// @PostConstruct public void postConstruct() { try { this.oCon = sm.getObjectStoreConnection(); } catch (Exception e) { e.printStackTrace(); throw new EJBException("Borrow connection failed in construction of ConceptService"); } pg = ProvenConfig.getB2SConfig(); useIdb = Boolean.valueOf(pg.getPropValue(PROVEN_USE_IDB)); idbDB = pg.getPropValue(PROVEN_IDB_DB); idbRP = pg.getPropValue(PROVEN_IDB_RP); idbUrl = pg.getPropValue(PROVEN_IDB_URL); idbUsername = pg.getPropValue(PROVEN_IDB_USERNAME); idbPassword = pg.getPropValue(PROVEN_IDB_PASSWORD); log.debug("USE IDB : " + useIdb); log.debug("IDB DB : " + idbDB); log.debug("IDB RP : " + idbRP); log.debug("IDB URL : " + idbUrl); log.debug("IDB Username : " + idbUsername); log.debug("ConceptService constructed ..."); } @PreDestroy public void preDestroy() { try { // TODO should uncommitted changes just be lost? // force a close and commit changes oCon.commit(); oCon.close(); log.debug("ConeptService destroyed ..."); } catch (RepositoryException e) { // swallow it, not need to throw an ejb exception at this point e.printStackTrace(); } log.debug("ConceptService destroyed ..."); } /** * Commits all changes and creates a new connection to the repository * * @throws Exception */ public void flush() { try { if (isValidConnection()) { oCon.commit(); oCon.close(); } oCon = sm.getObjectStoreConnection(); } catch (Exception e) { log.error("FLUSH FAILURE"); e.printStackTrace(); } } public void begin() { try { if (!oCon.isActive()) { oCon.begin(); } } catch (Exception e) { log.error("BEGIN FAILURE"); e.printStackTrace(); } } public void commit() { try { if (oCon.isActive()) { oCon.commit(); } } catch (Exception e) { log.error("COMMIT FAILURE"); e.printStackTrace(); } } public void rollback() { try { if (oCon.isActive()) { oCon.rollback(); } } catch (Exception e) { log.error("ROLLBACK FAILURE"); e.printStackTrace(); } } public ObjectConnection getObjectConnection() { return oCon; } public String addConcept(Concept c) throws RepositoryException { String ret = oCon.addObject(c).toString(); addBlobs(c.getRepresentations()); return ret; } public void addConcepts() { } public <T> List<T> getConcepts(Class<T> concept) throws Exception { return oCon.getObjects(concept).asList(); } public void addStatements(Collection<Statement> statements, Resource... 
resources) throws Exception { oCon.add(statements, resources); } public <T> void removeConcept(Concept c, Class<T> cType) throws Exception { oCon.removeDesignation(c, cType); } public void removeConcepts() { } public <T> T findConceptById(Class<T> conceptClass, String uri) throws Exception { T ret = null; log.debug("QUERY::" + findSingleByName); ret = oCon.getObject(conceptClass, uri); return ret; } /** * Finds a concept instance by name. Assumes a single result, will throw an exception if * multiple instances discovered. * * @param conceptClass * the concept class to search over * @param name * value of name property to search for * * @return returns the concept instance. Returns null if no concept found * * @throws Exception * if query failure occurs */ public <T> T findConceptByName(Class<T> conceptClass, String name) throws Exception { T ret = null; ObjectQuery q = oCon.prepareObjectQuery(findSingleByName); q.setObject("name", name); try { ret = q.evaluate(conceptClass).singleResult(); } catch (NoResultException nre) { ret = null; } return ret; } // TODO - concept specific queries should be moved to the concept class it's // associated with. Only concept wide queries should be provided here. Move // this method to the NativeSource concept class. public List<NativeSource> findNativeSourcesByDomain(DomainModel dm) throws Exception { List<NativeSource> ret = new ArrayList<NativeSource>(); log.debug("QUERY::" + findNativeSourcesByDomainName); log.debug("DOMAIN NAME :: " + dm.getName()); // Set read to domain content area log.debug("CONTEXT URI :: " + toUri(dm.getExplicitContent().getContextUri())); oCon.setReadContexts(toUri(PROVEN_CONTEXT), toUri(dm.getExplicitContent().getContextUri())); ret.addAll(oCon.getObjects(NativeSource.class).asList()); return ret; } public RepositoryResult<Statement> getAllStatements() throws Exception { RepositoryResult<Statement> ret = null; ret = oCon.getStatements(null, null, null); return ret; } public RepositoryResult<Statement> getDomainStatements(String domain) throws Exception { RepositoryResult<Statement> ret = null; List<Resource> contexts = new ArrayList<Resource>(); DomainModel dm = findConceptByName(DomainModel.class, domain); if (null != dm) { Resource ec = toResource(dm.getExplicitContent().getContextUri()); contexts.add(ec); Set<Ontology> onts = dm.getOntologies(); for (Ontology ont : onts) { Resource r = toResource(ont.getContext().getContextUri()); contexts.add(r); } Resource[] resources = new Resource[contexts.size()]; ret = oCon.getStatements(null, null, null, contexts.toArray(resources)); } return ret; } public void removeDomainContext(Resource context) throws RepositoryException { oCon.clear(context); oCon.commit(); } public void removeAll() throws RepositoryException { oCon.setRemoveContexts(); // RepositoryResult<Resource> contexts = oCon.getContextIDs(); // List<Resource> contextList = Iterations.asList(contexts); // for (Resource context : contextList) { // if (!context.toString().equals(PROVEN_CONTEXT)); // oCon.clear(context); // } oCon.clear(); oCon.commit(); } public void addBlobs(Set<Representation> reps) { for (Representation rep : reps) { try { addBlob(rep); } catch (RepositoryException e) { log.warn("Blob creation failed :: " + e.getMessage()); // e.printStackTrace(); } } } public void addBlob(Representation rep) throws RepositoryException { OutputStream out = null; FileInputStream in = null; rep.setBlobStatus(BlobStatus.ADD_COMPLETE); try { // For now, only add local file representations if (isLocalResource(rep.getLocation())) 
{ // Get blob and output stream BlobObject bo = oCon.getBlobObject(rep.getBlobKey().toString()); out = bo.openOutputStream(); File source = new File(rep.getLocation()); in = new FileInputStream(source); byte[] buffer = new byte[1024]; int len = in.read(buffer); while (len != -1) { out.write(buffer, 0, len); len = in.read(buffer); } } else { rep.setBlobStatus(BlobStatus.REMOTE); } } catch (Exception e) { rep.setBlobStatus(BlobStatus.ADD_FAIL); e.printStackTrace(); throw new RepositoryException(e.getMessage()); } finally { if (null != out) try { out.close(); } catch (IOException e) { e.printStackTrace(); } if (null != in) try { in.close(); } catch (IOException e) { e.printStackTrace(); } } } public String sparqlQuery(String queryString) { String ret = ""; OutputStream outStream = null; try { //TupleQueryResultWriterRegistry // prepare the query // String queryString = "SELECT * WHERE {?s ?p ?o . }"; TupleQuery query = oCon.prepareTupleQuery(QueryLanguage.SPARQL, queryString); // open a file to write the result to it in JSON format outStream = new ByteArrayOutputStream(); TupleQueryResultHandler writer = new SPARQLResultsJSONWriter(outStream); // execute the query and write the result directly to file query.evaluate(writer); if (null != outStream) { ret = outStream.toString(); } } catch (MalformedQueryException e) { ret = "MalformedQueryException :: " + e.getCause().toString() + " :: " + e.getMessage(); e.printStackTrace(); } catch (RepositoryException e) { ret = "RepositoryException :: " + e.getCause().toString() + " :: " + e.getMessage(); ; } catch (TupleQueryResultHandlerException e) { ret = "TupleQueryResultHandlerException :: " + e.getCause().toString() + " :: " + e.getMessage(); ; } catch (QueryEvaluationException e) { ret = "QueryEvaluationException :: " + e.getCause().toString() + " :: " + e.getMessage(); ; } finally { if (null != outStream) { try { outStream.close(); } catch (IOException e) { e.printStackTrace(); } } } return ret; } public String influxQuery(String queryString) { String ret = ""; if (useIdb) { InfluxDB influxDB = InfluxDBFactory.connect(idbUrl, idbUsername, idbPassword); String dbName = idbDB; // Query query = new Query("select time_idle from cpu limit 10", dbName); Query query = new Query(queryString, dbName); QueryResult qr = influxDB.query(query); if (qr.hasError()) { ret = qr.getError(); } else { ret = qr.getResults().toString(); // ret = qr.toString(); } } else { ret = "{ \"INFO\": \"idb server disabled in Proven configuration\" }"; } return ret; } public void influxWriteMeasurements(Map<String, Set<ProvenanceMetric>> measurements) { if (useIdb) { Long startTime = System.currentTimeMillis(); InfluxDB influxDB = InfluxDBFactory.connect(idbUrl, idbUsername, idbPassword); for (String measurement : measurements.keySet()) { Set<ProvenanceMetric> pms = measurements.get(measurement); Point.Builder builder = Point.measurement(measurement).time(startTime, TimeUnit.MILLISECONDS); for (ProvenanceMetric pm : pms) { if (pm.isMetadata()) { builder.tag(pm.getLocalMetricName(), pm.getLabelMetricValue()); // System.out.println("TAG"); // System.out.println("------------------------------"); // System.out.println(pm.getLocalMetricName()); // System.out.println(pm.getLabelMetricValue()); // System.out.println("------------------------------"); } else { builder.field(pm.getLocalMetricName(), pm.getLabelMetricValue()); // System.out.println("FIELD"); // System.out.println("------------------------------"); // System.out.println(pm.getLocalMetricName()); // 
System.out.println(pm.getLabelMetricValue()); // System.out.println("------------------------------"); } } // System.out.println("------------------------------"); // System.out.println("------------------------------"); influxDB.write(idbDB, idbRP, builder.build()); } } } @AroundInvoke public Object checkObjectConnection(InvocationContext ic) throws Exception { try { if (!isValidConnection()) { log.debug("GETTING A NEW CONNECTION!!!"); oCon = sm.getObjectStoreConnection(); } } catch (Exception e) { throw new EJBException("Return/Borrow new connection failed"); } return ic.proceed(); } private boolean isValidConnection() { boolean ret = true; try { if ((!oCon.getRepository().isInitialized()) || (!oCon.isOpen())) { ret = false; // Force a close, may cause an exception. oCon.close(); } } catch (Exception e) { ret = false; } return ret; } }
def update( self, currentX, currentDist, overrideSpeed=0.0 ): errorX = (currentX - self.targetX) / (self.racecar.camera.get_width() // 2) errorD = currentDist - self.targetDist dt = self.racecar.get_delta_time() if overrideSpeed == 0: self.speed = np.clip(self.Speed_PID.update(errorD, dt), -1, 1) else: self.speed = np.clip(overrideSpeed, -1, 1) if self.speed >= 0: self.angle = np.clip( self.Angle_PID.update(errorX, dt, 1 - 0.3 * self.speed), -1, 1 ) elif self.speed < 0: self.angle = np.clip( self.Angle_PID.update(errorX, dt, -1 - 0.3 * self.speed), -1, 1 ) return (self.speed, self.angle)
/** * Print an error message and die. * @param s - string format * @effect This function causes the program to die */ void fatalmsg(const char *s,...) { va_list p; va_start(p,s); vfprintf(stderr,s,p); va_end(p); putc('\n',stderr); exit(-1); }
One of the first Christian books I ever read (once I started reading books on my own, simply for pleasure, in high school) was Philip Yancey’s The Jesus I Never Knew. It ends like this: The other two days [besides Holy Saturday] have earned names on the church calendar: Good Friday and Easter Sunday. Yet in a real sense we live on Saturday, the day with no name. What the disciples experienced on a small scale—three days, in grief over one man who had died on a cross—we now live through on a cosmic scale. Human history grinds on, between the time of promise and fulfillment. Can we trust that God can make something holy and beautiful and good out of a world that includes Bosnia and Rwanda, and inner-city ghettoes and jammed prisons in the richest nation on earth? It’s Saturday on planet earth; will Sunday ever come? That dark, Golgothan Friday can only be called Good because of what happened on Easter Sunday, a day which gives a tantalizing clue to the riddle of the universe. Easter opened up a crack in a universe winding down toward entropy and decay, sealing the promise that someday God will enlarge the miracle of Easter to cosmic scale. It is a good thing to remember that in the cosmic drama, we live out our days on Saturday, the in-between day with no name. I know a woman whose grandmother lies buried under 150-year-old live oak trees in the cemetery of an Episcopal church in rural Louisiana. In accordance with the grandmother’s instructions, only one word is carved on the tombstone: “Waiting.” Longtime readers of this blog will know that my entire framework for thinking about my life as a gay, celibate believer is built around that idea of “waiting.” In the midst of ongoing loneliness and struggle, I am “wait[ing]… for the redemption of our bodies” (Romans 8:23). And that’s been true for several years now, ever since my early twenties when I was just beginning to work through what my Christian faith meant for my homosexuality. At that juncture of my life, I stumbled across the kind of thing Philip Yancey was gesturing towards—an actual theology of waiting, a theology of Holy Saturday. Just after I finished college, I read this passage from Richard Hays—which is, essentially, a theology of “waiting” specifically for celibate gay Christians—and I more or less immediately committed it to memory: While Paul regarded celibacy as a charisma, he did not therefore suppose that those lacking the charisma were free to indulge their sexual desires outside marriage. Heterosexually oriented persons are also called to abstinence from sex unless they marry (1 Cor. 7:8-9). The only difference—admittedly a salient one—in the case of homosexually oriented persons is that they do not have the option of homosexual “marriage” [Hays writes before Obergefell, but in any case, his judgment here is theological rather than political/legislative; i.e., same-sex marriage is ruled out by the teaching of Scripture]. So where does that leave them? It leaves them in precisely the same situation as the heterosexual who would like to marry but cannot find an appropriate partner (and there are many such): summoned to a difficult, costly obedience, while “groaning” for the “redemption of our bodies” (Rom. 8:23). Anyone who does not recognize this as a description of authentic Christian existence has never struggled seriously with the imperatives of the gospel, which challenge and frustrate our “natural” impusles in countless ways. 
(Emphasis added) I know some gay or same-sex attracted Christians would want to put things differently. Not all of us pine for marriage or feel “broken” without it, and some of us are married to spouses of the opposite sex. Not all of us would say that loneliness or sexual temptation are our biggest challenges. But for me, at least, this passage rings true. So much of my Christian life feels like what St. Paul describes as “groaning” (Romans 8:23), as waiting, as living out my days on Holy Saturday, straining forward in hopes that Easter Sunday—the cosmic Easter Sunday, the great Resurrection of the dead—will come sooner than I might have hoped. As one gay friend of mine has put it, “In the ‘already/not-yet’ tension of Christianity, I invariably find the ‘not-yet’ aspect more resonant.” On this Holy Saturday, it’s good for me to remember once again what Hays wrote: that this groaning, this eager waiting-tinged-with-aching, is an authentically Christian way to live. It’s not necessarily a sign of failure or defeatism or depression. Really, it’s where we all live, strung like a tension wire between our sharing in Christ’s death in our baptism and our future sharing in his resurrection after we take our final breath. We’re all sort of like that tombstone Yancey’s friend described: planted in “the now” and looking forward to what’s still to come. O God, Creator of heaven and earth: Grant that, as the crucified body of your dear Son was laid in the tomb and rested on this holy Sabbath, so may we await with him the coming of the third day, and rise with him to newness of life; who now lives and reigns with you and the Holy Spirit, one God, for ever and ever. Amen.
Big Booths for Sony, Microsoft, Blizzard, Electronic Arts and More Revealed By Gamescom Floor Plans
Giuseppe Nelva
July 27, 2015 2:56:29 AM EST

Gamescom is just a little more than a week away, and the detailed floor plans for the halls are now available on the official website, showing where each publisher is located and roughly the size of each booth.

But first of all, let's take a look at the whole thing. And now, let's check out each hall, with the maps of the locations of all the major publishers:

Entertainment Area (open to all)
Hall 6: Electronic Arts (there's a mistake in the map here; the booth actually covers the whole square around the circle in the middle), Microsoft, Take Two, Ubisoft
Hall 7: Sony Computer Entertainment, Crytek, Activision Blizzard, Bandai Namco, Konami
Hall 9: Deep Silver, Nintendo, Sega, Square Enix, Warner Bros., 343 Industries (Microsoft)
Hall 10: Deep Silver, Nordic Games, Ubisoft, Warner Bros.

Event Area (open to all)
Hall 5: Square Enix, Konami, Activision Blizzard

Business Area (open only to media and business attendees)
Hall 2 – Floor 1: Microsoft, Nordic Games
Hall 2 – Floor 2: Crytek
Hall 3 – Floor 2: Focus Home Interactive
Hall 4 – Floor 1: Konami, Microsoft
Hall 4 – Floor 2: Sony, Nintendo, Bandai Namco, Deep Silver, Sega, Take-Two, Ubisoft, Warner Bros.

While Sony Computer Entertainment won't have a press conference at Gamescom, their presence in the entertainment area is massive. Microsoft, Activision Blizzard, Electronic Arts and Ubisoft also have very large booths. Nintendo, Bandai Namco, Konami, Take-Two, Sega, Deep Silver, Square Enix and Warner Bros. will also show some muscle. 343 Industries even has a small booth of its own.

According to the organizers, Gamescom is going to be bigger than in previous years, and of course you can expect full coverage here on DualShockers. Last year I managed to demolish a pair of shoes while running around. Hopefully, I won't destroy my legs this time around.
Event tourism and sustainable development

Tourism is an important basis of a country's socio-economic and cultural development. Tourism development strategies involve the measures a society applies to make the best use of tourism's benefits or to neutralize the problems it creates in the economy. As every economic actor aims at effectiveness and efficiency, tourism management is tasked with responding to current challenges and with connecting service providers to users so as to achieve mutual satisfaction. Decision-making in tourism is particularly complex when events (cultural, sporting, etc.) are held in protected areas and national parks, which attract tourists the most. Since the capacities of a tourist destination often cannot keep up with the constant, accelerated growth in the number of visitors, sustainable development becomes a professional challenge for tourism destination management, and it is often a source of problems for tour operators and governments. A lack of resources, pollution, security concerns and numerous other risks are among the factors that may reduce tourism profits or contribute to losses. This paper explores aspects of strategies that can bring benefits to visitors, organizers and society as a whole, in accordance with the achieved level of overall economic, social and cultural development. The paper also points out the concerns, problems, codes and strategies that affect the final product, as an aspect of engagement in the quality management process in tourism.
Efficient architecture and implementations of AES

An equivalent optimized sub-pipelined architecture is proposed to implement AES, in which every round of both encryption and decryption takes one clock cycle. Two implementations of the SubBytes/InvSubBytes operation are considered: one using composite field arithmetic in GF(2^4) and one using BlockRAMs. In addition, an efficient key expansion is presented that outputs a 128-bit round key per cycle and allows the key to change every cycle. The novel pipelined design achieves a throughput of 82.65 Gbps on a Xilinx Virtex-4 xc4vlx100 device with the composite field implementation of the SubBytes operation. Using 40 BlockRAMs and 8901 slices, a throughput of 64 Gbps is achieved at a frequency of 500 MHz. The throughput/area ratio of both designs is above 6 Mbps/slice, indicating that they are low-cost, high-speed implementations.
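As a quick sanity check on these figures (a back-of-the-envelope sketch, assuming one 128-bit block completes per clock cycle once the pipeline is full), the quoted throughput and throughput/area numbers for the BlockRAM-based design can be reproduced directly:

# Back-of-the-envelope check of the figures quoted above, assuming one
# 128-bit AES block completes per clock cycle in the fully pipelined design.
block_bits = 128        # AES block size
clock_hz = 500e6        # reported clock frequency of the BlockRAM-based design
slices = 8901           # reported slice count of the BlockRAM-based design

throughput_gbps = block_bits * clock_hz / 1e9
ratio_mbps_per_slice = block_bits * clock_hz / 1e6 / slices

print(throughput_gbps)          # 64.0 Gbps, matching the abstract
print(ratio_mbps_per_slice)     # ~7.2 Mbps/slice, i.e. above the 6 Mbps/slice mark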
/**
 * Verify that no entry matching the supplied class name pattern exists in the index.
 * If a match is found, the assertion fails.
 *
 * @param classNamePattern the pattern
 */
public void assertNoClassNameMatches( @Nonnull final Pattern classNamePattern )
{
  final List<SymbolEntry> matches = findSymbolsByClassName( classNamePattern );
  if ( !matches.isEmpty() )
  {
    fail( "Expected that the SymbolMap would have no classNames that match pattern " + classNamePattern +
          " but the following symbols match: " + matches );
  }
}
Henry Kissinger, left, and President Richard M. Nixon after Kissinger was sworn in as the 56th secretary of state in the East Room of the White House in Washington in this Sept. 22, 1973, file photo. (AP Photo, File) As time passes, the extent to which the Vietnam War split the American public recedes from memory. We generally recognize the war's futility at this point, after years of sending young men against their will into Southeast Asia to die fighting a mostly symbolic effort to curtail the spread of global communism. But the visceral effects of the damage it did have faded. About 41,000 American troops were killed in action, with more than 58,000 dying in the conflict overall -- a fraction of those killed overall, an unknowable figure that includes combatants from North and South Vietnam and uncountable hundreds of thousands of civilians in Vietnam, Laos and Cambodia. Young American men lived or died depending on a toss of a coin in the government's draft lottery, which determined whether or not they'd be shipped to the combat zone. So those young people, the boomer kids of the veterans of World War II, rose up in protest against the government. The counterculture was born. The political right and left staked out positions they defend today. On the left, the embodiment of the war's evils was -- and to many still is -- Richard Nixon. He didn't start the war -- it was begun by John Kennedy and expanded by Lyndon Johnson -- but Nixon was its most enthusiastic general. He inherited a conflict that was probably unwinnable, and Nixon's national security adviser, the man who helped guide the president's strategy, was Henry Kissinger. "I am proud to say that Henry Kissinger is not my friend," said Bernie Sanders during the PBS NewsHour democratic debate. He criticized rival Hillary Clinton for her positive comments about him, saying, "I will not take advice from Henry Kissinger." (PBS NewsHour) It was Kissinger who in early 1969 -- about a month after Nixon took office -- orchestrated a plan to dramatically escalate airstrikes within the neutral, Vietnam-adjacent country of Cambodia, which the North Vietnamese military was using to avoid U.S. attacks (the so-called Ho Chi Minh Trail). The United States began carpet-bombing the country without informing Congress -- an early example of Nixon's long-term plan to convince the North Vietnamese that he would pull out all the stops in his effort to win the war. These attacks were hidden from official records. Planes were diverted mid-flight to fly raids into Cambodia. When a coup replaced Cambodia's ruler with a leader amenable to the United States, we invaded on the ground. (The extent to which that coup was the work of the United States isn't clear.) When that didn't work -- and, later, as the Khmer Rouge threatened to take the nation over -- the bombings increased dramatically, including in more heavily populated areas. The United States dropped an estimated 2.7 million tons of ordnance on Cambodia; more than the 2 million tons that were dropped in all of World War II. For which Kissinger gets the credit/blame. In one phone call with Nixon, transcribed as part of Kissinger's papers, Nixon expresses concern that heavy bombings (in this case within Vietnam) will work. Kissinger tells Nixon that a million pounds of bombs were dropped on Haiphong, a port city in the north. "Goddamn, that must have been a good strike!" Nixon replies. 
He continues: Johnson's failure to subdue the North Vietnamese, in Kissinger's estimation, was that he didn't bomb enough. Kissinger and Nixon wouldn't make that same mistake. The result in Cambodia -- not the only targeted country, mind you -- may have been as many as 150,000 civilians killed. Kissinger himself figured it was about 50,000. As national security adviser and later secretary of state, Kissinger was at the heart of a number of other controversial decisions by the Nixon administration. Among the most notable was the United States' involvement in the overthrow of Salvador Allende in Chile. Kissinger pushed Nixon to back a coup in the country, ousting the "tough, dedicated Marxist" Allende. Nixon backed the plan, and on Sept. 11, 1973, Allende was overthrown. His replacement was Augusto Pinochet. Pinochet tortured and murdered tens of thousands of Chileans in an attempt to maintain control of the country. In 1998, he was arrested for human rights violations, but never faced trial due to health reasons. The coup in Chile was not the only notable event in Kissinger's life in 1973. That was also the year he received the Nobel Peace Prize. For members of the American left, these events (and others) define Kissinger's legacy in U.S. politics. The late Christopher Hitchens, a longtime opponent of the Vietnam War, wrote a book outlining a complete prosecution of Kissinger for war crimes, which was later turned into a documentary. He's by no means alone in this belief. Just last year, the left-leaning site Salon dubbed Kissinger "The Ivy League's favorite war criminal," criticizing an appearance Kissinger made at Yale University. Whether or not Kissinger could be prosecuted and convicted of war crimes is one thing. In the current political moment, the better question -- posed very well by Gawker's Alex Pareene -- is whether Kissinger should be embraced by the leading candidate for the Democratic presidential nomination, Hillary Clinton. The point I’m making here is not, [Glenn Greenwald voice] HILLARY CLINTON SUPPORTS A WAR CRIMINAL. ... It’s that Hillary Clinton exists in a world where “Henry Kissinger is a war criminal” is a silly opinion held by unserious people. Her problem? Lots of those silly and unserious people want to wrest control of the Democratic Party away from its current leadership, which is exemplified by people like Hillary Clinton. This is the core of the issue. Nixon and Kissinger are seen favorably by the American political right to this day. To the American left, they are largely reviled. In the middle, Pareene writes, sit members of the establishment like Hillary Clinton. Bernie Sanders's indignation at Clinton's embrace of Kissinger in Thursday night's debate is the indignation of a generation of Democrats who loathed Nixon and Kissinger with every fiber of their being. This isn't a group that's inclined to back Sanders, incidentally; one of the only demographic groups Clinton won in New Hampshire was boomers. But Sanders's thoughts on Kissinger clearly don't stem from political calculations. "I am proud to say that Henry Kissinger is not my friend," Sanders said angrily, when he raised the issue in the debate. "I will not take advice from Henry Kissinger." Clinton replied by asking Sanders from whom he would take advice. "Well, it ain't Henry Kissinger, that's for sure," he replied. "That's fine," said Clinton. "That's fine." The audience laughed.
package dbw import ( "testing" "time" ) func TestFieldNames(t *testing.T) { db, err := Open("postgres", "host=localhost port=5432 dbname=postgres user=admin password=<PASSWORD> sslmode='disable' bytea_output='hex'") if err != nil { t.Error(err) } _, err = db.Exec(` create table x_haha( id int8 not null, name text, row_version int8 not null default 1, created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), updated_at TIMESTAMPTZ, deleted_at TIMESTAMPTZ, constraint x_haha_pk primary key (id) )`) if err != nil { t.Fatal(err) } _, err = db.Exec("create sequence x_haha_seq") if err != nil { t.Fatal(err) } defer func() { db.Exec("drop table x_haha") db.Exec("drop sequence x_haha_seq") }() type Xhaha struct { ID int Name *string RowVersion int CreatedAt time.Time UpdatedAt *time.Time DeletedAt *time.Time } tbl := NewTable(db, "x_haha", &Xhaha{}) s := "Robert" row := Xhaha{} row.Name = &s row.CreatedAt = time.Now() if err := tbl.Insert(&row); err != nil { t.Error(err) } t.Logf("version before: %v", row.RowVersion) s = "Rimma" if err := tbl.Update(&row); err != nil { t.Error(err) } // tbl.Update().Row(&row).NoReturning() t.Logf("version after: %v ", row.RowVersion) if err := tbl.Delete(WithID(1), WithReturnVersion(&row.RowVersion)); err != nil { t.Error(err) } t.Logf("version after: %v", row.RowVersion) if err := tbl.Delete(WithID(1), WithReturnAll(&row)); err != nil { t.Error(err) } t.Logf("version after: %v", row.RowVersion) }
/** * Compares source files to destination files to see if they should be * copied. * * @param fromDir The source directory * @param toDir The destination directory * @param files A list of files to copy * @param dirs A list of directories to copy */ protected void scan(File fromDir, File toDir, String[] files, String[] dirs) { FileNameMapper mapper = null; if (mapperElement != null) { mapper = mapperElement.getImplementation(); } else if (flatten) { mapper = new FlatFileNameMapper(); } else { mapper = new IdentityMapper(); } buildMap(fromDir, toDir, files, mapper, fileCopyMap); if (includeEmpty) { buildMap(fromDir, toDir, dirs, mapper, dirCopyMap); } }
def readme(self, readme): self._readme = readme
#include <stdio.h>
#include <string.h>

int main(void)
{
    int a[200], x, y, b, i;
    char s1[200], s2[200];

    /* Read two whitespace-separated strings; pass the arrays themselves
       (not their addresses) and bound the read to avoid buffer overflow. */
    if (scanf("%199s %199s", s1, s2) != 2)
        return 1;

    /* XOR the two strings character by character; as in the original
       program, the inputs are assumed to have the same length. */
    b = strlen(s1);
    for (i = 0; i < b; i++)
    {
        x = (int) s1[i];
        y = (int) s2[i];
        a[i] = x ^ y;
    }

    for (i = 0; i < b; i++)
    {
        printf("%d", a[i]);
    }
    return 0;
}
// Convert function change bitrate, resolution and ratio for video func (c *Compressor) Convert(ctx context.Context, opt *Request, originalVideo string) (string, error) { opts := c.buildOptions(opt) if opt.Bitrate != 0 { return c.convertWithBitrate(originalVideo, opts) } root := os.Getenv("ROOT") iName := strings.LastIndex(originalVideo, "/") newVideoName := originalVideo[iName:] newVideoPath := fmt.Sprintf("%s%s%s", root, convertedVideosPath, newVideoName) err := c.convertVideo(originalVideo, newVideoPath, opts) if err != nil { return "", err } return newVideoPath, nil }
def validate( filename: str, data: Dict[str, Any], schema: Dict[str, Any], default: bool = False ) -> Tuple[List[str], Dict[str, Any]]: schema_ref = schema.get("$schema", "default") schema_match = re.match(r"https?\:\/\/json\-schema\.org\/(.*)\/schema", schema_ref) Validator = { "draft-04": jsonschema.Draft4Validator, "draft-06": jsonschema.Draft6Validator, "draft-07": jsonschema.Draft7Validator, }.get( schema_match.group(1) if schema_match else "default", jsonschema.Draft7Validator, ) if default: Validator = _extend_with_default( Validator ) validator = Validator(schema) def format_error(error: jsonschema.exceptions.ValidationError) -> List[str]: position = filename if hasattr(error.instance, "lc"): position = f"{filename}:{error.instance.lc.line + 1}:{error.instance.lc.col + 1}" else: curent_data = data parent = None if hasattr(curent_data, "lc"): parent = curent_data for path in error.absolute_path: curent_data = curent_data[path] if hasattr(curent_data, "lc"): parent = curent_data if parent is not None: position = f"{filename}:{parent.lc.line + 1}:{parent.lc.col + 1}" if error.context: results = [] for context in error.context: results += format_error(context) return results else: rule = ( f" (rule: {'.'.join([str(i) for i in error.absolute_schema_path])})" if error.absolute_schema_path else "" ) return [ f"-- {position} " f'{".".join([str(i) for i in error.absolute_path] if error.absolute_path else "/")}: ' f"{error.message}{rule}" ] results = [] for error in validator.iter_errors(data): results += format_error(error) return sorted(results), data
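A minimal usage sketch for the validate() helper above follows; the file name, schema, and data literals are hypothetical and purely illustrative, and only the function signature and the error message format come from the code itself:

# Hypothetical call to validate(); the schema and data are made up for illustration.
schema = {
    "$schema": "https://json-schema.org/draft-07/schema",
    "type": "object",
    "properties": {"port": {"type": "integer"}},
    "required": ["port"],
}
errors, checked = validate("config.yaml", {"port": "not-a-number"}, schema)
for message in errors:
    print(message)
# Expected to print something like:
# -- config.yaml port: 'not-a-number' is not of type 'integer' (rule: properties.port.type)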
1. Cummins Falls Located about an hour and a half from Nashville, Cummins Falls has been a local hangout and swimming hole for more than 100 years. The beautiful 75-foot falls can only be reached by foot, and you’ll have to undergo a pretty strenuous hike to get there, but it’s totally worth it. Sit on the rocky tiers or swim in the deep, cool pool at the bottom—it’s up to you! 2. Fall Creek Falls Fall Creek Falls sprawls across a whopping 26,000 acres, and is known as Tennessee’s largest state park. It’s also home to its namesake, Fall Creek Falls, a 256-foot tall waterfall and one of the tallest in the eastern U.S. With more than 34 miles of trails, hikers can use one of the day trails or, if they’re feeling daring, opt for an overnight trail to get their waterfall fix.
<reponame>NikolajSkousen/designsystem import { Component } from '@angular/core'; const config = { selector: 'cookbook-toggle-button-example', template: `<kirby-toggle-button [checked]="true" (checkChanged)="onCheckChanged($event)"> <button kirby-button unchecked attentionLevel="3">Deactivated</button> <button kirby-button checked themeColor="success">Activated</button> </kirby-toggle-button> <kirby-toggle-button [checked]="false" (checkChanged)="onCheckChanged($event)"> <button kirby-button unchecked attentionLevel="3">Deactivated</button> <button kirby-button checked themeColor="warning">Activated</button> </kirby-toggle-button> <kirby-toggle-button [checked]="true" (checkChanged)="onCheckChanged($event)"> <button kirby-button unchecked attentionLevel="3">Deactivated</button> <button kirby-button checked themeColor="danger">Activated</button> </kirby-toggle-button>`, }; @Component({ selector: config.selector, styleUrls: ['./toggle-button-example.component.scss'], template: config.template, }) export class ToggleButtonExampleComponent { template = config.template; onCheckChanged(checked: boolean) { console.log(`Toggle onCheckChanged: ${checked}`); } }
def dump_uischema(self, obj, many=None, *args): return dict(self._dump_uischema_iter(obj, *args, many=many))
#Test Model
from src.models import Hang2020
import torch
import os
import pytest

os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'

def test_conv_module():
    m = Hang2020.conv_module(in_channels=369, filters=32)
    image = torch.randn(20, 369, 11, 11)
    output = m(image)
    assert output.shape == (20, 32, 11, 11)

def test_conv_module_maxpooling():
    m = Hang2020.conv_module(in_channels=32, filters=64, maxpool_kernel=(2, 2))
    image = torch.randn(20, 32, 11, 11)
    output = m(image, pool=True)
    assert output.shape == (20, 64, 5, 5)

@pytest.mark.parametrize("conv_dimension", [(20, 32, 11, 11), (20, 64, 5, 5), (20, 128, 2, 2)])
def test_spatial_attention(conv_dimension):
    """Check spatial attention for each convolutional dimension"""
    m = Hang2020.spatial_attention(filters=conv_dimension[1], classes=10)
    image = torch.randn(conv_dimension)
    attention, scores = m(image)
    assert scores.shape == (20, 10)

@pytest.mark.parametrize("conv_dimension", [(20, 32, 11, 11), (20, 64, 5, 5), (20, 128, 2, 2)])
def test_spectral_attention(conv_dimension):
    """Check spectral attention for each convolutional dimension"""
    m = Hang2020.spectral_attention(filters=conv_dimension[1], classes=10)
    image = torch.randn(conv_dimension)
    attention, scores = m(image)
    assert scores.shape == (20, 10)

def test_spectral_network():
    m = Hang2020.spectral_network(bands=369, classes=10)
    image = torch.randn(20, 369, 11, 11)
    output = m(image)
    assert len(output) == 3
    assert output[0].shape == (20, 10)

def test_spatial_network():
    m = Hang2020.spatial_network(bands=369, classes=10)
    image = torch.randn(20, 369, 11, 11)
    output = m(image)
    assert len(output) == 3
    assert output[0].shape == (20, 10)

def test_vanillaCNN_HSI():
    m = Hang2020.vanilla_CNN(bands=369, classes=10)
    image = torch.randn(20, 369, 11, 11)
    output = m(image)
    assert output.shape == (20, 10)

def test_vanillaCNN_RGB():
    m = Hang2020.vanilla_CNN(bands=3, classes=10)
    image = torch.randn(20, 3, 11, 11)
    output = m(image)
    assert output.shape == (20, 10)
<gh_stars>10-100 /* Copyright (c) the purl authors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ declare module "packageurl-js" { /** * A purl or package URL is an attempt to standardize existing approaches to reliably identify and locate software packages. * A purl is a URL string used to identify and locate a software package in a mostly universal and uniform way across programing languages, * package managers, packaging conventions, tools, APIs and databases. * Such a package URL is useful to reliably reference the same software package using a simple and expressive syntax and conventions based on familiar URLs. */ class PackageURL { /** * the package "type" or package "protocol" such as maven, npm, nuget, gem, pypi, etc. Required. */ type: string; /** * some name prefix such as a Maven groupid, a Docker image owner, a GitHub user or organization. Optional and type-specific. */ namespace: string | undefined | null; /** * the name of the package. Required. */ name: string; /** * the version of the package. Optional. */ version: string | undefined | null; /** * extra qualifying data for a package such as an OS, architecture, a distro, etc. Optional and type-specific. */ qualifiers: { [key: string]: string; } | undefined | null; /** * extra subpath within a package, relative to the package root. Optional. */ subpath: string | undefined | null; constructor(type: string, namespace: string | undefined | null, name: string, version: string | undefined | null, qualifiers: { [key: string]: string; } | undefined | null, subpath: string | undefined | null); /** * Converts the PackageURL to a string */ toString(): string; /** * Parses a string tp a PackageURL * @param purl string to parse */ static fromString(purl: string): PackageURL } }
<filename>modules/http-client/src/main/java/com/devonfw/module/httpclient/common/impl/AbstractAsyncServiceHttpClient.java package com.devonfw.module.httpclient.common.impl; import java.net.http.HttpRequest; import java.net.http.HttpResponse; import java.net.http.HttpResponse.BodyHandlers; import java.util.concurrent.CompletableFuture; import java.util.function.Consumer; import com.devonfw.module.service.common.api.client.AsyncServiceClient; import com.devonfw.module.service.common.api.client.async.ServiceClientInvocation; import com.devonfw.module.service.common.api.client.async.ServiceClientStub; import com.devonfw.module.service.common.base.client.ServiceClientPerformanceLogger; import com.devonfw.module.service.common.base.client.async.AbstractAsyncServiceClient; /** * Abstract base implementation of {@link AsyncServiceClient} using Java HTTP client. * * @param <S> type of the {@link #get() service client}. * @param <F> type of the owning {@link AsyncServiceClientFactoryHttp factory}. * @since 2020.08.001 */ public abstract class AbstractAsyncServiceHttpClient<S, F extends AsyncServiceClientFactoryHttp> extends AbstractAsyncServiceClient<S> { /** {@link ServiceHttpClient} to use. */ protected final ServiceHttpClient client; /** The owning {@link AsyncServiceClientFactoryHttp factory} which created this client. */ protected final F factory; /** * The constructor. * * @param proxy the {@link #get() service client}. * @param stub the {@link ServiceClientStub}. * @param httpClient the {@link ServiceHttpClient} to use. * @param factory the owning {@link AsyncServiceClientFactoryHttp factory}. */ public AbstractAsyncServiceHttpClient(S proxy, ServiceClientStub<S> stub, ServiceHttpClient httpClient, F factory) { super(proxy, stub); this.client = httpClient; this.factory = factory; } @Override protected <R> void doCall(ServiceClientInvocation<S> invocation, Consumer<R> resultHandler) { long startTime = System.nanoTime(); HttpRequest request = createRequest(invocation); CompletableFuture<HttpResponse<String>> future = this.client.getHttpClient().sendAsync(request, BodyHandlers.ofString()); future.thenAccept(response -> handleResponse(response, startTime, invocation, resultHandler, getErrorHandler())); } private Throwable createError(HttpResponse<?> response, ServiceClientInvocation<S> invocation, String service) { int statusCode = response.statusCode(); String contentType = response.headers().firstValue("Content-Type").orElse("application/json"); String data = ""; Object body = response.body(); if (body instanceof String) { data = (String) body; } else { handleUnsupportedBody(body); } return this.factory.getErrorUnmarshaller().unmarshall(data, contentType, statusCode, service); } /** * @param body the body of the HTTP request/response. * @return nothing. Will already throw an exception. */ protected Object handleUnsupportedBody(Object body) { String bodyType = "null"; if (body != null) { body.getClass().getName(); // avoid OWASP sensitive data exposure and only reveal classname in message } throw new UnsupportedOperationException( "HTTP request/response body of type " + bodyType + " is currently not supported!"); } /** * @param response the received {@link HttpResponse}. * @param invocation the {@link ServiceClientInvocation}. * @return the unmarshalled result object from the response body or {@code null} if no body was found or return type * is {@code void}. * @throws IllegalStateException if the unmarshalling of the result failed. 
* @throws UnsupportedOperationException if the body type is not supported. */ protected abstract Object createResult(HttpResponse<?> response, ServiceClientInvocation<S> invocation); /** * @param invocation the {@link ServiceClientInvocation}. * @return the according {@link HttpResponse} to send. */ protected abstract HttpRequest createRequest(ServiceClientInvocation<S> invocation); @Override protected <R> CompletableFuture<R> doCall(ServiceClientInvocation<S> invocation) { long startTime = System.nanoTime(); HttpRequest request = createRequest(invocation); CompletableFuture<HttpResponse<String>> future = this.client.getHttpClient().sendAsync(request, BodyHandlers.ofString()); return future.thenApplyAsync( response -> handleResponse(response, startTime, invocation, null, ErrorHandlerThrowImmediately.get())); } @SuppressWarnings({ "unchecked" }) private <R> R handleResponse(HttpResponse<?> response, long startTime, ServiceClientInvocation<S> invocation, Consumer<R> resultHandler, Consumer<Throwable> errorHandler) { Throwable error = null; String service = invocation.getServiceDescription(response.uri().toString()); try { int statusCode = response.statusCode(); if (statusCode >= 400) { error = createError(response, invocation, service); errorHandler.accept(error); } else { R result = (R) createResult(response, invocation); if (resultHandler != null) { resultHandler.accept(result); } return result; } } catch (Throwable t) { errorHandler.accept(t); error = t; } finally { ServiceClientPerformanceLogger.log(startTime, service, response.statusCode(), error); } return null; } }
package com.cmput3owo1.moodlet.fragments; import android.content.Intent; import android.os.Bundle; import androidx.fragment.app.Fragment; import androidx.fragment.app.FragmentTransaction; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; import android.widget.Button; import android.widget.EditText; import android.widget.ProgressBar; import android.widget.TextView; import android.widget.Toast; import com.cmput3owo1.moodlet.R; import com.cmput3owo1.moodlet.activities.MainActivity; import com.cmput3owo1.moodlet.services.IUserServiceProvider; import com.cmput3owo1.moodlet.services.UserService; /** * A fragment that handles user login. It takes in email and password in EditTexts * and makes sure that the inputs are valid. Once the inputs have been verified, * the login button attempts to log the user in. The fragment also contains a clickable * TextView that changes from a login fragment to a register fragment */ public class LoginFragment extends Fragment implements IUserServiceProvider.LoginListener { private EditText email, password; private TextView signupText; private Button loginButton; private ProgressBar progressBar; private IUserServiceProvider userService = new UserService(); /** * Default constructor for the Fragment */ public LoginFragment(){ } /** * This function is called to have the fragment instantiate its user interface view. * @param inflater The LayoutInflater object that can be used to inflate any views in the fragment. * @param container If non-null, this is the parent view that the fragment's UI should be attached to. The fragment should not add the view itself, but this can be used to generate the LayoutParams of the view. * @param savedInstanceState If non-null, this fragment is being re-constructed from a previous saved state as given here. * @return Return the View for the fragment's UI, or null. 
*/ @Override public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) { // Inflate the layout for this fragment View loginFragmentView = inflater.inflate(R.layout.fragment_login, container, false); signupText = loginFragmentView.findViewById(R.id.sign_up_text); email = loginFragmentView.findViewById(R.id.edit_text_email); password = loginFragmentView.findViewById(R.id.edit_text_password); loginButton = loginFragmentView.findViewById(R.id.btn_login); progressBar = loginFragmentView.findViewById(R.id.login_progress_bar); signupText.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View view) { FragmentTransaction fragmentTransaction = getActivity().getSupportFragmentManager().beginTransaction(); fragmentTransaction.replace(R.id.fragment_container, new RegisterFragment()); fragmentTransaction.commit(); } }); loginButton.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View view) { String txt_email = email.getText().toString(); String txt_password = password.<PASSWORD>(); if(txt_email.isEmpty() || txt_password.isEmpty()) { Toast.makeText(getActivity(), R.string.all_fields_required, Toast.LENGTH_SHORT).show(); } else { showProgressBar(); userService.loginUser(txt_email, txt_password, LoginFragment.this); } } }); // Inflate the layout for this fragment return loginFragmentView; } /** * Interface function to switch to main activity upon successful login */ @Override public void onLoginSuccess() { Intent intent = new Intent(getActivity(), MainActivity.class); startActivity(intent); getActivity().finish(); } /** * Interface function to show a toast message for unsuccessful login */ @Override public void onLoginFailure() { hideProgressBar(); Toast.makeText(getActivity(), R.string.authentication_failed, Toast.LENGTH_SHORT).show(); } /** * Function to set all fields to clickable, hides the progress bar and shows the login button */ private void hideProgressBar() { setAllToClickable(); progressBar.setVisibility(View.INVISIBLE); loginButton.setVisibility(View.VISIBLE); } /** * Function to set all fields to unclickable, shows the progress bar, and hides the login button */ private void showProgressBar() { setAllToUnclickable(); loginButton.setVisibility(View.INVISIBLE); progressBar.setVisibility(View.VISIBLE); } /** * Function to set all fields in login fragment to be unclickable */ private void setAllToUnclickable() { email.setEnabled(false); password.setEnabled(false); signupText.setClickable(false); loginButton.setClickable(false); } /** * Function to set all fields in login fragment to be clickable */ private void setAllToClickable() { email.setEnabled(true); password.setEnabled(true); signupText.setClickable(true); loginButton.setClickable(true); } }
// save automatically sets the Route.Size and Route.Pattern values.
func (r *Route) save() {
	r.Size = len(r.Path)
	r.Token.Tokens = strings.Split(r.Path, "/")
	for i, s := range r.Token.Tokens {
		if len(s) >= 1 {
			switch s[:1] {
			case ":":
				if r.Pattern == nil {
					r.Pattern = make(map[int]string)
				}
				r.Pattern[i] = s[1:]
				r.Atts |= PARAM
			case "#":
				if r.Compile == nil {
					r.Compile = make(map[int]*regexp.Regexp)
					r.Tag = make(map[int]string)
				}
				tmp := strings.Split(s, "^")
				r.Tag[i] = tmp[0][1:]
				r.Compile[i] = regexp.MustCompile("^" + tmp[1][:len(tmp[1])-1])
				r.Atts |= REGEX
			case "*":
				r.wildPos = i
				r.Atts |= WC
			default:
				r.Token.raw = append(r.Token.raw, i)
			}
		}
		r.Token.Size++
	}
}
import numpy as np import numdifftools as nd import scipy.optimize as opt import cvxopt as cvx from statsmodels.regression.quantile_regression import QuantReg from scipy.stats import chi2, norm from scipy.linalg import block_diag, null_space """Worst-case standard errors for minimum distance estimates without knowledge of the full correlation matrix for the matched moments If desired, also computes: - worst-case efficient estimates - full-information efficient estimates - joint test of parameter restrictions - over-identification test Reference: Cocci, <NAME>. & <NAME>, "Standard Errors for Calibrated Parameters" https://scholar.princeton.edu/mikkelpm/calibration """ class MinDist: def __init__(self, moment_fct, moment_estim, moment_se=None, moment_varcov=None, moment_fct_deriv=None): self.moment_fct = moment_fct self.moment_estim = np.asarray(moment_estim).flatten() self.moment_num = len(moment_estim) self.moment_fct_deriv = self._deriv(moment_fct_deriv, self.moment_fct) # Var-cov matrix assert (moment_se is None) + (moment_varcov is None) == 1, 'Either "moment_se" or "moment_varcov" must be supplied, but not both' if moment_varcov is not None: self.moment_varcov = np.asarray(moment_varcov) else: self.moment_varcov = np.empty((self.moment_num,self.moment_num)) self.moment_varcov[np.eye(self.moment_num)==1] = np.asarray(moment_se**2) self.moment_varcov[np.eye(self.moment_num)==0] = np.nan # Check inputs assert self.moment_varcov.shape==(self.moment_num,self.moment_num), 'Dimension of "moment_se" or "moment_varcov" is wrong' assert np.isreal(self.moment_estim).all(), 'Wrong input type for "moment_estim"' assert np.isreal(self.moment_varcov).all(), 'Wrong input type for "moment_se" or "moment_varcov"' assert (np.diag(self.moment_varcov)>=0).all(), 'SE for each individual moment must be nonnegative' assert np.any([self.moment_varcov==self.moment_varcov.T, np.isnan(self.moment_varcov)],axis=0).all(), '"moment_varcov" must be symmetric' assert self.moment_num>=1, '"moment_estim" must have at least one element' # Determine type of var-cov matrix self.full_info = (not np.isnan(self.moment_varcov).any()) # Full info self.diag_only = ((not self.full_info) and np.isnan(self.moment_varcov[np.eye(self.moment_num)==0]).all()) # Only diagonal is known # Check if var-cov has known block diagonal (and unknown everywhere else) self.blockdiag_only = False if not (self.full_info or self.diag_only): i = 0 block_inds = [] while i<self.moment_num: # Loop through purported blocks the_block = i + np.flatnonzero(1-np.isnan(self.moment_varcov[i,i:])) if not (np.diff(the_block)==1).all(): return # Can't be block diagonal block_inds.append(the_block) i = the_block.max()+1 # Check that the block diagonal indeed has non-NaN values, with NaN's outside blocks block_bool = block_diag(*[np.ones((len(b),len(b))) for b in block_inds]) self.blockdiag_only = (not np.isnan(self.moment_varcov[block_bool==1]).any()) and np.isnan(self.moment_varcov[block_bool==0]).all() if not self.blockdiag_only: return # If block diagonal, extract useful information for later self.moment_varcov_blocks = {'ind': block_inds, # Indices of blocks 'varcov': [self.moment_varcov[np.ix_(b,b)] for b in block_inds], # Var-cov for each block 'num': len(block_inds)} self.moment_varcov_blocks['chol'] = [(b if b.ndim==1 else np.linalg.cholesky(b)) for b in self.moment_varcov_blocks['varcov']] # Cholesky factors def fit(self, transf=lambda x: x, weight_mat=None, opt_init=None, estim_fct=None, eff=True, one_step=True, transf_deriv=None, param_estim=None, 
estim=None, transf_jacob=None, moment_fit=None, moment_jacob=None): """Minimum distance estimates and standard errors, either with full-information moment var-cov matrix or with limited-information individual moment variances """ # Check inputs assert (param_estim is not None) or (opt_init is not None) or (estim_fct is not None), 'One of the following must be supplied: "param_estim", "opt_init", or "estim_fct"' # Transformation Jacobian function transf_deriv = self._deriv(transf_deriv, transf) # Determine weight matrix, if not supplied if self.full_info and (eff or (weight_mat is None)): weight_mat = np.linalg.inv(self.moment_varcov) # Full-info efficient weight matrix eff = True if weight_mat is None: weight_mat = np.diag(1/np.diag(self.moment_varcov)) # Ad hoc diagonal weight matrix # Default estimation routine if estim_fct is None: estim_fct = lambda W: opt.minimize(lambda x: (self.moment_estim-self.moment_fct(x)) @ W @ (self.moment_estim-self.moment_fct(x)), opt_init, method='BFGS')['x'] # Initial estimate of parameters, if not supplied if param_estim is None: param_estim = estim_fct(weight_mat) # Transformation, moment function, and Jacobians at initial estimate estim, transf_jacob, moment_fit, moment_jacob \ = self.estim_update(param_estim, transf, transf_deriv, estim=estim, transf_jacob=transf_jacob, moment_fit=moment_fit, moment_jacob=moment_jacob) moment_loadings = self._get_moment_loadings(moment_jacob, weight_mat, transf_jacob) estim_num = 1 if np.isscalar(estim) else len(estim) # Efficient estimates if eff: if self.full_info: # Full information if one_step: # One-step estimation param_estim = self._get_onestep(moment_fit, weight_mat, moment_jacob, param_estim) estim, transf_jacob, moment_fit, moment_jacob = self.estim_update(param_estim, transf, transf_deriv) moment_loadings = self._get_moment_loadings(moment_jacob, weight_mat, transf_jacob) else: # Full optimization # Do nothing, since param_estim already contains estimates of interest pass else: # Limited information if estim_num > 1: # If more than one parameter of interest, handle each separately by recursive call estim_init = estim.copy() ress = [self.fit(transf=lambda x: transf(x)[i], weight_mat=weight_mat, estim_fct=estim_fct, eff=True, one_step=one_step, param_estim=param_estim, estim=estim_init[i], transf_jacob=transf_jacob[i,:], moment_fit=moment_fit, moment_jacob=moment_jacob) for i in range(estim_num)] # Compute for each parameter of interest estim = np.array([r['estim'] for r in ress]) estim_se = np.array([r['estim_se'] for r in ress]) moment_loadings = np.array([r['moment_loadings'] for r in ress]).T weight_mat = [r['weight_mat'] for r in ress] else: # If only single parameter of interest estim_se, moment_loadings, weight_mat = self.worstcase_eff(moment_jacob, transf_jacob, weight_mat=weight_mat) if one_step: # One-step estimation estim = self._get_onestep(moment_fit, None, moment_loadings, estim).item() else: # Full optimization estimation param_estim = estim_fct(weight_mat) estim = transf(param_estim) # Start building results dictionary res = {'estim': estim, 'param_estim': param_estim, 'weight_mat': weight_mat, 'moment_fit': moment_fit, 'moment_jacob': moment_jacob, 'moment_loadings': moment_loadings, 'transf_jacob': transf_jacob, 'eff': eff, 'estim_num': estim_num} # Standard errors if self.full_info: # Full information estim_varcov = moment_loadings.T @ self.moment_varcov @ moment_loadings estim_se = np.sqrt(np.diag(estim_varcov)) res['estim_varcov'] = estim_varcov else: # Limited information if eff: # Do 
nothing, since standard errors have already been computed above pass else: estim_se, worstcase_varcov = self.worstcase_se(moment_loadings) res['worstcase_varcov'] = worstcase_varcov res['estim_se'] = estim_se return res def test(self, estim_res, joint=True, test_weight_mat=None): """Test whether transformed parameters equal zero """ # t-statistics old_settings=np.seterr(divide='ignore') tstat = estim_res['estim']/estim_res['estim_se'] np.seterr(**old_settings) tstat_pval = 2*norm.cdf(-np.abs(tstat)) res = {'tstat': tstat, 'tstat_pval': tstat_pval} if not joint: return res # Weight matrix for joint test statistic if test_weight_mat is None: if self.full_info: # Full information test_weight_mat = np.linalg.inv(estim_res['estim_varcov']) else: # Limited information # Ad hoc choice motivated by independence test_weight_mat = np.linalg.inv(estim_res['moment_loadings'].T @ np.diag(np.diag(self.moment_varcov)) @ estim_res['moment_loadings']) # Check dimensions assert test_weight_mat.shape == (estim_res['estim_num'],estim_res['estim_num']), 'Dimension of "test_weight_mat" is wrong' # Test statistic joint_stat = estim_res['estim'] @ test_weight_mat @ estim_res['estim'] # p-value if self.full_info: # Full information joint_pval = 1-chi2.cdf(joint_stat, estim_res['estim_num']) else: # Limited information max_trace, max_trace_varcov = self.solve_sdp(estim_res['moment_loadings'] @ test_weight_mat @ estim_res['moment_loadings'].T) joint_pval = 1-chi2.cdf(joint_stat/max_trace, 1) if joint_pval>0.215: # Test can only be used at significance levels < 0.215 joint_pval = np.array([1]) res['max_trace'] = max_trace res['max_trace_varcov'] = np.array(max_trace_varcov) res.update({'test_weight_mat': test_weight_mat, 'joint_stat': joint_stat.item(), 'joint_pval': joint_pval.item()}) return res def overid(self, estim_res, joint=True): """Over-identification test """ assert isinstance(estim_res['weight_mat'], np.ndarray), 'Estimation results must be based on a single weight matrix' # Errors in fitting moments moment_error = self.moment_estim - estim_res['moment_fit'] # Standard errors for moment errors M = np.eye(self.moment_num) - self._get_moment_loadings(estim_res['moment_jacob'], estim_res['weight_mat'], estim_res['moment_jacob']).T the_estim_res = self.fit(lambda x: x, eff=False, weight_mat=np.eye(self.moment_num), param_estim=estim_res['param_estim'], estim=moment_error, transf_jacob=M, moment_fit=self.moment_estim, moment_jacob=np.eye(self.moment_num)) '''Only the inputs "weight_mat", "transf_jacob", and "moment_jacob" are actually used to calculate the standard errors - the other inputs are only provided to avoid unnecessary computations ''' # Test statistic and p-value the_test_res = self.test(the_estim_res, joint=joint, test_weight_mat=estim_res['weight_mat']) res = {'moment_error': moment_error, 'moment_error_se': the_estim_res['estim_se'], 'tstat': the_test_res['tstat'], 'tstat_pval': the_test_res['tstat_pval']} if joint: res.update({'joint_stat': the_test_res['joint_stat'], 'joint_pval': the_test_res['joint_pval']}) if self.full_info: # Adjust degrees of freedom assert estim_res['eff'], 'Full-information joint test requires using the efficient weight matrix' res['joint_pval'] = 1-chi2.cdf(the_test_res['joint_stat'], self.moment_num-len(estim_res['param_estim'])) else: res.update({'max_trace': the_test_res['max_trace'], 'max_trace_varcov': the_test_res['max_trace_varcov']}) return res def worstcase_se(self, moment_loadings): """Worst-case standard errors and corresponding var-cov matrix for linear 
combination of moments """ if moment_loadings.ndim>1: # If more than one parameter of interest, handle them separately ress = [self.worstcase_se(moment_loadings[:,i]) for i in range(moment_loadings.shape[1])] return np.array([r[0] for r in ress]), [r[1] for r in ress] if self.diag_only: # Only diagonal is known moment_se = np.sqrt(np.diag(self.moment_varcov)) se = moment_se @ np.abs(moment_loadings) # Closed form aux = np.sign(moment_loadings) * moment_se varcov = np.outer(aux, aux) elif self.blockdiag_only: # Only block diagonal is known loading_blocks = [moment_loadings[ind] for ind in self.moment_varcov_blocks['ind']] aux = [self.moment_varcov_blocks['varcov'][i] \ @ loading_blocks[i] for i in range(self.moment_varcov_blocks['num'])] var_blocks = [max(loading_blocks[i] @ aux[i],1e-10) # Avoid exact zeros (when loadings are zero) for i in range(self.moment_varcov_blocks['num'])] se = np.sqrt(var_blocks).sum() aux2 = [aux[i]/np.sqrt(var_blocks[i]) for i in range(self.moment_varcov_blocks['num'])] aux3 = [self.moment_varcov_blocks['chol'][i] \ - np.outer(aux[i], loading_blocks[i] @ self.moment_varcov_blocks['chol'][i]) \ / var_blocks[i] for i in range(self.moment_varcov_blocks['num'])] aux4 = np.hstack((np.hstack(aux2).reshape(-1,1), block_diag(*aux3))) varcov = aux4 @ aux4.T else: # General knowledge of var-cov matrix # Solve semidefinite programming problem var, varcov = self.solve_sdp(np.outer(moment_loadings, moment_loadings)) se = np.sqrt(var) varcov = np.array(varcov) # Convert to numpy array return se, varcov def worstcase_eff(self, moment_jacob, transf_jacob, weight_mat=None): """Compute worst-case efficient moment loadings and weight matrix See main paper for explanation """ # Set up median regression as described in paper (p,k) = moment_jacob.shape GpG = moment_jacob.T @ moment_jacob Y = moment_jacob @ np.linalg.solve(GpG, transf_jacob.reshape(-1,1)) moment_jacob_perp = null_space(moment_jacob.T) X = -moment_jacob_perp if self.diag_only: # Only diagonal is known # Run median regression moment_se = np.sqrt(np.diag(self.moment_varcov)) qr_mod = QuantReg(moment_se.reshape(-1,1) * Y, moment_se.reshape(-1,1) * X) qr_fit = qr_mod.fit(q=.5) resid = qr_fit._results.resid # Residuals moment_loadings = resid / moment_se se = np.abs(resid).sum() # Weight matrix puts weight on only k moments sort_inds = np.abs(moment_loadings).argsort() if weight_mat is not None: weight_mat_new = weight_mat.copy() else: weight_mat_new = np.eye(p) weight_mat_new[sort_inds[:p-k],:] = 0 weight_mat_new[:,sort_inds[:p-k]] = 0 else: # General case # Objective function and gradient def objective(z, Y, X): resid = Y.flatten() - X @ z se, varcov = self.worstcase_se(resid) grad = -2 * (X.T @ varcov @ resid) return se**2, grad # Solve nested optimization cvx.solvers.options['show_progress'] = False # Suppress CVX output temporarily opt_res = opt.minimize(lambda z: objective(z, Y, X), np.zeros(p-k), jac=True, method='BFGS') cvx.solvers.options['show_progress'] = True moment_loadings = Y.flatten() - X @ opt_res['x'] se = np.sqrt(opt_res['fun']) # Weight matrix aux1 = np.outer(transf_jacob, opt_res['x']) / (transf_jacob @ np.linalg.solve(GpG, transf_jacob.reshape(-1,1))) W = lambda delta: np.vstack((np.hstack((np.eye(k),aux1)),np.hstack((aux1.T,delta*np.eye(p-k))))) # Determine delta such that W(delta) is positive definite delta_pd = opt.fsolve(lambda delta: np.linalg.eigvalsh(W(delta)).min()-0.01, 0) aux2 = np.hstack((moment_jacob, moment_jacob_perp)) weight_mat_new = aux2 @ W(delta_pd) @ aux2.T return se, 
moment_loadings, weight_mat_new def solve_sdp(self, A): """Solve semidefinite programming problem max tr(A*V) s.t. V psd and known elements of V using CVXOPT package """ # Find elements of V with known values (below diagonal) inds = np.all([(np.tril(np.ones((self.moment_num,self.moment_num)))==1),1-np.isnan(self.moment_varcov)],axis=0) # Coefficient matrices for CVXOPT: max tr(h*V) s.t. G'*vec(V)+c=0 # Note: For some reason, CVXOPT's "vec" operator multiplies off-diagonal elements by 2 factor = 2-np.eye(self.moment_num) c = cvx.matrix(-(self.moment_varcov*factor)[inds.T].reshape(-1,1),tc='d') G = cvx.sparse(cvx.matrix(np.eye(self.moment_num**2)[:,inds.T.flatten()==1])) h = cvx.matrix(-A,tc='d') # Solve SDP sol = cvx.solvers.sdp(c,Gs=[G],hs=[h]) # Return objective value and optimal V return sol['dual objective'], sol['zs'][0] def estim_update(self, param_estim, transf, transf_deriv, estim=None, transf_jacob=None, moment_fit=None, moment_jacob=None): """Update estimated parameter transformation and moments, including their Jacobians. Avoids recomputing quantities if they're already supplied. """ if estim is None: estim = transf(param_estim) if transf_jacob is None: transf_jacob = transf_deriv(param_estim) if moment_fit is None: moment_fit = self.moment_fct(param_estim) if moment_jacob is None: moment_jacob = self.moment_fct_deriv(param_estim) return estim, transf_jacob, moment_fit, moment_jacob def _get_onestep(self, moment_init, weight_mat, moment_jacob, param_init): """One-step estimation """ if weight_mat is None: subtr = moment_jacob.T @ (moment_init-self.moment_estim) else: subtr = np.linalg.solve(moment_jacob.T @ weight_mat @ moment_jacob, moment_jacob.T @ weight_mat @ (moment_init-self.moment_estim)) return param_init - subtr.flatten() @staticmethod def _get_moment_loadings(moment_jacob, weight_mat, transf_jacob): """Asymptotic loadings of minimum distance estimator on empirical moments """ return weight_mat @ moment_jacob @ np.linalg.solve(moment_jacob.T @ weight_mat @ moment_jacob, transf_jacob.T) @staticmethod def _deriv(deri, fct): """Create Jacobian function, either numerically or from user-supplied function """ if deri is None: return lambda x: nd.Jacobian(fct)(x) # Numerical differentiation elif isinstance(deri, np.ndarray): return lambda x: deri # Turn constant matrix into function else: return deri # Just use the supplied derivative function
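To make the interface above concrete, here is a minimal usage sketch. The moment function, estimates, and standard errors are invented for illustration only, and the sketch assumes the MinDist class defined in this module is importable along with its dependencies (numpy, scipy, numdifftools, cvxopt, statsmodels):

import numpy as np

# Hypothetical toy model: two moments that are a linear function of two
# parameters, with only the individual moment standard errors known.
def moment_fct(theta):
    return np.array([theta[0], theta[0] + theta[1]])

md = MinDist(moment_fct,
             moment_estim=np.array([1.0, 3.0]),
             moment_se=np.array([0.1, 0.2]))

# Estimate with the ad hoc diagonal weight matrix and worst-case standard errors.
res = md.fit(opt_init=np.zeros(2), eff=False)
print(res['estim'], res['estim_se'])

# t-statistics and a joint test of the estimated parameters.
test_res = md.test(res)
print(test_res['tstat'], test_res['tstat_pval'])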
/* * Copyright 1995, 2003 Perforce Software. All rights reserved. * * This file is part of Perforce - the FAST SCM System. */ /* * fileiouni.cc -- FileIOUnicode methods */ # include <stdhdrs.h> # include <error.h> # include <errornum.h> # include <msgsupp.h> # include <strbuf.h> # include <i18napi.h> # include <charcvt.h> # include <debug.h> # include <tunable.h> # include <msgserver.h> # include "filesys.h" # include "fileio.h" void FileIOUnicode::FillBuffer( Error *e ) { // Fill buffer from file. if (trans) { int readlen; int cnt; readlen = tbuf.Length() - tsz; cnt = FileIOCompress::Read( tbuf.Text()+tsz, readlen, e ); if ( e->Test() ) return; tsz += cnt; if (tsz) { const char *ss; char *ts; ss = tbuf.Text(); ts = iobuf.Text(); trans->ResetErr(); trans->Cvt(&ss, tbuf.Text()+tsz, &ts, iobuf.Text()+iobuf.Length()); if (trans->LastErr() == CharSetCvt::NOMAPPING) { // set an error e->Set( MsgSupp::NoTrans ) << trans->LineCnt() << Name(); return; } else if (trans->LastErr() == CharSetCvt::PARTIALCHAR) { if( cnt < readlen ) { /* * End of file and buffer still had room: * Read() has read a smaller number of bytes than * the size of the buffer that was supplied meaning * it reached the end of file. But cvt() reports * that the last character is partial. Since we * have reached the end of file, there are no further * reading to do to complete this partial character * therefore report an error equivalent to the NOMapping * case. */ e->Set( MsgSupp::NoTrans ) << trans->LineCnt() << Name(); return; } } else if (ts == iobuf.Text()) { // error e->Set( MsgSupp::PartialChar ); return; } rcv = ts-iobuf.Text(); tsz += tbuf.Text()-ss; if (tsz) memmove(tbuf.Text(), ss, tsz); } } else { FileIOBuffer::FillBuffer( e ); } } void FileIOUnicode::FlushBuffer( Error *e ) { if (trans) { const char *ss; char *ts; trans->ResetErr(); ss = iobuf.Text(); ts = tbuf.Text(); trans->Cvt(&ss, iobuf.Text()+snd, &ts, tbuf.Text()+iobuf.Length()); if (trans->LastErr() == CharSetCvt::NOMAPPING) { // set an error e->Set( MsgSupp::NoTrans ) << trans->LineCnt() << Name(); // prevent close from attempting second flush snd = 0; } else if (ts == tbuf.Text()) { // error e->Set( MsgSupp::PartialChar ); // prevent close from attempting second flush snd = 0; } else { FileIOCompress::Write( tbuf.Text(), ts-tbuf.Text(), e ); snd += iobuf.Text()-ss; if (snd) memmove(iobuf.Text(), ss, snd); } } else { FileIOBuffer::FlushBuffer( e ); } } void FileIOUnicode::Close( Error *e ) { FileIOBuffer::Close( e ); tsz = 0; trans = NULL; } void FileIOUnicode::Translator( CharSetCvt *c ) { trans = c; if( c ) { c->ResetCnt(); c->IgnoreBOM(); } } FileIOUTF16::FileIOUTF16( LineType lineType ) : FileIOUnicode( lineType ) { SetContentCharSetPriv( (int)CharSetApi::UTF_16_BOM ); } void FileIOUTF16::Set( const StrPtr &name ) { Set( name, 0 ); } void FileIOUTF16::Set( const StrPtr &name, Error *e ) { FileIOUnicode::Set( name, e ); SetContentCharSetPriv( (int)CharSetApi::UTF_16_BOM ); } void FileIOUTF16::Open( FileOpenMode mode, Error *e ) { CharSetCvt *cvt; if( mode == FOM_READ ) cvt = new CharSetCvtUTF168; else cvt = new CharSetCvtUTF816( -1, 1 ); FileIOUnicode::Open( mode, e ); FileIOUnicode::Translator( cvt ); } void FileIOUTF16::Close( Error *e ) { CharSetCvt *temp = trans; FileIOUnicode::Close( e ); delete temp; } void FileIOUTF16::Translator( CharSetCvt * ) { } FileIOUTF8::FileIOUTF8( LineType lineType ) : FileIOUTF16( lineType ) { SetCharSetPriv(); } void FileIOUTF8::Set( const StrPtr &name, Error *e ) { FileIOUnicode::Set( name, e ); SetCharSetPriv(); } void 
FileIOUTF8::SetCharSetPriv() { int c = p4tunable.Get( P4TUNE_FILESYS_UTF8BOM ); int f = (int)CharSetApi::UTF_8_BOM; # ifdef OS_NT if( c == 0 ) f = (int)CharSetApi::UTF_8; # else if( c != 1 ) // meaning either 0 or 2 f = (int)CharSetApi::UTF_8; # endif SetContentCharSetPriv( f ); } void FileIOUTF8::Open( FileOpenMode mode, Error *e ) { CharSetCvt *cvt; if( mode == FOM_READ ) cvt = new CharSetCvtUTF8UTF8( -1, UTF8_VALID_CHECK ); else { int f = 0; if( GetContentCharSetPriv() == (int)CharSetApi::UTF_8_BOM ) f = UTF8_WRITE_BOM; cvt = new CharSetCvtUTF8UTF8( 1, f ); } FileIOUnicode::Open( mode, e ); FileIOUnicode::Translator( cvt ); }
//================================================================================================== /*! @file Main header for the unit test system @copyright 2015 <NAME> Distributed under the Boost Software License, Version 1.0. (See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt) **/ //================================================================================================== #ifndef STF_HPP_INCLUDED #define STF_HPP_INCLUDED #include <unordered_map> #include <sstream> #include <cstdlib> #include <string> #include <boost/config.hpp> namespace stf { namespace detail { struct args_map { args_map() { std::pair<std::string,std::string> envvars[] = { {"STF_COMPACT" , "compact"} }; for(auto const& id : envvars) { auto p = get_env(id.first); if(!p.empty()) data_[id.second] = p; } } void update(int argc, const char** argv) const { bool found = false; std::string id; for(int i=1;i<argc;++i) { std::string cur{argv[i]}; if(is_option(cur)) { found = true; id = std::string{cur.begin()+2,cur.end()}; } else { if(found && !is_option(cur)) { data_[id] = cur; found = false; } } } } template<typename R> R operator()(std::string const& id, R def = R{} ) const { auto opt = data_.find(id); if(opt != data_.cend()) { std::istringstream s(opt->second); s >> def; } return def; } static bool is_option(std::string const& s) { return (s.size() > 2) && (s[0] == '-') && (s[1] == '-'); } static std::string get_env(std::string const& name) { #if defined(BOOST_MSVC) char* buf = 0; std::size_t sz = 0; _dupenv_s(&buf, &sz, name.c_str()); std::string that = buf ? buf : " "; free(buf); #else auto p = std::getenv(name.c_str()); std::string that = p ? p : ""; #endif return that; } private: mutable std::unordered_map<std::string,std::string> data_; }; } const detail::args_map args; } #include <iostream> #include <cstddef> #include <string> namespace stf { namespace unit { struct env { public: env(int argc, const char** argv, std::ostream& s = std::cout) : test_count{0}, success_count{0}, invalid_count{0}, os(s) { args.update(argc,argv); } void compact(bool m) { compact_mode = m; } bool is_compact() const { return compact_mode; } void as_success() { test_count++; success_count++; } void as_invalid() { test_count++; invalid_count++; } void as_failure() { test_count++; } bool passed() const { return tests() != successes(); } std::ptrdiff_t tests() const { return test_count; } std::ptrdiff_t successes() const { return success_count; } std::ptrdiff_t invalids() const { return invalid_count; } std::ptrdiff_t failures() const { return tests() - successes() - invalids(); } std::ostream& stream() const { return os; } std::ostream& pass() const { return os << "[PASS]" << " - "; } std::ostream& fail() const { return os << "[FAIL]" << " - "; } std::ostream& invalid() const { if(compact_mode) return os << "I"; else return os << "[IVLD]" << " - "; } env(env const&) = delete; env& operator=(env const&) = delete; private: std::ptrdiff_t test_count; std::ptrdiff_t success_count; std::ptrdiff_t invalid_count; bool compact_mode; std::ostream& os; }; } inline bool report(unit::env const& e, std::ptrdiff_t fails, std::ptrdiff_t invalids) { auto test_txt = e.tests() > 1 ? "tests" : "test"; auto pass_txt = e.successes() > 1 ? "successes" : "success"; auto fail_txt = e.failures() > 1 ? "failures" : "failure"; auto inv_txt = e.invalids() > 1 ? 
"invalids" : "invalid"; e.stream() << std::string(80,'-') << "\n" << "Results: " << e.tests() << " " << test_txt << " - " << e.successes() << " " << pass_txt << " - " << e.failures() << "/" << fails << " " << fail_txt << " - " << e.invalids() << "/" << invalids << " " << inv_txt << std::endl; if(!fails && !invalids) return e.passed(); else return e.failures() != fails || e.invalids() != invalids; } template<typename Test> inline void scenario_header( unit::env& env, Test const& t) { if(!env.is_compact()) { auto hbar = std::string(80,'-'); env.stream() << hbar << std::endl << "Scenario: " << t.name << std::endl << hbar << std::endl; } else { env.stream() << "Scenario: " << t.name << " : "; } } inline void process_invalid( unit::env& env, std::ptrdiff_t count) { if(count == env.tests()) { env.as_invalid(); if(!env.is_compact()) env.invalid() << "Empty test case" << std::endl; else env.stream() << "!"; } } } #define STF_STRING__(...) #__VA_ARGS__ #define STF_STRING_(text) STF_STRING__ text #define STF_STRING(...) STF_STRING_((__VA_ARGS__)) #define STF_UNIQUE3( ID, LINE ) ID ## LINE #define STF_UNIQUE2( ID, LINE ) STF_UNIQUE3( ID, LINE ) #if defined(DOXYGEN_ONLY) #define STF_UNIQUE( Identifier ) #define STF_FUNCTION #define STF_REGISTRATION #else #define STF_UNIQUE( Identifier ) STF_UNIQUE2( Identifier, __LINE__ ) #define STF_FUNCTION STF_UNIQUE(stf_function) #define STF_REGISTRATION STF_UNIQUE(stf_registration) #endif #include <vector> #include <functional> namespace stf { namespace unit { struct test { using behavior_t = std::function<void( env& )>; std::string name; behavior_t behaviour; test( std::string const& n, behavior_t const& b ) : name( n ), behaviour( b ) {} void operator()(env& e) { behaviour(e); } }; using test_suite = std::vector<test>; static inline test_suite& suite() { static test_suite tests; return tests; } } } namespace stf { namespace detail { struct registrar { registrar( ::stf::unit::test const& test_case ) { unit::suite().push_back( test_case ); } }; } } #include <boost/preprocessor/seq/elem.hpp> #include <boost/preprocessor/seq/size.hpp> #include <boost/preprocessor/repetition/repeat.hpp> #define STF_CASE(DESCRIPTION) \ void STF_FUNCTION( ::stf::unit::env& ); \ namespace \ { \ ::stf::detail::registrar \ STF_REGISTRATION{::stf::unit::test(DESCRIPTION, STF_FUNCTION)}; \ } \ void STF_FUNCTION( ::stf::unit::env& $ ) \ #define STF_RTYPE(z, n, t) \ { \ using T = BOOST_PP_SEQ_ELEM(n,t); \ $.stream() << std::endl; \ $.stream() << "With T = [" << STF_STRING(BOOST_PP_SEQ_ELEM(n,t)) \ << "] "; \ if(!$.is_compact()) $.stream() << std::endl; \ STF_FUNCTION<T>($); \ } \ #define STF_CASE_TPL(DESCRIPTION, TYPES) \ template<typename T> void STF_FUNCTION( stf::unit::env& ); \ namespace \ { \ stf::detail::registrar \ STF_REGISTRATION{ stf::unit::test \ ( DESCRIPTION \ , [](::stf::unit::env& $) -> void \ { \ BOOST_PP_REPEAT(BOOST_PP_SEQ_SIZE(TYPES),STF_RTYPE,TYPES) \ } \ ) \ }; \ } \ template<typename T> void STF_FUNCTION( stf::unit::env& $ ) \ #include <cstddef> #include <cstdint> #define STF_SIGNED_INTEGRAL_TYPES (std::int8_t)(std::int16_t)(std::int32_t)(std::int64_t) #define STF_UNSIGNED_INTEGRAL_TYPES (std::uint8_t)(std::uint16_t)(std::uint32_t)(std::uint64_t) #define STF_INTEGRAL_TYPES (char)STF_SIGNED_INTEGRAL_TYPES STF_UNSIGNED_INTEGRAL_TYPES #define STF_IEEE_TYPES (float)(double) #define STF_SIGNED_NUMERIC_TYPES STF_SIGNED_INTEGRAL_TYPES STF_IEEE_TYPES #define STF_UNSIGNED_NUMERIC_TYPES STF_UNSIGNED_INTEGRAL_TYPES #define STF_NUMERIC_TYPES STF_SIGNED_NUMERIC_TYPES 
STF_UNSIGNED_NUMERIC_TYPES #define STF_ALL_TYPES (bool) STF_NUMERIC_TYPES namespace stf { inline bool is_false() { return false; } inline bool is_true() { return true; } } #include <algorithm> #include <random> namespace stf { template<typename Environment, typename Suite, typename... Setup> inline bool run(Environment& environment, Suite& tests, Setup const&... setup) { auto is_compact = args("compact",false); environment.compact(is_compact); if(auto seed = args("random",0u)) { std::shuffle( tests.begin(), tests.end(), std::mt19937{seed} ); } for(auto& t : tests ) { scenario_header(environment,t); auto count = environment.tests(); t(environment); process_invalid(environment, count); environment.stream() << std::endl; } return ::stf::report(environment,setup...); } } #if !defined(STF_CUSTOM_DRIVER_FUNCTION) #define STF_CUSTOM_DRIVER_FUNCTION main #endif #if defined(DOXYGEN_ONLY) #define STF_CUSTOM_DRIVER_FUNCTION #endif #if !defined(STF_USE_CUSTOM_DRIVER) int STF_CUSTOM_DRIVER_FUNCTION(int argc, const char** argv) { ::stf::unit::env $env(argc,argv,std::cout); return ::stf::run( $env, ::stf::unit::suite(), 0, 0 ); } #endif #include <iostream> #include <string> namespace stf { struct location { std::string file; std::size_t line; }; std::ostream& operator<<(std::ostream& os, location const& l) { return os << l.file << ":" << l.line; } inline location at(std::string const& f, std::size_t l) { return {f,l}; } } #include <cstddef> #include <type_traits> namespace stf { namespace detail { template<typename T> struct is_container { template<typename U> static auto test( int ) -> decltype ( std::declval<U>().begin() , std::declval<U>().end() , std::declval<U>().size() , std::true_type() ); template<typename> static auto test( ... ) -> std::false_type; typedef std::is_same<decltype(test<T>(0)),std::true_type> type; }; template <typename T, typename R> using if_container = typename std::enable_if<is_container<T>::type::value,R>::type; template<typename T, typename U, typename R> using are_not_containers = typename std::enable_if< !detail::is_container<T>::type::value && !detail::is_container<U>::type::value , R >::type; template <typename T, typename R> using if_not_container = typename std::enable_if<!is_container<T>::type::value,R>::type; #if defined(DOXYGEN_ONLY) template<typename C> inline std::size_t size(C const& v); template<typename C> inline auto begin(C const& v); template<typename C> inline auto end(C const& v); #else template<typename C> inline detail::if_container<C,std::size_t> size(C const& c) { return c.size(); } template<typename C> inline detail::if_not_container<C,std::size_t> size(C const&) { return 1ull; } template<typename C> inline detail::if_container<C,typename C::const_iterator> begin(C const& c) { return c.begin(); } template<typename C> inline detail::if_not_container<C,C const*> begin(C const& t) { return &t; } template<typename C> inline detail::if_container<C,typename C::const_iterator> end(C const& c) { return c.end(); } template<typename C> inline detail::if_not_container<C,C const*> end(C const& t) { return (&t)+1; } #endif } } #include <cstddef> #include <iostream> #include <type_traits> namespace stf { namespace detail { template<typename T> struct is_streamable { template<typename U> static auto test( int ) -> decltype ( std::cout << std::declval<U>() , std::true_type() ); template<typename> static auto test( ... 
) -> std::false_type; typedef std::is_same<decltype(test<T>(0)),std::true_type> type; }; template <typename T, typename R> using if_streamable = typename std::enable_if<is_streamable<T>::type::value,R>::type; template <typename T, typename R> using if_not_streamable = typename std::enable_if<!is_streamable<T>::type::value,R>::type; } } #include <boost/core/demangle.hpp> #include <sstream> #include <cstddef> #include <string> #include <iomanip> namespace stf { inline std::string to_string( std::nullptr_t ) { return "nullptr"; } inline std::string to_string( bool v ) { return v ? "true" : "false"; } inline std::string to_string( std::string const& v ) { return v; } inline std::string to_string( char const* v ) { return std::string(v); } inline std::string to_string( char v ) { return std::string(1, v); } template <typename T> inline detail::if_streamable<T,std::string> to_string( T const& value) { std::ostringstream os; os << std::setprecision(20) << value; return os.str(); } template <typename T> inline detail::if_container<T,std::string> make_string( T const& value) { auto b = value.begin(), e = value.end(); std::ostringstream os; os << "{ "; if(b!=e) os << to_string(*b++); while(b != e) os << ", " << to_string(*b++); os << " }"; return os.str(); } template <typename T> inline detail::if_not_container<T,std::string> make_string( T const& value) { std::ostringstream os; os << "[ "; os << boost::core::demangle(typeid(T).name()); os << " ] @" << (void*)(&value); return os.str(); } template <typename T> inline detail::if_not_streamable<T,std::string> to_string( T const& value) { return make_string(value); } template<typename LHS, typename RHS> inline std::string split_line(LHS const&, RHS const&, std::string const& op) { auto lb = detail::is_container<LHS>::type::value; auto rb = detail::is_container<RHS>::type::value; return (lb?"\n":"") + op + (rb?"\n":""); } } #include <string> namespace stf { namespace detail { struct result { bool status; std::string lhs; std::string op; std::string rhs; explicit operator bool() { return status; } }; } } #define STF_DUMP(R) \ $.stream() << "failing because:\n" << R.lhs << R.op << R.rhs << "\n" << "is incorrect.\n"; \ namespace stf { namespace ext { template<typename LHS, typename RHS, typename EnableIf = void> struct equal { inline bool operator()(LHS const& l, RHS const& r) const { return l == r; } }; template<typename LHS, typename RHS, typename EnableIf = void> struct less { inline bool operator()(LHS const& l, RHS const& r) const { return l < r; } }; } namespace detail { template<typename LHS, typename RHS> inline bool eq(LHS const& l, RHS const& r) { return ::stf::ext::equal<LHS,RHS>()(l, r); } template<typename LHS, typename RHS> inline bool neq(LHS const& l, RHS const& r) { return !eq(l, r); } template<typename LHS, typename RHS> inline bool lt(LHS const& l, RHS const& r) { return ::stf::ext::less<LHS,RHS>()(l, r); } template<typename LHS, typename RHS> inline bool ge(LHS const& l, RHS const& r) { return !lt(l, r); } template<typename LHS, typename RHS> inline bool gt(LHS const& l, RHS const& r) { return !lt(l, r) || !neq(l, r); } template<typename LHS, typename RHS> inline bool le(LHS const& l, RHS const& r) { return lt(l, r) || eq(l, r); } } } namespace stf { namespace detail { template<typename Expression> struct lhs_expr { Expression lhs; lhs_expr( Expression x ) : lhs( x ) {} lhs_expr(lhs_expr const&) = delete; lhs_expr& operator=(lhs_expr const&) = delete; operator result() { return result { bool(lhs) , stf::to_string( bool(lhs) ) , 
stf::to_string("") , stf::to_string("") }; } #define STF_BINARY_DECOMPOSE(OP,SB,FN) \ template<typename R> result operator OP( R const & rhs ) \ { \ return { stf::detail::FN(lhs, rhs) \ , stf::to_string( lhs ), stf::split_line(lhs,rhs,SB), stf::to_string(rhs) \ }; \ } \ STF_BINARY_DECOMPOSE( ==, "==", eq ) STF_BINARY_DECOMPOSE( !=, "!=", neq ) STF_BINARY_DECOMPOSE( < , "<" , lt ) STF_BINARY_DECOMPOSE( > , ">" , gt ) STF_BINARY_DECOMPOSE( >=, ">=", ge ) STF_BINARY_DECOMPOSE( <=, "<=", le ) #undef STF_BINARY_DECOMPOSE }; struct decomposer { template <typename Expression> lhs_expr<Expression const &> operator->* ( Expression const& expr ) { return { expr }; } }; } } #define STF_DECOMPOSE( XPR ) ( stf::detail::decomposer()->* XPR ) #define STF_DISPLAY( INDICATOR, MESSAGE ) \ do \ { \ if(!$.is_compact()) $.stream() << INDICATOR << MESSAGE << std::endl; \ } while( ::stf::is_false() ) \ #define STF_INFO( MESSAGE ) STF_DISPLAY("[INFO] ", MESSAGE) #define STF_WARNING( MESSAGE ) STF_DISPLAY("[WARNING] ", MESSAGE) #define STF_ERROR( MESSAGE ) STF_DISPLAY("[ERROR] ", MESSAGE) #define STF_PASS( MESSAGE ) \ do \ { \ $.as_success(); \ if(!$.is_compact()) \ { \ $.pass() << MESSAGE << " in: " << ::stf::at(__FILE__,__LINE__) << std::endl; \ } \ else \ { \ $.stream() << "+"; \ } \ } while( ::stf::is_false() ) \ #define STF_FAIL( MESSAGE ) \ do \ { \ $.as_failure(); \ if(!$.is_compact()) \ { \ $.fail() << MESSAGE << " in: " << ::stf::at(__FILE__,__LINE__) << std::endl; \ } \ else \ { \ $.stream() << "-"; \ } \ } while( ::stf::is_false() ) \ #define STF_EXPECT( EXPR ) \ do \ { \ if( ::stf::detail::result stf_local_r = STF_DECOMPOSE(EXPR) ) \ STF_PASS( "Expecting: " << STF_STRING(EXPR)); \ else \ { \ STF_FAIL( "Expecting: " << STF_STRING(EXPR)); \ if(!$.is_compact()) STF_DUMP( stf_local_r ); \ } \ } while( ::stf::is_false() ) \ #define STF_EXPECT_NOT( EXPR ) \ do \ { \ if( ::stf::detail::result stf_local_r = STF_DECOMPOSE(EXPR) ) \ { \ STF_FAIL( "Not expecting: " << STF_STRING(EXPR)); \ if(!$.is_compact()) STF_DUMP( stf_local_r ); \ } \ else \ STF_PASS( "Not expecting: " << STF_STRING(EXPR)); \ } while( ::stf::is_false() ) \ #if defined(__GNUC__) || defined(DOXYGEN_ONLY) #define STF_UNUSED(X) (void) X #else #define STF_UNUSED(X) X #endif #include <boost/preprocessor/punctuation/remove_parens.hpp> #define STF_THROW( X, T ) \ do \ { \ bool caught = false; \ try { STF_UNUSED(BOOST_PP_REMOVE_PARENS(X)); } \ catch( BOOST_PP_REMOVE_PARENS(T)& ) { caught = true; } \ \ if(caught) \ STF_PASS( STF_STRING(X) << " throws " << STF_STRING(T) ); \ else \ STF_FAIL( STF_STRING(X) << " does not throw " << STF_STRING(T) ); \ } while( ::stf::is_false() ) \ #define STF_NO_THROW( X ) \ do \ { \ bool caught = false; \ try { STF_UNUSED(BOOST_PP_REMOVE_PARENS(X)); } \ catch( ... 
) { caught = true; } \ \ if(caught) \ STF_FAIL( STF_STRING(X) << " throws while not expected to" ); \ else \ STF_PASS( STF_STRING(X) << " doesn't throw" ); \ } while( ::stf::is_false() ) \ #include <vector> #include <string> namespace stf { template<typename Measure, typename Reference> struct approx_ { approx_(Reference const& r, double u) : ref(r), diff(u), size_mismatch(false), max_diff(u) {} template<typename U> inline bool compare(U const& data) const { Measure m; size_mismatch = detail::size(ref) != detail::size(data); if(size_mismatch) return false; auto br = detail::begin(data); auto er = detail::end(data); auto bi = detail::begin(ref); std::vector<double> dist; while(br != er) dist.push_back( m(*br++,*bi++) ); bi = detail::begin(ref); br = detail::begin(data); auto bd = detail::begin(dist); std::ptrdiff_t sz = detail::size(data); for(std::ptrdiff_t idx=0;idx < sz; ++idx) check( *bd++, *br++, *bi++, (sz>1 ? idx : -1) ); return errors.size() == 0; } struct error { double value; std::string ref,data; std::ptrdiff_t idx; }; bool mismatched() const { return size_mismatch; } double max() const { return max_diff; } std::vector<error> const& report() const { return errors; } private: template<typename U, typename X, typename Y> inline void check(U const& u, X const& x, Y const& y, std::ptrdiff_t idx) const { using stf::to_string; if( u > diff ) { errors.push_back( {u, to_string(x),to_string(y), idx} ); max_diff = std::max<double>(max_diff,u); } } Reference ref; double diff; mutable bool size_mismatch; mutable double max_diff; mutable std::vector<error> errors; }; template<typename Measure, typename Reference> std::ostream& operator<<( std::ostream& os, approx_<Measure,Reference> const& u ) { using stf::to_string; if(u.mismatched()) return os << "arguments with mismatched size."; std::ostringstream s,ls; ls.precision(20); for(auto const& e : u.report()) { (e.idx >= 0) ? ls << " [" << e.idx << "]: " : ls << " "; ls << to_string(e.ref) << " vs " << to_string(e.data); Measure::to_stream(ls,e.value); } s.precision(20); Measure::to_stream(s,u.max()); return os << "{\n" + ls.str() + "}\n with a maximal error of " + s.str(); } namespace ext { template<typename T, typename Measure, typename Reference> struct equal<T,stf::approx_<Measure, Reference>> { inline bool operator()(T const& l, stf::approx_<Measure, Reference> const& r) const { return r.compare(l); } }; } } #include <type_traits> namespace stf { namespace detail { template<typename T, typename R> using if_integral = typename std::enable_if<std::is_integral<T>::value, R>::type; template<typename T, typename R> using if_real = typename std::enable_if<std::is_floating_point<T>::value, R>::type; template<typename T, typename U> using common_t = typename std::common_type<T,U>::type; } } #include <type_traits> #include <algorithm> #include <iterator> #include <cmath> namespace stf { namespace ext { template< typename T1, typename T2 = T1 , typename EnableIF = void > struct ulpdist { inline double operator()(T1 a, T2 b) const { using common_t = detail::common_t<T1,T2>; return ext::ulpdist<common_t>() ( static_cast<common_t>(a) , static_cast<common_t>(b) ); } }; template< typename T> struct ulpdist<T,T,typename std::enable_if<std::is_same<T,bool>::value>::type> { inline double operator()(T a, T b) const { return a == b ? 0. 
: 1.; } }; template<typename T> struct ulpdist< T, T , typename std::enable_if<std::is_floating_point<T>::value>::type > { inline double operator()(T a, T b) const { if( (a==b) || ((a!=a) && (b!=b)) ) return 0.; if( (a!=a) || (b!=b) ) return std::numeric_limits<T>::infinity(); int e1 = 0,e2 = 0; T m1,m2; m1 = std::frexp(a, &e1); m2 = std::frexp(b, &e2); int expo = -std::max(e1, e2); T e = (e1 == e2) ? std::abs(m1-m2) : std::abs(std::ldexp(a, expo)- std::ldexp(b, expo)); return double(e/std::numeric_limits<T>::epsilon()); } }; template<typename T> struct ulpdist< T, T , typename std::enable_if < std::is_integral<T>::value && !std::is_same<T,bool>::value >::type > { inline double operator()(T a, T b) const { using u_t = typename std::make_unsigned<T>::type; return static_cast<double>( (a<b) ? u_t(b-a) : u_t(a-b) ); } }; } template<typename T, typename U> inline double ulpdist(T const& a0, U const& a1) { return ext::ulpdist<T,U>()(a0,a1); } #if 0 #endif } #include <string> namespace stf { namespace detail { struct ulp_measure { template<typename T, typename U> double operator()(T const& data, U const& ref) const { return stf::ulpdist(data,ref); } template<typename Stream> static void to_stream(Stream& s, double v) { s << " (" << v << " ULPs)\n"; } }; } template<typename R> using ulp_ = approx_<detail::ulp_measure, R>; template<typename R> inline ulp_<R> ulp(R const& t, double n) { return {t,n}; } } #include <type_traits> #include <algorithm> #include <iterator> #include <cmath> namespace stf { namespace ext { template< typename T1, typename T2 = T1 , typename EnableIF = void > struct reldist { inline double operator()(T1 a, T2 b) const { using common_t = detail::common_t<T1,T2>; return ext::reldist<common_t>() ( static_cast<common_t>(a) , static_cast<common_t>(b) ); } }; template< typename T> struct reldist<T,T,typename std::enable_if<std::is_same<T,bool>::value>::type> { inline double operator()(T a, T b) const { return a == b ? 0. : 1.; } }; template<typename T> struct reldist< T, T , typename std::enable_if<std::is_floating_point<T>::value>::type > { inline double operator()(T a, T b) const { auto inf_ = std::numeric_limits<T>::infinity(); auto aa = std::abs(a); auto ab = std::abs(b); if( (a == b ) || ((a != a) && (b!=b)) ) return 0.; if( (a != a ) || (b != b) ) return inf_; if( (aa==inf_) || (ab == inf_) ) return inf_; return std::abs(a-b) / std::max(T(1), std::max(aa,ab)); } }; template<typename T> struct reldist< T, T , typename std::enable_if < std::is_integral<T>::value && !std::is_same<T,bool>::value >::type > { inline double operator()(T a, T b) const { auto d0 = static_cast<double>(a), d1 = static_cast<double>(b); return reldist<double>()(d0,d1); } }; } template<typename T, typename U> inline double reldist(T const& a0, U const& a1) { return ext::reldist<T,U>()(a0,a1); } } #include <string> namespace stf { namespace detail { struct relative_measure { template<typename T, typename U> double operator()(T const& data, U const& ref) const { return ::stf::reldist(data,ref); } template<typename Stream> static void to_stream(Stream& s, double v) { s.precision(2); s << " (" << std::fixed << v*100. 
<< " %)\n"; } }; } template<typename R> using relative_ = approx_<detail::relative_measure, R>; template<typename R> inline relative_<R> relative(R const& t, double n) { return {t,n/100.}; } } #define STF_ULP_EQUAL(A,B,X) \ do \ { \ auto stf_local_r = ::stf::ulpdist((A),(B)); \ auto stf_local_d = STF_DECOMPOSE((A) == (B)); \ if( stf_local_r <= (X) ) \ STF_PASS( "Expecting: " << STF_STRING(A) " == " STF_STRING(B) << " within " << X << " ULPs." ); \ else \ STF_FAIL( "Expecting: " << stf_local_d.lhs << " == " << stf_local_d.rhs \ << " within " << X << " ULPs " << "but found: " << stf_local_r \ << " ULPs instead." \ ); \ } while( ::stf::is_false() ) \ #define STF_IEEE_EQUAL(A,B) STF_ULP_EQUAL(A,B,0.) #define STF_ALL_ULP_EQUAL(A,B,X) \ do \ { \ auto stf_local_r = STF_DECOMPOSE((A) == ::stf::ulp(B,X)); \ if( stf_local_r ) \ STF_PASS( "Expecting: " << STF_STRING(A) " == " STF_STRING(B) << " within " << X << " ULPs." ); \ else \ STF_FAIL( "Expecting: " << STF_STRING(A) " == " STF_STRING(B) \ << " within " << X << " ULPs " << "but found: " << stf_local_r.rhs \ << " ULPs instead." \ ); \ } while( ::stf::is_false() ) \ #define STF_ALL_IEEE_EQUAL(A,B) STF_ALL_ULP_EQUAL(A,B,0.) #define STF_RELATIVE_EQUAL(A,B,X) \ do \ { \ auto stf_local_r = ::stf::reldist((A),(B)); \ auto stf_local_d = STF_DECOMPOSE((A) == (B)); \ if( stf_local_r <= (X/100.)) \ STF_PASS( "Expecting: " << STF_STRING(A) " == " STF_STRING(B) << " ~ " << X << " %.");\ else \ STF_FAIL( "Expecting: " << stf_local_d.lhs << " == " << stf_local_d.rhs \ << " within " << X << " % " \ << "but found: " << 100*stf_local_r \ << " % instead." \ ); \ } while( ::stf::is_false() ) \ #define STF_ALL_RELATIVE_EQUAL(A,B,X) \ do \ { \ auto stf_local_r = STF_DECOMPOSE((A) == ::stf::relative(B,X)); \ if( stf_local_r ) \ STF_PASS( "Expecting: " << STF_STRING(A) " == " STF_STRING(B) << " within " << X << " %."); \ else \ STF_FAIL( "Expecting: " << STF_STRING(A) " == " STF_STRING(B) \ << " within " << X << " % " \ << "but found: " << stf_local_r.rhs \ << " % instead." \ ); \ } while( ::stf::is_false() ) \ #define STF_ALL_EQUAL(A,B) STF_ALL_RELATIVE_EQUAL(A,B,0) #define STF_EQUAL( A, B ) STF_EXPECT( (A) == (B) ) #define STF_NOT_EQUAL( A, B ) STF_EXPECT( (A) != (B) ) #define STF_LESS(A,B) STF_EXPECT( (A) < (B) ) #define STF_GREATER(A,B) STF_EXPECT( (A) > (B) ) #define STF_LESS_EQUAL(A,B) STF_EXPECT( (A) <= (B) ) #define STF_GREATER_EQUAL(A,B) STF_EXPECT( (A) >= (B) ) #include <type_traits> #include <boost/core/demangle.hpp> #include <type_traits> #include <typeinfo> #include <string> namespace stf { template<typename T> inline std::string type_id() { typedef std::is_const<typename std::remove_reference<T>::type> const_t; typedef std::is_lvalue_reference<T> lref_t; typedef std::is_rvalue_reference<T> rref_t; std::string s = boost::core::demangle(typeid(T).name()); s += const_t::value ? " const" : ""; s += lref_t::value ? "&" : ""; s += rref_t::value ? 
"&&" : ""; return s; } template<typename T> inline std::string type_id( const T& ) { return type_id<T>(); } } #include <boost/preprocessor/punctuation/remove_parens.hpp> #include <boost/core/ignore_unused.hpp> #include <boost/mpl/apply.hpp> #define STF_TYPE_IS(T, Type) \ do \ { \ volatile bool stf_local_b = std::is_same< BOOST_PP_REMOVE_PARENS(Type) \ , BOOST_PP_REMOVE_PARENS(T) \ >::value; \ if( stf_local_b ) \ STF_PASS ( "Expecting " << STF_STRING(BOOST_PP_REMOVE_PARENS(T)) \ << " == " << stf::type_id<BOOST_PP_REMOVE_PARENS(Type)>() \ ); \ else \ STF_FAIL ( "Expecting " << STF_STRING(BOOST_PP_REMOVE_PARENS(T)) \ << " == " << stf::type_id<BOOST_PP_REMOVE_PARENS(Type)>() \ << " found " << stf::type_id<BOOST_PP_REMOVE_PARENS(T)>() \ << " instead" \ ); \ } while( ::stf::is_false() ) \ #define STF_EXPR_IS(Expression, Type) \ STF_TYPE_IS(decltype( BOOST_PP_REMOVE_PARENS(Expression)), Type) \ #define STF_EXPR_TYPE(Expression, Lambda, Type) \ do \ { \ using other = boost::mpl::apply < BOOST_PP_REMOVE_PARENS(Lambda) \ , decltype(BOOST_PP_REMOVE_PARENS(Expression)) \ >::type; \ \ volatile bool stf_local_b = std::is_same<BOOST_PP_REMOVE_PARENS(Type), other>::value; \ if( stf_local_b ) \ STF_PASS ( "Expecting " \ << STF_STRING(BOOST_PP_REMOVE_PARENS(Lambda)) \ << " applied on " \ << stf::type_id(BOOST_PP_REMOVE_PARENS(Expression)) \ << " to be " \ << stf::type_id<BOOST_PP_REMOVE_PARENS(Type)>() \ ); \ else \ STF_FAIL( "Expecting " \ << STF_STRING(BOOST_PP_REMOVE_PARENS(Lambda)) \ << " applied on " \ << stf::type_id(BOOST_PP_REMOVE_PARENS(Expression)) \ << " to be " \ << stf::type_id<BOOST_PP_REMOVE_PARENS(Type)>() \ << " but found " << stf::type_id<other>() << " instead" \ ); \ } while( ::stf::is_false() ) \ namespace stf { namespace unit { #if defined(DOXYGEN_ONLY) #define STF_USE_CUSTOM_DRIVER #endif } namespace detail { } } #endif
/**
 * Synchronize with the receiver. Blocks until finished.
 */
public void synchronize() throws InterruptedException {
    final StandardWorkGroup workGroup = new StandardWorkGroup("sending-synchronizer");
    final AsyncInputStream asyncIn = new AsyncInputStream(in, workGroup);
    final AsyncOutputStream asyncOut = getAsyncOutputStream(out, workGroup);

    prepareToSend(asyncIn, root);

    workGroup.execute("receiving-thread", () -> receivingThread(asyncIn));
    workGroup.execute("sending-thread", () -> sendingThread(asyncIn, asyncOut));

    workGroup.waitForTermination();

    if (workGroup.hasExceptions()) {
        workGroup.logAllExceptions(log, marker, Level.ERROR);
        throw new MerkleSynchronizationException("Synchronization failed with exceptions");
    }
}
package region

import (
	"context"
	"math"

	"github.com/ironarachne/world/pkg/geometry"
	"github.com/ironarachne/world/pkg/random"
)

// Region is a geographic area.
type Region struct {
	Description               string `json:"description"`
	Altitude                  int    `json:"altitude"`    // -99-99, 0 is sea level
	Humidity                  int    `json:"humidity"`    // 0-99
	Temperature               int    `json:"temperature"` // 0-99
	NearestOceanDistance      int    `json:"nearest_ocean_distance"`
	NearestOceanDirection     int    `json:"nearest_ocean_direction"`
	NearestMountainsDistance  int    `json:"nearest_mountains_distance"`
	NearestMountainsDirection int    `json:"nearest_mountains_direction"`
	DistanceToEquator         int    `json:"distance_to_equator"` // 0 is on equator, -99 is south pole, 99 is north pole
}

// Generate procedurally generates a random region.
func Generate(ctx context.Context) Region {
	region := RandomTemperate(ctx)

	return region
}

// GenerateSpecific generates a region based on specific characteristics
func GenerateSpecific(ctx context.Context, temperature int, humidity int, altitude int, distance int) Region {
	region := Region{}

	region.DistanceToEquator = distance
	region.Temperature = temperature
	region.Humidity = humidity
	region.Altitude = altitude

	// TODO: Replace the following with real data gleaned from the world
	region.NearestOceanDistance = random.Intn(ctx, 100)
	region.NearestOceanDirection = geometry.RandomDirection(ctx)
	region.NearestMountainsDirection = geometry.OppositeDirection(region.NearestOceanDirection)
	region.NearestMountainsDistance = random.Intn(ctx, 100)

	region.Description = region.Describe()

	return region
}

// RandomTemperate returns a random region that is appropriate for life
func RandomTemperate(ctx context.Context) Region {
	region := Region{}

	region.DistanceToEquator = random.Intn(ctx, 100) - 50
	region.Altitude = random.Intn(ctx, 50) + 10
	region.NearestOceanDistance = random.Intn(ctx, 100)
	region.NearestOceanDirection = geometry.RandomDirection(ctx)
	region.NearestMountainsDirection = geometry.OppositeDirection(region.NearestOceanDirection)
	region.NearestMountainsDistance = random.Intn(ctx, 100)
	region.Temperature = GetTemperature(region.DistanceToEquator, region.Altitude)
	region.Humidity = GetHumidity(region.Altitude, region.NearestOceanDistance)

	region.Description = region.Describe()

	return region
}

// GetHumidity calculates a region's humidity based on its altitude and its distance from the nearest ocean
func GetHumidity(altitude int, oceanDistance int) int {
	if oceanDistance == 0 {
		return 100
	}

	humidity := 100 - (altitude / 2) - (oceanDistance / 2)

	if humidity > 100 {
		humidity = 100
	}

	if humidity < 0 {
		humidity = 0
	}

	return humidity
}

// GetTemperature calculates a temperature for a region given its distance from the equator and its altitude
func GetTemperature(distanceToEquator int, altitude int) int {
	temperature := 100 - int(math.Abs(float64(distanceToEquator))) - (altitude / 2)

	if temperature < 0 {
		temperature = 0
	}

	if temperature > 99 {
		temperature = 99
	}

	return temperature
}
Clinical and demographic correlates of medication and visit adherence in a large randomized controlled trial Background Patient characteristics are associated with adherence, which has implications for planning clinical research or designing payment systems that reward superior outcomes. It is unclear to what extent clinician efforts to improve adherence can attenuate these associations. Methods To identify factors predicting visit and medication adherence in settings designed to optimize adherence, we did a retrospective analysis of participants in the Antihypertensive and Lipid-Lowering Treatment to Prevent Heart Attack Trial (ALLHAT). ALLHAT recruited participants at 632 sites in North America, Puerto Rico, and the U.S. Virgin Islands for random assignment to antihypertensive treatment with amlodipine, chlorthalidone, or lisinopril. Site investigators reported clinic characteristics at the time they applied to participate in the study and research coordinators used standardized methods to measure patient characteristics. We defined adequate visit adherence as attending at least 80 % of scheduled visits; adequate medication adherence was defined as taking 80 % or more of the randomly assigned medication at all study visits. Results The 31,250 ALLHAT participants eligible for the visit adherence analysis attended 78.5 % of scheduled study visits; 68.9 % attended more than 80 % of scheduled visits. Clinic setting was predictive of both forms of adherence; adherence was worst at private clinics; clinics that enrolled more study participants had superior adherence. Adjusting for clinic characteristics and clinical factors, women, younger participants, Blacks and smokers were less likely to have adequate visit adherence. Among the 28,967 participants eligible for the medication adherence analysis, 21,261 (73.4 %) reported adequate medication adherence. In adjusted analyses, younger and less educated participants, Blacks, and smokers were less likely to report adequate adherence. Conclusions Participant demographics were associated with adherence despite strenuous efforts to optimize adherence. Our results could inform decisions by researchers planning trials and policymakers designing payment systems. Trial registration NCT00000542. Registered 27 October 1999. Electronic supplementary material The online version of this article (doi:10.1186/s12913-016-1471-x) contains supplementary material, which is available to authorized users. Background Adherence is defined as the extent to which a person's behavior coincides with medical or health advice. This can refer to such diverse behaviors as personal habits (e.g., dietary changes) , attendance at scheduled visits (visit adherence) , and the extent to which patients take medication as prescribed . Poor medication adherence has been associated with worse blood pressure control, worse clinical outcomes, and increased health care costs . Proposals to link payment to such measures of quality as hypertension control have increased attention to mechanisms to enhance adherence. As participant adherence is considered an essential component of high-quality randomized clinical trials (RCT), efforts to ensure adherence have long been incorporated in RCT design . Extensive literature has examined factors associated with adherence in clinical practice. Medication adherence has been linked to gender, age, race, and ability to pay , as well as clinical factors such as the specific drug, dosing schedule, duration of therapy, and indication for therapy . 
However, many of these studies had serious flaws. Studies using administrative data often lacked detailed clinical information, in particular the indication for which the drug was prescribed. On the other hand, most studies that have used clinical data were smaller and performed in one or a few clinical settings, limiting generalizability. Finally, most studies were carried out in settings that did not have explicit strategies to enhance adherence, raising the question of whether greater use of these strategies could attenuate the association of participant characteristics with adherence. Policymakers need to consider patient characteristics associated with poor adherence as they design payment systems that reward better outcomes; providers caring for populations with higher levels of poor adherence will be at risk for worse outcomes and therefore lower payments. Similarly, anticipating the likely level of adherence, particularly in population subgroups, is important for RCT design and conduct, as poor adherence to a therapy during a trial could obscure evidence of its efficacy. The evidence for a link between adherence and efficacy is well established for antihypertensive drug therapy . Therefore, we examined predictors of visit and medication adherence among individuals with hypertension who participated in a large RCT that used state-of-the-art methods to facilitate adherence. Methods We performed secondary analysis of participants in the Antihypertensive and Lipid-Lowering Treatment to Prevent Heart Attack Trial (ALLHAT) . ALLHAT was a randomized trial of participants with hypertension who were aged 55 or older and at high risk for coronary artery disease events. The study explicitly selected sites able to enroll a diverse study population. Between February 1994 and May 1998, 632 clinics in the United States, Canada, Puerto Rico and the U.S. Virgin Islands recruited participants using mass mailings, media presentations, chart review, and word of mouth. The ALLHAT protocol was approved by the University of Texas Health Science Center at Houston Institutional Review Board (IRB) and all participants recruited into the study provided written informed consent. Each clinic received approval from its site IRB. For most participants, the study site was their primary care clinic. Participants received study drugs at no cost. Follow-up visits were scheduled at intervals established by study protocol; visit costs were not reimbursed. Key individuals at study clinics received ongoing feedback from regional coordinating centers regarding visit adherence and blood pressure control; they also received education regarding strategies to improve visit and medication adherence in regular conference calls, by written communication, and at annual investigator meetings; sites with exceptional performance were recognized at these meetings. Participants were randomly assigned to 1 of 4 antihypertensive study drugsamlodipine, chlorthalidone, lisinopril, or doxazosin. The study drug dose was adjusted regularly to achieve blood pressure control. Participants and providers were blinded to the identity of this drug. If blood pressure was uncontrolled on the study drug alone, additional antihypertensive drugs were added, following a study protocol. In the present analysis, we focus on adherence to the randomly assigned antihypertensive drug. We included only participants who had at least five expected visits (i.e., 1 year of follow-up). 
We reasoned that the adherence of participants who did not complete a year in the study would not be representative of long term adherence. We also excluded participants randomized to receive doxazosin, as this arm was stopped early due to an excess of cardiovascular disease events and futility compared with chlorthalidone . Dependent variables Visit adherence The coordinating center maintained detailed records of attendance at follow-up visits scheduled at 1, 3, 6, 9, and 12 months following randomization; and every 4 months thereafter for up to 96 months. For most visits, there was a 4-month window during which a visit could occur and be considered adherent. During the first year, these windows were smallere.g., the 1 month visit had to occur between the day after randomization and 2 months following randomization. Although the maximum number of scheduled visits for a participant was 26, many participants were expected to have fewer visits due to death or later enrollment. To calculate a measure of visit adherence, we first divided the number of visits that were completed by the number of visits possible with perfect adherence. A dichotomous visit adherence variable was created for the primary analysis, with adherence considered to be poor if this proportion was <0.80 and adequate otherwise. Medication adherence At each follow-up visit, participants reported whether they had taken at least 80 % of their randomly assigned medication, a commonly used threshold for adequate compliance . We analyzed medication adherence for participants who had attended at least five visits in which they were expected to be taking their randomized medication. We classified participants' medication adherence as poor if they reported taking <80 % of their step 1 medication at any visit and as adequate otherwise. Predictor variables When each site applied to participate in ALLHAT, the site investigator classified the clinical setting as private practice, group practice, staff model health maintenance organization (HMO), community health center, university, Veterans Affairs Medical Center (VAMC), or other. We characterized clinics by the number of ALLHAT patients they enrolled, then divided participants into ten equal groups, based on the enrollment volume of their clinic; participants in the first decile attended clinics enrolling 1 to 33 participants, those in the tenth, clinics enrolling 492-607. At the time of randomization, study coordinators collected data elements that we hypothesized might be associated with adherence. These included age, gender, race, ethnicity and education, as well as several clinical variables. We categorized age as 55-64, 65-74, and >74 years of age. In sensitivity analyses, we repeated the analysis, first treating age as a continuous variable and then using 5 year cutpoints; results were similar, so we report only the original categorization. We categorized self-reported years of education as < 12 years, 12 years, or > 12 years. At the time of randomization, the site coordinator categorized each participant's self-reported race as White, Black, American Indian/Alaskan Native, Asian/Pacific Islander, or other,. In addition, the coordinator asked whether the participant was of Hispanic origin; response options were yes, no, and don't know. 
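As a concrete illustration of the two dichotomous outcomes defined above, the short Python sketch below reproduces the classification rules (the proportion of expected visits attended, and the self-report of taking at least 80 % of the randomized drug at every visit). It is a minimal sketch under the stated definitions, not ALLHAT analysis code; the function names and example numbers are invented for illustration.

def visit_adherence_adequate(completed_visits, expected_visits):
    """Adequate visit adherence: attended at least 80% of the visits expected."""
    proportion = completed_visits / expected_visits
    return proportion >= 0.80

def medication_adherence_adequate(visit_reports):
    """Adequate medication adherence: reported taking >= 80% of the randomized
    drug at every attended visit; a single report below 80% makes adherence poor."""
    return all(visit_reports)

# Hypothetical examples (not study data):
print(visit_adherence_adequate(18, 24))                    # 18/24 = 0.75 -> False (poor)
print(medication_adherence_adequate([True, True, False]))  # one visit below 80% -> False (poor)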
Because some prior studies have found that Black race and Hispanic ethnicity are associated with lower adherence, we analyzed Blacks as a single group, regardless of Hispanic origin and divided non-Blacks into those who self-identified as Hispanic and those who did not. We measured overall baseline health with the question, "In general, would you say your health is excellent, very good, good, fair, or poor?" We used medical record review at the time of randomization to establish the presence or absence of diabetes mellitus, HDL cholesterol < 35 mg/dl, subclinical atherosclerotic cardiovascular disease, and atherosclerotic cardiovascular disease. We categorized a participant as possibly disabled if they were <65 years old and had Medicare insurance. We also examined baseline characteristics that suggested more or less concern about health issues, including cigarette smoking (classified as never, past, or current), daily aspirin use, and obesity (body mass index ≥ 30 kg/m 2 ). Finally, we considered how long participants had been treated for their hypertension, a surrogate for when the condition had been recognized. Statistical analysis For our primary analyses we first compared the proportion of participants with adequate visit and medication adherence across categories of each predictor variable using chi-square tests or Fisher's exact test, as appropriate. We then used multivariable logistic regression to identify baseline characteristics independently associated with adherence. For each type of adherence, we entered all baseline variables into the regression model as covariates, then performed stepwise backward variable selection (probability 0.10 to remove a variable and 0.05 for reentry). We confirmed goodness of fit using the Pearson chi-square test. In a secondary analysis of visit adherence, we used the continuous form of visit adherence (proportion of expected visits attended) after an arcsine (square root) transformation for its non-normal distribution. In this analysis, our multivariable analysis used linear regression. We did not perform a similar analysis of a continuous measure of medication adherence, as this variable was so non-normally distributed. In sensitivity analyses, we excluded visits after a participant developed cancer (except non-melanoma skin cancer) or end stage renal disease (ESRD) or had a new cardiovascular disease event, including stroke, myocardial infarction, coronary artery revascularization, angina, congestive heart failure, or peripheral vascular disease, because it is possible that patterns of adherence would change after such events. We used STATA version 12.0 (StataCorp LP, College Station, TX) for all statistical analyses. Results The ALLHAT study achieved excellent ethnic and gender diversity (Table 1). While 40 % of participants were enrolled from the southern United States, the population was also geographically diverse; Puerto Rico and the Virgin Islands together contributed 13 % of enrollees. less clinical cardiovascular disease, but were no less likely to have diabetes mellitus and were more likely to have HDL cholesterol < 35 mg/dl. Our measures of health awareness suggested they were more attentive to their health; they were more likely to have been on hypertension treatment for more than 2 months, take aspirin, have quit smoking, and be non-obese. They were more likely to be treated in the US or Canada, in a VAMC, HMO or group practice site, at a clinic that enrolled more participants, and not be randomized to receive lisinopril. 
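For the secondary analysis of visit adherence as a continuous outcome, the arcsine (square root) transformation described in the statistical analysis above can be sketched in a few lines of Python; the values are invented and the snippet is only illustrative, since the actual analyses were run in STATA 12.0 with stepwise backward variable selection.

import numpy as np

# Hypothetical proportions of expected visits attended (not ALLHAT data).
proportions = np.array([0.95, 0.80, 0.60, 1.00, 0.45])

# Arcsine square-root (angular) transform, used to reduce the non-normality
# of proportion data before fitting the linear regression model.
transformed = np.arcsin(np.sqrt(proportions))
print(transformed)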
In multivariable analysis, the demographic factors associated with adequate adherence tended to remain significant (Table 2), except educational level became nonsignificant and both past-and never-smokers were more adherent. Among clinical factors, better self-reported health, subclinical cardiovascular disease, and low HDLbut not diabetes -were associated with adequate adherence. Factors we considered to represent health awareness, with the exception of obesity, continued to be associated with adherence. The clinic site factors associated with adherence changed after adjustment for demographic and clinical factors. While clinic enrollment volume continued to be associated with better adherence, geographic location was no longer significant; after adjustment, group practice, university, and VAMC clinics were associated with better adherence and HMO clinics with worse visit adherence. Medication adherence There were 30,364 ALLHAT participants who attended five or more visits where they were still taking their randomly assigned medication. In Table 1 we compare the baseline characteristics of the 21,897 (72.1 %) who reported taking 80 % or more of their randomized medication at each of their study visits to those who reported taking less than 80 % at 1 or more visits. The pattern of bivariable comparisons are similar to those we found for visit adherence, except that younger participants were more, rather than less, likely to be medically adherent, and diabetes mellitus was not associated with medical adherence. In multivariable analysis, in contrast to visit adherence, age was inversely associated with adequate medication adherence, while gender was not significant. Non-Hispanic non-Blacks had better medication and visit adherence than Blacks or Hispanic non-Blacks. The impacts of randomization to lisinopril treatment group and educational status were small but still significant. The impact of clinical factors was generally less than with visit adherence, and the directionality was often different. Thus, individuals with subclinical cardiovascular disease were less likely to be medically adherent, the impact of self-reported health status was small, though significant, and diabetes mellitus was not significant. Our measures of health awareness were associated with medical adherence in the same pattern as with visit adherence: nonsmoking, aspirin use, and prior treatment of hypertension predicted better adherence but obesity had no effect. As with visit adherence, higher clinic enrollment volume was associated with better medication adherence, but other associations with clinic type were markedly different (Table 3). Visit adherence was not associated with medication adherence. Sensitivity analyses The predictors of visit adherence were unchanged when visit adherence was treated as a continuous variable (see Additional file 1: Table S1, which shows baseline characteristics by continuous visit adherence). The factors associated with adherence were also similar when we repeated our analysis excluding visits after a participant developed ESRD or cancer, or suffered a cardiovascular disease event, except randomization to lisinopril was not associated with medication adherence and diabetes mellitus was not associated with visit adherence (see Additional file 1: Tables S2-S4, which show the unadjusted and adjusted predictors of adherence with truncation after these events). 
Discussion In this large diverse cohort with structured follow-up for hypertension at clinics that received audit and feedback similar to current "best practices" for chronic disease management , we found that clinical and demographic participant characteristics remained associated with both visit and medication adherence. While these associations are not large enough to aid clinical decisionmaking for individual patients, they could significantly affect reimbursement in systems that reward improved adherence. Similarly, these differences could have important implications for designing and powering RCTs. Moreover, while clinic characteristics impact both medication and visit adherence, the ability of larger, more structured clinics (e.g., VAMC, university, or group practices) to achieve better visit adherence did not translate into improved medication adherence. Indeed, better visit adherence was not associated with improved medication adherence at the participant level, suggesting that closer follow-up alone may not improve medication adherence. The patterns of medication adherence that we observed are consistent with patterns of adherence seen in settings with fewer resources to support adherence. Most, but not all, studies have found lower adherence among Blacks and Hispanic non-Blacks and persons with less education . The association of adherence with age and gender has been less consistent across studies . Although the literature is not as extensive, demographic characteristics have also been associated with visit adherence . In contrast to prior non-randomized studies suggesting better medication adherence among persons receiving angiotensin-converting enzyme inhibitors, we observed decreased medication and visit adherence among participants randomized to lisinopril . However, this association lost significance when we excluded data following clinical events that may have decreased adherence. We note that the lisinopril group had higher rates of strokes, combined cardiovascular disease, and heart failure , which may have affected adherence. Further studies should confirm these findings in other settings. We acknowledge that our findings are most easily generalized to other RCTs. Individuals who participate in RCTs are known to be more adherent than the general population . Similarly, the clinics which participate in randomized trials may systematically differ from the universe of clinics. These factors suggest we may have overestimated adherence. It is also possible the effects of participant variables are attenuated, since these participants may be more uniformly interested in health, causing factors such as race, age, and gender to have less impact on adherence. The same might apply to clinicsthe federally-qualified health clinics and VAMCs that participated in this study, for example, may be more like one another than federally-qualified health clinics and VAMCs in general. Thus, the odds ratios we found may be conservative estimates of the impact of patient and clinic characteristics on adherence. Similarly, since ALLHAT began in 1994, with the last closeout visits happening in 2002, it is possible that factors influencing adherence have changed since that time, although the techniques used to support adherence in ALLHAT continue to be viewed as best practices . We acknowledge this may affect extrapolation of our results to current practice. 
We also note that we studied adherence in the setting of hypertension, a chronic disease, using drugs from just three classes; our findings may not apply to other disease/medication dyads . Moreover, medication adherence may have been affected by the fact that doctor and participant were unaware of which medication they were using. Finally, our choice of measures of visit and medication adherence influenced our results. Our generous 4 month window for considering a visit "adherent" likely inflates our visit adherence compared to other reports. Our single item self-report measure of medication adherence might give different results than analyses based on more complex self-report measures , measures of medication refill behavior , or electronic pill counters . Despite these limitations, we believe the strengths of the study make the results noteworthy for clinical practice and policy. We studied adherence in a large, diverse population treated in a wide variety of clinical settings, using standardized measures of visit and medication adherence. Participants and sites used the same drugs on the same schedule for the same indication and received similar support for medication and visit adherencepatients received regular monitoring, reminder contacts, and free prescriptions; clinics received audit and feedback, typically had dedicated staff and were recognized for high performance. All these factors reduce variation due to local clinic policies or clinician choices. Conclusions We believe our findings suggest three considerations regarding adherence that may apply to patient care. First, adherence is unpredictable. Although patient characteristics are significantly associated with adherence, they do not identify patient groups with uniformly adequate medication adherence. For example, while the odds of adequate medication adherence among non-Hispanic, non-Blacks were nearly 50 % higher than among Blacks, large majorities of both non-Hispanic, non-Blacks, and Blacks were adherent; significant minorities of both groups were not. Interventions targeting patients at particular risk of poor adherence ignore this fact. Second, patients who come to visits more regularly should not be assumed to take their medications routinely; in our analysis, visit adherence did not predict medication adherence. Finally, poor adherence may persist even in well-run clinics with motivated patients. This suggests that we may still need new approaches to improving adherence. These results from the largest RCT of hypertensive therapy also confirm that study designers must attend to participant factors associated with adherence to ensure adequate power to detect significant differences, particularly in important subgroups. On the policy level, if our results can be extrapolated to current practice, then they suggest that systematic variation in adherence by patient characteristics will persist despite vigorous efforts to support adherence. This would buttress prior concerns that clinics serving certain populations will be disadvantaged if financial rewards or penalties are distributed based on adherence . Additional file Additional file 1: It contains 4 supplementary tables entitled, respectively: Table S1. Baseline Characteristics by Continuous Visit Adherence*. Table S2. Baseline Characteristics by Medication and Visit Adherence, Truncated after Event. Table S3. Independent Effect of Baseline Characteristics on Visit Adherence, Truncated after Event. Table S4. 
Independent Effect of Baseline Characteristics on Medication Adherence, Truncated after Event. These tables contain additional detail regarding the study results. Table S1 provides an alternative analysis of the bivariable association between participant characteristics and visit adherence. Tables S2-S4 present the results of sensitivity analyses. (DOCX 39 kb)
// GetPayload returns value of Payload field.
func (i *InputMessageInvoice) GetPayload() (value []byte) {
	if i == nil {
		return
	}
	return i.Payload
}
#include <stdio.h>

int main(void) {
    struct Test {
        unsigned int a:1; // can only hold 0 or 1
        unsigned int b:1;
        unsigned int c:2; // a 2-bit field, can only hold 0 to 3
    };

    struct Test test = {0, 1, 3};
    printf("a= %d b= %d c= %d \n", test.a, test.b, test.c);

    return 0;
}
//-------------------------------------------------------------------
// print build log
//
//  program: program object
//  device:  device id
//
void ClUtils::printBuildLog(const cl_program program, const cl_device_id device) const
{
    cl_int status;
    size_t size_ret;
    char   buffer[4096];

    status = clGetProgramBuildInfo(program, device, CL_PROGRAM_BUILD_LOG,
                                   sizeof(buffer) - 1, buffer, &size_ret);
    if(status != CL_SUCCESS)
    {
        printf("clGetProgramBuildInfo failed.\n");
        printError(status);
    }
    else
    {
        buffer[size_ret] = '\0';
        puts("--------------- build log --------------");
        printf("%s\n", buffer);
        puts("----------- end of build log -----------");
    }
}
/**
 * Will set the fields marked with @ColumnName by querying the database
 *
 * @param t
 */
public void populateExternals(T t) {
    List<SqlFieldExternal> externals = mapperSettings.getExternals();
    if (externals != null)
        externals.forEach(sqlFieldExternal -> {
            Populate<T> populate = populatorsMap.get(sqlFieldExternal.getPopulateStrategy());
            if (populate == null)
                throw new DataMapperException("The annotation ColumnName didn't follow the rules");
            populate.execute(t, sqlFieldExternal);
        });
}
import * as fs from 'fs'; import * as path from 'path'; export type MiniArticle = { title: string; date: string; tags: string[]; id: string; dotDate: string; } export type Article = MiniArticle & { main?: string; } const DIRNAME = process.env.NODE_ENV === 'production' ? 'prod' : 'dev'; const articleDir = (() => { if (!fs.existsSync(path.join(process.cwd(), 'articles', DIRNAME))) { return path.join(process.cwd(), 'articles', 'dev'); } return path.join(process.cwd(), 'articles', DIRNAME); })(); export const getArticles = (): Article[] => { const filelist = fs.readdirSync(articleDir); const articles = filelist.map((file) => { const fileWithoutExt = file.split(/\..+?$/)[0]; const metadata = fileWithoutExt.split('#'); const [y, m, d] = metadata[0].split('-'); const title = metadata[1]; const tags = metadata.slice(2); const main = fs.readFileSync(path.join(articleDir, file), 'utf-8'); return { date: `${y}年${m}月${d}日`, tags, title, main, sort: new Date(metadata[0]).getTime(), dotDate: `${y}.${m}.${d}`, id: metadata[0] + '_' + title, }; }).sort((a, b) => a.sort > b.sort ? -1 : 1).map((data, id): Article => ({ date: data.date, tags: data.tags, title: data.title, main: data.main, id: data.id, dotDate: data.dotDate, })); return articles; }; export const getArticle = (id: string): Article => { console.log(id); const targetFileName = id.split('_')[0] + '#' + id.split('_').slice(1).join('_'); const filelist = fs.readdirSync(articleDir); if (filelist.filter((x) => x.includes(targetFileName)).length === 0) { return null; } const targetFile = filelist.filter((x) => x.includes(targetFileName))[0]; const fileWithoutExt = targetFile.split(/\..+?$/)[0]; const metadata = fileWithoutExt.split('#'); const [y, m, d] = metadata[0].split('-'); const title = metadata[1]; const tags = metadata.slice(2); const main = fs.readFileSync(path.join(articleDir, targetFile), 'utf-8'); return { date: `${y}年${m}月${d}日`, tags, title, main, id: metadata[0] + '_' + title, dotDate: `${y}.${m}.${d}`, }; }; export const convertMiniArticle = (article: Article): MiniArticle => { return { date: article.date, tags: article.tags, title: article.title, id: article.id, dotDate: article.dotDate, }; }; export const convertShortArticle = (article: Article): MiniArticle => { return { date: article.date, tags: article.tags, title: article.title.length > 15 ? article.title.slice(0, 15) + '...' : article.title, id: article.id, dotDate: article.dotDate, }; };
Effects of direct nucleon-nucleon interactions in pion-condensed neutron star matter. We investigate the effects of direct nucleon-nucleon interactions in the ground state of pion-condensed neutron star matter within the sigma-model formalism. We use a realistic description of asymmetric nuclear matter. The divergence of the critical density as g_A approaches 1.0 is removed by including direct nucleon-nucleon forces. For the most realistic equations of state, we find a density interval with negative pressure from the critical density up to approximately 0.5 N/fm³, preventing a pion condensate below this density. The most realistic equations of state are considerably softer than β-stable neutron star matter. The equations of state show similar behavior for both kinds of chiral symmetry breaking investigated in this calculation. The composition of the matter depends to some extent on the chiral symmetry breaking term in the Hamiltonian. The indicated proton percentage in the allowed density region is higher than 30.
package oop.interfaces.caravan; public class House implements Habitable { private int numOfBedrooms; private int builtYear; public House(int numOfBedrooms, int builtYear) { this.numOfBedrooms = numOfBedrooms; this.builtYear = builtYear; } @Override public boolean canFit(int inhabitants) { return numOfBedrooms * 2 >= inhabitants; } }
from django import forms
from django.contrib.auth.models import User, Group
from dal import autocomplete

from .models import School, CotisationHistory, WhiteListHistory, BanishmentHistory
from preferences.models import PaymentMethod


class LoginForm(forms.Form):
    """
    Form to log in.
    """
    username = forms.CharField(max_length=255, label="Nom d'utilisateur")
    password = forms.CharField(max_length=255, widget=forms.PasswordInput, label="Mot de passe")


class CreateUserForm(forms.ModelForm):
    """
    Form to create a new user (:class:`django.contrib.auth.models.User`).
    """
    class Meta:
        model = User
        fields = ("username", "last_name", "first_name", "email")

    school = forms.ModelChoiceField(queryset=School.objects.all(), label="École")

    def __init__(self, *args, **kwargs):
        self.user = kwargs.pop('user', None)
        super(CreateUserForm, self).__init__(*args, **kwargs)

    def clean(self):
        cleaned_data = super().clean()
        email = cleaned_data.get("email")
        if User.objects.filter(email=email).count() > 0:
            if User.objects.filter(email=email).count() == 1 and User.objects.get(email=email) == self.user:
                pass
            else:
                raise forms.ValidationError("L'adresse email est déjà utilisée")
        return cleaned_data


class CreateGroupForm(forms.ModelForm):
    """
    Form to create a new group (:class:`django.contrib.auth.models.Group`).
    """
    class Meta:
        model = Group
        fields = ("name", )


class EditGroupForm(forms.ModelForm):
    """
    Form to edit a group (:class:`django.contrib.auth.models.Group`).
    """
    class Meta:
        model = Group
        fields = "__all__"


class SelectUserForm(forms.Form):
    """
    Form to select a user from all users (:class:`django.contrib.auth.models.User`).
    """
    user = forms.ModelChoiceField(queryset=User.objects.all(), required=True, label="Utilisateur", widget=autocomplete.ModelSelect2(url='users:all-users-autocomplete', attrs={'data-minimum-input-length':2}))


class SelectNonSuperUserForm(forms.Form):
    """
    Form to select a user from all non-superuser users (:class:`django.contrib.auth.models.User`).
    """
    user = forms.ModelChoiceField(queryset=User.objects.filter(is_active=True), required=True, label="Utilisateur", widget=autocomplete.ModelSelect2(url='users:non-super-users-autocomplete', attrs={'data-minimum-input-length':2}))


class SelectNonAdminUserForm(forms.Form):
    """
    Form to select a user from all non-staff users (:class:`django.contrib.auth.models.User`).
    """
    user = forms.ModelChoiceField(queryset=User.objects.filter(is_active=True), required=True, label="Utilisateur", widget=autocomplete.ModelSelect2(url='users:non-admin-users-autocomplete', attrs={'data-minimum-input-length':2}))


class GroupsEditForm(forms.ModelForm):
    """
    Form to edit a user's list of groups (:class:`django.contrib.auth.models.User` and :class:`django.contrib.auth.models.Group`).
    """
    class Meta:
        model = User
        fields = ("groups", )


class EditPasswordForm(forms.Form):
    """
    Form to change the password of a user (:class:`django.contrib.auth.models.User`).
""" password = forms.CharField(max_length=255, widget=forms.PasswordInput, label="Mot de passe actuel") password1 = forms.CharField(max_length=255, widget=forms.PasswordInput, label="Nouveau mot de passe") password2 = forms.CharField(max_length=255, widget=forms.PasswordInput, label="Nouveau mot de passe (répétez)") def clean_password2(self): """ Verify if the two new passwords are identical """ password1 = self.cleaned_data.get("password1") password2 = self.cleaned_data.get("password2") if password1 and password2 and password1 != password2: raise forms.ValidationError("Les mots de passe ne sont pas identiques") return password2 class addCotisationHistoryForm(forms.ModelForm): """ Form to add a :class:`users.models.CotisationHistory` to user (:class:`django.contrib.auth.models.User`). """ def __init__(self, *args, **kwargs): super(addCotisationHistoryForm, self).__init__(*args, **kwargs) self.fields['paymentMethod'].queryset = PaymentMethod.objects.filter(is_usable_in_cotisation=True).filter(is_active=True) class Meta: model = CotisationHistory fields = ("cotisation", "paymentMethod") class addWhiteListHistoryForm(forms.ModelForm): """ Form to add a :class:`users.models.WhiteListHistory` to user (:class:`django.contrib.auth.models.User`). """ class Meta: model = WhiteListHistory fields = ("duration", "reason") class addBanishmentHistoryForm(forms.ModelForm): """ Form to add a :class:`users.models.BanishmentHistory` to user (:class:`django.contrib.auth.models.User`). """ class Meta: model = BanishmentHistory fields = ("end_date", "reason") class SchoolForm(forms.ModelForm): """ Form to add and edit a :class:`users.models.School`. """ class Meta: model = School fields = "__all__" class ExportForm(forms.Form): """ Form to export list of users (:class:`django.contrib.auth.models.User`) to csv file """ QUERY_TYPE_CHOICES = ( ('all', 'Tous les comptes'), ('all_active', 'Tous les comptes actifs'), ('adherent', 'Tous les adhérents'), ('adherent_active', 'Tous les adhérents actifs') ) FIELDS_CHOICES = ( ('username', 'Nom d\'utilisateur'), ('last_name', 'Nom'), ('first_name', 'Prénom'), ('email', 'Adresse mail'), ('school', 'École'), ('balance', 'Solde'), ('credit', 'Crédit'), ('debit', 'Débit') ) query_type = forms.ChoiceField(choices=QUERY_TYPE_CHOICES, label="Ensemble de la demande") fields = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple, choices=FIELDS_CHOICES, label="Champs") group = forms.ModelChoiceField(queryset=Group.objects.all(), empty_label="Tous les groupes", required=False, label="Groupe")
// File: deprecated/software/SLAM/ygz_slam_ros/common/include/ygz/G2OTypes.h
#ifndef YGZ_G2OTYPES_H
#define YGZ_G2OTYPES_H

#include "ygz/NumTypes.h"
#include "ygz/Settings.h"
#include "ygz/IMUPreIntegration.h"
#include "ygz/Camera.h"

#include <g2o/core/base_vertex.h>
#include <g2o/core/base_unary_edge.h>
#include <g2o/core/base_multi_edge.h>
#include <g2o/core/base_binary_edge.h>
#include <g2o/types/slam3d/vertex_pointxyz.h>
#include <g2o/types/slam3d/edge_pointxyz.h>

using namespace Eigen;
using namespace g2o;

// g2o vertices and edges used in the optimization
namespace ygz {

    struct CameraParam;
    struct Frame;

    // ---------------------------------------------------------------------------------------------------------
    // Vertices

    /**
     * VIO pose vertex.
     * Parameterized as P+R with a right-multiplicative update; P comes first.
     */
    class VertexPR : public BaseVertex<6, Vector6d> {
    public:
        EIGEN_MAKE_ALIGNED_OPERATOR_NEW;

        VertexPR() : BaseVertex<6, Vector6d>() {}

        bool read(std::istream &is) override { return true; }

        bool write(std::ostream &os) const override { return true; }

        virtual void setToOriginImpl() override {
            _estimate.setZero();
        }

        virtual void oplusImpl(const double *update_) override {
            // P is added directly; R is updated by right multiplication
            _estimate.segment<3>(0) += Vector3d(update_[0], update_[1], update_[2]);
            _estimate.segment<3>(3) = SO3d::log(
                    SO3d::exp(_estimate.segment<3>(3)) *
                    SO3d::exp(Vector3d(update_[3], update_[4], update_[5])));
        }

        Matrix3d R() const {
            return SO3d::exp(_estimate.segment<3>(3)).matrix();
        }

        Vector3d t() const {
            return _estimate.head<3>();
        }
    };

    // TODO GPS vertex--------------------------------------------------
    typedef g2o::VertexPointXYZ VertexGPS;
    //------------------------------------------------------------------

    //TODO Attitude vertex
    ///typedef g2o::VertexPointXYZ VertexAtti;

    // Speed
    typedef g2o::VertexPointXYZ VertexSpeed;

    // Bias Acce
    typedef g2o::VertexPointXYZ VertexAcceBias;

    // Bias Gyro
    typedef g2o::VertexPointXYZ VertexGyrBias;

    /**
     * @brief The VertexGravityW class
     * Vertex for the gravity direction; what is estimated is the rotation applied to the gravity vector.
     */
    class VertexGravityW : public BaseVertex<2, Vector3d> {
    public:
        EIGEN_MAKE_ALIGNED_OPERATOR_NEW;

        VertexGravityW();

        bool read(std::istream &is) { return true; }

        bool write(std::ostream &os) const { return true; }

        virtual void setToOriginImpl() {
            _estimate = Vector3d(0, 0, setting::gravity);
        }

        virtual void oplusImpl(const double *update_) {
            _estimate = SO3d::exp(Vector3d(update_[0], update_[1], 0)) * _estimate;
        }
    };

    /**
     * Inverse-depth map point.
     * _estimate is the inverse depth.
     */
    class VertexPointInvDepth : public BaseVertex<1, double> {
    public:
        EIGEN_MAKE_ALIGNED_OPERATOR_NEW;

        VertexPointInvDepth() : BaseVertex<1, double>() {};

        bool read(std::istream &is) { return true; }

        bool write(std::ostream &os) const { return true; }

        virtual void setToOriginImpl() {
            _estimate = 1.0;
        }

        virtual void oplusImpl(const double *update) {
            _estimate += update[0];
        }
    };

    // ---------------------------------------------------------------------------------------------------------
    /**
     * Edges
     */

    /**
     * Edge of inverse depth prior for stereo-triangulated mappoints
     * Vertex: inverse depth map point
     *
     * Note: User should set the information matrix (inverse covariance) according to feature position uncertainty and baseline
     */
    class EdgeIDPPrior : public BaseUnaryEdge<1, double, VertexPointInvDepth> {
    public:
        EIGEN_MAKE_ALIGNED_OPERATOR_NEW

        EdgeIDPPrior() : BaseUnaryEdge<1, double, VertexPointInvDepth>() {}

        bool read(std::istream &is) override { return true; }

        bool write(std::ostream &os) const override { return true; }

        virtual void computeError() override;

        virtual void linearizeOplus() override;
    };

    /**
     * Edge of reprojection error in one frame.
     * Vertex 0: inverse depth map point
     * Vertex 1: reference KF PR
     * Vertex 2: current frame PR
     * Vertex 3: extrinsic pose Tbc (or Tcb)
     */
    class EdgePRIDP : public BaseMultiEdge<2, Vector2d> {
    public:
        EIGEN_MAKE_ALIGNED_OPERATOR_NEW;

        // give the normalized x, y and camera intrinsics
        EdgePRIDP(double x, double y, CameraParam *cam) : BaseMultiEdge<2, Vector2d>() {
            resize(4);
            this->x = x;
            this->y = y;
            this->mpCam = cam;
        }

        bool read(std::istream &is) override { return true; }

        bool write(std::ostream &os) const override { return true; }

        virtual void computeError() override;

        virtual void linearizeOplus() override;

        bool isDepthValid() {
            return dynamic_cast<const VertexPointInvDepth *>( _vertices[0])->estimate() > 0;
        }

    protected:
        // [x,y] in normalized image plane in reference KF
        double x = 0, y = 0;
        CameraParam *mpCam = nullptr;
    };

    /**
     * Reprojection error for an XYZ map point.
     * Vertex 0: PR (camera pose)
     * Vertex 1: XYZ point
     */
    class EdgePRXYZ : public BaseBinaryEdge<2, Vector2d, VertexPR, VertexPointXYZ> {
    public:
        EIGEN_MAKE_ALIGNED_OPERATOR_NEW;

        EdgePRXYZ(CameraParam *cam) : fx(cam->fx), fy(cam->fy), cx(cam->cx), cy(cam->cy) {}

        bool read(std::istream &is) override { return true; }

        bool write(std::ostream &os) const override { return true; }

        virtual void computeError() override;

        virtual void linearizeOplus() override;

        bool isDepthValid() {
            return depth > 0;
        }

    protected:
        double fx, fy, cx, cy;
        double depth = 0;
    };

    class EdgePRGPS : public BaseUnaryEdge<3, Vector3d, VertexPR> {
    public:
        EIGEN_MAKE_ALIGNED_OPERATOR_NEW;

        EdgePRGPS() : BaseUnaryEdge<3, Vector3d, VertexPR>() {
            //LOG(WARNING) << " EdgePRGPS instantiated" << endl;
        }

        bool read(std::istream &is) override { return true; }

        bool write(std::ostream &os) const override { return true; }

        virtual void computeError() override;

        // virtual void linearizeOplus() override;
    };

    typedef Vector3d SO3LieGroup;

    class EdgeAttitude : public BaseUnaryEdge<3, SO3LieGroup, VertexPR> {
    public:
        EIGEN_MAKE_ALIGNED_OPERATOR_NEW;

        EdgeAttitude() : BaseUnaryEdge<3, SO3LieGroup, VertexPR>() {
            //LOG(WARNING) << "EdgeAttitude instantiated!" << endl;
        }

        bool read(std::istream &is) override { return true; }

        bool write(std::ostream &os) const override { return true; }

        virtual void computeError() override;

        virtual void linearizeOplus() override;
    };

    /**
     * The pre-integration IMU motion constraint.
     * Connects 6 vertices: PR0, PR1, V0, V1, biasG0, biasA0
     * Vertex 0: PR0
     * Vertex 1: PR1
     * Vertex 2: V0
     * Vertex 3: V1
     * Vertex 4: biasG0
     * Vertex 5: biasA0
     * Error order: error_P, error_R, error_V
     * different from PVR edge
     */
    // important: this edge merges the IMU preintegration and the image PR estimation.
    // BaseMultiEdge<D (dim of E), E (measurement)>: multiple measurements, pushed into a vector of edges.
    class EdgePRV : public BaseMultiEdge<9, IMUPreIntegration> {
    public:
        EIGEN_MAKE_ALIGNED_OPERATOR_NEW;

        EdgePRV(const Vector3d &gw) : BaseMultiEdge<9, IMUPreIntegration>(), GravityVec(gw) {
            resize(6);
        }

        bool read(std::istream &is) override { return true; }

        bool write(std::ostream &os) const override { return true; }

        virtual void computeError() override;

        virtual void linearizeOplus() override;

    protected:
        // Gravity vector in 'world' frame
        Vector3d GravityVec;
    };

    /**
     * Random walk on the gyroscope bias BiasG.
     * Order: Bgi of frame i, Bgj of frame j
     * Error = Bgj - Bgi
     */
    typedef g2o::EdgePointXYZ EdgeBiasG;

    /**
     * Accelerometer bias BiasA, essentially the same as Bg.
     * Order: Bai of frame i, Baj of frame j
     * Error = Baj - Bai
     */
    typedef g2o::EdgePointXYZ EdgeBiasA;

    /**
     * Projection equation used to estimate P+R.
     * Requires Rcb, tcb, the camera model and the projected point.
     * Rcb and tcb are not optimized here, so they are not taken from the settings.
     */
    class EdgeProjectPoseOnly : public BaseUnaryEdge<2, Vector2d, VertexPR> {
    public:
        EIGEN_MAKE_ALIGNED_OPERATOR_NEW;

        EdgeProjectPoseOnly(const CameraParam *cam, const Vector3d &pw_)
                : BaseUnaryEdge<2, Vector2d, VertexPR>(), pw(pw_) {
            fx = cam->fx;
            fy = cam->fy;
            cx = cam->cx;
            cy = cam->cy;
        }

        bool read(std::istream &is) override { return true; }

        bool write(std::ostream &os) const override { return true; }

        virtual void computeError() override;

        virtual void linearizeOplus() override;

    private:
        double fx = 0, fy = 0, cx = 0, cy = 0;
        Vector3d pw;  // world 3d position
    };

    /**
     * @brief The EdgeGyrBias class
     * For gyroscope bias computation in Visual-Inertial initialization
     */
    class EdgeGyrBias : public BaseUnaryEdge<3, Vector3d, VertexGyrBias> {
    public:
        EIGEN_MAKE_ALIGNED_OPERATOR_NEW;

        EdgeGyrBias() : BaseUnaryEdge<3, Vector3d, VertexGyrBias>() {}

        bool read(std::istream &is) override { return true; }

        bool write(std::ostream &os) const override { return true; }

        Matrix3d dRbij;
        Matrix3d J_dR_bg;
        Matrix3d Rwbi;
        Matrix3d Rwbj;

        void computeError() override {
            const VertexGyrBias *v = static_cast<const VertexGyrBias *>(_vertices[0]);
            Vector3d bg = v->estimate();
            Matrix3d dRbg = SO3d::exp(J_dR_bg * bg).matrix();
            SO3d errR((dRbij * dRbg).transpose() * Rwbi.transpose() * Rwbj); // dRij^T * Riw * Rwj
            _error = errR.log();
        }

        virtual void linearizeOplus() override {
            SO3d errR(dRbij.transpose() * Rwbi.transpose() * Rwbj); // dRij^T * Riw * Rwj
            Matrix3d Jlinv = SO3d::JacobianLInv(errR.log());
            _jacobianOplusXi = -Jlinv * J_dR_bg;
        }
    };
}

#endif
package memory import ( "context" "strings" "github.com/ViBiOh/auth/v2/pkg/ident" "github.com/ViBiOh/auth/v2/pkg/model" "golang.org/x/crypto/bcrypt" ) // Login checks given credentials func (a App) Login(_ context.Context, login, password string) (model.User, error) { user, ok := a.ident[login] if !ok { return model.User{}, ident.ErrInvalidCredentials } if err := bcrypt.CompareHashAndPassword(user.password, []byte(password)); err != nil { return model.User{}, ident.ErrInvalidCredentials } return user.User, nil } // IsAuthorized checks user on profile func (a App) IsAuthorized(_ context.Context, user model.User, profile string) bool { profiles, ok := a.auth[user.ID] if !ok { return false } if len(profile) == 0 { return true } for _, listedProfile := range profiles { if strings.EqualFold(profile, listedProfile) { return true } } return false }
def swap(a,i,j): temp=a[i] a[i]=a[j] a[j]=temp t=int(input()) for q in range(t): n=int(input()) a=[int(i) for i in input().split()] a=sorted(a) rev_a=a[::-1] #print(a,rev_a) ans=[None]*2*n i=0 j=1 k=0 while i<n: ans[j]=rev_a[i] ans[k]=a[i] i+=1 j+=2 k+=2 print(*ans)
#ifndef _FUNCTIONFLOW_H_ #define _FUNCTIONFLOW_H_ #include <windows.h> #include <vector> using namespace std; // Struct for flags struct FUNCTION_FLOW { DWORD dwThreadId; BOOL bFlag; }; class FunctionFlow { // Vector of flags static vector<FUNCTION_FLOW*> vFlags; static CRITICAL_SECTION gCriticalSection; static bool gInitialized; public: static void Init(); // Functions static BOOL CheckFlag(); static void UnCheckFlag(); }; #endif
/** * Clear the GameView with the specified color. */ public void clear(int color) { checkCanvas(); paint.setColor(color); canvas.drawRect(0, 0, canvas.getWidth(), canvas.getHeight(), paint); }
// Callback method notifies that GAM native ad has been successfully loaded. @Override public void onAdReceived(@NonNull NativeAd nativeAd) { Log.d(TAG, "Native Ad Received"); renderNativeAd(nativeAd); }
/** Created by andrey on 08.08.17. */ public final class ThreadDump { private final Map<Thread, StackTraceElement[]> threadDumpData = Thread.getAllStackTraces(); @Override public final String toString() { final StringBuilder threadDumpStr = new StringBuilder(); final String lineSep = System.lineSeparator(); for(final Thread thread : threadDumpData.keySet()) { threadDumpStr .append(thread.getName()) .append(" (state: ") .append(thread.getState()) .append("):") .append(lineSep); final StackTraceElement[] threadStackTrace = threadDumpData.get(thread); for(final StackTraceElement ste : threadStackTrace) { threadDumpStr .append('\t') .append(ste) .append(lineSep); } } return threadDumpStr.toString(); } }
n = int(input())
a = [input() for _ in range(n)]
x1 = a[0][0]  # character expected on both diagonals
x2 = a[0][1]  # character expected everywhere else
ok = x1 != x2
if ok:
    for i in range(n):
        for j in range(n):
            on_diagonal = (i == j or i + j + 1 == n)
            expected = x1 if on_diagonal else x2
            if a[i][j] != expected:
                ok = False
                break
        if not ok:
            break
print("YES" if ok else "NO")
// Copyright 2020 TensorFlow Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "tensorflow/compiler/tf2xla/xla_tensor/strided_slice_helpers.h" #include "tensorflow/compiler/tf2xla/xla_tensor/debug_util.h" #include "tensorflow/compiler/tf2xla/xla_tensor/helpers.h" #include "tensorflow/compiler/tf2xla/shape_util.h" #include "tensorflow/core/common_runtime/dma_helper.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/util/strided_slice_op.h" namespace swift_xla { namespace { // Create a rank 1 tensor from a sequence of values. tensorflow::Tensor MakeRank1Tensor(absl::Span<const int64_t> values) { tensorflow::TensorShape tensor_shape; std::vector<int64_t> tensor_size; tensor_size.push_back(values.size()); tensorflow::Status status = tensorflow::TensorShapeUtils::MakeShape(tensor_size, &tensor_shape); XLA_CHECK_OK(status); tensorflow::Tensor t(tensorflow::DT_INT64, tensor_shape); void* dest = tensorflow::DMAHelper::base(&t); memcpy(dest, values.data(), values.size() * sizeof(values[0])); return t; } } // namespace StridedSliceSpec ComputeIndexingBoundsAndStrides( absl::Span<const int64_t> input_sizes, absl::Span<const int64_t> begin, absl::Span<const int64_t> end, absl::Span<const int64_t> strides, int32_t begin_mask, int32_t end_mask, int32_t ellipsis_mask, int32_t new_axis_mask, int32_t shrink_axis_mask) { tensorflow::Tensor begin_tensor = MakeRank1Tensor(begin); tensorflow::Tensor end_tensor = MakeRank1Tensor(end); tensorflow::Tensor strides_tensor = MakeRank1Tensor(strides); tensorflow::TensorShape tf_input_shape(input_sizes); absl::InlinedVector<int64_t, 4> begin_spec; absl::InlinedVector<int64_t, 4> end_spec; absl::InlinedVector<int64_t, 4> strides_spec; tensorflow::PartialTensorShape partial_processing_shape, partial_final_shape; bool dummy; tensorflow::Status status = tensorflow::ValidateStridedSliceOp( &begin_tensor, &end_tensor, strides_tensor, tf_input_shape, begin_mask, end_mask, ellipsis_mask, new_axis_mask, shrink_axis_mask, &partial_processing_shape, &partial_final_shape, &dummy, &dummy, &dummy, &begin_spec, &end_spec, &strides_spec); XLA_CHECK_OK(status); tensorflow::TensorShape processing_shape; XLA_CHECK(partial_processing_shape.AsTensorShape(&processing_shape)) << "Unexpected incomplete processing shape"; tensorflow::TensorShape final_shape; XLA_CHECK(partial_final_shape.AsTensorShape(&final_shape)) << "Unexpected incomplete final shape"; return {begin_spec, end_spec, strides_spec, processing_shape.dim_sizes(), final_shape.dim_sizes()}; } } // namespace swift_xla
/** * Read SCSI device capacity * * @v scsidev SCSI device * @v block Block data interface * @ret rc Return status code */ static int scsidev_read_capacity ( struct scsi_device *scsidev, struct interface *block ) { return scsidev_command ( scsidev, block, &scsicmd_read_capacity, 0, 0, UNULL, 0 ); }
Functional coexistence of twin arsenic resistance systems in Pseudomonas putida KT2440. The genome of the soil bacterium Pseudomonas putida KT2440 bears two virtually identical arsRBCH operons putatively encoding resistance to inorganic arsenic species. Strains carrying single and double chromosomal deletions in each of these ars clusters were tested for arsenic sensitivity, and the contribution of each operon to resistance to the metalloid was found not to be additive, as either cluster sufficed to endow cells with high-level resistance. However, otherwise identical traits linked to each of the ars sites diverged when temperature was decreased. Growth of the various mutants at 15°C (instead of the standard 30°C for P. putida) uncovered that ars2 affords a much higher resistance to As (III) than the ars1 counterpart. Reverse transcription polymerase chain reaction of the arsB1 and arsB2 genes, as well as lacZ fusions to the Pars1 and Pars2 promoters, traced the difference to variations in transcription of the corresponding gene sets at each temperature. Functional redundancy may thus be selected as a stable condition - rather than just as a transient state - if it affords one key activity to be expressed under a wider range of physicochemical settings. This seems to provide a straightforward solution to regulatory problems in environmental bacteria that thrive under changing scenarios.
<a href="http://www.flickr.com/photos/tonyfelgueiras/4731592951/">TonyFelgueiras</a> via Flickr Fashion labels have long sent celebrities their latest accessories for free in hopes of generating positive publicity for the product. It's a classic PR move. But with a new breed of celebrity that serves as society's punching bag rather than its role model, PR teams have begun to rethink this strategy. They've tried to capitalize on the punching bags. As Simon Doonan of The Observe explains: Remember how Snooki, drunk or sober, was never seen without that Coach bag dangling from the crook of her arm? Snooki and her Coach were as synonymous as The Situation and his six-pack. But then the winds of change started blowing on Jersey Shore. Every photograph of Guido-huntin' Snooki showed her toting a new designer purse. Why the sudden disloyalty? Was she trading up? Was she vomiting into her purses and then randomly replacing them? The answer is much more intriguing. Allegedly, the anxious folks at these various luxury houses are all aggressively gifting our gal Snookums with free bags. No surprise, right? But here's the shocker: They are not sending her their own bags. They are sending her each other's bags! Competitors' bags! Fashion labels are using sneaky tactics to ensure their luxury items aren't associated with sleazy celebrities. Call it "unbranding." Keep that in mind the next time you go pick up that bag you saw Snooki rock on last week's Jersey Shore.
import { resizeForm } from "../form"; import { modelChildren } from "../../../mve/modelChildren"; import { mve } from "../../../mve/util"; import { DOMNode, svg } from "../../../mve-DOM/index"; import { EOChildren } from "../../../mve/childrenBuilder"; export function hRuler(param:{ bit:number }){ return resizeForm(function(me,p,rp){ const model=mve.arrayModelOf<number>([]) me.WatchAfter(function(){ return rp.out.width() / param.bit }, function(size){ let i=model.size() while(i<size){ model.push(i) i++ } } ) return { allow(){ return true }, element:svg({ type:"svg", event:{ mousedown:rp.move }, style:{ width(){ return rp.out.width()+"px" }, height(){ return rp.out.height()+"px" } }, children:[ svg({ type:"rect", style:{ background:"gray", opacity:"0.1", }, attr:{ width(){ return rp.out.width() }, height(){ return rp.out.height() } } }), modelChildren(model,function(me,row,index){ let color="red" let diff = 20 const lines:DOMNode[]=[] if(row % 10 == 0){ color ="black" diff = diff + 20 lines.push({ type:"text", attr:{ x:row * param.bit, y(){ return rp.out.height() / 2 + diff + 10 } }, text:row+"" }) lines.push({ type:"text", attr:{ x:row * param.bit, y(){ return rp.out.height() / 2 - diff - 10 } }, text:row+"" }) }else if(row % 5 == 0){ color="orange" diff = diff + 10 } lines.push( { type:"line", style:{ "stroke":color, "stroke-width":"1" }, attr:{ x1:row * param.bit, y1(){ return rp.out.height()/2 - diff }, x2:row * param.bit, y2(){ return rp.out.height() / 2 + diff } } }) return lines.map(svg) }) ] }) } }) }
/* sys/sysinfo.h This file is part of Cygwin. This software is a copyrighted work licensed under the terms of the Cygwin license. Please consult the file "CYGWIN_LICENSE" for details. */ /* sys/sysinfo.h header file for Cygwin. */ #ifndef _SYS_SYSINFO_H #define _SYS_SYSINFO_H #include <sys/cdefs.h> __BEGIN_DECLS struct sysinfo { long uptime; /* Seconds since boot */ unsigned long loads[3]; /* 1, 5, and 15 minute load averages */ unsigned long totalram; /* Total usable main memory size */ unsigned long freeram; /* Available memory size */ unsigned long sharedram; /* Amount of shared memory */ unsigned long bufferram; /* Memory used by buffers */ unsigned long totalswap; /* Total swap space size */ unsigned long freeswap; /* swap space still available */ unsigned short procs; /* Number of current processes */ unsigned long totalhigh; /* Total high memory size */ unsigned long freehigh; /* Available high memory size */ unsigned int mem_unit; /* Memory unit size in bytes */ char __f[10]; /* Pads structure to 64 bytes */ }; extern int sysinfo (struct sysinfo *); extern int get_nprocs_conf (void); extern int get_nprocs (void); extern long get_phys_pages (void); extern long get_avphys_pages (void); __END_DECLS #endif /* _SYS_SYSINFO_H */
module Main where fib :: Int -> Int fib n = if n < 2 then 1 else fib(n-1) + fib (n-2) main = print $ fib 30
The Relationship of Molecular Biomarker Detection and DNA Isolation

Nucleic acid (DNA) extraction using a Formalin-Fixed Paraffin-Embedded (FFPE) block is a frequent practice in pathological diagnosis and medical research. The extracted nucleic acid, which is the targeted end-product for further molecular genetic analysis, depends on the ratio of tumor cells to normal cells. Theoretically, the same tissue sample would give an identical genetic detection result. However, the variable percentage of tumor cells and normal cells influences this theoretical result, therefore potentially influencing the final interpretation of this genetic testing result. To investigate the relationship between DNA isolation and biomarker detection, we studied biomarker detection using DNA isolated from FFPE samples and from secondary DNA isolation of leftover tissues that are usually available after the first round of lysis. For this study, primary FFPE samples of malignant pleural mesothelioma (MPM) were used. Genetic p16 (CDKN2A) copy number variation (CNV) analysis was carried out using DNA from the first isolation and from the leftover tissue. We used droplet digital polymerase chain reaction (ddPCR), a precise method to detect CNV. The number of copies of the reference gene (E2) was compared with that of p16(CDKN2A), a well-known deleted gene in mesothelioma. Our results indicate that DNA isolated from leftover tissue contains a higher ratio of normal cells compared to tumour cells. This DNA isolation factor can influence the interpretation of genetic alteration detection results.

Introduction

Genetic testing is a type of medical test that identifies changes in chromosomes, genes, or proteins. The results of a genetic test can confirm or rule out a suspected genetic condition or help determine an individual's chance of developing or passing on a genetic disorder. Genetic testing allows identification of genetic alterations that could also contribute to targeted therapy, such as testing for EGFR and ALK1 in lung cancer. ADRI is a research institute whose primary focus is on asbestos-related disease, especially mesothelioma. Therefore, establishing sound research laboratory techniques, including accurate genetic testing and interpretation, remains one of our core practices. Conventionally, molecular biomarker detection is carried out by quantitative polymerase chain reaction (qPCR), a molecular biotechnology based on the polymerase chain reaction (PCR) that monitors the amplification of a targeted genetic molecule. For this study we have chosen droplet digital PCR (ddPCR). To the best of our knowledge and review of current literature, there is no one single biomarker that can identify mesothelioma. The current biomarkers utilised to differentiate MPM from adenocarcinoma in clinical diagnosis include CK8/18, Calretinin, CK 5/6, CD141, HBME-1, WT-1, D2-40, EMA, CEA, TAG72, BG8, CD15, TTF-1, and BerEP4.
BAP1 and MTAP are recent biomarkers used clinically to help differentiate between reactive mesothelial hyperplasia and MPM. In our previous study, we tested p16(CDKN2A) deletion and BAP1 genomic expression using Fluorescence In Situ Hybridization (FISH) testing and CNV analysis with ddPCR, respectively. Results indicated that ddPCR provides an advanced biotechnological technique which is a refinement of conventional PCR methods. The direct measurement of target DNA and reference gene allows calculation of CNV using the copy number of a reference gene detected by the digital reader of ddPCR. In this study, we used mesothelioma FFPE samples to study p16(CDKN2A) CNV to test our hypothesis that DNA isolation factors can affect the test results. We isolated DNA from FFPE samples and then did a second round of DNA isolation using the leftover tissue from the first isolation. ddPCR was then performed to study CNV.

DNA Isolation

All procedures were carried out according to the manufacturer's instructions with in-house modification to meet the ISO15189 standard. For this study, the QIAamp DNA Mini Kit was used. After the first lysis buffer incubation, the lysate was used to continue the DNA isolation process. Leftover tissue was then used for the second-round isolation by adding additional lysis buffer and then following the isolation process. Both sets of isolated DNA were then quantified by a nanophotometer to measure DNA input for ddPCR analysis.

Assay Design

The study design of CNV analysis of p16(CDKN2A) is based on our previous experience from our published paper. To be able to compare copy numbers precisely, we designed a probe-based assay. We used the same primer set for p16 as in the previous paper. In the same reaction, we also designed a p16(CDKN2A) probe labelled with FAM, and a reference gene E2, representing two copies per cell, was also included with a HEX label. The detected number of E2 copies implies the amount of DNA that was analyzed with the designated probe and primer. The number of E2 copies was compared to the number of p16 copies to give a p16:E2 ratio. A p16:E2 ratio approaching 2 is considered no deletion of p16, whilst a p16:E2 ratio tending to 1 is considered to be a heterozygous deletion of p16. When the ratio tends towards 0, this indicates that there is a homozygous deletion of p16.

Results

In this study, 11 FFPE samples of MPM patients were included. The Qiagen FFPE mini kit was applied to extract DNA from the FFPE scrolls. Two scrolls of 20 μm in thickness were obtained from each FFPE block (numbered as 180014 to 180024 sequentially). DNA concentration was analyzed by Nanophotometer and Nanodrop as reference quality checkpoints. All samples showed good quantity and quality of isolated DNA for ddPCR analysis. All samples were subjected to ddPCR analysis. Raw data were collected and CNV of p16 was calculated as listed in Table 1 (DNA from first isolation) and Table 2 (DNA from second isolation). As indicated in the tables, results were obtained for every sample except one (180014). The result for sample 180014 was not comparable. Its DNA content was unusually low, which resulted in a large destabilizing influence on the ratio calculation given that the base amount of DNA is effectively absent. This lack of DNA may have been due to the fact that the actual size of the primary sample was too small or due to a high fragmentation of DNA in the FFPE block. Figure 1 shows ddPCR results for different populations of E2 (green) and p16 (blue).
Copy numbers were then calculated using the CNV calculation formula (copy number detected for p16 / copy number of E2) x 2, as every E2 reference refers to two copies in each cell. The CNV value is then used to call a homozygous or heterozygous deletion of p16. The same tumour samples from their identical FFPE blocks are expected to give equivalent results. However, we have observed a tendency of higher p16:E2 ratios from the leftover tissue samples compared to the initial samples, which implies that there is a higher number of normal cells in leftover tissue samples. The E2 copy number was increased in leftover samples compared to the original samples. In all the samples studied in this paper, the E2 population was detected at a higher number in all leftover samples (the second DNA isolation) compared to the first DNA isolation. The higher E2 copy number in leftover samples influences the final CNV calculation. Samples 180018, 18009, 180022 and 180024 were first processed by researcher A. E2 copies of these four samples in the leftover tissue showed increased copy numbers when compared with E2 copies from the first isolations. Researcher B then processed samples 180014 to 180024 (total 11 samples). Except for one sample (180014), all samples also showed elevated E2 copy numbers. Sample 180014 did not show an increased trend, potentially due to low DNA quantity from both isolations. We propose that the difference in p16:E2 ratios between the first and second DNA isolations may imply that there are more normal cells in the leftover samples, resulting in more E2 copies. The results from leftover tissue samples may not represent the actual CNV in clinical samples.

Discussion and Conclusion

Proteinase is applied to lyse the cells; we presume normal cells have better cellular structure and architecture, making them more difficult to break down with lysis buffer. Tumour cells lacking cell-cell adhesion molecules may have low protein content on the cell surface, enhancing the effectiveness of lysis. In this study, we also tried different proteinases and found that proteinase K performed better than the other proteinases tested, as less leftover tissue was retained after the lysis step. Our data support our hypothesis that DNA isolation factors affect CNV results, as more E2 copies were detected from the second DNA isolation samples. These results demonstrate the concept that lysis of normal cells is more difficult than for the tumour cells, possibly due to the loss of glycoproteins in the tumour MPM cells. We therefore suggest that DNA extracted from leftover tissue samples cannot be used for genetic alteration detection by ddPCR, as it may cause an incorrect calculation of the CNV of the samples. Our results also support our hypothesis that, given that normal cells are more difficult to lyse, a higher number of normal cells will be retained in the leftover sample, which may induce the problem of false negative results in genetic alteration detection. Since tumour cells have fewer glycoproteins and are easier to break down during the isolation process, we suggest that molecular testing that is focused on tumour cell analysis should only use DNA isolated from the first lysis. The data from this study suggest that lysis of normal cells is more difficult than that of tumour cells.
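To make the CNV call described above concrete, here is a minimal Python sketch of the p16:E2 ratio calculation and the resulting deletion call. The function name and the numeric cutoffs (1.5 and 0.5) are illustrative assumptions and are not taken from the paper; the paper only states that ratios tending towards 2, 1 and 0 correspond to no deletion, heterozygous deletion and homozygous deletion, respectively.

```python
def p16_cnv_call(p16_copies: float, e2_copies: float) -> tuple:
    """Compute the p16(CDKN2A) copy number from ddPCR counts and call the deletion status.

    CNV = (p16 copies / E2 copies) * 2, since each E2 reference represents two copies per cell.
    The 1.5 / 0.5 cutoffs below are illustrative assumptions, not thresholds from the paper.
    """
    if e2_copies <= 0:
        raise ValueError("E2 reference copies must be positive (sample may lack usable DNA)")
    cnv = (p16_copies / e2_copies) * 2
    if cnv >= 1.5:          # ratio tending towards 2
        status = "no deletion"
    elif cnv >= 0.5:        # ratio tending towards 1
        status = "heterozygous deletion"
    else:                   # ratio tending towards 0
        status = "homozygous deletion"
    return cnv, status

# Example: 480 p16 droplets against 1000 E2 droplets gives CNV ~0.96 -> heterozygous deletion.
print(p16_cnv_call(480, 1000))
```

The sketch also makes the sample 180014 failure mode visible: when the E2 count is effectively zero, the ratio is undefined rather than a meaningful CNV, which is why that sample was excluded.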
<reponame>mmagician/polkadot-profit-transformer<gh_stars>1-10 import IdentityProcessorService from './identity_processor' import { PolkadotModule } from '../../modules/polkadot.module' import { LoggerModule } from '../../modules/logger.module' import { KafkaModule } from './../../modules/kafka.module' import { IEnrichmentEntry, IEvent, IExtrinsic, IExtrinsicsEntry, JudgementStatus } from './identity_processor.types' jest.mock('../../modules/polkadot.module') jest.mock('../../modules/logger.module') jest.mock('../../modules/kafka.module') PolkadotModule.inject = jest.fn(() => new PolkadotModule()) LoggerModule.inject = jest.fn(() => new LoggerModule()) KafkaModule.inject = jest.fn(() => new KafkaModule()) KafkaModule.prototype.sendEnrichmentData = jest.fn() PolkadotModule.prototype.getIdentity = jest.fn(async (account_id: string) => { if (account_id === 'isEmpty') { return { isEmpty: true } } return { isEmpty: false, toHuman: () => ({ info: { display: { Raw: 'display' }, legal: { Raw: 'legal' }, web: { Raw: 'web' }, riot: { Raw: 'riot' }, email: { Raw: 'email' }, twitter: { Raw: 'twitter' } } }) } }) const event: IEvent = { account_id: 'account_id', block_id: 1, event: 'event', event_id: 'event_id', data: '["12345",{"RegistrarIndex":123}]' } describe('Identity Processor service', () => { let identityProcessorService: IdentityProcessorService beforeEach(() => { identityProcessorService = new IdentityProcessorService() }) it('constructor', async () => { expect(identityProcessorService).toBeInstanceOf(IdentityProcessorService) }) it('pushEnrichment', async () => { const entry: IEnrichmentEntry = { account_id: '1', root_account_id: '2', display: '3', legal: '4', web: '5', riot: '6', email: '7', twitter: '8', judgement_status: JudgementStatus.REQUESTED, registrar_index: 1, created_at: 123456, killed_at: 67890 } await identityProcessorService.pushEnrichment('test', entry) expect(KafkaModule.prototype.sendEnrichmentData).toBeCalledWith('test', entry) }) }) describe('Push enrichments methods', () => { let identityProcessorService: IdentityProcessorService beforeEach(() => { IdentityProcessorService.prototype.pushEnrichment = jest.fn() identityProcessorService = new IdentityProcessorService() }) it('onNewAccount', async () => { await identityProcessorService.onNewAccount(event) expect(IdentityProcessorService.prototype.pushEnrichment).toBeCalledWith('event_id', { account_id: 'account_id', created_at: 1 }) }) it('onKilledAccount', async () => { await identityProcessorService.onKilledAccount(event) expect(IdentityProcessorService.prototype.pushEnrichment).toBeCalledWith('event_id', { account_id: 'account_id', killed_at: 1 }) }) it('onJudgementEvent', async () => { await identityProcessorService.onJudgementEvent({ event, status: JudgementStatus.GIVEN }) expect(IdentityProcessorService.prototype.pushEnrichment).toBeCalledWith('event_id', { account_id: 'account_id', judgement_status: JudgementStatus.GIVEN, registrar_index: parseInt('123', 16) }) }) it('updateAccountIdentity isEmpty or isNone', async () => { identityProcessorService = new IdentityProcessorService() await identityProcessorService.updateAccountIdentity({ id: 'id', signer: 'isEmpty' }) expect(PolkadotModule.prototype.getIdentity).toBeCalled() expect(IdentityProcessorService.prototype.pushEnrichment).toBeCalledWith('id', { account_id: 'isEmpty', display: '', legal: '', web: '', riot: '', email: '', twitter: '' }) }) it('updateAccountIdentity is not empty', async () => { identityProcessorService = new IdentityProcessorService() await 
identityProcessorService.updateAccountIdentity({ id: 'id', signer: 'signer' }) expect(PolkadotModule.prototype.getIdentity).toBeCalled() expect(IdentityProcessorService.prototype.pushEnrichment).toBeCalledWith('id', { account_id: 'signer', display: 'display', email: 'email', legal: 'legal', riot: 'riot', twitter: 'twitter', web: 'web' }) }) }) describe('Push enrichments methods', () => { let identityProcessorService: IdentityProcessorService beforeEach(() => { IdentityProcessorService.prototype.sendToPushEnrichmentSubs = jest.fn() identityProcessorService = new IdentityProcessorService() }) it('addSub case', async () => { const extrinsic: IExtrinsic = { id: 'id', method: 'addSub', signer: 'signer', args: [{ id: 'id' }] } await identityProcessorService.updateSubAccounts(extrinsic) expect(IdentityProcessorService.prototype.sendToPushEnrichmentSubs).toBeCalledWith({ key: 'id', accountId: 'id', rootAccountId: 'signer' }) }) it('setSubs case', async () => { const extrinsic: IExtrinsic = { id: 'id', method: 'setSubs', signer: 'signer', args: [[['account1'], ['account2']]] } await identityProcessorService.updateSubAccounts(extrinsic) expect(IdentityProcessorService.prototype.sendToPushEnrichmentSubs).toBeCalledTimes(2) expect(IdentityProcessorService.prototype.sendToPushEnrichmentSubs).nthCalledWith(1, { accountId: 'account1', key: 'id_0', rootAccountId: 'signer' }) expect(IdentityProcessorService.prototype.sendToPushEnrichmentSubs).nthCalledWith(2, { accountId: 'account2', key: 'id_1', rootAccountId: 'signer' }) }) it('removeSub case', async () => { const extrinsic: IExtrinsic = { id: 'id', method: 'removeSub', signer: 'signer', args: ['removedAccountId'] } await identityProcessorService.updateSubAccounts(extrinsic) expect(IdentityProcessorService.prototype.sendToPushEnrichmentSubs).toBeCalledWith({ key: 'id', accountId: 'removedAccountId', rootAccountId: '' }) }) it('quitSub case', async () => { const extrinsic: IExtrinsic = { id: 'id', method: 'quitSub', signer: 'signer', args: ['args'] } await identityProcessorService.updateSubAccounts(extrinsic) expect(IdentityProcessorService.prototype.sendToPushEnrichmentSubs).toBeCalledWith({ key: 'id', accountId: 'signer', rootAccountId: '' }) }) }) describe('processExtrinsics', () => { let identityProcessorService: IdentityProcessorService beforeEach(() => { IdentityProcessorService.prototype.updateAccountIdentity = jest.fn() IdentityProcessorService.prototype.updateSubAccounts = jest.fn() identityProcessorService = new IdentityProcessorService() }) it('subs cases', async () => { const extrinsics: IExtrinsicsEntry = { extrinsics: [ { id: 'id', method: 'addSub', signer: 'signer', args: [{ id: 'id' }] }, { id: 'id', method: 'quitSub', signer: 'signer', args: ['args'] }, { id: 'id', method: 'removeSub', signer: 'signer', args: ['args'] }, { id: 'id', method: 'renameSub', signer: 'signer', args: ['args'] }, { id: 'id', method: 'setSubs', signer: 'signer', args: ['args'] } ] } await identityProcessorService.processExtrinsics(extrinsics) expect(IdentityProcessorService.prototype.updateSubAccounts).toBeCalledTimes(5) }) it('identity cases', async () => { const extrinsics: IExtrinsicsEntry = { extrinsics: [ { id: 'id', method: 'clearIdentity', signer: 'signer', args: [{ id: 'id' }] }, { id: 'id', method: 'killIdentity', signer: 'signer', args: ['args'] }, { id: 'id', method: 'setFields', signer: 'signer', args: ['args'] }, { id: 'id', method: 'setIdentity', signer: 'signer', args: ['args'] } ] } await identityProcessorService.processExtrinsics(extrinsics) 
expect(IdentityProcessorService.prototype.updateAccountIdentity).toBeCalledTimes(4) }) })
import java.io.BufferedReader; import java.io.InputStreamReader; import java.util.ArrayDeque; import java.util.Deque; public class Main { public static void main(String args[]) throws Exception { int total = 0; BufferedReader br = new BufferedReader(new InputStreamReader(System.in)); Deque<Integer> down = new ArrayDeque<Integer>(); Deque<int[]> ike = new ArrayDeque<int[]>(); String yama = br.readLine(); for (int i = 0; i < yama.length(); i++) { if (yama.charAt(i) == '\\') { down.add(i); } else if (!down.isEmpty() && yama.charAt(i) == '/') { int a = i - down.getLast(); total = total + a; while (!ike.isEmpty() && down.getLast() < ike.getLast()[0]) { a = a + ike.getLast()[1]; ike.removeLast(); } int[] data = { down.pollLast(), a }; ike.add(data); } } System.out.println(total); System.out.print(ike.size()); while (!ike.isEmpty()) { System.out.print(" " + ike.removeFirst()[1]); } System.out.println(); } }
/** \brief Handler for start element events. */
static void optInfoStartElem(void *userData, const XML_Char *name,
                             const XML_Char **attr)
{
    struct OptInfoData *data = (struct OptInfoData *)userData;
    enum OptInfoElem elem = bsearchStr (name, OptInfoElems, OI_COUNT);
    switch (elem) {
    case OI_DRIINFO:
        if (data->inDriInfo)
            XML_FATAL1 ("nested <driinfo> elements.");
        if (attr[0])
            XML_FATAL1 ("attributes specified on <driinfo> element.");
        data->inDriInfo = true;
        break;
    case OI_SECTION:
        if (!data->inDriInfo)
            XML_FATAL1 ("<section> must be inside <driinfo>.");
        if (data->inSection)
            XML_FATAL1 ("nested <section> elements.");
        if (attr[0])
            XML_FATAL1 ("attributes specified on <section> element.");
        data->inSection = true;
        break;
    case OI_DESCRIPTION:
        if (!data->inSection && !data->inOption)
            XML_FATAL1 ("<description> must be inside <section> or <option>.");
        if (data->inDesc)
            XML_FATAL1 ("nested <description> elements.");
        data->inDesc = true;
        parseDescAttr (data, attr);
        break;
    case OI_OPTION:
        if (!data->inSection)
            XML_FATAL1 ("<option> must be inside <section>.");
        if (data->inDesc)
            XML_FATAL1 ("<option> nested in <description> element.");
        if (data->inOption)
            XML_FATAL1 ("nested <option> elements.");
        data->inOption = true;
        parseOptInfoAttr (data, attr);
        break;
    case OI_ENUM:
        if (!(data->inOption && data->inDesc))
            XML_FATAL1 ("<enum> must be inside <option> and <description>.");
        if (data->inEnum)
            XML_FATAL1 ("nested <enum> elements.");
        data->inEnum = true;
        parseEnumAttr (data, attr);
        break;
    default:
        XML_FATAL ("unknown element: %s.", name);
    }
}
Lebanon's Foreign Minister Gebran Bassil, left, shakes hands with his Turkish counterpart Mevlut Cavusoglu, right, after their joint news conference following their meeting in Ankara, Turkey, Thursday, Nov. 16, 2017. (AP Photo/Burhan Ozbilici) ISTANBUL (AP) — Turkey’s top diplomat says followers of a U.S.-based Muslim cleric blamed for last year’s failed coup have infiltrated the “American system” of justice and are behind accusations leveled against a Turkish-Iranian businessman in the U.S. Foreign Minister Mevlut Cavusoglu also told journalists Friday that cleric Fethullah Gulen “has entered American missions here through their local staff.” He was referring to the arrest of a local employee of the U.S. Istanbul consulate in October for alleged links to Gulen. The cleric has denied involvement in the coup attempt. The U.S. says its employee had contacts with police and a prosecutor as part of his job, not for other reasons. Cavusoglu claims that Gulen’s followers wielded influence over the U.S. judicial system, pointing to the case of Turkish-Iranian businessman Reza Zarrab, charged by an ex-U.S. attorney for evading U.S. sanctions on Iran. The gold trader was arrested in March 2016 during a trip to the United States. Cavusoglu claimed that former U.S. attorney Preet Bharara used the same indictment that alleged Gulen-linked Turkish prosecutors had filed against Zarrab in Turkey amid a sweeping corruption scandal involving leaked wiretaps and documents that shook the country in 2013. “It seems very politically motivated,” Cavusoglu said. Also indicted in the case is Mehmet Hakan Atilla, an executive of the state-owned Halkbank, currently under arrest in the U.S. and set to appear in court on Nov. 27 for violating sanctions on Iran through financial transactions amounting to hundreds of millions of dollars. The foreign minister insisted the bank did not violate any sanctions. Turkey’s former economy minister Zafer Caglayan is among the nine defendants implicated in the case. Zarrab and his lawyer have skipped several court appearances recently, leading to speculation that he may be cutting a deal with U.S. officials to avoid prosecution. Turkey is seeking Gulen’s extradition from the U.S. to try him for his alleged role in the failed coup and has been infuriated that its demand has not yet been met. Gulen has been living in self-imposed exile in Pennsylvania for nearly two decades. Turkey has arrested over 50,000 people and fired more than 100,000 from state jobs for alleged links to the cleric since the July 2016 coup attempt. ″(Gulen’s network) couldn’t succeed in the coup and they are trying in the U.S. and they are getting support from some U.S. institutions,” Cavusoglu said.
/** * The HBase143Writer implements the sink function that can be used both in bounded and unbounded stream. */ public class HBase143Writer extends HBaseWriterBase<Tuple2<Boolean, Row>> { private static final Logger LOG = LoggerFactory.getLogger(HBase143Writer.class); // rowkey field index in source input row private final int rowKeySourceIndex; // qualifier fields' indexes in source input row private final List<Integer> qualifierSourceIndexes; private final List<Tuple3<byte[], byte[], TypeInformation<?>>> qualifierList; // field serializer for HBase format, order by field index of input row private List<HBaseBytesSerializer> inputFieldSerializers; private int totalQualifiers; private String charset; public HBase143Writer( String hbaseTableName, HBaseTableSchemaV2 hbaseSchema, int rowKeySourceIndex, List<Integer> qualifierSourceIndexes, Configuration hbaseConfiguration) throws IOException { super(hbaseTableName, hbaseSchema, hbaseConfiguration); this.totalQualifiers = hbaseSchema.getFamilySchema().getTotalQualifiers(); Preconditions.checkArgument(null != qualifierSourceIndexes && totalQualifiers == qualifierSourceIndexes.size(), "qualifierSourceIndexes info should be consistent with qualifiers defined in HBaseTableSchema!"); Preconditions.checkArgument(rowKeySourceIndex > -1 && totalQualifiers > rowKeySourceIndex, "rowKeySourceIndex must > -1 and totalQualifiers number must > rowKeySourceIndex"); this.qualifierList = hbaseSchema.getFamilySchema().getFlatByteQualifiers(); this.rowKeySourceIndex = rowKeySourceIndex; this.qualifierSourceIndexes = qualifierSourceIndexes; this.charset = hbaseSchema.getFamilySchema().getStringCharset(); this.inputFieldSerializers = new ArrayList<>(); for (int index = 0; index <= totalQualifiers; index++) { if (index == rowKeySourceIndex) { inputFieldSerializers.add(new HBaseBytesSerializer(hbaseSchema.getRowKeyType(), charset)); } else { Tuple3<byte[], byte[], TypeInformation<?>> typeInfo; if (index < rowKeySourceIndex) { typeInfo = qualifierList.get(index); } else { typeInfo = qualifierList.get(index - 1); } inputFieldSerializers.add(new HBaseBytesSerializer(typeInfo.f2, charset)); } } } @Override public void invoke(Tuple2<Boolean, Row> input, Context context) throws Exception { Row row = input.f1; if (null == row) { return; } if (row.getArity() != totalQualifiers + 1) { LOG.warn("discard invalid row:{}", row); } else { byte[] rowkey = inputFieldSerializers.get(rowKeySourceIndex).toHBaseBytes(row.getField(rowKeySourceIndex)); if (input.f0) { // upsert Put put = new Put(rowkey); for (int index = 0; index <= totalQualifiers; index++) { if (index != rowKeySourceIndex) { int qualifierSrcIndex = index; if (index > rowKeySourceIndex) { qualifierSrcIndex = index - 1; } Tuple3<byte[], byte[], TypeInformation<?>> qualifierInfo = qualifierList.get(qualifierSrcIndex); int qualifierIndex = qualifierSourceIndexes.get(qualifierSrcIndex); byte[] value = inputFieldSerializers.get(index).toHBaseBytes(row.getField(qualifierIndex)); put.addColumn(qualifierInfo.f0, qualifierInfo.f1, value); } } table.put(put); } else { // delete Delete delete = new Delete(rowkey); for (int index = 0; index <= totalQualifiers; index++) { if (index != rowKeySourceIndex) { int qualifierSrcIndex = index; if (index > rowKeySourceIndex) { qualifierSrcIndex = index - 1; } Tuple3<byte[], byte[], TypeInformation<?>> typeInfo = qualifierList.get(qualifierSrcIndex); delete.addColumn(typeInfo.f0, typeInfo.f1); } } table.delete(delete); } } } @Override public String toString() { return 
HBase143Writer.class.getSimpleName() + "-> table:" + hTableName + " schema:{" + hTableSchema.toString() + "}"; } }
/** * @author wangzhichao * @since 2021/7/27 */ public class DirTest { public static void main(String[] args) { File dir = new File("H:\\一级目录"); transverseDir(dir,0); System.out.println("====================================="); List<File> fileList = getFilesInDir(dir); for (File file : fileList) { System.out.println(file.getAbsolutePath()); } } private static List<File> getFilesInDir(File dir) { List<File> result = new ArrayList<>(); File[] files = dir.listFiles(); for (File file : files) { if (file.isDirectory()) { result.addAll(getFilesInDir(file)); } else { result.add(file); } } return result; } private static void transverseDir(File dir, int level) { System.out.println(getIndent(level) + dir.getName()); File[] files = dir.listFiles(); level++; for (File file : files) { if (file.isDirectory()) { transverseDir(file, level); } else { System.out.println(getIndent(level) + file.getName()); } } } private static String getIndent(int level) { StringBuilder sb = new StringBuilder(); sb.append("|--"); for (int i = 0; i < level; i++) { sb.insert(0, "| "); } return sb.toString(); } }
Evidence Connects Quakes to Oil, Natural Gas Boom In the context of climate change and the environment as a whole, today’s crude oil and natural gas boom is the ultimate mixed bag. A new study from a team of researchers at Stanford and Duke universities, as well as other institutions, weighs the good with the bad of oil and gas development: Natural gas development and consumption, especially for producing electrical power, can boost local economies while reducing air pollution from coal-fired power plants and helping to wean the power grid away from sources of energy that emit huge amounts of climate-changing CO2 when burned. A USGS map showing all earthquakes in the central U.S. Magnitude 2.5 or greater occurring between Sept. 15, 2014 and Oct. 15, 2014. Oklahoma is both an oil and gas exploration and production hub and the site of earthquake swarms scientists believe have been caused by energy companies injecting large quantities of waste water deep into the ground. The earthquakes shown in southern Colorado are in the Raton Basin, another area where injection wells have been found to cause earthquakes. Credit: USGS But the list of environmental challenges fossil fuels development poses is a long one: Methane, a potent greenhouse gas, has been found leaking from oil and gas operations all over the country. Trains that carry crude oil from fields in North Dakota are prone to dangerous derailments. Fracking uses a lot of water in arid regions, and water contamination from fracking has long been a concern of people living near energy development. The Stanford study, published in August in the Annual Review of Environment and Resources, also addresses another shaky issue about fossil fuels development that comes amid a flurry of new research connecting the same dots: Oil and gas operations, including fracking, can cause earthquakes. Some of them can destroy homes and injure people. A U.S. Geological Survey study published this month found that underground injection of wastewater from a coalbed methane natural gas production field straddling the New Mexico-Colorado border has been causing earthquakes there since 2001. One of those quakes was a Magnitude 5.3 temblor that rattled southern Colorado in 2011. “We’ve seen a number of damaging earthquakes already that have been related to waste water injection,” USGS geophysicist Justin Rubinstein, the study’s lead author, told Climate Central. “We can’t rule out the possibility that there will be larger earthquakes.” He said the USGS is particularly concerned about the possibility of a major oil and gas-related earthquake striking a major urban area. In another study published in July in the journal Science, Cornell University researchers found that “seismic swarms” of earthquakes in Oklahoma since 2009 — many of which were over Magnitude 3.0 — are likely being caused by deep underground injection of oil and gas related wastewater. A Magnitude 5.7 quake induced by oil and gas wastewater injection destroyed 14 homes in 2011 in Oklahoma. Swarms of earthquakes continue to strike Oklahoma, making it one of the most seismically active areas in North America, even surpassing California in seismic activity. USGS real-time earthquake data show that between 5 a.m. and 9 a.m. Oct. 15 there were three earthquakes in northern Oklahoma ranging in strength from Magnitude 2.9 to 3.3. 
Over the past 30 days in Oklahoma, north Texas and southern Kansas, USGS data tally 156 earthquakes, the most severe of which were a Magnitude 4.3 near the oil and gas hub of Cushing, Okla., on Oct. 10 and a Magnitude 4.4 in Harper, Kan., on Oct. 2. By contrast, seismically active California, Nevada and Oregon experienced fewer than 140 earthquakes during the same 30-day period.

Hydraulic fracturing, or fracking, during oil and gas well development has been found to cause earthquakes, too. A new study published Tuesday in the journal Seismological Research Letters shows that a swarm of 400 small earthquakes in 2013 in Ohio is linked to fracking, the technique energy companies use to crack open underground rock formations to release trapped oil and gas into a well. Fracking involves the injection of millions of gallons of water, sand and chemicals at high pressures thousands of feet beneath the surface, cracking open rock formations that hold crude oil and natural gas.

Regardless of whether a well is fracked, a lot of waste water, or “produced water,” comes up out of the wellbore. Energy companies often dispose of that water by injecting it deep underground. All those fluids being injected can sometimes play havoc with faults, some of which may be long-dormant.

[Image: An oil pump jack in Oklahoma. Credit: Public Herald/flickr]

“It is well known by seismologists that fluid injection into the earth, of any kind, can sometimes trigger earthquakes,” said Ohio study lead author Paul Friberg, a researcher at Instrumental Software Technologies, a firm specializing in induced earthquake analysis. Friberg co-wrote the study with a team from the Ohio Department of Natural Resources. “There are now several documented cases linking waste water injection wells to triggering earthquakes on pre-existing faults,” he said. “What is less well known is that fracking itself can also trigger earthquakes on faults, presumably through a similar mechanism.”

Friberg’s study showed that the process of fracking caused micro-earthquakes in Ohio ranging from Magnitude negative 3 to Magnitude negative 1 on the moment magnitude scale, the most modern method of measuring earthquakes, based on the amount of energy released during seismic events. The moment magnitude scale is expressed similarly to the Richter scale, which it replaced. None of the Ohio quakes could be felt on the surface. The level of seismic activity depended on the level of fracking activity occurring at the time, likely triggering a slip in a pre-existing fault, the study says.

Rob Jackson, a professor of environmental earth system science at Stanford University and the lead author of the Stanford study, said Friberg’s work confirms that fracking can cause small earthquakes. “The tremors revealed a previously unknown fault right below the natural gas well,” he said.

There have been three documented earthquakes linked to fracking that could be felt on the surface, Friberg said. That’s because fracking fluid is thought to lubricate faults, possibly causing them to slip, he said. But many more earthquakes have been caused by waste water injection, as many as 188 nationwide in 2011, according to the Stanford study. “Pumping wastewater deep underground is a bigger risk of causing large earthquakes than fracking,” Jackson said.
“Earthquakes associated with waste water disposal have already damaged buildings and injured people in rare cases.” He said energy companies can try to prevent human-induced earthquakes by avoiding faults when they inject oil and gas wastewater into the ground and by not pumping fluids too quickly into the ground. “You can reduce the risk of large earthquakes through careful monitoring and planning,” Jackson said. “You can’t make it zero, however.”
from typing import Dict, List, Optional, Union
from pathlib import Path, PurePosixPath

__all__ = ['infer_params', 'build_output', 'setup_project']


def infer_params(project_dir: Path, work_dir: Path = None, construct_var: str = 'construct',
                 app_env_var: str = 'app_env', app_env_pos: int = 0) -> Dict[str, str]:
    work_dir: Path = work_dir or Path().absolute()
    relative_path: Path = work_dir.relative_to(project_dir)
    construct_parts = list(relative_path.parts)
    # construct_parts is empty when project_dir matches work_dir
    app_env = construct_parts.pop(app_env_pos) if len(construct_parts) > 0 else None
    construct = str(Path(*construct_parts)) if len(construct_parts) > 0 else None
    return {app_env_var: app_env, construct_var: construct}


def build_output(data: Dict[str, str], key: str = None, component: str = None, prefix: str = '{app_name}-',
                 suffix: str = '-{app_env}-{company}', bucket_name: str = 'terraformstate',
                 table_name: str = 'terraformlock', state_name: str = 'terraform.tfstate',
                 app_env_var: str = 'app_env') -> str:
    if key:
        return data.get(key)
    if component == 'env':
        return data[app_env_var]
    if component == 'bucket':
        return prefix.format(**data) + bucket_name + suffix.format(**data)
    if component == 'table':
        return prefix.format(**data) + table_name + suffix.format(**data)
    if component == 'object':
        state_path = Path(data['construct']) / state_name
        state_path = PurePosixPath(state_path)
        return str(state_path)


def setup_project(path: Optional[Union[Path, str]], create: bool = True, constructs: List[str] = None,
                  envs: List[str] = None, common_name: str = 'common', common_ext: str = 'tfvars',
                  terragrunt_name: str = 'terragrunt', terragrunt_ext: str = 'hcl'):
    live_dir: Union[Path, str] = path or Path()
    live_dir: Path = Path(live_dir)
    live_dir.mkdir(exist_ok=create)
    envs = envs or ['sbx', 'dev', 'qa', 'uat', 'prd']
    constructs = constructs or ['params', 'network', 'iam', 'app']
    for env in envs:
        for construct in constructs:
            terragrunt_hcl = live_dir / env / construct / f'{terragrunt_name}.{terragrunt_ext}'
            terragrunt_hcl.parent.mkdir(exist_ok=True, parents=True)
            terragrunt_hcl.touch(exist_ok=True)
    terragrunt_hcl = live_dir / f'{terragrunt_name}.{terragrunt_ext}'
    common_tfvars = live_dir / f'{common_name}.{common_ext}'
    terragrunt_hcl.touch(exist_ok=True)
    common_tfvars.touch(exist_ok=True)
/** * Handles the command * (NewSensor {instanceName} {inputDeviceName} {indexInInputDevice}) * * @param command the command that invoked this method */ @Override protected void initialize(ConfigCommand command) { int argc = command.argc ; Object[] argv = command.argv ; if (argc != 4) { syntaxError("Incorrect number of arguments to " + command.commandName) ; } if (!isName(argv[2])) { syntaxError("The second argument to " + command.commandName + " must be the device name") ; } if (!(argv[3] instanceof Double)) { syntaxError("The third argument to " + command.commandName + " must be a sensor index") ; } sensorIndex = ((Double)argv[3]).intValue() ; configDevice = (ConfigDevice)configContainer.findConfigObject ("Device", (String)argv[2]) ; }
/** * @author Myrle Krantz */ public class BalanceSegmentSetTest extends ValidationTest<BalanceSegmentSet> { public BalanceSegmentSetTest(ValidationTestCase<BalanceSegmentSet> testCase) { super(testCase); } @Override protected BalanceSegmentSet createValidTestSubject() { final BalanceSegmentSet ret = new BalanceSegmentSet(); ret.setIdentifier("valid"); ret.setSegments(Arrays.asList(BigDecimal.ZERO, BigDecimal.valueOf(100), BigDecimal.valueOf(10_000))); ret.setSegmentIdentifiers(Arrays.asList("small", "medium", "large")); return ret; } @Parameterized.Parameters public static Collection testCases() { final Collection<ValidationTestCase> ret = new ArrayList<>(); ret.add(new ValidationTestCase<BalanceSegmentSet>("basicCase") .adjustment(x -> {}) .valid(true)); ret.add(new ValidationTestCase<BalanceSegmentSet>("null segments") .adjustment(x -> x.setSegments(null)) .valid(false)); ret.add(new ValidationTestCase<BalanceSegmentSet>("null identifiers") .adjustment(x -> x.setSegmentIdentifiers(null)) .valid(false)); ret.add(new ValidationTestCase<BalanceSegmentSet>("too short identifier list") .adjustment(x -> x.setSegmentIdentifiers(Arrays.asList("small", "large"))) .valid(false)); ret.add(new ValidationTestCase<BalanceSegmentSet>("too short segment list") .adjustment(x -> x.setSegments(Arrays.asList(BigDecimal.ZERO, BigDecimal.valueOf(100)))) .valid(false)); ret.add(new ValidationTestCase<BalanceSegmentSet>("non-zero first entry") .adjustment(x -> x.setSegments(Arrays.asList(BigDecimal.ONE, BigDecimal.valueOf(100), BigDecimal.valueOf(10_000)))) .valid(false)); ret.add(new ValidationTestCase<BalanceSegmentSet>("mis-ordered segmentation") .adjustment(x -> x.setSegments(Arrays.asList(BigDecimal.ZERO, BigDecimal.valueOf(10_000), BigDecimal.valueOf(100)))) .valid(false)); ret.add(new ValidationTestCase<BalanceSegmentSet>("invalid identifier") .adjustment(x -> x.setIdentifier("//")) .valid(false)); ret.add(new ValidationTestCase<BalanceSegmentSet>("invalid segment identifier") .adjustment(x -> x.setSegmentIdentifiers(Arrays.asList("small", "large", "//"))) .valid(false)); return ret; } }
import os from 'os'; import { ClientIdentification } from './proto/control_pb'; import { Metadata, credentials as cred, ChannelCredentials } from 'grpc'; import { Platform } from './Platform'; import { EventBus } from './EventBus'; import { QueryBus } from './QueryBus'; import { CommandBus } from './CommandBus'; function createClientIdentification(id: string, name: string) { const clientIdentification = new ClientIdentification(); clientIdentification.setClientId(id); clientIdentification.setComponentName(name); return clientIdentification; } function createMeta(token?: string) { const meta = new Metadata(); if (token) { meta.set('AxonIQ-Access-Token', token); } return meta; } export interface ClientOptions { host: string; port?: number; certificate?: Buffer; token?: string; clientId?: string; componentName: string; } export class AxonClient { private platform: Platform; private credentials: ChannelCredentials; private meta: Metadata; private clientIdentification: ClientIdentification; readonly endpoint: string | null = null; readonly eventBus: EventBus = null!; readonly queryBus: QueryBus = null!; readonly commandBus: CommandBus = null!; constructor({ clientId = process.pid + '@' + os.hostname(), componentName, host, certificate, port = 8124, token, }: ClientOptions) { const endpoint = `${host}:${port}`; const clientIdentification = createClientIdentification( clientId, componentName ); const meta = createMeta(token); const credentials = certificate ? cred.createSsl(certificate) : cred.createInsecure(); this.credentials = credentials; this.meta = meta; this.clientIdentification = clientIdentification; this.platform = new Platform({ endpoint, meta, credentials, clientIdentification, }); } async connect() { // @ts-ignore this.endpoint = await this.platform.getPlatformServer(); // @ts-ignore this.eventBus = new EventBus({ credentials: this.credentials, endpoint: this.endpoint, meta: this.meta, }); // @ts-ignore this.queryBus = new QueryBus({ credentials: this.credentials, endpoint: this.endpoint, meta: this.meta, clientIdentification: this.clientIdentification, }); // @ts-ignore this.commandBus = new CommandBus({ credentials: this.credentials, endpoint: this.endpoint, meta: this.meta, clientIdentification: this.clientIdentification, }); return this; } disconnect() { this.eventBus?.close(); this.queryBus?.close(); this.commandBus?.close(); this.platform.close(); } }
{-# LANGUAGE DeriveDataTypeable #-} -------- Declarations ------------------------------------------------------ module HsDeclStruct where import SrcLoc1 import HsIdent import HsGuardsStruct import HsAssocStruct import Data.Generics -- DI -- i identifiers -- e expression recursion type -- p pattern recursion type -- ds declaration recursion type -- t type recursion type -- c context recursion type -- tp type pattern recursion type -- This type seems to be full of awkward inconsistencies... /TH data DI i e p ds t c tp = HsTypeDecl SrcLoc tp t | HsNewTypeDecl SrcLoc c tp (HsConDeclI i t c) {-deriving-} [i] | HsDataDecl SrcLoc c tp [HsConDeclI i t c] {-deriving-} [i] | HsClassDecl SrcLoc c tp (HsFunDeps i) {-where-} ds | HsInstDecl SrcLoc (Maybe i) c t {-where-} ds -- optionally named | HsDefaultDecl SrcLoc [t] | HsTypeSig SrcLoc [i] c t | HsFunBind SrcLoc [HsMatchI i e p ds] | HsPatBind SrcLoc p (HsRhs e) {-where-} ds | HsInfixDecl SrcLoc HsFixity [HsIdentI i] -- Haskell 98 -- Hugs compatibility: | HsPrimitiveTypeDecl SrcLoc c tp -- data T a1 ... an; | HsPrimitiveBind SrcLoc i t -- primitive f :: t deriving (Ord,Read, Eq, Show, Data, Typeable) data HsMatchI i e p ds = HsMatch SrcLoc i [p] (HsRhs e) {-where-} ds deriving (Ord,Read, Eq, Show, Data, Typeable) data HsConDeclI i t c = HsConDecl SrcLoc [i] c i [HsBangType t] | HsRecDecl SrcLoc [i] c i [([i], HsBangType t)] deriving (Ord,Read, Eq, Show, Data, Typeable) data HsBangType t = HsBangedType t | HsUnBangedType t deriving (Ord,Read, Eq, Show, Data, Typeable) type HsFunDeps v = [HsFunDep v] -- ..., a b-> c d,f->g h, ->i, ... type HsFunDep v = ([v],[v]) -- a b-> c d
export declare class Util {
    static repairFields: (fields: any, logger: any) => void;
    static strictFieldSplitter: (line: any) => any;
    static looseFieldSplitter: (line: any) => any;
    static selectParsingMode: (conll: any, log: any) => boolean;
    static selectFieldSplitter: (conll: any, log: any, strict: any) => (line: any) => any;
    static isComment: (line: any) => boolean;
    static hasSpace: (s: any) => boolean;
    static nullLogger: (message: any) => null;
    static isRtl: (s: any) => boolean;
    static rtlFix: (s: any) => any;
    static deepCopy: (o: any) => any;
    static isTatweel(first: any, second: any): boolean;
    static featureRegex: RegExp;
    static featureValueRegex: RegExp;
    static dependencyRegex: RegExp;
    static errors: string[];
    static reportError(error: any): void;
}
/** * Performs insertion sort on the input array. Algorithm modified from * https://www.geeksforgeeks.org/insertion-sort/. * @param arr The array to be sorted. */ private void insertionSort(int[] arr) { for (int i = 1; i < arr.length; i++) { int current = arr[i]; int j = i - 1; while (j >= 0 && current < arr[j]) { arr[j+1] = arr[j]; j--; } arr[j+1] = current; } }
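// A minimal usage sketch (not part of the original source): the class name SortDemo and the
// main method below are hypothetical, added only to show how the helper above could be
// exercised. The sort routine is repeated (as a static method) so the sketch is self-contained.
import java.util.Arrays;

public class SortDemo {
    public static void main(String[] args) {
        int[] data = {5, 2, 9, 1, 5, 6};
        insertionSort(data);
        System.out.println(Arrays.toString(data)); // prints [1, 2, 5, 5, 6, 9]
    }

    private static void insertionSort(int[] arr) {
        for (int i = 1; i < arr.length; i++) {
            int current = arr[i];
            int j = i - 1;
            while (j >= 0 && current < arr[j]) {
                arr[j + 1] = arr[j];
                j--;
            }
            arr[j + 1] = current;
        }
    }
}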
Biomarkers in Vestibular Schwannoma–Associated Hearing Loss

Vestibular schwannomas (VSs) are benign tumors composed of differentiated neoplastic Schwann cells. They can be classified into two groups: sporadic VS and those associated with neurofibromatosis type 2 (NF2). VSs usually grow slowly, initially causing unilateral sensorineural hearing loss (HL) and tinnitus. These tumors cause HL both due to compression of the auditory nerve or the labyrinthine artery and due to the secretion of different substances potentially toxic to the inner ear or the cochlear nerve. As more and more patients are diagnosed and need to be managed, the search for biomarkers associated with these tumors is more important than ever. Owing to an unknown toxic substance generated by the tumor, HL in VS may be linked to a high protein concentration in the perilymph. Previous studies have identified perilymph proteins correlated with tumor-associated HL, including μ-Crystallin (CRYM), low density lipoprotein receptor-related protein 2 (LRP2), immunoglobulin (Ig) γ-4 chain C region, Ig κ-chain C region, complement C3, and immunoglobulin heavy constant γ 3. In addition, the presence of specific subtypes of heat shock protein 70 has been suggested to be associated with preservation of residual hearing. It has recently been demonstrated that C-X-C chemokine receptor type 4 (CXCR4) is overexpressed in sporadic VS as well as in NF2 tumors and that hearing disability and CXCR4 expression may be correlated. Further, the genetic profile of VS and its relationship with poor hearing have also been studied, including DNA methylation, deregulated genes, growth factors, and NF2 gene mutations. Knowledge of the biomarkers associated with VS would be of significant value in maximizing hearing preservation outcomes in these patients.

INTRODUCTION

Vestibular schwannomas (VSs), previously termed acoustic neuromas, are non-malignant tumors composed of Schwann cells of the vestibulocochlear nerve (VIII cranial nerve), arising from either the internal auditory canal (IAC) or the cerebellopontine angle (CPA). They can be classified into two groups: sporadic VS and those associated with neurofibromatosis type 2 (NF2) (1). Most VSs are sporadic (90%), with a combined lifetime risk of 1:1,000 for developing a unilateral tumor (1,2). Nowadays, a higher rate of VS diagnosis has been described, owing to incidental findings on magnetic resonance imaging (MRI) performed for unrelated complaints (3). The mortality rate of VS ranges from 0.2 to 1% (4). Due to their anatomic position, patients suffer from progressive hearing loss (90%) and tinnitus (>60%), with facial numbness (12%) and facial paralysis (6%) occurring mainly among patients with larger lesions. Balance dysfunction is often present, although <20% of patients manifest with vertigo symptoms (1,2). VSs usually grow slowly, leading to unilateral sensorineural hearing loss (SNHL) and tinnitus, theoretically caused by compression of the auditory nerve (3) or by spasm or occlusion of the labyrinthine artery (5). Sudden SNHL may also be the clinical presentation in up to 22% of cases (6). These tumors may also cause hearing loss through the secretion of substances potentially toxic to the inner ear or the cochlear nerve (1,7,8). VS may be diagnosed either incidentally, on cranial MRI performed for unrelated complaints, or, more commonly, during the workup of unilateral hearing loss or tinnitus (1,9). Audiological tests, including audiometry and auditory brainstem response, are not reliable predictors of CPA pathology (10).
As most patients with CPA tumors have a comparable set of symptoms and audiometric results, the diagnosis relies mainly on imaging. The gold standard for the diagnosis of VS is MRI with gadolinium (1). In recent years, improvements in technology and greater accessibility of MRI have increased the number of diagnosed VSs (11). Approximately three out of four VSs exhibit no growth, leading to an observation strategy (wait and scan). Mean growth is about 2-4 mm/year in growing tumors (12). The hearing status, the growth rate, the subject's age, and the surgeon's experience are the main factors when deciding between surgery (destructive or conservative) and gamma knife therapy (13, 20). In recent years, gamma knife radiosurgery has become a more popular choice for subjects with growing small or medium tumors and useful hearing, while patients with large-size tumors usually undergo surgery (14). As more and more patients are diagnosed and need to be managed, the search for biomarkers associated with these tumors is more pressing than ever, in order to help choose between a "wait-and-scan" approach and surgery (15), aiming to reduce morbidity and improve hearing outcomes (4).

BIOLOGICAL MARKERS

Because biopsy sampling is not possible without destroying the organ, little is known about the cellular and molecular correlates of inner ear pathology (16). In recent years, considerable progress in proteomics has been enabled by modern technology. A shotgun proteomics approach enables the identification of proteins with high sensitivity (17). Mass spectrometric (MS) analyses have been successfully used for auditory proteomics and require a more concerted effort for biomarker identification (18). In addition, sophisticated methods for perilymph sampling (19) have revolutionized the field of otology, offering valuable biofluid samples for analysis.

Perilymph Proteome

Perilymph, an extracellular fluid of the inner ear, is found within the scala tympani and scala vestibuli of the cochlea. During an apoptotic or necrotic episode inside the inner ear, the proteins that are secreted can be found at high concentrations in this fluid (20). Knowledge of the perilymph proteome may shed some light on the mechanisms of tumor-associated hearing loss, which are mostly unknown to date (15). Stankovic et al. speculated that because of an unknown toxic substance generated by the VS, hearing loss in VS could be linked to a high protein concentration in the perilymph (7).

Perilymph Proteins Related to Hearing Loss

In 2011, Lysaght et al. identified 15 proteins from perilymph specimens (selected by comparing VS and cochlear implant samples) with differential expression and biological function. They suggested the use of this list in future research focused on distinguishing between better vs. worse hearing in patients with VS (see Table 1) (20). µ-Crystallin (CRYM), or nicotinamide adenine dinucleotide phosphate (NADPH)-regulated thyroid hormone binding protein, is located within the cytoplasm, where it promotes transcription of the thyroid hormone triiodothyronine (T3) (20,21). CRYM gene mutations cause autosomal-dominant hearing loss due to changes in the intracellular localization and the inability to bind T3, which may lead to altered K+ recycling (20,22). Low density lipoprotein receptor-related protein 2 (LRP2), or megalin, is a trans-membrane receptor protein found in certain epithelial cells such as those of the ear.
LRP2 can bind several ligands and is essential in the endocytosis of different elements such as sterols, lipoproteins, hormones, and vitamin binding proteins. Two well-known conditions, Donnai-Barrow and facio-oculo-acoustico-renal (FOAR) syndromes (23), both associated with SNHL, are the result of mutations in the LRP2 gene (20).

On the other hand, of the 91 commonly identified perilymph proteins of patients with VS on an individual level, Rasmussen et al. described four proteins that were significantly associated with tumor-related deafness: immunoglobulin (Ig) γ-4 chain C region, Ig κ-chain C region, complement C3, and immunoglobulin heavy constant γ 3. These 91 proteins were identified in 12 out of 15 samples used in the study (15), which was confirmed by analogy with data from previous MS research on perilymph (20,24). Moreover, alpha-2-HS-glycoprotein, a putative inflammatory and immunological intermediary in perilymph, has been suggested to be associated with deafness in patients with VSs. It was also discovered in samples from VS patients in 2017 (24), and although its concentration was not directly linked to the hearing outcomes, the authors attempted to further investigate this potential association. Rasmussen et al. hypothesized that VS may excrete alpha-2-HS-glycoprotein into the perilymph, where its inflammatory activity may lead to SNHL. Factors elicited from the VS may also affect the inner ear, inducing an upregulation of alpha-2-HS-glycoprotein within the perilymph (15).

Heat Shock Proteins and Hearing Loss

Heat shock proteins (HSPs) are stress proteins, which mediate cell survival under critical environmental conditions (25). Increased perilymph levels of 10 different subgroups of HSP were detected in subjects undergoing cochlear implantation who preserved hearing when compared with those without hearing preservation, and cochlear transcriptome data suggest that there is a baseline protective expression of HSP70 1A, 1B, 2, 4, 5, 6, 8, 9, and 12A mRNA (16).

HSP90 is the most important chaperone for cellular stress. It is involved in pathological processes, such as cancer development (26), and its increased expression as a stress responsive biomarker is present in multiple types of tissue inflammation (27). Recently, Schmitt et al. found that HSP90 was detected in the perilymph of half of the patients (n = 18) experiencing complete loss of residual hearing after cochlear implantation, whereas only one of the patients with preserved residual hearing showed HSP90 in perilymph. The upregulation of HSP90 in the perilymph may therefore induce the migration of macrophages and leukocytes, resulting in cochlear inflammation. However, despite the cellular changes observed, the authors could not detect a significant difference in HSP90 expression in patients with VS compared with patients without tumor (16).

On the other hand, HSP70 has been identified as an otoprotective agent that protects hair cells from stress-induced apoptosis (28). Interestingly, the presence of some subtypes of HSP70 seemed to correlate with preservation of residual hearing in cochlear implantation (16). It has been associated with an increase in the cell proliferation rate (29) and, according to Schmitt et al., could take part in the development of VS, despite the authors not finding any correlation between HSP70 expression and VS when comparing with subjects without tumor (16).
One explanation could be the low proliferation rate of these tumors; by contrast, medulloblastomas, fast-proliferating intracranial tumors with poor prognosis, showed an increased expression of HSP70 (29). According to these findings, more data on the regulation of these proteins and on perilymph proteomics are needed to demonstrate the role of these HSPs in patients with VS and hearing loss.

Increased Concentration of Perilymphatic Proteins and MRI Findings

Increased signal intensity of the fluid on three-dimensional fluid-attenuated inversion recovery (3D-FLAIR) MRI has been reported in various diseases, including SNHL and VS (30-33). An increased concentration of proteins in the perilymphatic space has been proposed to explain the enhanced cochlear signal on FLAIR images in subjects with VS (34-37). Kim et al. demonstrated a correlation between a higher cochlear signal on 3D-FLAIR images and hearing loss in patients with VS (38). The correlation was stronger in intrameatal tumors when compared to all subjects, and no correlation was found when considering only CPA tumors. Interestingly, the cochlear signal intensity on MRI was significantly higher in tumors confined to the IAC.

Endolymphatic Hydrops

Ipsilateral inner ear alterations, including endolymphatic hydrops (EH) and acidophilic-staining precipitate, have been observed in temporal bone histopathological studies from patients with VS (5,39). In recent years, intratympanic gadolinium injection has arisen as a new tool in the diagnosis of EH (40). Recently, delayed intravenous gadolinium-enhanced high-resolution MRI of the inner ear has been shown to provide resolution adequate for accurate detection of EH (41,42). In addition, heavily weighted T2 sequences are useful to evaluate the cochlear fluids in patients with VS. In patients with a tumor entirely blocking the IAC, the volume of the vestibular endolymphatic space can be determined with great certainty. Venkatasamy et al. described a difference in the perilymphatic signal on a T2-weighted steady state free precession acquisition at 3T, providing a new tool for differentiating schwannomas and meningiomas (43).

In patients with VS, a correlation between the endolymphatic space volume and the level of hearing loss has been described. Eliezer et al., using a 3D non-contrast T2 heavily weighted sequence at 3T, showed that the utricle volume was correlated with the patient's hearing loss in a series of 23 VSs. As most subjects with VS benefit from a wait-and-scan strategy, based on these MRI results, they suggest that a treatment leading to decreased EH could be administered to achieve better hearing outcomes (44). In a recent paper (45), a saccular dilatation on the ipsilateral side was demonstrated in 30% of VSs (53 out of 183 patients with typical VS). In this study, a 3D non-contrast high-resolution T2-weighted sequence was used.

Cytokines and Hearing Loss

A large number of cytokines are produced by tumors (46), including VSs. To maintain homeostasis in the cochlea, the cytokine balance is of vital importance (47). Like other substances, cytokines have been suggested to play a role in the degenerative changes of the labyrinth (5). However, few studies have examined the role of these proteins in VS (48,49). C-X-C motif chemokine ligand 12 (CXCL12), a ligand for CXCR4, directs the migration of metastatic cells to CXCL12-expressing organs. The Ras/Raf/MEK and the PI3K/Akt/mTOR pathways can be activated by CXCL12 binding to the CXCR4 receptor.
In a similar way, the loss of Merlin (52, 53), a tumor suppressor protein encoded by the NF2 gene in VS (54), leads to activation of these two primary pathways. Recently, Breun et al. have described that CXCR4 could play a role in the pathogenesis of both sporadic and NF2-associated VS. In their study, CXCR4 was overexpressed in these tumors, with no significant differences found between the two groups. CXCR4 mRNA expression increased with the degree of hearing loss when compared with the control group, although the results did not reach statistical significance (53). Although tumor extension may be related to hearing impairment in VS (55), there is usually a discrepancy between tumor size and hearing disability (9). A reason why hearing disability is not always correlated with tumor dimensions could be an invasive growth pattern caused by CXCR4 overexpression in certain tumors. Indeed, Breun et al. detected no correlation between CXCR4 expression and tumor extension; therefore, this chemokine receptor may be significant for tumor invasiveness, as exhibited by hearing disability (53).

DNA Methylation

Epigenetic alterations are found across many solid cancers, and although most efforts in VS have been limited to the controversial DNA methylation of the NF2 gene, other changes have been shown to play an important role in VS. Lassaletta et al. investigated the methylation status of 16 genes in 22 sporadic VSs and related it to clinical and radiological findings (57); notably, they observed a connection between aberrant TP73 methylation and deafness. A genome-wide methylation analysis in VS also showed a trend toward hypomethylation in several miRNAs and coding genes, including alternative transcripts, opening a window to possible therapeutic targets (64).

Deregulated Genes

In a study searching for associations between the molecular basis of VS and hearing loss (7), surgical specimens of these tumors from 13 patients were classified into two groups based on gene expression, one with good hearing (word recognition >70% and PTA ≤ 30 dB) and another with poor hearing. PEX5L, RAD54B, and the prostate-specific membrane antigen-like gene (PSMAL) had low expression in VS patients with poor hearing outcomes. In addition, the CEA-CAM7 gene and the carcinoembryonic antigen (CEA) protein were overexpressed in VS patients with poor hearing (8).

Growth Factors

The development of VS has also been associated with abnormal expression of growth factors. In a study of tumor samples from 11 subjects with VS, Lassaletta et al. described an inverse correlation between the expression of platelet-derived growth factor A and deafness (65). On the other hand, vascular endothelial growth factors (VEGFs) have been associated with the hearing status of patients with VSs (66,67). Most VSs express VEGF, and it has been suggested that this growth factor may play a role in both tumor growth and hearing status (68,69). Bevacizumab, a VEGF-neutralizing antibody, was used by Plotkin et al. to treat patients with VS, with an increase in hearing reported in four out of seven subjects treated with this drug (67). In recent years, bevacizumab has been reported to increase speech understanding and hearing quality in several NF2 patients (68,70).

NF2 Gene Mutation

NF2 gene mutations have been associated with the hearing level of patients with VS. In the study of Lassaletta et al., 51 cases undergoing surgery for VS were analyzed. Patients with NF2 gene mutations presented lower PTA thresholds compared with nonmutated cases (71). Selvanathan et al.
analyzed the impact of age of onset on the existence of several NF2-related symptoms, including hearing impairment or tinnitus, and found that there was a significantly younger age of onset of symptoms in patients with nonsense or frameshift mutations (i.e., mutations that produce protein truncation). They hypothesized that a younger age of onset of VS could explain the younger age of onset of hearing loss (and tinnitus) (72). Halliday et al. proposed a genetic severity score (1, tissue mosaic; 2A, mild classic; 2B, moderate classic; and 3, severe) in order to predict morbidity for NF2 subjects in certain dimensions, including hearing status (73). According to Emmanouil et al., stratifying subjects according to genetic severity could help provide better prognostication of hearing decline. In their study, they described a significant difference in hearing decline according to genetic severity: the median age for subjects rated as "severe" was 32 years, compared to a median of 80 years for patients classified as "tissue mosaic" (74).

CONCLUSION

So far, no reliable methods are able to predict the evolution of hearing loss in subjects with VS. Several markers such as perilymph proteins have been associated with tumor-associated hearing loss. Also, specific subtypes of HSP70 have been correlated with hearing outcomes. Cytokines produced by VS, especially CXCR4 expression, have been related to hearing impairment. DNA methylation, deregulated genes, growth factors, and NF2 gene mutation have also been related to hearing loss in subjects with VS. Most of these potential markers of hearing loss are not routinely available to the clinician. On the other hand, recent findings on imaging, especially delayed intravenous gadolinium-enhanced high-resolution MRI and 3D non-contrast heavily T2-weighted sequences, are promising in terms of therapeutic management of patients with VS showing signs of EH. The precise knowledge of biomarkers associated with hearing loss in patients with VS would be useful to minimize morbidity and to maximize hearing outcomes in these patients.

AUTHOR CONTRIBUTIONS

All authors listed have made a substantial, direct and intellectual contribution to the work, and approved it for publication.
/**
 * Test for adding (and then removing) a ServicioAdicional on a Contrato.
 *
 * @throws co.edu.uniandes.csw.vivienda.exceptions.BusinessLogicException
 */
@Test
public void addServicioAdicionalTest() throws BusinessLogicException {
    ContratoEntity entity = dataContrato.get(0);
    ViviendaEntity vivienda = dataVivienda.get(0);
    ServicioAdicionalEntity servicio = data.get(0);
    contratoServiciosLogic.addServicioAdicional(entity.getId(), servicio.getId(), vivienda.getId());
    Assert.assertEquals(0, contratoServiciosLogic.getServiciosAdicionales(entity.getId()).size());
    Assert.assertNull(contratoServiciosLogic.getServicioAdicional(entity.getId(), servicio.getId(), vivienda.getId()));
    contratoServiciosLogic.replaceServiciosAdicionales(entity.getId(), data);
    contratoServiciosLogic.removeServicioAdicional(entity.getId(), servicio.getId(), vivienda.getId());
    Assert.assertNull(contratoServiciosLogic.getServicioAdicional(entity.getId(), servicio.getId(), vivienda.getId()));
}