Kids these days. Just look at them. They've got those headphones in their ears and a gadget in every hand. They speak in tongues and text in code. They wear flip-flops everywhere. Does anyone really understand them? Only some people do, or so it seems. They are experts who have earned advanced degrees, dissected data, and published books. If the minds of college students are a maze, these specialists sell maps. Ask them to explain today's teenagers and twentysomethings. Invite them to your campus to describe this generation's traits. Just make sure that they don't all show up at the same time. They would argue, contradict one another, and leave you more baffled than ever.

Figuring out young people has always been a chore, but today it's also an industry. Colleges and corporations pay experts big bucks to help them understand the fresh-faced hordes that pack the nation's dorms and office buildings. As in any business, there's variety as well as competition. One speaker will describe youngsters as the brightest bunch of do-gooders in modern history. Another will call them self-involved knuckleheads. Depending on the prediction, this generation either will save the planet, one soup kitchen at a time, or crash-land on a lonely moon where nobody ever reads.

Everyone in higher education has pondered "the Millennials," people born between 1982 and 2004 or thereabouts (the years themselves are a subject of debate). Ever since the term went prime time about a decade ago, a zillion words have been written about who Millennials are, how they think, and why they always _______________. In short, Millennials talk is contagious.

Those who have shaped the nation's understanding of young people are not nearly as famous as their subjects, however. That's a shame, for these experts are colorful characters in their own right. Some are scholars, and some aren't. Many can recall watching the Beatles on a black-and-white television, and some grew up just before Barney the purple dinosaur arrived. Most can entertain an audience, though a few prefer to comb through statistics. In other words, they're all different. But just for fun, let's stereotype them as smart, successful, and full of unshakeable opinions. Although they have described one another's work as "wrong," "unempirical," and "wildly mistaken," these experts have something in common: They are products of their time. In an era when the wants of young consumers have become a fixation for colleges and businesses alike, these unlikely entrepreneurs have fed a world with a bottomless craving for labels.

•

For as long as human hair has turned gray, elders have looked at their successors and frowned. "Children nowadays are tyrants," goes an old quotation widely attributed to Socrates. "They contradict their parents, chatter before company, gobble their food, and tyrannize their teachers." In 1855 a professor at Davidson College described college students as "indulged, petted, and uncontrolled at home … with an undisciplined mind, and an uncultivated heart, yet with exalted ideas of personal dignity, and a scowling contempt for lawful authority." Albert Einstein opined that while classrooms are many, "the number of young people who genuinely thirst after truth and justice is small." Criticizing the young is inevitable, but so, too, is change.
In 2000, Neil Howe and William Strauss published Millennials Rising: The Next Great Generation, which cast turn-of-the-century teenagers as rule followers who were engaged, optimistic, and downright pleasant. The authors assigned them seven "core traits": special, sheltered, confident, team-oriented, conventional, pressured, and achieving. These conclusions were based on a hodgepodge of anecdotes, statistics, and pop-culture references, as well as on surveys of teachers and about 600 high-school seniors in Fairfax County, Va., which in 2007 became the first county in the nation to have a median household income of more than $100,000, about twice the national average.

The authors made a sweeping prediction. "This generation is going to rebel by behaving not worse, but better," they wrote of Millennials, a term they had coined. "Their life mission will not be to tear down old institutions that don't work, but to build up new ones that do." Such thinking promised to give educators, not to mention tens of millions of parents, a warm feeling. Who wouldn't want to hear that their kids are special?

Mr. Howe and Mr. Strauss were unlikely messengers of this "good-news revolution." After all, they were not social scientists; they were Washington wonks. At the time, Mr. Howe was an economic-policy consultant and an adviser to the Concord Coalition, a nonpartisan group that supports deficit reduction and Social Security. Mr. Strauss, who had worked in President Ford's White House and as a staffer in the U.S. Senate, was the director of the Capitol Steps, a satirical singing group. The two shared political views, Ivy League degrees, and a love of history. The latter had inspired them to write their first book, Generations: The History of America's Future, 1584 to 2069.

Although Millennials Rising would fill the bookshelves of college presidents, deans, and professors, Generations laid the foundation for the authors' writings on students. Published in 1991, the elaborate chronicle contained a bold, almost mystical theory: that the nation's entire history had revolved in a predictable cycle of spiritual awakenings and secular crises. In turn, each generation fit one of four distinct archetypes (prophet, nomad, hero, and artist), which have repeated continuously in the same sequence. As surely as autumn follows summer, the Millennials would become the next "hero" generation, destined for coming-of-age triumphs, intent on taking action and building community, just like the "G.I. Generation" decades before.

This retelling of history impressed many reviewers, as well as some influential people. Former Vice President Al Gore—who graduated from Harvard University with Mr. Strauss—called Generations the most stimulating book on American history he'd ever read. He even sent a copy to each member of Congress. Yet Publishers Weekly called the book "as woolly as a newspaper horoscope." And in academe, scholars chuckled. Nothing like this had ever been written with a straight face.

Arthur E. Levine, a former president of the Teachers College of Columbia University and co-author of When Hope and Fear Collide: A Portrait of Today's College Student, remains unimpressed. "Generational images are stereotypes," says Mr. Levine, now president of the Woodrow Wilson Foundation. "There are some differences that stand out, but there are more similarities between students of the past and the present. But if you wrote a book saying that, how interesting would that book be?"
Generations established its creators as pioneers in a burgeoning field. They soon became media darlings, best-selling authors, and busy speakers. Generations would popularize the idea that people in a particular age group share distinct personae and values by virtue of occupying the same "place" in time as they grow up. In turn, this would affirm the notion that Millennials were a riddle waiting to be solved.

•

These days people all over the world seek Mr. Howe's advice about Millennials. Mellow and soft-spoken, he listens for rhythms in history. Meandering through a conversation, he can relate the generational significance of the RMS Lusitania to that of F. Scott Fitzgerald, Animal House's Bluto Blutarsky, and Louisiana's Bobby Jindal, the first U.S. governor of Indian descent—all in five minutes. Close your eyes, and Mr. Howe, 57, might be a philosophical ex-hippie, riffing on how the universe fits together. In fact, he's a well-connected consultant who runs a bustling business, LifeCourse Associates, from the ground floor of his spacious home in Great Falls, Va., just outside Washington.

Mr. Strauss died of cancer in 2007, and Mr. Howe now works side by side with three employees, the oldest of whom is 28. Soon the company plans to publish Millennials in the Workplace, which follows several other books, such as Millennials Go to College, Millennials & K-12 Schools, and Millennials and the Pop Culture.

On a recent Monday afternoon, Mr. Howe's telephone is ringing. Evidence of several half-finished projects covers his desk. Soon he must submit a draft of an article about changing moods throughout American history, which the Harvard Business Review plans to publish. He must prepare for several trips, including a visit to the United Nations, where he will discuss "global aging and demographic security." On his computer screen are rainbows of charts, on crime, drinking habits, and pregnancy rates among young people. A deliveryman arrives with packages. "The market is so vast," Mr. Howe says. "There are so many projects that I don't have time to do." As if to prove this, he tells his colleagues that he's thinking of canceling a contract with a client—a state chapter of the National Guard—that's haggling over some small details. "They're all bureaucrats!" he says.

Each year Mr. Howe gives about 60 speeches, often followed by customized workshops. He speaks at colleges, elementary schools, and corporations, and he charges between $5,000 and $14,000, plus travel expenses. He has consulted with various colleges, including Arizona State University, Dartmouth College, Georgetown University, and the University of Texas. His recommendations have influenced the mailings admissions offices send, the extracurricular activities colleges offer, the way professors teach, and even the food students eat. LifeCourse Associates has a partnership with Chartwells, a food-service company that has redefined campus cafeterias and menus at many colleges (think small-group seating and made-to-order meals). Mr. Howe has also consulted with some of the globe's biggest companies, including Nike, Hewlett-Packard, and Kraft Nabisco. Recently an investment firm in Prague hired him to do a demographic forecast. Soon the U.S. Army's lucrative advertising contract will go up for grabs, and Mr. Howe is advising an agency that will compete for it. A while back, the Ford Motor Company hired him to answer a question: What kind of car would Millennials want to buy?
He advised the company to consider the power of "hero myths"—Hercules, Superman, and the boys of Iwo Jima—in its marketing. "Millennials want to do big things," he wrote in a report for Ford. "Even when driving back and forth to community college in a Focus … their future will be anything but mundane."

Those are the grand terms in which Mr. Howe thinks, even when he's just sitting here, shooting the breeze, with his brown walking shoes propped on a desk. When this thirtysomething reporter makes an offhand observation, he remarks, "That's such an Xer thing to say." He means Generation X, whose members hail from 1961 to 1981, according to his timeline. Because they tend to be skeptical, hardened pragmatists, he says, they have trouble seeing what's so great about today's kids. For emphasis, he pauses, then says of Millennials, "They are so special." And who is Mr. Howe? "A typical boomer," he says. There is such a thing, he insists.

That historical events shape people of a given generation in specific ways is a pillar of his philosophy. The Vietnam War was one event that shaped him. As a student at the University of California at San Diego, he watched a national debate boil. In 1970, when he was a freshman, a fellow student named George Winne Jr. set himself ablaze on the campus while protesting the war and died the next day. Mr. Howe later transferred to Berkeley, where tie-dyed curtains hung in fraternity windows and students bagged classes to hold teach-ins. Everywhere, he saw a cultural rift between young and old. "There was a hysteria in the air," he says. "A sense that we were headed for the apocalypse."

A similar feeling swept the nation in September 2001, just as the first Millennials were settling into college campuses. The day after the terrorist attacks on New York and the Pentagon, Mr. Howe appeared on CNN to discuss historical cycles, a subject he and Mr. Strauss had described in a 1997 book called The Fourth Turning, which described four repeating "saecula," or seasons, of history—awakenings, unravelings, crises, and highs. Did the smoldering twin towers portend a crisis era? The day after the interview, The Fourth Turning appeared in Amazon's top 20.

Weeks later, Mr. Howe and Mr. Strauss flew to San Antonio to give a keynote speech at the National Association for College Admission Counseling's annual conference. Attendees stood and sang "God Bless America." In the convention center, as on college campuses and town squares, people perceived that a line had just been drawn in the sand of history. Soon Newsweek published a cover story called "Generation 9-11," which described the unprecedented attacks as a "defining moment" for high-school and college students. The aftermath made many people more receptive to the message of Millennials Rising, Mr. Howe believes. "Whenever there's a change in social mood," he says, "it makes thinking about generations clearer."

•

As cheery as a bouquet of roses, the good news about Millennials intrigued many people who recruit, serve, and teach college students. Administrators and professors had long stereotyped the students walking through the campus gates, but as the 21st century began, higher education was evolving in ways that made the time ripe for a new and tidy explanation of contemporary undergraduates. For one, colleges turned to marketing as never before. Among selective colleges, the decade brought intense competition for applicants.
Even among less-selective institutions, recruitment meant expanding into new territories and reaching out to more-diverse students. Early-acceptance programs ballooned. Parents morphed into co-purchasers. Deans embraced holistic evaluations, attempting to peer deeper into hearts and noggins. Sophisticated statistical models predicted who would enroll—and at what price. Meanwhile, technology changed the application process. The Web was the Wild West of the enrollment profession, and with it came "stealth applicants" and much uncertainty. Many admissions officials found themselves under pressure to meet ambitious enrollment goals while protecting the bottom line. Understanding the whys of students' attitudes and behaviors was more crucial than ever.

Amid this complexity, the Millennials message was not only comforting but empowering. "It tickled our ears," says Palmer H. Muntz, director of admissions and an enrollment-management consultant at Lincoln Christian University, in Illinois. "It packaged today's youth in a way that we really wanted to see them. It gave us a formula for understanding them."

Over time, however, Mr. Muntz started to doubt the formula. Each year he visited many rural and urban high schools. He did not meet many students who had sweated their grades or taken standardized tests multiple times. Millennials Go to College, published in 2003, described an "intense new emphasis on preparation and planning" among students who were competing in a college-application "arms race," who thought about their futures in "five- or 10-year time horizons," and who perceived the high achievements of their peers as "a constant source of personal pressure." Yet Mr. Muntz met few students who seemed to have these "pressured" and "achieving" traits. Generally, he saw what he had always seen—sharp kids, average kids, and kids with weaknesses, all with hopes and worries, floating day to day through teenage life.

He wondered if the sample of students in Millennials Rising had corrupted the findings. After all, most students do not apply to top-20 colleges. And so Mr. Muntz confronted a fact: To accept generational thinking, one must find a way to swallow two large assumptions. That tens of millions of people, born over about 20 years, are fundamentally different from people of other age groups—and that those tens of millions of people are similar to each other in meaningful ways.

This idea is the underpinning of Mr. Howe's conclusion that each generation turns a historical corner, breaking sharply with the previous generation's traits and values. Several researchers have blasted this theory of "nonlinear" social change. Some cite data from the Cooperative Institutional Research Program at the University of California at Los Angeles, which has conducted an annual survey of college freshmen since 1966. The survey, which provides a longitudinal view of trends, suggests that many changes among students happen gradually, not abruptly. Moreover, the survey complicates the Millennials theory in numerous ways. According to a recent report by the program, "American Freshmen: Forty Year Trends," today's students are not significantly busier, more confident, or more positive than they were in recent decades. Though more say they want to contribute to society, more also cite "being well off financially" as a goal. They are only slightly less likely to say they want to go to college to get a job, make money, or go to graduate school.
They are not any more or less cooperative or competitive, nor do they seem more interested in developing a meaningful philosophy of life. Not long ago, Mr. Muntz attended a presentation about those findings. He has since decided to stop thinking in generational terms. "You can't just take one stamp and put it on this generation," says Mr. Muntz. "But it sure was nice when I thought I could."

In other corners of academe, many people have wrestled with similar thoughts. Among those who serve students, Millennials theories seemed to offer crucial clues during a time when the profession was changing rapidly. Over the last decade, the umbrella of student affairs widened to cover a vast array of programs and services. More and more staff members became co-educators and crisis managers. "Student engagement" turned into a full-time mission amid growing concerns about retention. Mental-health services multiplied. Colleges built walls for students to climb and heated pools for them to swim in. They opened parent offices, started parent orientations, and published parent newsletters. Studying students went hand in hand with the growing interest in measurements of "learning outcomes" outside the classroom.

"We really had to know what our students were thinking, feeling, and learning in everything we were doing," says Richard H. Mullendore, a former vice president for student affairs at the University of Georgia. He credits Mr. Howe and Mr. Strauss for several keen observations about Millennials, especially their tendency to enjoy close relationships with their parents. But he reached a conclusion similar to Mr. Muntz's. He need look no farther than the town of Athens, one of the poorest in the state, where high schools have much lower graduation rates than most of those that send students to Georgia. "A large number of young people have been totally overlooked in this literature," Mr. Mullendore says. "Their battles have not been similar to anything those other students have faced."

Some student-affairs professionals struggled to square Millennials Rising with what they saw on their campuses each day. A decade ago, Gwendolyn Jordan Dungy, executive director of Naspa-Student Affairs Administrators in Higher Education, recognized the inherent appeal of the Millennials framework. "People in student affairs have this philosophy of believing in the basic goodness of young people," she says. Yet she believes that the book is longer on generalizations than on truth. After all, a competing narrative about students had developed. In it, more of them were anxious and depressed, and more were as self-centered and demanding as diners in a crowded restaurant. "We heard that this was the next great generation," Ms. Dungy says, "but many people just weren't seeing them that way."

Jeannine C. Lalonde was skeptical from the start. She read Millennials Rising when she was an assistant hall director at Boston College. "To be frank, I just laughed," says Ms. Lalonde, now senior assistant dean of admissions at the University of Virginia. "It was really singular in its approach." As a residence-life staffer, she believed her job was not only to support students, but also to challenge them. Yet some students, who seemed to see themselves as customers, did not want those challenges—they wanted problems solved for them. "I was seeing many of these positive things, but I was also confused by all the entitlement I was seeing," Ms. Lalonde says. "Where was that in the book?"

•

Jean M. Twenge asked the same question when she read Millennials Rising. After all, she had spent years in library stacks, studying generational differences. While working toward a Ph.D. in personality psychology at the University of Michigan at Ann Arbor, she discovered questionnaires that academic psychologists had designed to measure personality traits and attitudes. The questionnaires had been used widely since the 1950s, and most had been completed by college students and schoolchildren. That allowed her to compare changes in young people over time.

Like Mr. Howe and Mr. Strauss, Ms. Twenge concluded that when people were born shapes them more than (or at least as much as) where they were born or who their parents were. Yet she did not buy the idea that changes in students came suddenly. "Changes are linear; they happen over time," she says. In Millennials Rising, Ms. Twenge did not find sufficient evidence to compare this generation with previous ones. Moreover, her findings did not come with a big smiley face.

In 2006, Ms. Twenge described her research in her first book, Generation Me: Why Today's Young Americans Are More Confident, Assertive, Entitled—and More Miserable Than Ever Before. "I see no evidence that today's young people feel much attachment to duty or to group cohesion," Ms. Twenge wrote. "Young people have been consistently taught to put their own needs first and to focus on feeling good about themselves."

Ms. Twenge defined Generation Me as anyone born in the 1970s through the 1990s. Born in 1971, the author thus included herself in this generation. Many children of this era, she wrote, had been raised in a culture of constant praise, in which everyone got trophies and parents filled their children's ears with assurances that they were unique, talented, and special. Call it too much of a good thing. Among other outcomes, she found, the "self-esteem movement" had led to a rise in narcissism. She had analyzed some 15,000 students' responses to a questionnaire called the Narcissistic Personality Inventory between 1987 and 2006. The inventory contained statements like, "I think I am a special person," "I can live my life any way I want to," and "If I ruled the world, it would be a better place." Over time, the percentage who scored high had risen substantially.

Mr. Howe and Mr. Strauss had labeled Millennials as "special," which they described as a positive trait, a feeling of self-worth instilled by doting parents. Generation Me cast this same feeling in a darker light. Ms. Twenge even suggested that the rise in volunteering Mr. Howe and Mr. Strauss had described might not indicate an increase in altruism. After all, students knew that doing community service helped them fulfill requirements for the National Honor Society and perhaps get into college.

Over time, Ms. Twenge's research created a buzz in higher education, even prompting a mention on Late Night With Conan O'Brien. Before long, Mr. Howe and Mr. Strauss pounced on her findings, questioning her research and her motivations. In an opinion piece published in The Christian Science Monitor, they wrote, "No message … could be so perverse and contrary to fact as the accusation of selfishness." Mr. Howe has described Ms. Twenge as having a "Manichaean" view of the world. He has accused her of mistaking self-confidence for narcissism. "You can tell young people that they're not special and see if that works," he says.
Colleges and companies alike, he believes, can "leverage" this feeling of specialness among young people and turn it into good things.

Ms. Twenge has stopped short of calling students selfish, but her message has prompted many questions. For one, who is this woman who upset the Millennials' apple cart? As it turns out, Ms. Twenge is an engaging teacher who draws bell curves on napkins and has no time for nonsense. An associate professor of psychology at San Diego State University, she insists that she likes her students, at least most of them. The ones who ask if they can take final exams early so they can go to Las Vegas, or who grub for grades and demand extra credit? Not so much.

Ms. Twenge's research has given her insights into her personal life. About 10 years ago, she went over the narcissism inventory with a man she was dating. He scored in the 99th percentile, which, she says, confirmed problems in their relationship. After their breakup, she vowed not to end up with the same kind of person. So on her fourth date with another man, she asked him to complete the same questionnaire. He scored low, and they eventually married. She calls the inventory "the boyfriend test" and has given copies to students who want to find out if they're dating a narcissist.

On a Tuesday in August, Ms. Twenge is teaching a course on personality. She arrives a few minutes late because she had to do a radio interview about public perceptions of generations. Today's the last class before the final exam, and students have many questions. One asks if she can get extra points because she listened to Ms. Twenge's interview on the way to class. The answer is no.

While reviewing the semester's lessons, Ms. Twenge walks over to tug on a student's sleeve to demonstrate what a clingy, anxious person might be like in a relationship. Later she introduces some of her research on narcissism. She shows a slide of Whitney Houston from way back and asks if any students remember the singer's 1986 hit "Greatest Love of All." The sight of Ms. Houston's hairdo draws laughter, but Ms. Twenge is serious about one of the song's lyrics—"learning to love yourself is the greatest love of all." In the 1950s, she explains, this very idea would have been beyond weird, but these days, it's normal—and unhealthy.

She draws a distinction between self-confidence and narcissism, the latter being associated with a lack of empathy and with aggression after insults. Ms. Twenge then shows her students a list of statements, such as "Be yourself," "You are special," and "You can be anything you want to be." Then she asks a question: "These phrases are individualistic, but are they good advice?" "No!" several students say. "Good," Ms. Twenge replies with a grin. "I've taught you well."

"Are you just being defensively pessimistic?" one student asks. "Maybe," Ms. Twenge replies. "Defensive pessimism" is a psychological strategy in which one considers worst-case scenarios and braces for the worst, to avoid disappointment. It's a fair description of her, not to mention of her book, says Ms. Twenge, who describes Generation Me as a warning about young people, not an indictment of them. "These kids didn't raise themselves," she says.

Ms. Twenge tries to practice what she preaches. She does not ask her young daughter, Kate, too many open-ended questions, like "What would you like for dinner?" She does not tell her that she's special, nor does she buy her clothes that say things like "Little Princess." Ms. Twenge does, however, take her along on speaking trips. This year she has given about 15 presentations, for which she charges between $1,000 and $5,000. Recently she has spoken at PepsiCo, McGraw-Hill, and the Florida Association of Blood Banks, where she encouraged attendees to appeal to young people's sense that they can make a personal difference by donating their blood—"Make it about them."

During her presentations, she asks her audience to sing along to a song that's become popular in preschools. It's a song she dislikes. Sung to the tune of "Frère Jacques," it goes, "I am special, I am special, look at me, look at me. …" Teenagers who grow up with this chorus in their heads have a venue for self-absorption that their parents never imagined. It's called the Internet.

Ms. Twenge argues that Facebook and other social media have fed a bonfire of vanity among young people. On the other side of the country, a scholar named Mark Bauerlein has reached a similar conclusion. Mr. Bauerlein, an English professor at Emory University, in Atlanta, is the author of The Dumbest Generation: How the Digital Age Stupefies Young Americans and Jeopardizes Our Future. The sub-subtitle turns an old generational rallying cry on its head: "Don't trust anyone under 30."

Mr. Bauerlein (who writes for The Chronicle Review's Brainstorm blog) concerns himself with only one generational trait, what he calls the "intellectual condition." Today's students, though blessed with limitless high-tech wonders, have squandered these tools, using computers mostly for their amusement—chatting, networking, and posting online updates about themselves, Mr. Bauerlein argues. Teenagers, he writes, "are drowning in their own ignorance and aliteracy." To tout the technological skills of today's students, he continues, "feeds the generational consciousness that keeps kids from growing up."

Mr. Bauerlein, 50, directed the survey reported in "Reading at Risk: A Survey of Literary Reading in America," published by the National Endowment for the Arts in 2004. It found a sharp decline in reading among all age groups between 1982 and 2002, and the largest drop was among people between 18 and 24. In The Dumbest Generation, he cited numerous other studies that affirmed that today's students were reading less and absorbing fewer facts than their predecessors had. His own experiences in the classroom also informed his conclusions. He describes most of his students as highly professional; he encounters fewer and fewer who seem interested in culture, in wrestling with ideas. "Many of them have a mercenary attitude about the university, and they regard humanities as an interruption," he says. In this, he foresees cultural doom.

Not long ago, Mr. Bauerlein faced off against Mr. Howe in Washington during a debate sponsored by the American Enterprise Institute. He thinks Mr. Howe has many good insights, but he sees limits to them. "There's an investment in being enthusiastic—maybe too strong an investment in that," Mr. Bauerlein says. Like Ms. Twenge, Mr. Bauerlein describes his book as a labor of love, not scorn. "It's a provocation with a generous aim," he says. "In the raising and rearing of young people, a critical voice is essential. They have to hear someone knock them down, and if they fight back, that's good. It's part of the health of a culture from generation to generation."

Several technophiles in academe have cast Mr. Bauerlein as a Luddite who clings to a single (and dated) definition of literacy. He invites them to his classes.
"They've never sat across from a freshman who comes in and says, 'I don't want to read any novel.' It's a lot easier to be sanguine about students if you've never encountered that." The professor acknowledges that the book's title is incendiary. As his agent assured him, bold proclamations help get authors on the radar, though his conclusions are more nuanced than the cover might suggest. Still, when he told his wife that he planned to dedicate the book to her, she said no thanks. She knew that a book that called roughly 100 million people dumb would make him a public enemy. Sure enough, since the book came out last year, Mr. Bauerlein has received scores of angry e-mail messages, many of them from teenagers. Recently, a 13-year-old wrote that he was "great, big hypocrite." Another began: "Dear sir, you are an ass." A curious thing has happened, though. Mr. Bauerlein, who says that he has responded to each message he has received, has become engaged in several positive, continuing dialogues with some of the parents and students who wrote to him. It's a testament to the possibilities of the very technology he has questioned. Advertisement As the Millennial decade rolled on, Mr. Bauerlein and other professors encountered waves of teenagers who had grown up using search engines and instant messaging, and they wondered how those experiences might affect the way students learned. Many students were indeed behaving more like fussy consumers. It was not clear how far their demands would go for personalization, satisfaction, and instant gratification. This uncertainty led to a larger question about supposed generational traits. Were educators to see them as something to indulge—or to cure? Many instructors who weighed this question with regard to technology have tried to meet students where they are, by incorporating Facebook, Twitter, and all kinds of multimedia platforms into their teaching. Siva Vaidhyanathan has no problem with such innovation per se, but he questions the notion that regardless of what they are teaching, instructors must do all they can to please Millennials by embracing technological portals like some kind of magical device. "There's this expectation that your No. 1 job is to pander to this exotic alien consumer," says Mr. Vaidhyanathan, an associate professor of media studies at the University of Virginia. "At that point, you cease being a teacher and you are simply selling yourself to an audience that might not be interested in buying." Mr. Vaidhyanathan has read Millennials Rising. He says Mr. Howe and Mr. Strauss might as well have written a book on how to reach out to Geminis. "If you work in higher education, the first thing you should do is throw out all their books," says Mr. Vaidhyanathan. "Generational thinking is just a benign form of bigotry, in which you flatten out diversity. This is debilitating to the job of trying to work with young people." • Over the last decade, commentators have tended to slap the Millennial label on white, affluent teenagers who accomplish great things as they grow up in the suburbs, who confront anxiety when applying to super-selective colleges, and who multitask with ease as their helicopter parents hover reassuringly above them. The label tends not to appear in renderings of teenagers who happen to be minorities, or poor, or who have never won a spelling bee. Nor does the term often refer to students from big cities and small towns that are nothing like Fairfax County, Va. Or who lack technological know-how. 
Or who struggle to complete high school. Or who never even consider college. Or who commit crimes. Or who suffer from too little parental support. Or who drop out of college. Aren't they Millennials, too?

Many pieces of the Millennial puzzle are missing, says Fred A. Bonner II. He's one of several researchers who have examined the experiences of nonwhite students in hopes of broadening the understanding of the generation. Mr. Bonner, an associate professor in the department of educational administration and human resources at Texas A&M University, has described how the prevailing generational descriptions focus narrowly on the experiences of majority populations. He believes the Howe/Strauss model is useful, but limited.

"Many other kinds of students have not come from backgrounds where they felt safe, sheltered, and secure, or from schools that recognized their gifts and talents," says Mr. Bonner, who is 40. During class discussions, he has listened to black and Hispanic students describe how some or all of the so-called seven core traits did not apply to them. They often say the "special" trait, in particular, is unrecognizable. "It's not that many diverse parents don't want to treat their kids as special," he says, "but they often don't have the social and cultural capital, the time and resources, to do that."

Mr. Bonner is a co-editor of a forthcoming book, tentatively titled Diverse Millennials in College, which Stylus Publishing plans to publish in 2010. In recent years, Mr. Bonner has also done some generational consulting of his own. So far that work has been limited to speaking engagements at two- and four-year colleges. Generally his audiences understand that the experiences of a black Millennial from, say, Houston may differ greatly from the experiences of a white student from the Houston suburbs.

After all, people who work in higher education see plenty of reminders that the when of a student's birth is but one factor in that student's development. Where a student is born, who a student's parents are, and how much money they have—all these things influence that student's educational opportunities, scores on standardized tests, and expectations of college.

"Some folks are using this as a template and a cookbook," Mr. Bonner says of Millennial descriptions. "It makes it very difficult to see and understand variations because people who don't fit the recipe may be viewed as outliers. That anesthetizes nuances."

At the same time, generalizations are often as necessary as lifeboats; they allow people to navigate a sea of complexity. This is the very reason that many people in higher education have found Mr. Howe so useful. The list of those who swear by his work is long. One is Lisa A. Rossbacher, president of Southern Polytechnic State University, in Georgia. After hearing Mr. Howe at a conference a year ago, she invited him to come talk to faculty and staff members on her campus recently.

The university has made many changes that incorporate his insights into Millennials. To acknowledge their comfort with technology, it offers more hybrid courses that combine classroom and online learning. To satisfy their wish for more feedback, it encourages instructors to assign more group work and more short, graded assignments. To involve their parents, it provides them with cellphone numbers for the vice presidents for student affairs and for enrollment. Those are all changes that the university probably would have made anyway, Ms.
Rossbacher suspects, only without knowing exactly why. "We can see the trends, but Neil gives us the context to help us understand why we're seeing the things we are seeing," she says. "He speaks as an outside authority, as a prophet not in his own land."

Among other things, Mr. Howe is a gifted storyteller. He describes generational membership as an underappreciated part of people's stories, but concedes that it's just one part. So perhaps his conclusions about the generations are best thought of as medieval maps, with their rough approximations of a land's boundaries and rivers. They suggest general features, though they do not give you all the specifics you would need to get somewhere. Like inside a particular student's head.

These days Mr. Howe's talking about the next birth cohort, born 2005-25, which he calls the "Homeland Generation." According to his framework, those Americans will fit an "artist" archetype. "Such generations tend to be remembered for their quiet years of rising adulthood," he has written, "and their midlife years of flexible, consensus-building leadership."

One day Mr. Howe hopes to start a nonprofit group devoted to the study of generational differences. After all, historians may never fully embrace it. "Academia gives this no home despite the fact that managers of for-profits and nonprofits find it so valuable," Mr. Howe says. "Why is it that I constantly get calls? This is a demand-driven business."

It's a business that begets business. In the Millennials industry, plenty of people owe their success—not to mention their talking points—to Mr. Howe. If you're a career counselor on a college campus, odds are good that many of your students go on to work for companies that have paid experts to come and explain how to make young workers happy and retain them.

Perhaps the expert was Mary Crane, who was once a lobbyist, then an assistant chef at the White House, before becoming a full-time generational consultant for Fortune 500 companies and law firms. Recently she was featured on a 60 Minutes segment about Millennials. Or perhaps it was Eric Greenberg, a philanthropist who found the time to write a book called Generation We: How Millennial Youth Are Taking Over America and Changing Our World Forever in between running Beautifull Inc., a health-food company, and endowing genetics laboratories.

Lynne Lancaster, a management consultant and "cultural translator," is a co-founder and partner of BridgeWorks LLC, which offers companies advice on bridging generational divides among employees. So does Kanna Hudson, 26, a former academic counselor who works for a consulting company called Futurist.com. Another consultant, Scott Degraffenreid, a former forensic accountant, wrote Understanding the Millennial Mind: A Menace or Amazing? and patented the term "crash-test geniuses" to refer to young people's willingness to "reboot" and learn from failures, even if it means walking away from their jobs. Eric Chester, a former teacher, runs a consulting business called Generation Why; his Web site describes young people as "weird-looking and impossible to understand."

Such descriptions are reminders that most renderings of Millennials are done by older people, looking through the windows of their own experiences. So in any discussion of generations, it's only fair to give a Millennial the last word. This is a tricky exercise, however. After all, it's easy to find one who agrees—or disagrees—with the idea that students are team-oriented, or narcissistic, or anything.
And many have given generational labels no more consideration than the ingredients of their breakfast cereal.

Susanna Wolff, however, has thought a lot about the differences between younger and older people, at least in terms of their mastery of technology, a theme she mines for laughs. Ms. Wolff, a senior at Columbia University, compiles a weekly feature called "Parents Just Don't Understand," for collegehumor.com, a popular Web site. Submissions come from all over the country, about mothers who don't understand how e-mail works and fathers who ask about joining "MyFace."

Besides technology, however, Ms. Wolff believes that people her age have few common experiences to bind them together the way Millennial theories describe. When she hears the term "Millennial," she thinks of marketing executives huddled around tables, looking at pie charts and figuring out how to sell stuff. "When every commercial is marketed to you," she says, "it feeds the idea that everything revolves around you."

Ms. Wolff sees many things that complicate generational generalizations. Take her own family. Although she's close to her parents, they call her more than she calls them. In fact, she talks most often to her grandmother—who recently sought her advice on starting a blog.

Although she is wary of the many predictions about her generation, Ms. Wolff, 21, offers a guess about what people her age will be like in, say, 20 years. "We'll be really good at the technology we're familiar with and really bad about learning anything new," she says. "And we'll complain about the young people."
export interface RelayNode {
  id?: string | null;
  __typename?: string | null;
}

export interface RelayConnection {
  __typename?: string;
  edges?: RelayEdge[];
  pageInfo?: RelayPageInfo;
}

export interface RelayEdge {
  __typename?: string | null;
  cursor?: string | null;
  node?: RelayNode | null;
}

export interface RelayPageInfo {
  __typename?: string;
  startCursor?: string;
  endCursor?: string;
}

export interface RelayQueryData {
  [s: string]: RelayNode | RelayNode[];
}
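The interfaces above only describe shapes. As a quick, hypothetical illustration of how they compose, the sketch below walks a Relay-style connection and collects its nodes; the getNodes helper and the usersConnection sample object are invented for this example and are not part of the original module.

// Hypothetical helper: collect the nodes of a Relay-style connection.
// RelayConnection, RelayEdge, and RelayNode refer to the interfaces above.
function getNodes(connection: RelayConnection): RelayNode[] {
  return (connection.edges ?? [])
    .map((edge: RelayEdge) => edge.node)
    .filter((node): node is RelayNode => node != null);
}

// Example data shaped like a Relay "users" connection (invented for this sketch).
const usersConnection: RelayConnection = {
  __typename: "UserConnection",
  edges: [
    { cursor: "cursor-1", node: { id: "VXNlcjox", __typename: "User" } },
    { cursor: "cursor-2", node: { id: "VXNlcjoy", __typename: "User" } },
  ],
  pageInfo: { startCursor: "cursor-1", endCursor: "cursor-2" },
};

console.log(getNodes(usersConnection).map((n) => n.id)); // ["VXNlcjox", "VXNlcjoy"]

Because every field in these interfaces is optional, a consumer has to tolerate a missing edges array and null node values, which is why the helper filters with a type guard.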
# repository: plum-umd/pasket
from functools import partial
import logging

import lib.const as C
import lib.visit as v

from .. import util
from .. import sample
from ..meta import class_lookup
from ..meta.template import Template
from ..meta.clazz import Clazz
from ..meta.method import Method
from ..meta.field import Field
from ..meta.statement import Statement, to_statements
from ..meta.expression import Expression, to_expression


class Singleton(object):

  @classmethod
  def find_singleton(cls):
    return lambda anno: anno.by_name(C.A.SINGLE)

  def __init__(self, smpls):
    self._smpls = smpls

  @v.on("node")
  def visit(self, node):
    """
    This is the generic method to initialize the dynamic dispatcher
    """

  ## @Singleton
  ## class C { ... }
  ## =>
  ## class C { ...
  ##   private C() { } // private constructor
  ##   private static C instance; // singleton holder
  ##   public static C getInstance() { // retriever
  ##     if (instance == null) { instance = new C(); }
  ##     return instance;
  ##   }
  ## }
  def rewrite(self, cls):
    cname = cls.name
    logging.debug("reducing: @{} class {} {{ ... }}".format(C.A.SINGLE, cname))

    # make the constructor(s) *private*
    inits = cls.inits
    if not inits: inits = [cls.add_default_init()]
    for init in inits:
      if C.mod.PR not in init.mods: init.mods.append(C.mod.PR)
      # rip off *public* modifier, if exists
      try: init.mods.remove(C.mod.PB)
      except ValueError: pass

    Fname = cname
    fname = cname.lower()
    for mtd in cls.mtds:
      mname = mtd.name
      if mname.startswith("get") and mname.endswith(cname):
        Fname = mname.replace("get", '')
        fname = Fname[:1].lower() + Fname[1:]
        break

    # add a static field to hold the singleton instance
    holder = cls.fld_by_name(fname)
    if not holder:
      holder = Field(clazz=cls, mods=[C.mod.PR, C.mod.ST], typ=cname, name=fname)
      logging.debug("adding field {0}.{1} of type {0}".format(cname, fname))
      cls.add_flds([holder])

    # retriever
    mname = sample.find_getter(self._smpls, [cname], Fname)
    mtd_g = cls.mtd_by_sig(mname)
    if not mtd_g:
      mtd_g = Method(clazz=cls, mods=[C.mod.PB, C.mod.ST], typ=cname, name=mname)
      logging.debug("adding method {}.{}".format(cname, mname))
      cls.add_mtds([mtd_g])
    body = u"""
      if ({fname} == null) {{
        {fname} = new {cname}();
      }}
      return {fname};
    """.format(**locals())
    logging.debug("filling getter {}.{}".format(cname, mname))
    mtd_g.body = to_statements(mtd_g, body)

    # to replace annotation @Singleton(Class) in expressions
    setattr(cls, "singleton", holder)
    setattr(holder, "getter", mtd_g)

  @v.when(Template)
  def visit(self, node):
    for cls in node.classes:
      if util.exists(Singleton.find_singleton(), cls.annos):
        self.rewrite(cls)

  @v.when(Clazz)
  def visit(self, node): pass

  @v.when(Field)
  def visit(self, node): pass

  @v.when(Method)
  def visit(self, node): pass

  @v.when(Statement)
  def visit(self, node): return [node]

  ## @Singleton(C) => C.getInstance()
  @v.when(Expression)
  def visit(self, node):
    if node.kind == C.E.ANNO:
      _anno = node.anno
      if _anno.name == C.A.SINGLE:
        logging.debug("reducing: {}".format(str(_anno)))
        cls_s = class_lookup(_anno.cid)
        mtd_g = cls_s.singleton.getter
        return to_expression(u"{}.{}()".format(cls_s.name, mtd_g.name))
    return node
<filename>server/mysite/studygroups/views.py from django.shortcuts import render from django.http import JsonResponse from django.core import serializers from students.models import Student from classes.models import Course, CRN from studygroups.models import Studygroup import json def gen_bounds(): """ For use with the scheduling algorithm. Create time bounds that study groups are allowed to occur between. """ bounds = {} for i in range(1, 6): init_bounds = [] # Initial off-limits bounds will be 12AM-9AM and 9PM-12AM init_bounds.append((0000, 900)) init_bounds.append((2100, 2400)) bounds[str(i)] = init_bounds return bounds def delete_studygroups(request): """ Remove all study groups from Mongo. """ Studygroup.objects.all().delete() res = {'res': 'OK'} return JsonResponse(res, safe=False) def create_studygroups(request): """ The scheduling algorithm. Generates all valid study groups and stores them in Mongo. """ # Get all courses that all students are taken all_students = Student.objects.all() class_set = set() for student in all_students: if student.classes == "": continue classes = json.loads(student.classes) for crn in classes['crns']: match = CRN.objects.get(crn=crn) class_set.add(match.name) group_id = 0 # Now have a list of all classes we need study groups for for course_name in class_set: match = Course.objects.get(name=course_name) bounds = gen_bounds() sections = json.loads(match.sections) for section in sections: periods = section['periods'] for period in periods: day = str(period['day']) bound = (int(period['start']), int(period['end'])) bounds[day].append(bound) # Now have all bounds that we are not # allowed to schedule study groups in for day in range(1, 6): for time in range(0000, 2500, 100): valid = True for bound in bounds[str(day)]: if time >= bound[0] and time <= bound[1]: valid = False break # This time doesn't violate any off-limits bounds, # so make a session. if valid: participants = [] participants = json.dumps(participants) session = Studygroup( number=group_id, course_name=course_name, time=time, participants=participants, day=day) session.save() group_id = group_id + 1 res = {'res': 'OK'} return JsonResponse(res, safe=False) def create_studygroup(request): """ Create a single study group (for use with a professor creating his/her own studygroup) """ body = json.loads(request.body) course_name = body['course_name'] day = body['day'] time = body['time'] # Get index for next study group to be created max_num = int(Studygroup.objects.all().latest('number').number) number = max_num + 1 participants = [] new_group = Studygroup( number=number, course_name=course_name, time=time, participants=participants, day=day) new_group.save() res = {'res': 'OK'} return JsonResponse(res, safe=False) def get_studygroups(request): """ Retrieve all the study groups for a specific course """ body = json.loads(request.body) course_name = body['course_name'] res = Studygroup.objects.filter(course_name=course_name) res = serializers.serialize("json", res) return JsonResponse(res, safe=False) def join_studygroup(request): """ Place a student into the list of participants for a particular study group. 
""" body = json.loads(request.body) group_id = body['group_id'] token = body['token'] rcs = Student.objects.get(token=token).rcs group = Studygroup.objects.get(id=group_id) participants = json.loads(group.participants) participants.append(rcs) group.participants = json.dumps(participants) group.save() res = {'res': 'OK'} return JsonResponse(res, safe=False) def leave_studygroup(request): """ Remove a student from the list of participants of a study group. """ body = json.loads(request.body) group_id = body['id'] token = body['token'] rcs = Student.objects.get(token=token).rcs group = Studygroup.objects.get(id=group_id) participants = json.loads(group.participants) participants.remove(rcs) group.participants = json.dumps(participants) group.save() res = {'res': 'OK'} return JsonResponse(res, safe=False) def get_user_groups(request): """ Get the list of study groups that a user is a member of. """ body = json.loads(request.body) token = body['token'] rcs = Student.objects.get(token=token).rcs all_groups = Studygroup.objects.all() res = [] for group in all_groups: participants = json.loads(group.participants) if rcs in participants: res.append(serializers.serialize("json", [group])) res = {'groups': res} print(res) return JsonResponse(res, safe=False)
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;

/**
 * The collection of all modifications done to a graph (grouped by the rules that did them,
 * divided into two groups: Insertions, Deletions)
 *
 * @author Jakub Daniel
 */
public class GraphModificationImpl implements GraphModification {
    private static final long serialVersionUID = 1L;

    private Map<DataNormalizationRule, RuleModification> modifications = new HashMap<DataNormalizationRule, RuleModification>();
    private String graphName;

    @Override
    public void addInsertion(DataNormalizationRule rule, String s, String p, String o) {
        if (modifications.containsKey(rule)) {
            /**
             * Extend an existing modification done by a certain rule
             */
            modifications.get(rule).addInsertion(s, p, o);
        } else {
            /**
             * Add new modification that corresponds to a certain rule
             */
            RuleModification subModifications = new RuleModificationImpl();
            subModifications.addInsertion(s, p, o);
            modifications.put(rule, subModifications);
        }
    }

    @Override
    public void addDeletion(DataNormalizationRule rule, String s, String p, String o) {
        if (modifications.containsKey(rule)) {
            /**
             * Extend an existing modification done by a certain rule
             */
            modifications.get(rule).addDeletion(s, p, o);
        } else {
            /**
             * Add new modification that corresponds to a certain rule
             */
            RuleModification subModifications = new RuleModificationImpl();
            subModifications.addDeletion(s, p, o);
            modifications.put(rule, subModifications);
        }
    }

    /**
     * @return iterator over rules that have a record of modification (either insertion or deletion or both) in the structure
     */
    @Override
    public Iterator<DataNormalizationRule> getRuleIterator() {
        return modifications.keySet().iterator();
    }

    /**
     * @param rule The rule to find modifications for (usually obtained through getRuleIterator dereferencing (.next()))
     * @return modifications (insertions and deletions) done by the rule
     */
    @Override
    public RuleModification getModificationsByRule(DataNormalizationRule rule) {
        return modifications.get(rule);
    }

    /**
     * @return name of the graph the modifications were applied to
     */
    @Override
    public String getGraphName() {
        return graphName;
    }

    /**
     * @param graphName the name of the modified graph; binds this modification to a concrete graph
     */
    @Override
    public void setGraphName(String graphName) {
        this.graphName = graphName;
    }
}
package com.thinkaurelius.titan.graphdb.berkeleyje;

import com.thinkaurelius.titan.BerkeleyJeStorageSetup;
import com.thinkaurelius.titan.graphdb.SpeedComparisonPerformanceTest;
import com.thinkaurelius.titan.graphdb.configuration.GraphDatabaseConfiguration;
import org.apache.commons.configuration.Configuration;

/**
 * (c) <NAME> (<EMAIL>)
 */
public class BerkeleyJESpeedComparisonPerformanceTest extends SpeedComparisonPerformanceTest {

    public BerkeleyJESpeedComparisonPerformanceTest() {
        super(getConfiguration());
    }

    private static final Configuration getConfiguration() {
        Configuration config = BerkeleyJeStorageSetup.getBerkeleyJEGraphConfiguration();
        config.subset(GraphDatabaseConfiguration.STORAGE_NAMESPACE)
                .addProperty(GraphDatabaseConfiguration.STORAGE_TRANSACTIONAL_KEY, false);
        return config;
    }
}
<reponame>fakecoinbase/elastosslashElastos.ELA // Copyright (c) 2017-2020 The Elastos Foundation // Use of this source code is governed by an MIT // license that can be found in the LICENSE file. // package state import ( "bytes" "encoding/hex" "fmt" "io" "math" "sync" "github.com/elastos/Elastos.ELA/common" "github.com/elastos/Elastos.ELA/common/config" "github.com/elastos/Elastos.ELA/core/contract" "github.com/elastos/Elastos.ELA/core/types" "github.com/elastos/Elastos.ELA/core/types/outputpayload" "github.com/elastos/Elastos.ELA/core/types/payload" "github.com/elastos/Elastos.ELA/utils" ) // ProducerState represents the state of a producer. type ProducerState byte const ( // Pending indicates the producer is just registered and didn't get 6 // confirmations yet. Pending ProducerState = iota // Active indicates the producer is registered and confirmed by more than // 6 blocks. Active // Inactive indicates the producer has been inactivated for a period which shall // be punished and will be activated later. Inactive // Canceled indicates the producer was canceled. Canceled // Illegal indicates the producer was found to break the consensus. Illegal // Returned indicates the producer has canceled and deposit returned. Returned ) // CacheVotesSize indicate the size to cache votes information. const CacheVotesSize = 6 // producerStateStrings is a array of producer states back to their constant // names for pretty printing. var producerStateStrings = []string{"Pending", "Active", "Inactive", "Canceled", "Illegal", "Returned"} func (ps ProducerState) String() string { if int(ps) < len(producerStateStrings) { return producerStateStrings[ps] } return fmt.Sprintf("ProducerState-%d", ps) } // Producer holds a producer's info. It provides read only methods to access // producer's info. type Producer struct { info payload.ProducerInfo state ProducerState registerHeight uint32 cancelHeight uint32 inactiveCountingHeight uint32 inactiveSince uint32 activateRequestHeight uint32 illegalHeight uint32 penalty common.Fixed64 votes common.Fixed64 depositAmount common.Fixed64 depositHash common.Uint168 } // Info returns a copy of the origin registered producer info. func (p *Producer) Info() payload.ProducerInfo { return p.info } // State returns the producer's state, can be pending, active or canceled. func (p *Producer) State() ProducerState { return p.state } // RegisterHeight returns the height when the producer was registered. func (p *Producer) RegisterHeight() uint32 { return p.registerHeight } // CancelHeight returns the height when the producer was canceled. func (p *Producer) CancelHeight() uint32 { return p.cancelHeight } // Votes returns the votes of the producer. 
func (p *Producer) Votes() common.Fixed64 { return p.votes } func (p *Producer) NodePublicKey() []byte { return p.info.NodePublicKey } func (p *Producer) OwnerPublicKey() []byte { return p.info.OwnerPublicKey } func (p *Producer) Penalty() common.Fixed64 { return p.penalty } func (p *Producer) InactiveSince() uint32 { return p.inactiveSince } func (p *Producer) IllegalHeight() uint32 { return p.illegalHeight } func (p *Producer) ActivateRequestHeight() uint32 { return p.activateRequestHeight } func (p *Producer) DepositAmount() common.Fixed64 { return p.depositAmount } func (p *Producer) Serialize(w io.Writer) error { if err := p.info.Serialize(w, payload.ProducerInfoVersion); err != nil { return err } if err := common.WriteUint8(w, uint8(p.state)); err != nil { return err } if err := common.WriteUint32(w, p.registerHeight); err != nil { return err } if err := common.WriteUint32(w, p.cancelHeight); err != nil { return err } if err := common.WriteUint32(w, p.inactiveCountingHeight); err != nil { return err } if err := common.WriteUint32(w, p.inactiveSince); err != nil { return err } if err := common.WriteUint32(w, p.activateRequestHeight); err != nil { return err } if err := common.WriteUint32(w, p.illegalHeight); err != nil { return err } if err := common.WriteUint64(w, uint64(p.penalty)); err != nil { return err } if err := common.WriteUint64(w, uint64(p.votes)); err != nil { return err } return p.depositHash.Serialize(w) } func (p *Producer) Deserialize(r io.Reader) (err error) { if err = p.info.Deserialize(r, payload.ProducerInfoVersion); err != nil { return } var state uint8 if state, err = common.ReadUint8(r); err != nil { return } p.state = ProducerState(state) if p.registerHeight, err = common.ReadUint32(r); err != nil { return } if p.cancelHeight, err = common.ReadUint32(r); err != nil { return } if p.inactiveCountingHeight, err = common.ReadUint32(r); err != nil { return } if p.inactiveSince, err = common.ReadUint32(r); err != nil { return } if p.activateRequestHeight, err = common.ReadUint32(r); err != nil { return } if p.illegalHeight, err = common.ReadUint32(r); err != nil { return } var penalty uint64 if penalty, err = common.ReadUint64(r); err != nil { return } p.penalty = common.Fixed64(penalty) var votes uint64 if votes, err = common.ReadUint64(r); err != nil { return } p.votes = common.Fixed64(votes) return p.depositHash.Deserialize(r) } const ( // maxHistoryCapacity indicates the maximum capacity of change history. maxHistoryCapacity = 10 // ActivateDuration is about how long we should activate from pending or // inactive state ActivateDuration = 6 ) // State is a memory database storing DPOS producers state, like pending // producers active producers and their votes. type State struct { *StateKeyFrame // getArbiters defines methods about get current arbiters getArbiters func() [][]byte getProducerDepositAmount func(programHash common.Uint168) ( common.Fixed64, error) getTxReference func(tx *types.Transaction) ( map[*types.Input]types.Output, error) chainParams *config.Params mtx sync.RWMutex history *utils.History } // getProducerKey returns the producer's owner public key string, whether the // given public key is the producer's node public key or owner public key. func (s *State) getProducerKey(publicKey []byte) string { key := hex.EncodeToString(publicKey) // If the given public key is node public key, get the producer's owner // public key. 
if owner, ok := s.NodeOwnerKeys[key]; ok { return owner } return key } // getProducer returns a producer with the producer's node public key or it's // owner public key, if no matches return nil. func (s *State) getProducer(publicKey []byte) *Producer { key := s.getProducerKey(publicKey) return s.getProducerByOwnerPublicKey(key) } // getProducer returns a producer with the producer's owner public key, // if no matches return nil. func (s *State) getProducerByOwnerPublicKey(key string) *Producer { if producer, ok := s.ActivityProducers[key]; ok { return producer } if producer, ok := s.CanceledProducers[key]; ok { return producer } if producer, ok := s.IllegalProducers[key]; ok { return producer } if producer, ok := s.PendingProducers[key]; ok { return producer } if producer, ok := s.InactiveProducers[key]; ok { return producer } return nil } // updateProducerInfo updates the producer's info with value compare, any change // will be updated. func (s *State) updateProducerInfo(origin *payload.ProducerInfo, update *payload.ProducerInfo) { producer := s.getProducer(origin.OwnerPublicKey) // compare and update node nickname. if origin.NickName != update.NickName { delete(s.Nicknames, origin.NickName) s.Nicknames[update.NickName] = struct{}{} } // compare and update node public key, we only query pending and active node // because canceled and illegal node can not be updated. if !bytes.Equal(origin.NodePublicKey, update.NodePublicKey) { oldKey := hex.EncodeToString(origin.NodePublicKey) newKey := hex.EncodeToString(update.NodePublicKey) delete(s.NodeOwnerKeys, oldKey) s.NodeOwnerKeys[newKey] = hex.EncodeToString(origin.OwnerPublicKey) } producer.info = *update } func (s *State) ExistProducerByDepositHash(programHash common.Uint168) bool { s.mtx.RLock() defer s.mtx.RUnlock() _, ok := s.ProducerDepositMap[programHash] return ok } // GetProducer returns a producer with the producer's node public key or it's // owner public key including canceled and illegal producers. If no matches // return nil. func (s *State) GetProducer(publicKey []byte) *Producer { s.mtx.RLock() producer := s.getProducer(publicKey) s.mtx.RUnlock() return producer } // GetProducers returns all producers including pending and active producers (no // canceled and illegal producers). func (s *State) GetProducers() []*Producer { s.mtx.RLock() producers := make([]*Producer, 0, len(s.PendingProducers)+ len(s.ActivityProducers)) for _, producer := range s.PendingProducers { producers = append(producers, producer) } for _, producer := range s.ActivityProducers { producers = append(producers, producer) } s.mtx.RUnlock() return producers } // GetAllProducers returns all producers including pending, active, canceled, illegal and inactive producers. func (s *State) GetAllProducers() []*Producer { s.mtx.RLock() defer s.mtx.RUnlock() return s.getAllProducers() } func (s *State) getAllProducers() []*Producer { producers := make([]*Producer, 0, len(s.PendingProducers)+ len(s.ActivityProducers)) for _, producer := range s.PendingProducers { producers = append(producers, producer) } for _, producer := range s.ActivityProducers { producers = append(producers, producer) } for _, producer := range s.InactiveProducers { producers = append(producers, producer) } for _, producer := range s.CanceledProducers { producers = append(producers, producer) } for _, producer := range s.IllegalProducers { producers = append(producers, producer) } return producers } // GetPendingProducers returns all producers that in pending state. 
func (s *State) GetPendingProducers() []*Producer { s.mtx.RLock() producers := make([]*Producer, 0, len(s.PendingProducers)) for _, producer := range s.PendingProducers { producers = append(producers, producer) } s.mtx.RUnlock() return producers } // GetActiveProducers returns all producers that in active state. func (s *State) GetActiveProducers() []*Producer { s.mtx.RLock() producers := make([]*Producer, 0, len(s.ActivityProducers)) for _, producer := range s.ActivityProducers { producers = append(producers, producer) } s.mtx.RUnlock() return producers } // GetVotedProducers returns all producers that in active state with votes. func (s *State) GetVotedProducers() []*Producer { s.mtx.RLock() producers := make([]*Producer, 0, len(s.ActivityProducers)) for _, producer := range s.ActivityProducers { // limit arbiters can only be producers who have votes if producer.Votes() > 0 { producers = append(producers, producer) } } s.mtx.RUnlock() return producers } // GetCanceledProducers returns all producers that in cancel state. func (s *State) GetCanceledProducers() []*Producer { s.mtx.RLock() producers := make([]*Producer, 0, len(s.CanceledProducers)) for _, producer := range s.CanceledProducers { producers = append(producers, producer) } s.mtx.RUnlock() return producers } // GetPendingCanceledProducers returns all producers that in pending canceled state. func (s *State) GetPendingCanceledProducers() []*Producer { s.mtx.RLock() producers := make([]*Producer, 0, len(s.PendingCanceledProducers)) for _, producer := range s.PendingCanceledProducers { producers = append(producers, producer) } s.mtx.RUnlock() return producers } // GetReturnedDepositProducers returns producers that in returned deposit state. func (s *State) GetReturnedDepositProducers() []*Producer { s.mtx.RLock() producers := make([]*Producer, 0, len(s.CanceledProducers)) for _, producer := range s.CanceledProducers { if producer.state == Returned { producers = append(producers, producer) } } s.mtx.RUnlock() return producers } // GetIllegalProducers returns all illegal producers. func (s *State) GetIllegalProducers() []*Producer { s.mtx.RLock() producers := make([]*Producer, 0, len(s.IllegalProducers)) for _, producer := range s.IllegalProducers { producers = append(producers, producer) } s.mtx.RUnlock() return producers } // GetIllegalProducers returns all inactive producers. func (s *State) GetInactiveProducers() []*Producer { s.mtx.RLock() producers := make([]*Producer, 0, len(s.InactiveProducers)) for _, producer := range s.InactiveProducers { producers = append(producers, producer) } s.mtx.RUnlock() return producers } // IsPendingProducer returns if a producer is in pending list according to the // public key. func (s *State) IsPendingProducer(publicKey []byte) bool { s.mtx.RLock() _, ok := s.PendingProducers[s.getProducerKey(publicKey)] s.mtx.RUnlock() return ok } // IsActiveProducer returns if a producer is in activate list according to the // public key. func (s *State) IsActiveProducer(publicKey []byte) bool { s.mtx.RLock() _, ok := s.ActivityProducers[s.getProducerKey(publicKey)] s.mtx.RUnlock() return ok } // IsInactiveProducer returns if a producer is in inactivate list according to // the public key. 
func (s *State) IsInactiveProducer(publicKey []byte) bool { s.mtx.RLock() ok := s.isInactiveProducer(publicKey) s.mtx.RUnlock() return ok } func (s *State) isInactiveProducer(publicKey []byte) bool { _, ok := s.InactiveProducers[s.getProducerKey(publicKey)] return ok } // IsCanceledProducer returns if a producer is in canceled list according to the // public key. func (s *State) IsCanceledProducer(publicKey []byte) bool { s.mtx.RLock() _, ok := s.CanceledProducers[s.getProducerKey(publicKey)] s.mtx.RUnlock() return ok } // IsIllegalProducer returns if a producer is in illegal list according to the // public key. func (s *State) IsIllegalProducer(publicKey []byte) bool { s.mtx.RLock() _, ok := s.IllegalProducers[s.getProducerKey(publicKey)] s.mtx.RUnlock() return ok } // IsAbleToRecoverFromInactiveMode returns if most of the emergency arbiters have activated // and able to work again func (s *State) IsAbleToRecoverFromInactiveMode() bool { activatedNum := 0 s.mtx.RLock() totalNum := len(s.EmergencyInactiveArbiters) for k := range s.EmergencyInactiveArbiters { if _, ok := s.InactiveProducers[k]; !ok { activatedNum++ } } s.mtx.RUnlock() return totalNum == 0 || float64(activatedNum)/float64(totalNum) > MajoritySignRatioNumerator/MajoritySignRatioDenominator } // IsAbleToRecoverFromInactiveMode returns if there are enough active arbiters func (s *State) IsAbleToRecoverFromUnderstaffedState() bool { s.mtx.RLock() result := len(s.ActivityProducers) >= s.chainParams.GeneralArbiters s.mtx.RUnlock() return result } // LeaveEmergency will reset EmergencyInactiveArbiters variable func (s *State) LeaveEmergency(history *utils.History, height uint32) { s.mtx.Lock() oriArbiters := s.EmergencyInactiveArbiters history.Append(height, func() { s.EmergencyInactiveArbiters = map[string]struct{}{} }, func() { s.EmergencyInactiveArbiters = oriArbiters }) s.mtx.Unlock() } // NicknameExists returns if a nickname is exists. func (s *State) NicknameExists(nickname string) bool { s.mtx.RLock() _, ok := s.Nicknames[nickname] s.mtx.RUnlock() return ok } // ProducerExists returns if a producer is exists by it's node public key or // owner public key. func (s *State) ProducerExists(publicKey []byte) bool { s.mtx.RLock() producer := s.getProducer(publicKey) s.mtx.RUnlock() return producer != nil } // ProducerExists returns if a producer is exists by it's owner public key. func (s *State) ProducerOwnerPublicKeyExists(publicKey []byte) bool { s.mtx.RLock() key := hex.EncodeToString(publicKey) producer := s.getProducerByOwnerPublicKey(key) s.mtx.RUnlock() return producer != nil } // ProducerExists returns if a producer is exists by it's node public key. func (s *State) ProducerNodePublicKeyExists(publicKey []byte) bool { s.mtx.RLock() key := hex.EncodeToString(publicKey) _, ok := s.NodeOwnerKeys[key] s.mtx.RUnlock() return ok } // SpecialTxExists returns if a special tx (typically means illegal and // inactive tx) is exists by it's hash func (s *State) SpecialTxExists(tx *types.Transaction) bool { illegalData, ok := tx.Payload.(payload.DPOSIllegalData) if !ok { log.Error("special tx payload cast failed, tx:", tx.Hash()) return false } hash := illegalData.Hash() s.mtx.RLock() _, ok = s.SpecialTxHashes[hash] s.mtx.RUnlock() return ok } // IsDPOSTransaction returns if a transaction will change the producers and // votes state. func (s *State) IsDPOSTransaction(tx *types.Transaction) bool { switch tx.TxType { // Transactions will changes the producers state. 
case types.RegisterProducer, types.UpdateProducer, types.CancelProducer, types.ActivateProducer, types.IllegalProposalEvidence, types.IllegalVoteEvidence, types.IllegalBlockEvidence, types.IllegalSidechainEvidence, types.InactiveArbitrators, types.ReturnDepositCoin: return true // Transactions will change the producer votes state. case types.TransferAsset: if tx.Version >= types.TxVersion09 { // Votes to producers. for _, output := range tx.Outputs { if output.Type != types.OTVote { continue } p, _ := output.Payload.(*outputpayload.VoteOutput) if p.Version == outputpayload.VoteProducerVersion { return true } else { for _, content := range p.Contents { if content.VoteType == outputpayload.Delegate { return true } } } } } } s.mtx.RLock() defer s.mtx.RUnlock() // Cancel votes. for _, input := range tx.Inputs { _, ok := s.Votes[input.ReferKey()] if ok { return true } } return false } // ProcessBlock takes a block and it's confirm to update producers state and // votes accordingly. func (s *State) ProcessBlock(block *types.Block, confirm *payload.Confirm) { s.mtx.Lock() defer s.mtx.Unlock() s.tryInitProducerAssetAmounts(block.Height) s.processTransactions(block.Transactions, block.Height) s.ProcessVoteStatisticsBlock(block) if confirm != nil { s.countArbitratorsInactivity(block.Height, confirm) } // Commit changes here if no errors found. s.history.Commit(block.Height) } // ProcessVoteStatisticsBlock deal with block with vote statistics error. func (s *State) ProcessVoteStatisticsBlock(block *types.Block) { if block.Height == s.chainParams.VoteStatisticsHeight { s.processTransactions(block.Transactions, block.Height) } } // processTransactions takes the transactions and the height when they have been // packed into a block. Then loop through the transactions to update producers // state and votes according to transactions content. func (s *State) processTransactions(txs []*types.Transaction, height uint32) { for _, tx := range txs { s.processTransaction(tx, height) } // Check if any pending producers has got 6 confirms, set them to activate. activateProducerFromPending := func(key string, producer *Producer) { s.history.Append(height, func() { producer.state = Active s.ActivityProducers[key] = producer delete(s.PendingProducers, key) }, func() { producer.state = Pending s.PendingProducers[key] = producer delete(s.ActivityProducers, key) }) } // Check if any pending inactive producers has got 6 confirms, // then set them to activate. activateProducerFromInactive := func(key string, producer *Producer) { s.history.Append(height, func() { producer.state = Active s.ActivityProducers[key] = producer delete(s.InactiveProducers, key) }, func() { producer.state = Inactive s.InactiveProducers[key] = producer delete(s.ActivityProducers, key) }) } // Check if any pending illegal producers has got 6 confirms, // then set them to activate. 
activateProducerFromIllegal := func(key string, producer *Producer) { s.history.Append(height, func() { producer.state = Active s.ActivityProducers[key] = producer delete(s.IllegalProducers, key) }, func() { producer.state = Illegal s.IllegalProducers[key] = producer delete(s.ActivityProducers, key) }) } if len(s.PendingProducers) > 0 { for key, producer := range s.PendingProducers { if height-producer.registerHeight+1 >= ActivateDuration { activateProducerFromPending(key, producer) } } } if len(s.InactiveProducers) > 0 { for key, producer := range s.InactiveProducers { if height > producer.activateRequestHeight && height-producer.activateRequestHeight+1 >= ActivateDuration { activateProducerFromInactive(key, producer) } } } if height >= s.chainParams.EnableActivateIllegalHeight && len(s.IllegalProducers) > 0 { for key, producer := range s.IllegalProducers { if height > producer.activateRequestHeight && height-producer.activateRequestHeight+1 >= ActivateDuration { activateProducerFromIllegal(key, producer) } } } } // processTransaction take a transaction and the height it has been packed into // a block, then update producers state and votes according to the transaction // content. func (s *State) processTransaction(tx *types.Transaction, height uint32) { switch tx.TxType { case types.RegisterProducer: s.registerProducer(tx, height) case types.UpdateProducer: s.updateProducer(tx.Payload.(*payload.ProducerInfo), height) case types.CancelProducer: s.cancelProducer(tx.Payload.(*payload.ProcessProducer), height) case types.ActivateProducer: s.activateProducer(tx.Payload.(*payload.ActivateProducer), height) case types.TransferAsset: s.processVotes(tx, height) s.processDeposit(tx, height) case types.IllegalProposalEvidence, types.IllegalVoteEvidence, types.IllegalBlockEvidence, types.IllegalSidechainEvidence: s.processIllegalEvidence(tx.Payload, height) s.recordSpecialTx(tx, height) case types.InactiveArbitrators: s.processEmergencyInactiveArbitrators( tx.Payload.(*payload.InactiveArbitrators), height) s.recordSpecialTx(tx, height) case types.ReturnDepositCoin: s.returnDeposit(tx, height) s.processDeposit(tx, height) case types.UpdateVersion: s.updateVersion(tx, height) } s.processCancelVotes(tx, height) } // registerProducer handles the register producer transaction. func (s *State) registerProducer(tx *types.Transaction, height uint32) { info := tx.Payload.(*payload.ProducerInfo) nickname := info.NickName nodeKey := hex.EncodeToString(info.NodePublicKey) ownerKey := hex.EncodeToString(info.OwnerPublicKey) // ignore error here because this converting process has been ensured in // the context check already programHash, _ := contract.PublicKeyToDepositProgramHash(info. 
OwnerPublicKey) amount := common.Fixed64(0) depositOutputs := make(map[string]common.Fixed64) for i, output := range tx.Outputs { if output.ProgramHash.IsEqual(*programHash) { amount += output.Value op := types.NewOutPoint(tx.Hash(), uint16(i)) depositOutputs[op.ReferKey()] = output.Value } } producer := Producer{ info: *info, registerHeight: height, votes: 0, inactiveSince: 0, inactiveCountingHeight: 0, penalty: common.Fixed64(0), activateRequestHeight: math.MaxUint32, depositAmount: amount, depositHash: *programHash, } s.history.Append(height, func() { s.Nicknames[nickname] = struct{}{} s.NodeOwnerKeys[nodeKey] = ownerKey s.PendingProducers[ownerKey] = &producer s.ProducerDepositMap[*programHash] = struct{}{} for k, v := range depositOutputs { s.DepositOutputs[k] = v } }, func() { delete(s.Nicknames, nickname) delete(s.NodeOwnerKeys, nodeKey) delete(s.PendingProducers, ownerKey) delete(s.ProducerDepositMap, *programHash) for k := range depositOutputs { delete(s.DepositOutputs, k) } }) } // updateProducer handles the update producer transaction. func (s *State) updateProducer(info *payload.ProducerInfo, height uint32) { producer := s.getProducer(info.OwnerPublicKey) producerInfo := producer.info s.history.Append(height, func() { s.updateProducerInfo(&producerInfo, info) }, func() { s.updateProducerInfo(info, &producerInfo) }) } // cancelProducer handles the cancel producer transaction. func (s *State) cancelProducer(payload *payload.ProcessProducer, height uint32) { key := hex.EncodeToString(payload.OwnerPublicKey) producer := s.getProducer(payload.OwnerPublicKey) isPending := producer.state == Pending s.history.Append(height, func() { producer.state = Canceled producer.cancelHeight = height s.CanceledProducers[key] = producer if isPending { delete(s.PendingProducers, key) s.PendingCanceledProducers[key] = producer } else { delete(s.ActivityProducers, key) } delete(s.Nicknames, producer.info.NickName) }, func() { producer.cancelHeight = 0 delete(s.CanceledProducers, key) if isPending { producer.state = Pending s.PendingProducers[key] = producer delete(s.PendingCanceledProducers, key) } else { producer.state = Active s.ActivityProducers[key] = producer } s.Nicknames[producer.info.NickName] = struct{}{} }) } // activateProducer handles the activate producer transaction. func (s *State) activateProducer(p *payload.ActivateProducer, height uint32) { producer := s.getProducer(p.NodePublicKey) if producer == nil { log.Error("can't find producer to activate") return } s.history.Append(height, func() { producer.activateRequestHeight = height }, func() { producer.activateRequestHeight = math.MaxUint32 }) } // processVotes takes a transaction, if the transaction including any vote // inputs or outputs, validate and update producers votes. func (s *State) processVotes(tx *types.Transaction, height uint32) { if tx.Version >= types.TxVersion09 { // Votes to producers. 
for i, output := range tx.Outputs { if output.Type != types.OTVote { continue } p, _ := output.Payload.(*outputpayload.VoteOutput) if p.Version == outputpayload.VoteProducerVersion { op := types.NewOutPoint(tx.Hash(), uint16(i)) s.history.Append(height, func() { s.Votes[op.ReferKey()] = struct{}{} }, func() { delete(s.Votes, op.ReferKey()) }) s.processVoteOutput(output, height) } else { var exist bool for _, content := range p.Contents { if content.VoteType == outputpayload.Delegate { exist = true break } } if exist { op := types.NewOutPoint(tx.Hash(), uint16(i)) s.history.Append(height, func() { s.Votes[op.ReferKey()] = struct{}{} }, func() { delete(s.Votes, op.ReferKey()) }) s.processVoteOutput(output, height) } } } } } // tryInitProducerAssetAmounts will initialize deposit amount of all // producers after CR voting start height. func (s *State) tryInitProducerAssetAmounts(blockHeight uint32) { if blockHeight != s.chainParams.CRVotingStartHeight { return } setAmount := func(producer *Producer, amount common.Fixed64) { s.history.Append(blockHeight, func() { producer.depositAmount = amount }, func() { producer.depositAmount = common.Fixed64(0) }) } producers := s.getAllProducers() for _, v := range producers { programHash, err := contract.PublicKeyToDepositProgramHash( v.info.OwnerPublicKey) if err != nil { log.Warn(err) continue } amount, err := s.getProducerDepositAmount(*programHash) if err != nil { log.Warn(err) continue } producer := v setAmount(producer, amount) } } // processDeposit takes a transaction output with deposit program hash. func (s *State) processDeposit(tx *types.Transaction, height uint32) { for i, output := range tx.Outputs { if contract.GetPrefixType(output.ProgramHash) == contract.PrefixDeposit { if s.addProducerAssert(output, height) { op := types.NewOutPoint(tx.Hash(), uint16(i)) s.DepositOutputs[op.ReferKey()] = output.Value } } } } // getProducerByDepositHash will try to get producer with specified program // hash, note the producer state should be pending active or inactive. func (s *State) getProducerByDepositHash(hash common.Uint168) *Producer { for _, producer := range s.PendingProducers { if producer.depositHash.IsEqual(hash) { return producer } } for _, producer := range s.ActivityProducers { if producer.depositHash.IsEqual(hash) { return producer } } for _, producer := range s.InactiveProducers { if producer.depositHash.IsEqual(hash) { return producer } } for _, producer := range s.CanceledProducers { if producer.depositHash.IsEqual(hash) { return producer } } for _, producer := range s.IllegalProducers { if producer.depositHash.IsEqual(hash) { return producer } } return nil } // addProducerAssert will plus deposit amount for producers referenced in // program hash of transaction output. func (s *State) addProducerAssert(output *types.Output, height uint32) bool { if producer := s.getProducerByDepositHash(output.ProgramHash); producer != nil { s.history.Append(height, func() { producer.depositAmount += output.Value }, func() { producer.depositAmount -= output.Value }) return true } return false } // processCancelVotes takes a transaction output with vote payload. 
func (s *State) processCancelVotes(tx *types.Transaction, height uint32) { var exist bool for _, input := range tx.Inputs { referKey := input.ReferKey() if _, ok := s.Votes[referKey]; ok { exist = true } } if !exist { return } references, err := s.getTxReference(tx) if err != nil { log.Errorf("get tx reference failed, tx hash:%s", tx.Hash()) return } for _, input := range tx.Inputs { referKey := input.ReferKey() _, ok := s.Votes[referKey] if ok { out := references[input] s.processVoteCancel(&out, height) } } } // processVoteOutput takes a transaction output with vote payload. func (s *State) processVoteOutput(output *types.Output, height uint32) { countByGross := func(producer *Producer) { s.history.Append(height, func() { producer.votes += output.Value }, func() { producer.votes -= output.Value }) } countByVote := func(producer *Producer, vote common.Fixed64) { s.history.Append(height, func() { producer.votes += vote }, func() { producer.votes -= vote }) } p := output.Payload.(*outputpayload.VoteOutput) for _, vote := range p.Contents { for _, cv := range vote.CandidateVotes { producer := s.getProducer(cv.Candidate) if producer == nil { continue } switch vote.VoteType { case outputpayload.Delegate: if p.Version == outputpayload.VoteProducerVersion { countByGross(producer) } else { v := cv.Votes countByVote(producer, v) } } } } } // processVoteCancel takes a previous vote output and decrease producers votes. func (s *State) processVoteCancel(output *types.Output, height uint32) { subtractByGross := func(producer *Producer) { s.history.Append(height, func() { producer.votes -= output.Value }, func() { producer.votes += output.Value }) } subtractByVote := func(producer *Producer, vote common.Fixed64) { s.history.Append(height, func() { producer.votes -= vote }, func() { producer.votes += vote }) } p := output.Payload.(*outputpayload.VoteOutput) for _, vote := range p.Contents { for _, cv := range vote.CandidateVotes { producer := s.getProducer(cv.Candidate) if producer == nil { continue } switch vote.VoteType { case outputpayload.Delegate: if p.Version == outputpayload.VoteProducerVersion { subtractByGross(producer) } else { v := cv.Votes subtractByVote(producer, v) } } } } } // returnDeposit change producer state to ReturnedDeposit func (s *State) returnDeposit(tx *types.Transaction, height uint32) { var inputValue common.Fixed64 for _, input := range tx.Inputs { inputValue += s.DepositOutputs[input.ReferKey()] } returnAction := func(producer *Producer) { s.history.Append(height, func() { if height >= s.chainParams.CRVotingStartHeight { producer.depositAmount -= inputValue } producer.state = Returned }, func() { if height >= s.chainParams.CRVotingStartHeight { producer.depositAmount += inputValue } producer.state = Canceled }) } for _, program := range tx.Programs { pk := program.Code[1 : len(program.Code)-1] if producer := s.getProducer(pk); producer != nil && producer.state == Canceled { returnAction(producer) } } } // updateVersion record the update period during that inactive arbitrators // will not need to pay the penalty func (s *State) updateVersion(tx *types.Transaction, height uint32) { p, ok := tx.Payload.(*payload.UpdateVersion) if !ok { log.Error("tx payload cast failed, tx:", tx.Hash()) return } oriVersionStartHeight := s.VersionStartHeight oriVersionEndHeight := s.VersionEndHeight s.history.Append(height, func() { s.VersionStartHeight = p.StartHeight s.VersionEndHeight = p.EndHeight }, func() { s.VersionStartHeight = oriVersionStartHeight s.VersionEndHeight = 
oriVersionEndHeight }) } // processEmergencyInactiveArbitrators change producer state according to // emergency inactive arbitrators func (s *State) processEmergencyInactiveArbitrators( inactivePayload *payload.InactiveArbitrators, height uint32) { addEmergencyInactiveArbitrator := func(key string, producer *Producer) { s.history.Append(height, func() { s.setInactiveProducer(producer, key, height, true) s.EmergencyInactiveArbiters[key] = struct{}{} }, func() { s.revertSettingInactiveProducer(producer, key, height, true) delete(s.EmergencyInactiveArbiters, key) }) } for _, v := range inactivePayload.Arbitrators { nodeKey := hex.EncodeToString(v) key, ok := s.NodeOwnerKeys[nodeKey] if !ok { continue } if p, ok := s.ActivityProducers[key]; ok { addEmergencyInactiveArbitrator(key, p) } if p, ok := s.InactiveProducers[key]; ok { addEmergencyInactiveArbitrator(key, p) } } } // recordSpecialTx record hash of a special tx func (s *State) recordSpecialTx(tx *types.Transaction, height uint32) { illegalData, ok := tx.Payload.(payload.DPOSIllegalData) if !ok { log.Error("special tx payload cast failed, tx:", tx.Hash()) return } hash := illegalData.Hash() s.history.Append(height, func() { s.SpecialTxHashes[hash] = struct{}{} }, func() { delete(s.SpecialTxHashes, hash) }) } // processIllegalEvidence takes the illegal evidence payload and change producer // state according to the evidence. func (s *State) processIllegalEvidence(payloadData types.Payload, height uint32) { // Get illegal producers from evidence. var illegalProducers [][]byte switch p := payloadData.(type) { case *payload.DPOSIllegalProposals: illegalProducers = [][]byte{p.Evidence.Proposal.Sponsor} case *payload.DPOSIllegalVotes: illegalProducers = [][]byte{p.Evidence.Vote.Signer} case *payload.DPOSIllegalBlocks: signers := make(map[string]interface{}) for _, pk := range p.Evidence.Signers { signers[hex.EncodeToString(pk)] = nil } for _, pk := range p.CompareEvidence.Signers { key := hex.EncodeToString(pk) if _, ok := signers[key]; ok { illegalProducers = append(illegalProducers, pk) } } case *payload.SidechainIllegalData: illegalProducers = [][]byte{p.IllegalSigner} default: return } // Set illegal producers to FoundBad state for _, pk := range illegalProducers { key, ok := s.NodeOwnerKeys[hex.EncodeToString(pk)] if !ok { continue } if producer, ok := s.ActivityProducers[key]; ok { s.history.Append(height, func() { producer.state = Illegal producer.illegalHeight = height s.IllegalProducers[key] = producer producer.activateRequestHeight = math.MaxUint32 delete(s.ActivityProducers, key) delete(s.Nicknames, producer.info.NickName) }, func() { producer.state = Active producer.illegalHeight = 0 s.ActivityProducers[key] = producer producer.activateRequestHeight = math.MaxUint32 delete(s.IllegalProducers, key) s.Nicknames[producer.info.NickName] = struct{}{} }) continue } if producer, ok := s.CanceledProducers[key]; ok { s.history.Append(height, func() { producer.state = Illegal producer.illegalHeight = height s.IllegalProducers[key] = producer delete(s.CanceledProducers, key) delete(s.Nicknames, producer.info.NickName) }, func() { producer.state = Canceled producer.illegalHeight = 0 s.CanceledProducers[key] = producer delete(s.IllegalProducers, key) s.Nicknames[producer.info.NickName] = struct{}{} }) continue } } } // ProcessIllegalBlockEvidence takes a illegal block payload and change the // producers state immediately. This is a spacial case that can be handled // before it packed into a block. 
func (s *State) ProcessSpecialTxPayload(p types.Payload, height uint32) { s.mtx.Lock() defer s.mtx.Unlock() if inactivePayload, ok := p.(*payload.InactiveArbitrators); ok { s.processEmergencyInactiveArbitrators(inactivePayload, 0) } else { s.processIllegalEvidence(p, 0) } // Commit changes here if no errors found. s.history.Commit(height) } // setInactiveProducer set active producer to inactive state func (s *State) setInactiveProducer(producer *Producer, key string, height uint32, emergency bool) { producer.inactiveSince = height producer.activateRequestHeight = math.MaxUint32 producer.state = Inactive s.InactiveProducers[key] = producer delete(s.ActivityProducers, key) if height < s.VersionStartHeight || height >= s.VersionEndHeight { if !emergency { producer.penalty += s.chainParams.InactivePenalty } else { producer.penalty += s.chainParams.EmergencyInactivePenalty } } } // revertSettingInactiveProducer revert operation about setInactiveProducer func (s *State) revertSettingInactiveProducer(producer *Producer, key string, height uint32, emergency bool) { producer.inactiveSince = 0 producer.activateRequestHeight = math.MaxUint32 producer.state = Active s.ActivityProducers[key] = producer delete(s.InactiveProducers, key) if height < s.VersionStartHeight || height >= s.VersionEndHeight { penalty := s.chainParams.InactivePenalty if emergency { penalty = s.chainParams.EmergencyInactivePenalty } if producer.penalty < penalty { producer.penalty = common.Fixed64(0) } else { producer.penalty -= penalty } } } // countArbitratorsInactivity count arbitrators inactive rounds, and change to // inactive if more than "MaxInactiveRounds" func (s *State) countArbitratorsInactivity(height uint32, confirm *payload.Confirm) { // check inactive arbitrators after producers has participated in if height < s.chainParams.PublicDPOSHeight { return } // changingArbiters indicates the arbiters that should reset inactive // counting state. With the value of true means the producer is on duty or // is not current arbiter any more, or just becoming current arbiter; and // false means producer is arbiter in both heights and not on duty. 
changingArbiters := make(map[string]bool) for k := range s.PreBlockArbiters { changingArbiters[k] = true } s.PreBlockArbiters = make(map[string]struct{}) for _, a := range s.getArbiters() { key := s.getProducerKey(a) s.PreBlockArbiters[key] = struct{}{} if _, exist := changingArbiters[key]; exist { changingArbiters[key] = false } } changingArbiters[s.getProducerKey(confirm.Proposal.Sponsor)] = true // CRC producers are not in the ActivityProducers, // so they will not be inactive for k, v := range changingArbiters { key := k // avoiding pass iterator to closure producer, ok := s.ActivityProducers[key] if !ok { continue } countingHeight := producer.inactiveCountingHeight needReset := v // avoiding pass iterator to closure s.history.Append(height, func() { s.tryUpdateInactivity(key, producer, needReset, height) }, func() { s.tryRevertInactivity(key, producer, needReset, height, countingHeight) }) } } func (s *State) tryRevertInactivity(key string, producer *Producer, needReset bool, height, startHeight uint32) { if needReset { producer.inactiveCountingHeight = startHeight return } if producer.inactiveCountingHeight == height { producer.inactiveCountingHeight = 0 } if producer.state == Inactive { s.revertSettingInactiveProducer(producer, key, height, false) producer.inactiveCountingHeight = startHeight } } func (s *State) tryUpdateInactivity(key string, producer *Producer, needReset bool, height uint32) { if needReset { producer.inactiveCountingHeight = 0 return } if producer.inactiveCountingHeight == 0 { producer.inactiveCountingHeight = height } if height-producer.inactiveCountingHeight >= s.chainParams.MaxInactiveRounds { s.setInactiveProducer(producer, key, height, false) producer.inactiveCountingHeight = 0 } } // RollbackTo restores the database state to the given height, if no enough // history to rollback to return error. func (s *State) RollbackTo(height uint32) error { s.mtx.Lock() defer s.mtx.Unlock() return s.history.RollbackTo(height) } // GetHistory returns a history state instance storing the producers and votes // on the historical height. func (s *State) GetHistory(height uint32) (*StateKeyFrame, error) { s.mtx.RLock() defer s.mtx.RUnlock() // Seek to state to target height. if err := s.history.SeekTo(height); err != nil { return nil, err } // Take a snapshot of the history. return s.snapshot(), nil } // NewState returns a new State instance. func NewState(chainParams *config.Params, getArbiters func() [][]byte, getProducerDepositAmount func(common.Uint168) (common.Fixed64, error)) *State { return &State{ chainParams: chainParams, getArbiters: getArbiters, getProducerDepositAmount: getProducerDepositAmount, history: utils.NewHistory(maxHistoryCapacity), StateKeyFrame: NewStateKeyFrame(), } }
/// Returns a new bounding box with a margin pub fn margin(&self, margin: f32) -> BoundingBox { BoundingBox { origin: self.origin, extents: self.extents + Vec3::new(0.5, 0.5, 0.5) * margin, } }
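// Usage sketch for `margin` (hypothetical values; assumes the surrounding BoundingBox
// struct and a Vec3::new(x, y, z) constructor as used above):
//
//   let bb = BoundingBox {
//       origin: Vec3::new(0.0, 0.0, 0.0),
//       extents: Vec3::new(1.0, 1.0, 1.0),
//   };
//   let padded = bb.margin(2.0);
//   // Each extents component grows by 0.5 * margin, so padded.extents == (2.0, 2.0, 2.0)
//   // while the origin stays the same.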
[Video: There were chaotic scenes at the al-Shifa hospital in Gaza City, as Orla Guerin reports]

Israel's prime minister has vowed the campaign in Gaza will continue "until quiet and security are returned to the citizens of Israel".

Benjamin Netanyahu issued the statement after Israel's seven-hour "humanitarian window" for parts of Gaza closed. The truce slowed violence in Gaza, although Palestinians said Israel broke it by hitting a house in Gaza City. Two attacks on Israelis were reported in Jerusalem and Israel said militant rocket fire from Gaza had continued.

Health officials in Gaza say 1,868 Palestinians, most of them civilians, have been killed and some 9,470 injured since the conflict began nearly four weeks ago. Sixty-seven Israelis have died, all but three of them soldiers. A Thai national working in Israel was also killed.

Separately, Egypt's state media said that Palestinian delegates meeting in Cairo had agreed to a 72-hour humanitarian ceasefire.

[Video: James Reynolds reports from Jerusalem after Israeli police shoot a man who allegedly attacked a bus with a stolen digger]

The Mena news agency, citing unnamed Palestinian officials, said the truce would be announced later on Monday and would begin at 08:00 (05:00 GMT) on Tuesday. However, Israel has taken no part in the meetings.

'Quiet and security'

Following a security meeting at the Southern military HQ, Mr Netanyahu vowed to pursue Operation Protective Edge. He said: "What is left to be done before it ends is for the Israel Defense Forces to deal with the Gaza attack tunnels."

[Image: Palestinians accused Israel of breaking its own truce with an attack on Gaza City]
[Image: An attack on a bus in Jerusalem left an Israeli and a Palestinian dead]

Israel says militants use the tunnels to infiltrate its territory. Mr Netanyahu added: "But the operation also won't end until quiet and security are returned to the citizens of Israel for an extended period of time."

Mr Netanyahu said Israel had "no intention to harm civilians in Gaza" and accused Hamas, which controls Gaza, of preventing humanitarian aid from reaching them.

Israeli military spokesman Lt Col Peter Lerner said aerial operations "against terrorist infrastructures in Gaza" were being resumed. He told the BBC: "The mission is not time bound, but rather goal bound."

[Image: Israel's military at the Gaza border - Israel says the campaign goes on]
[Image: Rafah, in the south, was not included in the truce]

Hamas said Israel was using the truce, which ran from 10:00 local time (07:00 GMT) to 17:00 (14:00 GMT), to "divert the attention from Israeli massacres". There have been several truces called during the conflict but none has lasted, with each side accusing the other of violations.

Palestinian health officials said Israel carried out an air strike on a refugee camp inside Gaza City just minutes after the latest ceasefire began. Reports suggest the strike killed a young girl and injured at least 15 others, many of them women and children.

[Video: The BBC's Mark Lowen asks what Egyptians make of the crisis in Gaza]

One local, Ayman Mahmud, told AFP: "There is no truce. How could there be a truce? They are liars. They don't even respect their own commitments."
Israel's ceasefire otherwise generally slowed the violence, although it insisted it would not apply to Rafah in the south, where there was still an Israeli military presence. Palestinians in many areas of Gaza headed out to markets and there were long queues for money. Israel said a number of rockets had been fired from Gaza during the window.

Meanwhile in Jerusalem, a construction vehicle driven by a man, identified by police as a Palestinian from east Jerusalem, overturned a bus in an ultra-Orthodox neighbourhood. A passer-by was killed and several people were injured before police shot dead the driver. Later one person - reportedly a soldier - was seriously injured in a suspected drive-by shooting in Jerusalem's Mt Scopus area.

In other developments on Monday:
- French President Francois Hollande called for an end to the "massacres in Gaza" and Foreign Minister Laurent Fabius said Israel's right to security did not justify the "killing of children and slaughter of civilians"
- The UK Foreign Office is investigating reports that a British national has been killed in Gaza
Beliefs about sexual behavior and other predictors of Papanicolaou smear screening among Latinas and Anglo women.

BACKGROUND Latinas use cervical cancer prevention services less often than Anglo women.

OBJECTIVE To assess whether beliefs about cervical cancer influence the use of Papanicolaou (Pap) smears among Latinas and Anglo women in Orange County, California.

METHODS We conducted a telephone survey using the computer-assisted telephone interview system, random-digit dialing, and an instrument adapted from national surveys and a previous ethnographic study.

RESULTS Participants included 1225 noninstitutionalized Spanish- or English-speaking respondents 18 years or older: 803 Latinas (533 immigrants and 270 US born) and 422 Anglo women. Latina immigrants were more likely than US-born Latinas or Anglo women to believe that a variety of behaviors were risk factors for this disease. These behaviors included medically accepted risk factors such as early initiation of sexual intercourse (53% vs 41% vs 39%; P < .01) as well as unaccepted factors such as having sex during menstruation (56% vs 10% vs 3%; P < .01). Logistic regression analysis revealed that Latinas who held such beliefs were significantly less likely than others to report receiving a Pap smear within the past 3 years. Other independent predictors of Pap smear use included health insurance status, marital status, and acculturation.

CONCLUSIONS Latinas have culturally based beliefs about cervical cancer that reflect the moral framework within which they interpret diseases and that may influence their use of Pap smears. These beliefs are most prevalent among Latina immigrants. Because the known risk factors for cervical cancer are primarily related to sexual activities and because such activities are private and sensitive for many Latinas, physicians should be cautious when counseling these patients about the cause of this disease. Indeed, stressing the sexual transmission of cervical cancer could even discourage Latina immigrants from obtaining appropriate Pap smear screening.
/* This file is part of t8code. t8code is a C library to manage a collection (a forest) of multiple connected adaptive space-trees of general element types in parallel. Copyright (C) 2015 the developers t8code is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. t8code is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with t8code; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #include <t8_forest/t8_forest_iterate.h> #include <sc_options.h> #include <sc_refcount.h> #include <t8_eclass.h> #include <t8_element_cxx.hxx> #include <t8_schemes/t8_default/t8_default_cxx.hxx> #include <t8_forest/t8_forest_general.h> #include <t8_forest/t8_forest_io.h> #include <t8_forest/t8_forest_geometrical.h> #include <t8_cmesh.h> #include <t8_cmesh_readmshfile.h> #include <t8_cmesh_vtk_writer.h> #include <t8_cmesh/t8_cmesh_examples.h> #include <t8_data/t8_containers.h> typedef struct { double coords[3]; t8_locidx_t count; } t8_test_fiterate_udata_t; static int t8_test_fiterate_callback (t8_forest_t forest, t8_locidx_t ltreeid, const t8_element_t *element, int face, void *user_data, t8_locidx_t leaf_index) { double *coords; if (leaf_index >= 0) { coords = ((t8_test_fiterate_udata_t *) user_data)->coords; t8_forest_element_coordinate (forest, ltreeid, element, 0, coords); t8_debugf ("Leaf element in tree %i at face %i, tree local index %i has corner 0 coords %lf %lf %lf\n", ltreeid, face, (int) leaf_index, coords[0], coords[1], coords[2]); ((t8_test_fiterate_udata_t *) user_data)->count++; } return 1; } /* Only refine the first tree on a process. 
*/ static int t8_basic_adapt (t8_forest_t forest, t8_forest_t forest_from, t8_locidx_t which_tree, t8_locidx_t lelement_id, t8_eclass_scheme_c *ts, const int is_family, const int num_elements, t8_element_t *elements[]) { int mpirank, mpiret; T8_ASSERT (!is_family || num_elements == ts->t8_element_num_children (elements[0])); mpiret = sc_MPI_Comm_rank (sc_MPI_COMM_WORLD, &mpirank); SC_CHECK_MPI (mpiret); if (which_tree == 0 && mpirank == 0 && ts->t8_element_level (elements[0]) < 2) { return 1; } return 0; } static void t8_test_fiterate (t8_forest_t forest) { t8_locidx_t itree, num_trees; t8_eclass_t eclass; t8_eclass_scheme_c *ts; t8_element_t *first_el, *last_el, *nca; t8_element_array_t *leaf_elements; t8_test_fiterate_udata_t udata; int iface; num_trees = t8_forest_get_num_local_trees (forest); for (itree = 0; itree < num_trees; itree++) { eclass = t8_forest_get_tree_class (forest, itree); ts = t8_forest_get_eclass_scheme (forest, eclass); first_el = t8_forest_get_element_in_tree (forest, itree, 0); last_el = t8_forest_get_element_in_tree (forest, itree, t8_forest_get_tree_num_elements (forest, itree) - 1); ts->t8_element_new (1, &nca); ts->t8_element_nca (first_el, last_el, nca); leaf_elements = t8_forest_tree_get_leafs (forest, itree); for (iface = 0; iface < ts->t8_element_num_faces (nca); iface++) { udata.count = 0; t8_forest_iterate_faces (forest, itree, nca, iface, leaf_elements, &udata, 0, t8_test_fiterate_callback); t8_debugf ("Leaf elements at face %i:\t%i\n", iface, udata.count); } ts->t8_element_destroy (1, &nca); } } static void t8_test_fiterate_refine_and_partition (t8_cmesh_t cmesh, int level, sc_MPI_Comm comm, int partition_cmesh, int no_vtk) { t8_forest_t forest, forest_adapt, forest_partition; t8_cmesh_t cmesh_partition; if (!no_vtk) { t8_cmesh_vtk_write_file (cmesh, "test_fiterate_cmesh0", 1.0); } if (partition_cmesh) { /* partition the initial cmesh according to a uniform forest */ t8_cmesh_init (&cmesh_partition); t8_cmesh_set_derive (cmesh_partition, cmesh); t8_cmesh_set_partition_uniform (cmesh_partition, level, t8_scheme_new_default_cxx ()); t8_cmesh_commit (cmesh_partition, comm); } else { /* do not partition the initial cmesh */ cmesh_partition = cmesh; } if (!no_vtk) { t8_cmesh_vtk_write_file (cmesh_partition, "test_fiterate_cmesh1", 1.0); } forest = t8_forest_new_uniform (cmesh_partition, t8_scheme_new_default_cxx (), level, 0, comm); t8_test_fiterate (forest); t8_forest_init (&forest_adapt); t8_forest_set_adapt (forest_adapt, forest, t8_basic_adapt, 1); t8_forest_commit (forest_adapt); if (!no_vtk) { t8_forest_write_vtk (forest_adapt, "test_fiterate"); } t8_global_productionf ("Output vtk to test_fiterate.pvtu\n"); /* partition the adapted forest */ t8_forest_init (&forest_partition); t8_forest_set_partition (forest_partition, forest_adapt, 0); t8_forest_commit (forest_partition); t8_debugf ("Created ghost structure with %li ghost elements.\n", (long) t8_forest_get_num_ghosts (forest_partition)); if (!no_vtk) { t8_forest_write_vtk (forest_partition, "test_fiterate_partition"); } t8_global_productionf ("Output vtk to test_fiterate_partition.pvtu\n"); t8_test_fiterate (forest_partition); t8_forest_unref (&forest_partition); } /* Build a forest on a 2d or 3d brick connectivity, * refine and partition it and for each of these stages construct * the ghost layer. 
*/ static void t8_test_fiterate_brick (int dim, int x, int y, int z, int periodic_x, int periodic_y, int periodic_z, int level, sc_MPI_Comm comm, int no_vtk) { t8_cmesh_t cmesh; p4est_connectivity_t *conn4; p8est_connectivity_t *conn8; if (dim == 2) { conn4 = p4est_connectivity_new_brick (x, y, periodic_x, periodic_y); cmesh = t8_cmesh_new_from_p4est (conn4, comm, 0); p4est_connectivity_destroy (conn4); } else { T8_ASSERT (dim == 3); conn8 = p8est_connectivity_new_brick (x, y, z, periodic_x, periodic_y, periodic_z); cmesh = t8_cmesh_new_from_p8est (conn8, comm, 0); p8est_connectivity_destroy (conn8); } t8_test_fiterate_refine_and_partition (cmesh, level, comm, 1, no_vtk); } /* Build a forest on a hypercube mesh * and refine the first tree of a process once. * Create ghost layer and print it. * partition the forest, create ghost layer and print it. */ static void t8_test_fiterate_hypercube (t8_eclass_t eclass, int level, sc_MPI_Comm comm, int no_vtk) { t8_cmesh_t cmesh; cmesh = t8_cmesh_new_hypercube (eclass, comm, 0, 0, 0); t8_test_fiterate_refine_and_partition (cmesh, level, comm, 1, no_vtk); } /* Build a forest on a cmesh read from a .msh file. * and refine the first tree of a process once. * Create ghost layer and print it. * partition the forest, create ghost layer and print it. */ static void t8_test_fiterate_msh_file (const char *fileprefix, int level, int dim, sc_MPI_Comm comm, int no_vtk) { t8_cmesh_t cmesh; cmesh = t8_cmesh_from_msh_file (fileprefix, 0, comm, dim, 0, 0); t8_test_fiterate_refine_and_partition (cmesh, level, comm, 1, no_vtk); } int main (int argc, char **argv) { int mpiret, parsed, eclass_int, level, helpme; int x_dim, y_dim, z_dim, periodic; int dim, no_vtk; sc_options_t *opt; const char *prefix; char usage[BUFSIZ]; char help[BUFSIZ]; int sreturnA, sreturnB; sreturnA = snprintf (usage, BUFSIZ, "Usage:\t%s <OPTIONS>", basename (argv[0])); sreturnB = snprintf (help, BUFSIZ, "help string\n%s\n", usage); if (sreturnA > BUFSIZ || sreturnB > BUFSIZ) { /* The usage string or help message was truncated */ /* Note: gcc >= 7.1 prints a warning if we * do not check the return value of snprintf. */ t8_debugf ("Warning: Truncated usage string and help message to '%s' and '%s'\n", usage, help); } mpiret = sc_MPI_Init (&argc, &argv); SC_CHECK_MPI (mpiret); sc_init (sc_MPI_COMM_WORLD, 1, 1, NULL, SC_LP_ESSENTIAL); t8_init (SC_LP_DEFAULT); opt = sc_options_new (argv[0]); sc_options_add_int (opt, 'l', "level", &level, 0, "The refinement level of the mesh."); sc_options_add_switch (opt, 'o', "no-vtk", &no_vtk, "disable vtk output"); sc_options_add_string (opt, 'f', "prefix", &prefix, "", "Prefix of a" " .msh file."); sc_options_add_int (opt, 'd', "dim", &dim, 2, "If a .msh file " "is read, the dimension must be specified."); sc_options_add_int (opt, 'x', "x-dim", &x_dim, 0, "Number of brick mesh cells in x direction."); sc_options_add_int (opt, 'y', "y-dim", &y_dim, 0, "Number of brick mesh cells in y direction."); sc_options_add_int (opt, 'z', "z-dim", &z_dim, 0, "Number of brick mesh cells in z direction." " If specified, then the mesh is automatically 3d."); sc_options_add_int (opt, 'p', "periodic", &periodic, 0, "Periodicity of brick mesh. A three (two) digit decimal" " number zyx. If digit i is nonzero then the representative" " coordinate direction of the brick mesh is periodic."); sc_options_add_int (opt, 'e', "elements", &eclass_int, 2, "If neither -f nor -x,-y,-z are used a cubical mesh is" " generated. 
This option specifies" " the type of elements to use.\n" "\t\t0 - vertex\n\t\t1 - line\n\t\t2 - quad\n" "\t\t3 - triangle\n\t\t4 - hexahedron\n" "\t\t5 - tetrahedron\n\t\t6 - prism\n\t\t7 - pyramid"); sc_options_add_switch (opt, 'h', "help", &helpme, "Display a short help message."); /* parse command line options */ parsed = sc_options_parse (t8_get_package_id (), SC_LP_DEFAULT, opt, argc, argv); /* check for wrong usage of arguments */ if (parsed < 0 || parsed != argc || x_dim < 0 || y_dim < 0 || z_dim < 0 || dim < 2 || dim > 3 || eclass_int < T8_ECLASS_VERTEX || eclass_int >= T8_ECLASS_COUNT) { sc_options_print_usage (t8_get_package_id (), SC_LP_ERROR, opt, NULL); return 1; } if (helpme) { t8_global_productionf ("%s\n", help); sc_options_print_usage (t8_get_package_id (), SC_LP_ERROR, opt, NULL); } else { if (x_dim == 0 && !strcmp (prefix, "")) { t8_global_productionf ("Testing ghost on a hypercube cmesh with %s " "elements\n", t8_eclass_to_string[eclass_int]); t8_test_fiterate_hypercube ((t8_eclass_t) eclass_int, level, sc_MPI_COMM_WORLD, no_vtk); } else if (x_dim > 0) { int x_per, y_per, z_per; if (y_dim <= 0 || z_dim < 0) { t8_global_productionf ("\tERROR: Wrong usage\n"); return 1; } dim = z_dim != 0 ? 3 : 2; x_per = periodic % 10; y_per = periodic / 10 % 10; z_per = periodic / 100 % 10; t8_global_productionf ("Testing ghost on a %i x %i x %i brick " "mesh in %iD\n", x_dim, y_dim, z_dim, dim); t8_test_fiterate_brick (dim, x_dim, y_dim, z_dim, x_per, y_per, z_per, level, sc_MPI_COMM_WORLD, no_vtk); } else { /* A triangle or tetgen file collection must be given. */ T8_ASSERT (strcmp (prefix, "")); T8_ASSERT (dim == 2 || dim == 3); t8_global_productionf ("Testing ghost on cmesh read from %s.msh\n", prefix); t8_test_fiterate_msh_file (prefix, level, dim, sc_MPI_COMM_WORLD, no_vtk); } } sc_options_destroy (opt); sc_finalize (); mpiret = sc_MPI_Finalize (); SC_CHECK_MPI (mpiret); return 0; }
/** * Extract database type from url string. * * @param url the url * @return the string */ public static String extractDatabaseTypeFromUrl(String url) { if (url == null) { return ""; } else { url = url.toLowerCase(); if (url.contains(":mysql")) { return "MySql"; } else if (url.contains(":oracle")) { return "Oracle"; } else if (url.contains(":postgresql")) { return "PostgreSQL"; } else if (url.contains(":sqlserver")) { return "SqlServer"; } else { return url.contains(":sqlite") ? "Sqlite" : ""; } } }
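// A minimal usage sketch (hypothetical call sites; assumes the method above is
// statically imported or invoked on its enclosing utility class):
//
//   extractDatabaseTypeFromUrl("jdbc:mysql://localhost:3306/app")        -> "MySql"
//   extractDatabaseTypeFromUrl("jdbc:postgresql://db:5432/app")          -> "PostgreSQL"
//   extractDatabaseTypeFromUrl("jdbc:sqlserver://db;databaseName=app")   -> "SqlServer"
//   extractDatabaseTypeFromUrl(null)                                     -> ""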
const num2mon = {
    '01': 'Jan',
    '02': 'Feb',
    '03': 'Mar',
    '04': 'Apr',
    '05': 'May',
    '06': 'Jun',
    '07': 'Jul',
    '08': 'Aug',
    '09': 'Sep',
    '10': 'Oct',
    '11': 'Nov',
    '12': 'Dec',
};

const mon2num = {
    'Jan': '01',
    'Feb': '02',
    'Mar': '03',
    'Apr': '04',
    'May': '05',
    'Jun': '06',
    'Jul': '07',
    'Aug': '08',
    'Sep': '09',
    'Oct': '10',
    'Nov': '11',
    'Dec': '12',
}

export function encodeDate(date:string | undefined = ''):string {
    let res = date;
    try {
        const temp = date.split('-');
        res = `${temp[0]} ${num2mon[temp[1]]} ${temp[2]}`
    } catch(err) {
        throw Error(err+'');
    }
    return res;
}

export function decodeDate(date:string | undefined = ''):string {
    let res = date || '';
    try {
        const temp = date.split(' ');
        res = `${temp[0]}-${mon2num[temp[1]]}-${temp[2]}`
    } catch(err) {
        throw Error(err+'');
    }
    return res;
}

export default encodeDate;
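// Quick usage sketch for the two converters above:
//   encodeDate("2021-03-15")   -> "2021 Mar 15"
//   decodeDate("2021 Mar 15")  -> "2021-03-15"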
“Apparently, it’s not enough that UND’s administration is attacking the quality of education by cutting programs and experienced faculty and jacking class sizes,” University of North Dakota Assistant Professor Heidi Czerwiec opines in a letter to grandforksherald.com. “Now, we must also feel under physical attack as well.” Wait. What? “I look up from my office computer to see two figures in camo with guns outside my window. My first thought is for my students’ and my safety: I grab my phone, crawl under my desk and call 911 . . . Seriously? Ms. Czerwiec crawled under her desk? Well, she is an English professor. Maybe she’s using hyperbole to create a little dramatic tension. If so, there are no rhetorical holds barred. I can barely talk—first, with fear, and then with rage when the dispatcher reports back that yes, in fact, I’ve probably just seen ROTC cadets, though they’re going to send an officer to check because no one has cleared it with them. They thank me for reporting it. A few minutes later, a university officer calls me back—not to reassure me, but to scold me for calling 911. He says ROTC has permission to do this exercise. When I tell him that this was news to 911 and that they encouraged me to call whenever I see a gun on campus, he seems surprised. He also tells me that ROTC will be doing these exercises for the next couple weeks. So I reply that I guess I’ll be calling 911 for the next couple weeks—and I will. Every time. It’s not my job to decide whether people carrying guns at school are an actual threat. It’s my job to teach and to get home to my family. Just so we’re clear: North Dakota punishes false reporting of a crime with up to a year in prison and/or a $2000 fine. More than that, I respectfully suggest that Ms. Czerwiec consider the fact that those ROTC cadets are practicing with replica (i.e. non-firing) rifles. Equally, America’s armed forces protect the liberties which she enjoys at the University of North Dakota. A memo she clearly didn’t get. It’s already highly inappropriate to conduct unnecessary military maneuvers in the middle of the quad. But with school shootings on the increase and tensions at UND running high, it’s especially irresponsible. We’re already under financial and emotional attack. We don’t need to feel under physical attack, too. To quote Morris Albert, feelings. Nothing more than feelings. Nothing less, either.
#include <bits/stdc++.h> using namespace std; #define PB push_back #define MP make_pair #define F first #define S second #define MAXI 1000001 typedef long long ll; typedef vector<int> vi; typedef pair<int,int>pi; const ll inf = 1e18; const int N = 1000000000+7; #define mid ((l+r)>>1) int main() { ios_base::sync_with_stdio(0); cin.tie(NULL); long double a1,a2,b1,b2,c1,c2,d1,d2,t,k; cin>>a1>>a2>>b1>>b2>>c1>>c2; d1=(a1+c1)/2; d2=(a2+c2)/2; if(pow(abs(b2-a2),2)+pow(abs(a1-b1),2)==pow(abs(b2-c2),2)+pow(abs(c1-b1),2)) { //cout<<"yo"; k=pow(abs(c2-a2),2)+pow(abs(a1-c1),2); t=pow(abs(b2-d2),2)+pow(abs(d1-b1),2); if((sqrt(t*k))/2) { cout<<"Yes"; return 0; } } cout<<"No"; return 0; }
// @ts-ignore
import Vue from "vue";
import { PermifyService } from "@permify/permify-service-js"

const LOCAL_STORAGE_KEY_USER_ID = "__permifyUserId";

export interface PluginOptions {
    workspaceId: string,
    publicToken: string
}

let instance: Vue;

const getInstance = () => instance;

const usePermify = (options: PluginOptions): Vue => {
    if (instance) return instance;

    instance = new Vue({
        data() {
            return {
                userId: "",
                isLoading: false,
            }
        },
        methods: {
            async isAuthorized(policyName: string, actionAlias: string, resourceId?: string, resourceType?: string): Promise<boolean> {
                // @ts-ignore
                if (typeof this.userId !== 'undefined') {
                    // @ts-ignore
                    return await new PermifyService().isAuthorizedPermify(options.workspaceId, options.publicToken, this.userId, actionAlias, policyName, resourceId, resourceType)
                } else {
                    return false
                }
            },
            setUserId(newUserId: string) {
                // @ts-ignore
                this.userId = newUserId;
                localStorage.setItem(LOCAL_STORAGE_KEY_USER_ID, newUserId);
            },
        },
        created() {
            const storedUserId = localStorage.getItem(LOCAL_STORAGE_KEY_USER_ID);
            if (storedUserId) {
                this.userId = storedUserId;
            }
        },
    });

    return instance;
};

export const Permify = {
    install(app: any, options: PluginOptions) {
        app.prototype.$permify = usePermify(options);
        Vue.prototype.$permify = usePermify(options);
    }
};
# USAGE
# python /home/nmorales/cxgn/DroneImageScripts/ImageProcess/CalculatePhenotypeSurf.py --image_paths /folder/mypic1.png,/folder/mypic2.png --outfile_paths /export/mychoppedimages/outimage2.png,/export/mychoppedimages/outimage2.png

# import the necessary packages
import argparse
import imutils
import cv2
import numpy as np
import urllib.request
import math
from matplotlib import pyplot as plt

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image_paths", required=True, help="image path")
ap.add_argument("-o", "--outfile_paths", required=True, help="file path directory where the output will be saved")
ap.add_argument("-r", "--results_outfile_path", required=True, help="file path where results will be saved")
args = vars(ap.parse_args())

input_images = args["image_paths"]
outfile_paths = args["outfile_paths"]
results_ourfile = args["results_outfile_path"]
images = input_images.split(",")
outfiles = outfile_paths.split(",")

count = 0
for image in images:
    img = cv2.imread(image)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    orb = cv2.xfeatures2d.SURF_create()
    kp, des = orb.detectAndCompute(img, None)
    print(kp)
    #print(des)

    kpsimage = cv2.drawKeypoints(img, kp, img, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    #cv2.imshow('image'+str(count),kpsimage)
    cv2.imwrite(outfiles[count], kpsimage)
    count += 1

#cv2.waitKey(0)
Alignment of atmospheric mineral dust due to electric field

Optical polarimetry observations on La Palma, Canary Islands, during a Saharan dust episode show dichroic extinction indicating the presence of vertically aligned particles in the atmosphere. Modelling of the extinction together with particle orientation indicates that the alignment could have been due to an electric field of the order of 2 kV/m. Two alternative mechanisms for the origin of the field are examined: the effect of reduced atmospheric conductivity and charging of the dust layer, the latter effect being a more likely candidate. It is concluded that partial alignment may be a common feature of Saharan dust layers. The modelling indicates that the alignment can significantly alter dust optical depth. This "Venetian blind effect" may have decreased optical thickness in the vertical direction by as much as 10% for the case reported here. It is also possible that the alignment and the electric field modify dust transport.
// Prefix returns the prefix of a command, if event is a command func (e *Event) Prefix() string { _, span := global.Tracer("cacophony.dev/kit").Start(e.Context(), "event.Prefix") defer span.End() if e.prefix != "" { return e.prefix } if e.GuildID == "" { e.prefix = defaultPrefix return e.prefix } prefix, err := config.GuildGetString(e.DB(), e.GuildID, guildCmdPrefixKey) if err == nil && prefix != "" { e.prefix = prefix } else { e.prefix = defaultPrefix } return e.prefix }
/** * Test samples using the provided bins. Values correspond to the bin upper * limit. It is assumed the values span most of the distribution. Additional * tests are performed using a region of the distribution sampled. * * @param values Bin upper limits * @param factory Factory to create the sampler * @param distribution The distribution under test * @param ranges Ranges of the distribution to test */ private static void testSamples(double[] values, Function<UniformRandomProvider, ContinuousSampler> factory, Supplier<AbstractRealDistribution> distribution, double[]... ranges) { final int bins = values.length; final int samples = 10000000; final long[] observed = new long[bins]; final RestorableUniformRandomProvider rng = RandomSource.XO_SHI_RO_128_PP.create(SEED); final ContinuousSampler sampler = factory.apply(rng); for (int i = 0; i < samples; i++) { final double x = sampler.sample(); final int index = findIndex(values, x); observed[index]++; } final AbstractRealDistribution dist = distribution.get(); final double[] expected = new double[bins]; double x0 = Double.NEGATIVE_INFINITY; for (int i = 0; i < bins; i++) { final double x1 = values[i]; expected[i] = dist.probability(x0, x1); x0 = x1; } final double significanceLevel = 0.001; final double lowerBound = dist.getSupportLowerBound(); final ChiSquareTest chiSquareTest = new ChiSquareTest(); final double pValue = chiSquareTest.chiSquareTest(expected, observed); Assertions.assertFalse(pValue < 0.001, () -> String.format("(%s <= x < %s) Chi-square p-value = %s", lowerBound, values[bins - 1], pValue)); for (final double[] range : ranges) { final int min = findIndex(values, range[0]); final int max = findIndex(values, range[1]); if (max - min + 1 < 2) { Assertions.fail("Invalid range: " + Arrays.toString(range)); } final long[] observed2 = Arrays.copyOfRange(observed, min, max + 1); final double[] expected2 = Arrays.copyOfRange(expected, min, max + 1); final double pValueB = chiSquareTest.chiSquareTest(expected2, observed2); Assertions.assertFalse(pValueB < significanceLevel, () -> String.format("(%s <= x < %s) Chi-square p-value = %s", min == 0 ? lowerBound : values[min - 1], values[max], pValueB)); } }
About a month ago, after some amount of umming and ahhing, I bought a Nintendo 2DS. I’d been interested in picking one up for a while, and the promise of price-drops around Black Friday had me checking every now and then. I’m not a massive gamer, but there are a handful of games on the system that looked appealing to me. The 2DS is the cheaper version of the 3DS. For one thing it, obviously, omits the 3D screen. This wasn’t a huge concern to me. It also has a simple, flat construction rather than the 3DS’ hinged, folding clamshell approach. This makes the 2DS less neat and pocketable, but still quite portable. (Trivia: the flat construction means that although the 2DS appears to have two separate screens, it’s actually one big slab under the plastic shell to save money. Clever!) I’ll talk about the games another time – I’m dozens of hours into Weeaboo Junction alone – but today I’m thinking about one of the system’s core functions: StreetPass. StreetPass 2DS and 3DS devices all have WiFi built in. This lets the user download games, play online multiplayer, and browse the web (to some degree). So far, so ordinary for a games console these days. But 2DSes and 3DSes can also talk to eachother, primarily for local multiplayer, but also for StreetPass functionality. Contrary to most device-to-device communications, StreetPass actually allows devices to talk without any input from the users. As long as a 2DS or 3DS is on standby/in sleep mode, it is constantly seeking other devices to talk to. And when it finds one, it will perform a sort of ‘handshake’ with the other device. Later, both users will see a green LED notification light on the device, and they’ll have any one of a number of things to check/play with. Obviously, this kind of exchange only happens when two devices are in close proximity, so it’s more likely in busier places, like cities and public transport hubs. Therefore, it can be hugely beneficial to take your 2DS anywhere you go, leaving it in standby mode, just on the off-chance you pass another user. This quite unique system function was one of the things that attracted me to the console. I rather like the idea of pseudo-random exchanges with strangers within the walled garden of Nintendo’s network and games. “But Paul!” you cry. “Don’t you know the 3DS was first launched six years ago?! Just how many users do you think there will be still bumbling around on the streets of London?” Fair point. My experiences with StreetPass have been… Well, pretty much as I’d expected. I’ve had a handful of exchanges so far, usually near busier transport interchanges, like Golders Green, Euston station, and other busy trains/stations I’ve passed through recently. But frequenting locations such as these is by no means a dead cert in terms of receiving StreetPasses. I’ve carried my 2DS in my bag when visiting Euston station, Oxford Street, etc, and had no ‘hits’. I tell myself that other factors may be at play – too many other devices interfering with WiFi reception, or the limited range of each device. But I really think it’s just down to a low install base. Which, in a way, makes each StreetPass all the sweeter. The other way to get StreetPass hits is via a Nintendo Zone. Nintendo Zones in London Nintendo Zones in Europe These are public WiFi hotspots and there used to be loads in the UK, but now it appears to be limited to branches of GAME, the videogame retailer. 
Above, left, shows the remaining Nintendo Zones within the M25, and, right, the map shows the European countries still with Nintendo Zones. Source. Users who StreetPass within a Nintendo Zone get the data from, I believe, the last six users to also do the same. This makes it a good way to bulk up your StreetPass hits without actually needing to be in proximity with the other users. The clever bit is, much like an actual StreetPass, you don’t need to do anything to connect to a Nintendo Zone. It just works when you’re in range. I tested this out recently by stuffing my 2DS into my running backpack and passing my nearest branch of GAME. It worked perfectly, and gave the expected boost to my StreetPass hit count. So what do you get when you StreetPass with someone? It depends. The main port of call is a sort-of game called Mii Plaza, where other users’ Miis will be gathered. Miis are Nintendo’s weird bobble-headed avatars which you can tweak to vaguely resemble you or whomever you please. The associated metadata is limited: your birthday (DD/MM only), the latest 3DS game you’ve been playing, and a few other child-friendly tidbits. You’re quite limited in what data you can include in the free text fields. It would be a stretch, even, to hack your contact details into it. And that’s sort of the point. Nintendo intentionally made this system pseudo-anonymous. They have a two-way handshake friend code system for real-life buddies, but StreetPass is just kind of vague. Miis, as avatars, are kind of ageless. It’s hard to tell if you’ve StreetPassed with a ten-year-old or a fifty-year-old. Anyway, in Mii Plaza there are a handful of cute mini-games, and you also see a frankly horrifying army of all the Miis you’ve ‘met’ recently, lined up and grinning. But Mii Plaza is pre-installed and is just one bit of 3DS software that makes use of StreetPass. Other games for the system such as Mario Kart 7 and Animal Crossing: New Leaf can trade data with other users via StreetPass, including fastest lap times in the former and sharing home interior designs with the latter. There are a ton of other examples. There’s even a music player built into the 2DS/3DS, and if you add some tracks to your favourites, StreetPass will work a bit of Last.FM-style magic and notify you if you share any favourite tracks with other users. Even with my limited experiences with StreetPass, I’ve had a number of quite charming ‘interactions’ (these are only ever after the fact, never real-time) with other users, particularly seeing their Animal Crossing homes. The age of the system and the lack of remaining Nintendo Zones means the opportunities to StreetPass are getting fewer and fewer. I wish there were still more Zones available – I think they also used to use the ubiquitous ‘The Cloud’ WiFi hotspots, which would’ve really boosted your chances. But still, it’s a fun little bonus feature of the system which adds some charm and some unexpected surprises now and then. The fact that StreetPass isn’t coming to the Nintendo Switch means I guess its days are numbered. Hopefully the 2DS/3DS system will live on alongside the Switch, as the new system isn’t truly portable – or certainly not pocketable – and I believe Nintendo has indicated that the two platforms will co-exist for the time being. Even if StreetPass ceased to exist tomorrow, the vast library of games for the 3DS, not to mention DS and Virtual Console titles, means I will have plenty to keep me going for a while yet. 
Postscript Perhaps unsurprisingly for a closed system run by someone like Nintendo, the technology behind StreetPass is proving hard to research. Online, in the marketing bumf, you’ll find real-world scenarios and in-game features, rather than frequencies, power consumption transmission ranges, amount and format of data transmitted or time needed for a successful StreetPass. Some of these factors are applicable to the WiFi standard generally. But other factors remain a bit of a mystery. And perhaps that’s best. Certainly it makes sense to keep it simple for the purposes of a mainstream, predominantly kid-friendly console. The more information a company releases on functions like this, the more likely it is to be hacked or gamed. There are, or were, ways to create your own bootleg Nintendo Zones with a hacked router, which intrigues me greatly. But I also like that it all just works for the average user. Anyway, the whole concept has got me thinking about near field communications (NFC), local WiFi and Bluetooth handshakes and beacons, mesh networks, and other possibilities with similar technologies. I had some reckons on StreetPass in general, and I have further reckons on the wider concept of device-to-device communications and other potential applications. Watch this space.
/** * Initialize a {@link Builder} to export the data from input data stream with {@link Row}s into iceberg table. We use * {@link RowData} inside the sink connector, so users need to provide a {@link TableSchema} for builder to convert * those {@link Row}s to a {@link RowData} DataStream. * * @param input the source input data stream with {@link Row}s. * @param tableSchema defines the {@link TypeInformation} for input data. * @return {@link Builder} to connect the iceberg table. */ public static Builder forRow(DataStream<Row> input, TableSchema tableSchema) { RowType rowType = (RowType) tableSchema.toRowDataType().getLogicalType(); DataType[] fieldDataTypes = tableSchema.getFieldDataTypes(); DataFormatConverters.RowConverter rowConverter = new DataFormatConverters.RowConverter(fieldDataTypes); return builderFor(input, rowConverter::toInternal, RowDataTypeInfo.of(rowType)) .tableSchema(tableSchema); }
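// A usage sketch for the builder above (hedged: assumes the Iceberg Flink connector's
// FlinkSink.Builder with tableLoader() and append(); `rowStream` and `tableLoader`
// are placeholders not defined in this snippet):
//
//   TableSchema schema = TableSchema.builder()
//       .field("id", DataTypes.BIGINT())
//       .field("data", DataTypes.STRING())
//       .build();
//
//   FlinkSink.forRow(rowStream, schema)   // rowStream: DataStream<Row>
//       .tableLoader(tableLoader)         // points the sink at the target Iceberg table
//       .append();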
// Ensures that Messages returns the messages set on the context and the error message // when an error is set. func TestMessagesWithError(t *testing.T) { assert := assert.New(t) req, _ := http.NewRequest("GET", "http://example.com/foo", nil) writer := httptest.NewRecorder() ctx := NewContext(nil, req, writer) message := "foo" errMessage := "blah" err := fmt.Errorf(errMessage) ctx = ctx.setError(err) if assert.Equal(1, len(ctx.Messages())) { assert.Equal(errMessage, ctx.Messages()[0]) } ctx.AddMessage(message) if assert.Equal(2, len(ctx.Messages())) { assert.Equal(message, ctx.Messages()[0]) assert.Equal(errMessage, ctx.Messages()[1]) } }
export type Grouper<TInp> = (t: TInp) => string | undefined; export type Aggregator<TStat> = (numbers: number[]) => TStat; export function groupBy<T>( objs: readonly T[], grouper: Grouper<T>, ): Record<string, T[]> { const grouped: Record<string, T[]> = {}; for (let i = 0; i < objs.length; i++) { const obj = objs[i]; const val = grouper(obj); if (val !== undefined) { (grouped[val] || (grouped[val] = [])).push(obj); } } return grouped; } export function mapObject<TS, TD>( obj: Record<string, TS>, func: (val: TS) => TD, ): Record<string, TD> { const out: Record<string, TD> = {}; for (let key in obj) { if (obj.hasOwnProperty(key)) { out[key] = func(obj[key]); } } return out; } function groupNumbers<T extends object>( objs: readonly T[], ): Record<string, number[]> { const vals: Record<string, number[]> = {}; for (let i = 0; i < objs.length; i++) { const obj = objs[i]; for (let prop in obj) { const val = obj[prop]; if (typeof val === "number") { (vals[prop] || (vals[prop] = [])).push(val); } } } return vals; } export function aggregate<TInp extends object, TStat>( data: readonly TInp[], grouper: Grouper<TInp>, aggregator: Aggregator<TStat>, ): Record<string, Record<string, TStat>> { return mapObject(groupBy(data, grouper), (objs) => mapObject(groupNumbers(objs), aggregator), ); } export function multiAggregate<TInp extends object, TStat>( data: readonly TInp[], groupers: Record<string, Grouper<TInp>>, aggregator: Aggregator<TStat>, ): Record<string, Record<string, Record<string, TStat>>> { return mapObject(groupers, (grouper) => aggregate(data, grouper, aggregator)); }
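// Usage sketch for groupBy/aggregate above (the sample rows and the `sum` aggregator
// are illustrative, not part of the original module):
type Row = { team: string; score: number; assists: number };

const rows: Row[] = [
  { team: "red", score: 3, assists: 1 },
  { team: "red", score: 5, assists: 2 },
  { team: "blue", score: 7, assists: 0 },
];

const sum: Aggregator<number> = (ns) => ns.reduce((a, b) => a + b, 0);

const byTeam = aggregate(rows, (r) => r.team, sum);
// byTeam == { red: { score: 8, assists: 3 }, blue: { score: 7, assists: 0 } }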
<reponame>flaranda/fn-screener-backend import * as inversify from 'inversify'; import mongoose from 'mongoose'; import { ITransformer } from '../../../common/interfaces/ITransformer'; import { EntityMongo } from '../../../common/models/mongo/EntityMongo'; import { EntityMongoModelName } from '../../../common/models/mongo/EntityMongoModelName'; import { EntityMongoFindOneRepository } from '../../../common/modules/mongo/EntityMongoFindOneRepository'; import { MongoDatasource } from '../../../mongo/datasources/MongoDatasource'; import { mongoInjectionTypes } from '../../../mongo/inversify/mongoInjectionTypes'; import { userInjectionTypes } from '../../inversify/userInjectionTypes'; import { User } from '../../models/domain/User'; import { UserFindQuery } from '../../models/domain/UserFindQuery'; import { UserMongo } from '../../models/mongo/UserMongo'; import { UserMongoDocument } from '../../models/mongo/UserMongoDocument'; @inversify.injectable() export class UserMongoFindOneRepository extends EntityMongoFindOneRepository< UserFindQuery, User, UserMongo, UserMongoDocument > { constructor( @inversify.inject(mongoInjectionTypes.MongoDatasource) mongoDatasource: MongoDatasource, @inversify.inject(userInjectionTypes.UserMongoDocumentToUserTransformer) userMongoDocumentToUserTransformer: ITransformer<UserMongoDocument, User>, ) { super( mongoDatasource, userMongoDocumentToUserTransformer, EntityMongoModelName.User, ); } protected hydrateBaseMongooseFilterQueryFromEntityFindQuery( baseMongooseFilterQuery: mongoose.FilterQuery<UserMongo>, _userFindQuery: UserFindQuery, ): mongoose.FilterQuery<EntityMongo> { return baseMongooseFilterQuery; } }
/***********************************************************
LIMITEngine Header File
Copyright (C), LIMITGAME, 2012
-----------------------------------------------------------
@file LE_Camera2D.h
@brief Camera Class in 2D
@author minseob (https://github.com/rasidin)
-----------------------------------------------------------
History:
- 2013/05/19 Created by minseob
***********************************************************/
#ifndef _LE_CAMERA2D_H_
#define _LE_CAMERA2D_H_

#include "LE_Camera.h"

namespace LimitEngine {
class Camera2D : public Camera
{
public:
    Camera2D();
    virtual ~Camera2D();

    void Update();

    void SetScreenSize(int width, int height);
    void SetScale(float scale) { mScale = scale; }

private:
    float mScale;
};
}

#endif
def forward(self, features, labels=None): bert_inputs = features["bert_input"]["feature"] token_type_ids = features["token_type"]["feature"] tagged_sub_token_idxs = features["tagged_sub_token_idxs"]["feature"] num_tokens = features["num_tokens"]["feature"] attention_mask = (bert_inputs > 0).long() outputs = self.model( bert_inputs, token_type_ids=token_type_ids, attention_mask=attention_mask ) logits = outputs[0] gather_token_pos_idxs = tagged_sub_token_idxs.unsqueeze(-1).repeat(1, 1, self.num_tags) token_tag_logits = logits.gather(1, gather_token_pos_idxs) sliced_token_tag_logits = [token_tag_logits[idx, :n, :] for idx, n in enumerate(num_tokens)] output_dict = {"tag_logits": sliced_token_tag_logits} if labels: tag_idxs = labels["tag_idxs"] data_idx = labels["data_idx"] output_dict["tag_idxs"] = tag_idxs output_dict["data_idx"] = data_idx loss = self.criterion(token_tag_logits.view(-1, self.num_tags), tag_idxs.view(-1)) output_dict["loss"] = loss.unsqueeze(0) return output_dict
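# The gather above selects, for each tagged sub-token position, the full tag-logit
# vector at that position. A minimal standalone sketch of the same indexing trick
# (shapes and index values are made up for illustration):
import torch

batch, seq_len, num_tags = 2, 6, 5
logits = torch.randn(batch, seq_len, num_tags)              # (batch, seq_len, num_tags)
tagged_sub_token_idxs = torch.tensor([[0, 2, 5],
                                      [0, 1, 3]])           # first sub-token of each token

idx = tagged_sub_token_idxs.unsqueeze(-1).repeat(1, 1, num_tags)
token_tag_logits = logits.gather(1, idx)                    # (batch, num_tokens, num_tags)

# token_tag_logits[b, t] == logits[b, tagged_sub_token_idxs[b, t]]
assert torch.equal(token_tag_logits[0, 1], logits[0, 2])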
<reponame>masud-technope/ACER-Replication-Package-ASE2017 /******************************************************************************* * Copyright (c) 2007, 2011 IBM Corporation and others. * All rights reserved. This program and the accompanying materials * are made available under the terms of the Eclipse Public License v1.0 * which accompanies this distribution, and is available at * http://www.eclipse.org/legal/epl-v10.html * * Contributors: * IBM Corporation - initial API and implementation *******************************************************************************/ package org.eclipse.jdt.internal.corext.refactoring.nls; import org.eclipse.core.runtime.CoreException; import org.eclipse.core.runtime.IPath; import org.eclipse.core.runtime.IProgressMonitor; import org.eclipse.core.runtime.NullProgressMonitor; import org.eclipse.core.runtime.OperationCanceledException; import org.eclipse.core.runtime.SubProgressMonitor; import org.eclipse.core.resources.IFile; import org.eclipse.core.resources.IStorage; import org.eclipse.core.resources.mapping.IResourceChangeDescriptionFactory; import org.eclipse.core.filebuffers.FileBuffers; import org.eclipse.core.filebuffers.ITextFileBufferManager; import org.eclipse.core.filebuffers.LocationKind; import org.eclipse.text.edits.ReplaceEdit; import org.eclipse.text.edits.TextEditGroup; import org.eclipse.jface.text.IDocument; import org.eclipse.ltk.core.refactoring.Change; import org.eclipse.ltk.core.refactoring.RefactoringStatus; import org.eclipse.ltk.core.refactoring.TextEditChangeGroup; import org.eclipse.ltk.core.refactoring.TextFileChange; import org.eclipse.ltk.core.refactoring.participants.CheckConditionsContext; import org.eclipse.ltk.core.refactoring.participants.RefactoringArguments; import org.eclipse.ltk.core.refactoring.participants.RefactoringProcessor; import org.eclipse.ltk.core.refactoring.participants.RenameArguments; import org.eclipse.ltk.core.refactoring.participants.RenameParticipant; import org.eclipse.ltk.core.refactoring.participants.ResourceChangeChecker; import org.eclipse.jdt.core.ICompilationUnit; import org.eclipse.jdt.core.IField; import org.eclipse.jdt.core.IImportDeclaration; import org.eclipse.jdt.core.IInitializer; import org.eclipse.jdt.core.IType; import org.eclipse.jdt.core.JavaModelException; import org.eclipse.jdt.core.dom.Modifier; import org.eclipse.jdt.internal.corext.util.Messages; import org.eclipse.jdt.internal.ui.JavaPlugin; /** * @since 3.4 */ public class NLSAccessorFieldRenameParticipant extends RenameParticipant { private IField fField; private String fNewName; private TextFileChange fChange; public NLSAccessorFieldRenameParticipant() { } @Override public String getName() { return NLSMessages.NLSAccessorFieldRenameParticipant_participantName; } @Override public RefactoringStatus checkConditions(IProgressMonitor pm, CheckConditionsContext context) throws OperationCanceledException { if (pm == null) pm = new NullProgressMonitor(); //$NON-NLS-1$ pm.beginTask("", 100); try { ICompilationUnit unit = fField.getCompilationUnit(); if (unit == null) return null; IType[] types = unit.getTypes(); if (types.length > 1) return null; if (!isPotentialNLSAccessor(unit)) return null; IStorage resourceBundle = NLSHintHelper.getResourceBundle(unit); if (!(resourceBundle instanceof IFile)) return null; pm.worked(50); IPath propertyFilePath = resourceBundle.getFullPath(); ITextFileBufferManager manager = FileBuffers.getTextFileBufferManager(); try { manager.connect(propertyFilePath, LocationKind.IFILE, new 
SubProgressMonitor(pm, 25)); IDocument document = manager.getTextFileBuffer(propertyFilePath, LocationKind.IFILE).getDocument(); PropertyFileDocumentModel model = new PropertyFileDocumentModel(document); KeyValuePair oldPair = model.getKeyValuePair(fField.getElementName()); if (oldPair == null) return null; String value = oldPair.getValue(); KeyValuePair newPair = new KeyValuePair(fNewName, value); ReplaceEdit edit = model.replace(oldPair, newPair); if (edit == null) return null; fChange = new //$NON-NLS-1$ TextFileChange(//$NON-NLS-1$ "", //$NON-NLS-1$ (IFile) resourceBundle); fChange.setEdit(edit); String changeDescription = Messages.format(NLSMessages.NLSAccessorFieldRenameParticipant_changeDescription, new Object[] { fField.getElementName(), fNewName }); fChange.addTextEditChangeGroup(new TextEditChangeGroup(fChange, new TextEditGroup(changeDescription, edit))); ResourceChangeChecker checker = context.getChecker(ResourceChangeChecker.class); IResourceChangeDescriptionFactory deltaFactory = checker.getDeltaFactory(); deltaFactory.change((IFile) resourceBundle); } finally { manager.disconnect(propertyFilePath, LocationKind.IFILE, new SubProgressMonitor(pm, 25)); } } catch (JavaModelException e) { JavaPlugin.log(e); return RefactoringStatus.createErrorStatus(NLSMessages.NLSAccessorFieldRenameParticipant_error_description); } catch (CoreException e) { JavaPlugin.log(e); return RefactoringStatus.createErrorStatus(NLSMessages.NLSAccessorFieldRenameParticipant_error_description); } finally { pm.done(); } return new RefactoringStatus(); } @Override public Change createChange(IProgressMonitor pm) throws CoreException, OperationCanceledException { return fChange; } @Override protected boolean initialize(Object element) { fField = (IField) element; return true; } @Override public boolean initialize(RefactoringProcessor processor, Object element, RefactoringArguments arguments) { fNewName = ((RenameArguments) arguments).getNewName(); return super.initialize(processor, element, arguments); } /* * Be conservative, for every unit this returns true an AST will to be created! */ private static boolean isPotentialNLSAccessor(ICompilationUnit unit) throws JavaModelException { IType type = unit.getTypes()[0]; if (!type.exists()) return false; IField bundleNameField = getBundleNameField(type.getFields()); if (bundleNameField == null) return false; if (!importsOSGIUtil(unit)) return false; IInitializer[] initializers = type.getInitializers(); for (int i = 0; i < initializers.length; i++) { if (Modifier.isStatic(initializers[0].getFlags())) return true; } return false; } private static IField getBundleNameField(IField[] fields) { for (int i = 0; i < fields.length; i++) { if (//$NON-NLS-1$ "BUNDLE_NAME".equals(fields[i].getElementName())) return fields[i]; } return null; } private static boolean importsOSGIUtil(ICompilationUnit unit) throws JavaModelException { IImportDeclaration[] imports = unit.getImports(); for (int i = 0; i < imports.length; i++) { if (//$NON-NLS-1$ imports[i].getElementName().startsWith("org.eclipse.osgi.util.")) return true; } return false; } }
<filename>frontend/yang/schemas.service.ts<gh_stars>0 import { Injectable } from '@angular/core'; import { Http, Headers, Response, RequestOptions, URLSearchParams } from '@angular/http'; import { Observable } from 'rxjs/Observable'; import 'rxjs/add/operator/catch'; import 'rxjs/add/operator/map'; import { Schema } from '../inventory/schema'; @Injectable() export class SchemasService { public schemas: Schema[]; public activeSchema: string; constructor( private http: Http ) { this.loadSchemas(); this.activeSchema = localStorage.getItem('activeSchema'); if (!this.schemas) { this.schemas = null; } if (!this.activeSchema) { this.activeSchema = ""; } else if (!(this.activeSchema in this.schemas)) { if (this.schemas.length) { this.activeSchema = this.schemas[0]['name']; } else { this.activeSchema = ""; } } } storeSchemas() { if (this.schemas) { localStorage.setItem('schemas', JSON.stringify(this.schemas)); } else { localStorage.removeItem('schemas'); } } loadSchemas() { this.schemas = JSON.parse(localStorage.getItem('schemas')); } schemasKeys() { if (this.schemas) { return Object.keys(this.schemas); } } /* getSchemaKey(schema: Schema) { if (!schema) { return null; } else if ('revision' in schema) { return schema.name + '@' + schema.revision + '.yang'; } else { return schema.name + '.yang'; } } */ getActiveSchema(key: string = this.activeSchema): Schema { if (key in this.schemas) { return this.schemas[key]; } else { return null; } } changeActiveSchemaKey(key: string): Schema { if (key && (key in this.schemas)) { this.activeSchema = key; localStorage.setItem('activeSchema', this.activeSchema); } return this.schemas[key]; } getSchemas() { return this.http.get( '/netopeer/inventory/schemas' ) .map(( resp: Response ) => resp.json()).toPromise(); } show( key: string, schema: Schema) { let newSchema = true; if (key in this.schemas) { newSchema = false; schema = this.schemas[key]; } if (!('data' in schema)) { let params = new URLSearchParams(); params.set('key', key); let options = new RequestOptions({ search: params }); this.http.get('/netopeer/inventory/schema', options) .map((resp: Response) => resp.json()).toPromise().then(result => { if (result['success']) { schema['data'] = result['data']; this.storeSchemas(); } }); } if (newSchema) { this.schemas[key] = schema; this.storeSchemas(); } } close( key: string ) { let index = Object.keys( this.schemas ).indexOf( key ); if ( this.activeSchema == key ) { if ( index > 0 ) { this.changeActiveSchemaKey( Object.keys( this.schemas )[index - 1] ) } else if ( Object.keys( this.schemas ).length > 1 ) { this.changeActiveSchemaKey( Object.keys( this.schemas )[1] ) } else { this.activeSchema = null; localStorage.removeItem('activeSchema'); } } delete this.schemas[key]; this.storeSchemas(); } addSchema( schema: File ) { let headers = new Headers( { 'specific-content-type': '' } ); let options = new RequestOptions( { headers: headers } ); let input = new FormData(); input.append( "schema", schema ); return this.http.post( '/netopeer/inventory/schemas', input, options ) .map(( resp: Response ) => resp.json() ) .catch(( err: Response | any ) => Observable.throw( err ) ); } rmSchema(key: string) { let options = new RequestOptions( { body: JSON.stringify(key) } ); return this.http.delete( '/netopeer/inventory/schemas', options ) .map(( resp: Response ) => resp.json() ) .catch(( err: Response | any ) => Observable.throw( err ) ); } }
import struct
from datetime import datetime


def parse_input_segment(ifile, player):
    # `ie` is the project's generated flatbuffers module for InputElement and
    # `parse_type` is a helper defined elsewhere in this codebase; both are
    # assumed to be importable from the surrounding package.
    magic, len = struct.unpack("4sI", ifile.read(8))
    if magic != b"FINP":
        return None
    inputbuf = bytearray(ifile.read(len))
    vinput = ie.InputElement.GetRootAsInputElement(inputbuf, 0)
    timestamp = datetime.fromtimestamp(vinput.Timestamp() / 1000000)
    typecode = vinput.TypeType()
    return dict(
        tick=vinput.Tick(),
        timestamp=str(timestamp),
        playercode=vinput.Playercode(),
        player=player.get(vinput.Playercode(), {"name": None})["name"],
        typeinfo=parse_type(vinput.Type(), typecode)
    )
import numpy as np
from sklearn.feature_selection import mutual_info_regression


def mi_sklearn(trace, var_names, disc_features=False, log2=True, n_neigh=20):
    """Estimate the mutual information between two posterior variables.

    `trace` is expected to expose an ArviZ-style `posterior` group; `var_names`
    must name exactly two variables. Returns the estimate in bits when `log2`
    is True, otherwise in nats.
    """
    assert len(var_names) == 2, "var_names must contain exactly two elements"
    mi_nat = mutual_info_regression(
        trace.posterior[var_names[0]].values.flatten().reshape(-1, 1),
        trace.posterior[var_names[1]].values.flatten(),
        discrete_features=disc_features,
        n_neighbors=n_neigh,
    )
    return mi_nat / np.log(2) if log2 else mi_nat
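# A minimal usage sketch for mi_sklearn above. It assumes ArviZ is installed
# and builds a synthetic posterior with az.from_dict; the variable names "a"
# and "b" are made up for illustration, not taken from the original project.
import arviz as az
import numpy as np

rng = np.random.default_rng(0)
a = rng.normal(size=(2, 500))            # 2 chains x 500 draws
b = 0.7 * a + rng.normal(size=(2, 500))  # correlated with "a"

idata = az.from_dict(posterior={"a": a, "b": b})
print(mi_sklearn(idata, ["a", "b"]))     # mutual information estimate in bits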
// Returns the inclusive left-most position adjusted for clipped bases.
int32_t SamRecord::get0BasedUnclippedStart()
{
    myStatus = SamStatus::SUCCESS;
    // The unclipped start offset is computed lazily from the CIGAR string.
    if(myUnclippedStartOffset == -1)
    {
        parseCigar();
    }
    return(myRecordPtr->myPosition - myUnclippedStartOffset);
}
package com.google.android.gms.internal;

import com.google.android.gms.common.internal.zzbo;
import java.util.concurrent.Callable;
import java.util.concurrent.FutureTask;

final class zzcgj<V> extends FutureTask<V> implements Comparable<zzcgj> {
    private final String zzbsg;
    private /* synthetic */ zzcgg zzbsh;
    private final long zzbsi = zzcgg.zzbsf.getAndIncrement();
    private final boolean zzbsj;

    zzcgj(zzcgg zzcgg, Runnable runnable, String str) {
        // super() must be the first statement in a constructor.
        super(runnable, null);
        this.zzbsh = zzcgg;
        zzbo.zzu(str);
        this.zzbsg = str;
        this.zzbsj = false;
        if (this.zzbsi == Long.MAX_VALUE) {
            zzcgg.zzwF().zzyx().log("Tasks index overflow");
        }
    }

    zzcgj(zzcgg zzcgg, Callable<V> callable, boolean z, String str) {
        super(callable);
        this.zzbsh = zzcgg;
        zzbo.zzu(str);
        this.zzbsg = str;
        this.zzbsj = z;
        if (this.zzbsi == Long.MAX_VALUE) {
            zzcgg.zzwF().zzyx().log("Tasks index overflow");
        }
    }

    public final int compareTo(zzcgj zzcgj) {
        if (this.zzbsj != zzcgj.zzbsj) {
            return this.zzbsj ? -1 : 1;
        } else {
            if (this.zzbsi < zzcgj.zzbsi) {
                return -1;
            }
            if (this.zzbsi > zzcgj.zzbsi) {
                return 1;
            }
            this.zzbsh.zzwF().zzyy().zzj("Two tasks share the same index. index", Long.valueOf(this.zzbsi));
            return 0;
        }
    }

    protected final void setException(Throwable th) {
        this.zzbsh.zzwF().zzyx().zzj(this.zzbsg, th);
        if (th instanceof zzcgh) {
            Thread.getDefaultUncaughtExceptionHandler().uncaughtException(Thread.currentThread(), th);
        }
        super.setException(th);
    }
}
import {injectable, inject} from 'inversify'; import {Inject} from '../common/Injectables'; import {Intent} from '../intents'; import {IntentResolution, Application} from '../../client/main'; import {FDC3Error, RaiseIntentError} from '../../client/errors'; import {Model} from '../model/Model'; import {APIToClientTopic, ReceiveIntentPayload} from '../../client/internal'; import {APIHandler} from '../APIHandler'; import {collateClientCalls, ClientCallsResult} from '../utils/helpers'; import {ResolverResult, ResolverHandlerBinding} from './ResolverHandler'; @injectable() export class IntentHandler { private readonly _model: Model; private readonly _resolver: ResolverHandlerBinding; private readonly _apiHandler: APIHandler<APIToClientTopic>; private _resolvePromise: Promise<IntentResolution> | null; constructor( @inject(Inject.MODEL) model: Model, @inject(Inject.RESOLVER) resolver: ResolverHandlerBinding, @inject(Inject.API_HANDLER) apiHandler: APIHandler<APIToClientTopic> ) { this._model = model; this._resolver = resolver; this._apiHandler = apiHandler; this._resolvePromise = null; } public async raise(intent: Intent): Promise<IntentResolution> { if (hasTarget(intent)) { return this.raiseWithTarget(intent); } else { return this.startResolve(intent, this.queueResolve.bind(this)); } } private async raiseWithTarget(intent: IntentWithTarget): Promise<IntentResolution> { const apps = await this._model.getApplicationsForIntent(intent.type, intent.context.type); const targetApp = apps.find((app) => app.name === intent.target); if (targetApp !== undefined) { // Target intent handles intent with given context, so fire return this.fireIntent(intent, targetApp); } else if (await this._model.existsAppForName(intent.target)) { // Target exists but does not handle intent with given context throw new FDC3Error( RaiseIntentError.TargetAppDoesNotHandleIntent, `App '${intent.target}' does not handle intent '${intent.type}' with context '${intent.context.type}'` ); } else { // Target does not exist throw new FDC3Error( RaiseIntentError.TargetAppNotAvailable, `Couldn't resolve intent target '${intent.target}'. 
No matching app in directory or currently running.` ); } } private async startResolve( intent: Intent, handleAppChoice: (intent: Intent, apps: Application[]) => Promise<IntentResolution> ): Promise<IntentResolution> { const apps: Application[] = await this._model.getApplicationsForIntent(intent.type, intent.context.type); if (apps.length === 0) { throw new FDC3Error(RaiseIntentError.NoAppsFound, 'No applications available to handle this intent'); } else if (apps.length === 1) { console.log(`App '${apps[0].name}' found to resolve intent '${intent.type}, firing intent'`); // Resolve intent immediately return this.fireIntent(intent, apps[0]); } else { console.log(`${apps.length} apps found to resolve intent '${intent.type}', delegating app choice'`); return handleAppChoice(intent, apps); } } private async queueResolve(intent: Intent, applications: Application[]): Promise<IntentResolution> { if (this._resolvePromise) { console.log(`Resolver showing, re-resolving intent '${intent.type}' when resolver closes'`); this._resolvePromise = this._resolvePromise.catch(() => {}).then(() => this.startResolve(intent, this.showResolver.bind(this))); } else { this._resolvePromise = this.showResolver(intent, applications); } const resolvePromise = this._resolvePromise.then((result) => { if (this._resolvePromise === resolvePromise) { this._resolvePromise = null; } return result; }, (error) => { if (this._resolvePromise === resolvePromise) { this._resolvePromise = null; } throw error; }); this._resolvePromise = resolvePromise; return resolvePromise; } private async showResolver(intent: Intent, applications: Application[]): Promise<IntentResolution> { // Show resolver const selection: ResolverResult | null = await this._resolver.handleIntent(intent, applications).catch((e) => { console.warn(e); return null; }); if (!selection) { throw new FDC3Error(RaiseIntentError.ResolverClosedOrCancelled, 'Resolver closed or cancelled'); } // Handle response console.log(`App ${selection.app.name} selected to resolve intent '${intent.type}', firing intent`); return this.fireIntent(intent, selection.app); } private async fireIntent(intent: Intent, appInfo: Application): Promise<IntentResolution> { const listeningWindows = await this._model.expectWindowsForApp( appInfo, (window) => window.hasIntentListener(intent.type), (window) => window.waitForReadyToReceiveIntent(intent.type) ); let data: unknown = undefined; if (listeningWindows.length > 0) { const payload: ReceiveIntentPayload = {context: intent.context, intent: intent.type}; const [result, returnData] = await collateClientCalls(listeningWindows.map((window) => { return this._apiHandler.dispatch(window.identity, APIToClientTopic.RECEIVE_INTENT, payload); })); data = returnData; if (result === ClientCallsResult.ALL_FAILURE) { throw new FDC3Error(RaiseIntentError.SendIntentError, 'Error(s) thrown by client attempting to handle intent'); } else if (result === ClientCallsResult.TIMEOUT) { throw new FDC3Error(RaiseIntentError.SendIntentTimeout, 'Timeout waiting for client to handle intent'); } } else { throw new FDC3Error(RaiseIntentError.SendIntentNoHandler, `No intent handler added for intent: ${intent.type}`); } const resolution: IntentResolution = { source: appInfo.name, version: '1.0.0', data }; // Handle next queued intent console.log(`Finished intent: ${intent.type}`, resolution); return resolution; } } interface IntentWithTarget extends Intent { // Overwrite optional `target` from Intent, making it mandatory target: string; } // Guard to help narrow down Intent into 
IntentWithTarget function hasTarget(intent: Intent): intent is IntentWithTarget { return !!intent.target; }
/** * @secjs/orm * * (c) <NAME> <<EMAIL>> * * For the full copyright and license information, please view the LICENSE * file that was distributed with this source code. */ import { Model } from '../Model' import { Is } from '@secjs/utils' import { DatabaseContract } from '@secjs/database' import { ModelQueryBuilder } from './ModelQueryBuilder' import { HasOneContract } from '../Contracts/HasOneContract' import { BelongsToContract } from '../Contracts/BelongsToContract' import { ManyToManyContract } from '../Contracts/ManyToManyContract' import { RelationContractTypes } from '../Types/RelationContractTypes' import { RelationContractGenerator } from './RelationContractGenerator' import { NotMappedColumnException } from '../Exceptions/NotMappedColumnException' export class ModelGenerator { private readonly DB: DatabaseContract private readonly Model: typeof Model constructor(model: typeof Model, DB: DatabaseContract) { this.DB = DB this.Model = model } async generate(flatData: any | any[]) { let model = this.flatDataToInstance(flatData) const includedRelations = this.Model.getIncludedRelations() for (const includedRelation of includedRelations) { model = await this.includeRelation(model, includedRelation) } return model } flatDataToInstance(flatData: any | any[]) { const populateInstance = (data, instance) => { const SubClassModel = instance.class const columnDictionary = SubClassModel.columnDictionary Object.keys(data).forEach(key => { if (key === '__v') return if (!columnDictionary[key]) { throw new NotMappedColumnException(key, SubClassModel.name) } instance[columnDictionary[key]] = data[key] }) return instance } if (Is.Array(flatData)) { const models = [] // @ts-ignore flatData.forEach(d => models.push(populateInstance(d, new this.Model()))) return models } // @ts-ignore return populateInstance(flatData, new this.Model()) } private async includeRelation( model: typeof Model | typeof Model[], relation: RelationContractTypes, ) { if (Is.Array(model)) { for (const d of model) { const index = model.indexOf(d) model[index] = await this[relation.relationType](d, relation as any) } return model } return this[relation.relationType](model, relation as any) } private async hasOne( model: typeof Model, relation: HasOneContract, ): Promise<typeof Model> { const RelationModel = relation.model() const primaryKey = relation.primaryKey const foreignKey = relation.foreignKey const propertyName = relation.propertyName const query = new ModelQueryBuilder(RelationModel, this.DB) // Execute client callback if it exists if (relation.callback) await relation.callback(query) model[propertyName] = await query .where({ [foreignKey]: model[primaryKey] }) .get() return model } private async hasMany( model: typeof Model, relation: HasOneContract, ): Promise<typeof Model> { const RelationModel = relation.model() const primaryKey = relation.primaryKey const foreignKey = relation.foreignKey const propertyName = relation.propertyName const query = new ModelQueryBuilder(RelationModel, this.DB) // Execute client callback if it exists if (relation.callback) await relation.callback(query) model[propertyName] = await query .where({ [foreignKey]: model[primaryKey] }) .getMany() return model } private async belongsTo( model: typeof Model, relation: BelongsToContract, ): Promise<typeof Model> { relation = new RelationContractGenerator() // @ts-ignore .setModel(model.class) .setRelationModel(relation.model) .belongsTo(relation.propertyName, relation, true) const RelationModel = relation.model() const primaryKey = 
relation.primaryKey const foreignKey = relation.foreignKey const propertyName = relation.propertyName const query = new ModelQueryBuilder(RelationModel, this.DB) // Execute client callback if it exists if (relation.callback) await relation.callback(query) model[propertyName] = await query .where({ [primaryKey]: model[foreignKey] }) .get() return model } private async manyToMany( model: typeof Model, relation: ManyToManyContract, ): Promise<typeof Model> { relation = new RelationContractGenerator() // @ts-ignore .setModel(model.class) .setRelationModel(relation.model) .manyToMany(relation.propertyName, relation, true) const RelationModel = relation.model() const propertyName = relation.propertyName const pivotTableName = relation.pivotTableName const relationPrimaryKey = relation.relationPrimaryKey const pivotRelationForeignKey = relation.pivotRelationForeignKey const localPrimaryKey = relation.localPrimaryKey const pivotLocalForeignKey = relation.pivotLocalForeignKey const query = new ModelQueryBuilder(RelationModel, this.DB) // Using DB here because there is no PivotModel const pivotTableData = await this.DB.buildTable(pivotTableName) .buildWhere({ [pivotLocalForeignKey]: model[localPrimaryKey], }) .findMany() // @ts-ignore model.$extras = pivotTableData const relationIds = pivotTableData.map(d => d[pivotRelationForeignKey]) // Set DB table to RelationModel again this.DB.buildTable(RelationModel.table) // Execute client callback if it exists if (relation.callback) await relation.callback(query) model[propertyName] = await query // @ts-ignore .whereIn(relationPrimaryKey, relationIds) .getMany() return model } }
/** * Adds security groups to the master node of an existing EMR Cluster. * * @param request the EMR master security group add request * * @return the added EMR master security groups * @throws Exception if there were any errors adding the security groups to the cluster master. */ protected EmrMasterSecurityGroup addSecurityGroupsToClusterMasterImpl(EmrMasterSecurityGroupAddRequest request) throws Exception { validateAddSecurityGroupsToClusterMasterRequest(request); String accountId = request.getAccountId(); AwsParamsDto awsParamsDto = emrHelper.getAwsParamsDtoByAcccountId(accountId); NamespaceEntity namespaceEntity = namespaceDaoHelper.getNamespaceEntity(request.getNamespace()); EmrClusterDefinitionEntity emrClusterDefinitionEntity = emrClusterDefinitionDaoHelper.getEmrClusterDefinitionEntity(request.getNamespace(), request.getEmrClusterDefinitionName()); List<String> groupIds = null; String clusterName = emrHelper.buildEmrClusterName(namespaceEntity.getCode(), emrClusterDefinitionEntity.getName(), request.getEmrClusterName()); try { groupIds = emrDao.addEmrMasterSecurityGroups(emrHelper.getActiveEmrClusterId(request.getEmrClusterId(), clusterName, request.getAccountId()), request.getSecurityGroupIds(), awsParamsDto); } catch (AmazonServiceException ex) { handleAmazonException(ex, "An Amazon exception occurred while adding EMR security groups: " + herdStringHelper.buildStringWithDefaultDelimiter(request.getSecurityGroupIds()) + " to cluster: " + clusterName); } return createEmrClusterMasterGroupFromRequest(namespaceEntity.getCode(), emrClusterDefinitionEntity.getName(), request.getEmrClusterName(), groupIds); }
package cn.xy.leetcode.easy.linkedList;

import cn.xy.utils.ListNode;

/**
 * @author XiangYu
 * @create2021-04-06-21:33
 * Determine whether a singly linked list is a palindrome.
 * <p>
 * Example 1:
 * <p>
 * Input: 1->2
 * Output: false
 * Example 2:
 * <p>
 * Input: 1->2->2->1
 * Output: true
 * Follow-up:
 * Can you solve it in O(n) time and O(1) space?
 */
public class A0234PalindromeLinkedList {

    public boolean isPalindrome(ListNode head) {
        // An empty list or a single node is trivially a palindrome.
        if (head == null || head.next == null) {
            return true;
        }
        // Advance `fast` two steps per iteration while reversing the first
        // half of the list in place behind `slow`.
        ListNode slow = head, fast = head;
        ListNode pre = null;
        while (fast != null && fast.next != null) {
            fast = fast.next.next;
            ListNode temp = slow.next;
            slow.next = pre;
            pre = slow;
            slow = temp;
        }
        // For an odd-length list, skip the middle node.
        if (fast != null) {
            slow = slow.next;
        }
        // Compare the reversed first half with the second half.
        while (pre != null && slow != null) {
            if (pre.val != slow.val) {
                return false;
            }
            pre = pre.next;
            slow = slow.next;
        }
        return true;
    }
}
import sys

readline = sys.stdin.readline
s = readline().split()[0]


def z_Func(s):
    # Standard Z-function: z[i] is the length of the longest common prefix
    # of s and the suffix of s starting at position i.
    n = len(s)
    l, r = 0, 0
    z = [0 for _ in range(n)]
    for i in range(1, n):
        if i <= r:
            z[i] = min(r - i + 1, z[i - l])
        while i + z[i] < n and s[z[i]] == s[i + z[i]]:
            z[i] += 1
        if i + z[i] - 1 > r:
            l, r = i, i + z[i] - 1
    return z


tp = z_Func(s)
# Find a suffix that is also a prefix (z[i] == n - i) whose length does not
# exceed the largest z-value seen earlier, i.e. it also occurs in the middle.
j = 0
m = 0
for i in range(len(tp)):
    if tp[i] == len(tp) - i and tp[i] <= m:
        j = i
        break
    m = max(m, tp[i])

if j:
    print(s[:len(tp) - j])
else:
    print('Just a legend')
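# A small, self-contained sanity check of the Z-function used above. This is
# an illustrative sketch only: `z_of` re-implements the same algorithm so the
# snippet runs on its own, and the expected output was worked out by hand.
def z_of(t):
    n = len(t)
    l = r = 0
    z = [0] * n
    for i in range(1, n):
        if i <= r:
            z[i] = min(r - i + 1, z[i - l])
        while i + z[i] < n and t[z[i]] == t[i + z[i]]:
            z[i] += 1
        if i + z[i] - 1 > r:
            l, r = i, i + z[i] - 1
    return z

print(z_of("aabxaab"))  # expected: [0, 1, 0, 0, 3, 1, 0]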
#!/usr/bin/env python from __future__ import annotations import argparse import contextlib import datetime import os import pandas as pd import select import shlex import subprocess as sp import time from pathlib import Path from typing import Optional def memory_t(value): if isinstance(value, int): return value elif value.lower().endswith("g"): return int(value[:-1]) * 1_000_000_000 elif value.lower().endswith("m"): return int(value[:-1]) * 1_000_000 elif value.lower().endswith("k"): return int(value[:-1]) * 1000 else: return int(value) def _parse_args(): parser = argparse.ArgumentParser(allow_abbrev=False) parser.add_argument("results_csv", type=Path) parser.add_argument("artifact_path", type=Path) parser.add_argument( "-pf", "--properties_filename", type=str, default="properties.csv" ) parser.add_argument( "-n", "--ntasks", type=int, default=float("inf"), help="The max number of running verification tasks.", ) parser.add_argument( "-T", "--time", default=-1, type=float, help="The max running time in seconds." ) parser.add_argument( "-M", "--memory", default=-1, type=memory_t, help="The max allowed memory in bytes.", ) return parser.parse_known_args() @contextlib.contextmanager def lock(filename: Path, *args, **kwargs): lock_filename = filename.with_suffix(".lock") try: while True: try: lock_fd = os.open(lock_filename, os.O_CREAT | os.O_WRONLY | os.O_EXCL) break except IOError as e: pass yield finally: os.close(lock_fd) os.remove(lock_filename) def wait(pool, timeout=float("inf")): start_t = time.time() while timeout < 0 or time.time() - start_t < timeout: for index, task in enumerate(pool): if task.poll() is not None: stdout_lines = task.stdout.readlines() for line in stdout_lines: print(f"{{{task.problem_id} (STDOUT)}}: {line.strip()}") task.stdout_lines.extend(stdout_lines) stderr_lines = task.stderr.readlines() for line in stderr_lines: print(f"{{{task.problem_id} (STDERR)}}: {line.strip()}") task.stderr_lines.extend(stderr_lines) return pool.pop(index) for (name, stream, lines, buffer) in [ # ("STDOUT", task.stdout, task.stdout_lines, task.stdout_buffer), ("STDERR", task.stderr, task.stderr_lines, task.stderr_buffer), ]: while True: ready, _, _ = select.select([stream], [], [], 0) if not ready: break byte = stream.read(1) if not byte: break buffer[0] += byte buffered_lines = buffer[0].split("\n") buffer[0] = buffered_lines[-1] for line in buffered_lines[:-1]: lines.append(line) print(f"{{{task.problem_id} ({name})}}: {line.strip()}") for index, task in enumerate(pool): if task.poll() is not None: stdout_lines = task.stdout.readlines() for line in stdout_lines: print(f"{{{task.problem_id} (STDOUT)}}: {line.strip()}") task.stdout_lines.extend(stdout_lines) stderr_lines = task.stderr.readlines() for line in stderr_lines: print(f"{{{task.problem_id} (STDERR)}}: {line.strip()}") task.stderr_lines.extend(stderr_lines) return pool.pop(index) raise RuntimeError("Timeout while waiting for task completion.") def parse_verification_output(stdout_lines, stderr_lines): total_time: Optional[float] = None falsification_time: Optional[float] = None resmonitor_lines = [line for line in stderr_lines if "(resmonitor)" in line] resmonitor_result_line = resmonitor_lines[-1] falsification_start_time = [ datetime.datetime.strptime(" ".join(line.split()[1:3]), "%Y-%m-%d %H:%M:%S,%f") for line in stdout_lines if "Starting Falsifier" in line ] computed_falsification_time = None if len(falsification_start_time) > 0: final_time = datetime.datetime.strptime( " ".join(resmonitor_lines[-1].split()[1:3]), 
"%Y-%m-%d %H:%M:%S,%f" ) computed_falsification_time = ( final_time - falsification_start_time[0] ).total_seconds() if "finished successfully" in resmonitor_result_line: try: result_lines = [] at_result = False for line in stdout_lines: if line.strip() == "dnnf": at_result = True elif ( at_result and (" result:" in line) or (" total time:" in line) or (" falsification time:" in line) ): result_lines.append(line.strip()) result = result_lines[0].split(maxsplit=1)[-1] falsification_time = float(result_lines[1].split()[-1]) total_time = float(result_lines[2].split()[-1]) except Exception as e: result = f"VerificationRunnerError({type(e).__name__})" elif "Out of Memory" in resmonitor_result_line: result = "outofmemory" total_time = float(resmonitor_lines[-2].split()[-3][:-2]) falsification_time = computed_falsification_time elif "Timeout" in resmonitor_result_line: result = "timeout" total_time = float(resmonitor_lines[-2].split()[-3][:-2]) falsification_time = computed_falsification_time else: result = "!" print(" result:", result) print( " computed falsification time:", computed_falsification_time, ) print(" falsification time:", falsification_time) print(" total time:", total_time) results = { "Result": result, "TotalTime": total_time, "FalsificationTime": falsification_time, } return results def update_results(results_csv, task, results): with lock(results_csv): df = pd.read_csv(results_csv) for key, value in results.items(): df.at[(df["ProblemId"] == task.problem_id), key] = value df.to_csv(results_csv, index=False) def main(args, extra_args): with lock(args.results_csv): if not args.results_csv.exists(): with open(args.results_csv, "w+") as f: f.write("ProblemId,Result,FalsificationTime,TotalTime\n") properties = set() property_df = pd.read_csv(args.artifact_path / args.properties_filename) for row in property_df.itertuples(): properties.add(row.problem_id) pool = [] while len(properties) > 0: with lock(args.results_csv): df = pd.read_csv(args.results_csv) for problem_id in df["ProblemId"]: properties.discard(problem_id) if len(properties) == 0: break problem_id = properties.pop() df = df.append({"ProblemId": problem_id}, ignore_index=True) df.to_csv(args.results_csv, index=False) property_filename = ( property_df[(property_df["problem_id"] == problem_id)]["property_filename"] .unique() .item() ) network_names = ( property_df[(property_df["problem_id"] == problem_id)]["network_names"] .item() .split(":") ) network_filenames = ( property_df[(property_df["problem_id"] == problem_id)]["network_filenames"] .item() .split(":") ) networks = " ".join( [f"--network {n} {fn}" for n, fn in zip(network_names, network_filenames)] ) resmonitor = f"python {Path(__file__).absolute().parent}/resmonitor.py" resmonitor_args = f"{resmonitor} -M {args.memory} -T {args.time}" extra_args_str = " ".join(extra_args) falsifier_args = ( f"python -m dnnf {property_filename} {networks} {extra_args_str} -v" ) run_args = f"{resmonitor_args} {falsifier_args}" print(run_args) proc = sp.Popen( shlex.split(run_args), stdout=sp.PIPE, stderr=sp.PIPE, encoding="utf8", bufsize=1, cwd=args.artifact_path, ) proc.problem_id = problem_id proc.stdout_buffer = [""] proc.stderr_buffer = [""] proc.stdout_lines = [] proc.stderr_lines = [] pool.append(proc) while len(pool) >= args.ntasks: finished_task = wait(pool, timeout=2 * args.time) print("FINISHED:", " ".join(proc.args)) results = parse_verification_output( finished_task.stdout_lines, finished_task.stderr_lines ) update_results(args.results_csv, finished_task, results) while 
len(pool): finished_task = wait(pool, timeout=2 * args.time) print("FINISHED:", " ".join(finished_task.args)) results = parse_verification_output( finished_task.stdout_lines, finished_task.stderr_lines ) update_results(args.results_csv, finished_task, results) if __name__ == "__main__": main(*_parse_args())
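# Illustrative usage of the memory_t argument parser defined at the top of
# this script. This is a hedged sketch, not part of the original runner: the
# expected values simply restate the suffix arithmetic in memory_t.
def _memory_t_examples():
    assert memory_t("2g") == 2_000_000_000
    assert memory_t("512M") == 512_000_000
    assert memory_t("64k") == 64_000
    assert memory_t(1024) == 1024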
/* Flirt, an SWF rendering library Copyright (c) 2004-2006 <NAME> <<EMAIL>> All rights reserved. http://www.opaque.net/flirt/ This code is distributed under the two-clause BSD license. Read the LICENSE file or visit the URL above for details */ #ifndef DD_PLAYER_DRAWABLE_H_INCLUDED #define DD_PLAYER_DRAWABLE_H_INCLUDED #include "../dd.h" typedef struct _ddDrawable ddDrawable; #include "character.h" #include "../render/image.h" #include "../render/rect.h" #include "../render/matrix.h" #include "../render/cxform.h" #include "../render/updatelist.h" /* when we move the timeline, each display item is marked if it's still in the frame. Then we clear out the unmarked items.. */ #define DRAWABLE_MARK (1<<0) #define DRAWABLE_NEEDSUPDATE (1<<1) struct _ddDrawable { ddCharacterType type; ddCharacter* character; void (*getUpdateList)(struct _ddDrawable* d, ddUpdateList* list, ddMatrix matrix); void (*drawInImage)(struct _ddDrawable* d, ddImage* image, ddUpdateList* list, ddMatrix matrix, ddCXform cXform, ddRect clipRect); void (*destroy)(struct _ddDrawable* d); int flags; int depth; int maskLevel; ddDrawable* maskShape; // pointers for linked list ddDrawable* next; ddDrawable* last; ddMatrix matrix; ddRect bounds; ddCXform cXform; }; static inline void ddDrawable_init(ddDrawable* d) { d->flags = 0; d->matrix = ddIdentityMatrix; d->bounds = ddInvalidRect; d->cXform = ddEmptyCXform; d->maskLevel = -1; d->maskShape = NULL; d->next = NULL; d->last = NULL; } static inline void ddDrawable_getUpdateList(ddDrawable* d, ddUpdateList* list, ddMatrix matrix) { d->getUpdateList(d, list, matrix); if ( d->flags & DRAWABLE_NEEDSUPDATE ) ddUpdateList_includeRect(list, d->bounds); d->flags = 0; } static inline void ddDrawable_drawInImage(ddDrawable* d, ddImage* image, ddUpdateList* list, ddMatrix matrix, ddCXform cXform, ddRect clipRect) { d->drawInImage(d, image, list, matrix, cXform, clipRect); } static inline void dd_destroyDrawable(ddDrawable* d) { d->destroy(d); } static inline void ddDrawable_setMaskLevel(ddDrawable* d, int level) { d->maskLevel = level; } void ddDrawable_setMaskDrawable(ddDrawable* d, ddDrawable* mask); void ddDrawable_linkDrawableAfter(ddDrawable* left, ddDrawable* right); void ddDrawable_linkDrawableBefore(ddDrawable* left, ddDrawable* right); void ddDrawable_replace(ddDrawable* old, ddDrawable* new); int ddDrawable_hitTest(ddDrawable* d, fixed x, fixed y); void ddDrawable_setMatrix(ddDrawable* d, ddMatrix matrix); ddMatrix ddDrawable_getMatrix(ddDrawable* d); void ddDrawable_setCXform(ddDrawable* d, ddCXform cXform, boolean timeline); ddCXform ddDrawable_getCXform(ddDrawable* d); static inline int ddDrawable_getDepth(ddDrawable* d) { return d->depth; } static inline void ddDrawable_setNeedsUpdate(ddDrawable* d) { d->flags |= DRAWABLE_NEEDSUPDATE; } static inline void ddDrawable_setMark(ddDrawable* d) { d->flags |= DRAWABLE_MARK; } static inline void ddDrawable_clearMark(ddDrawable* d) { d->flags &= ~DRAWABLE_MARK; } static inline int ddDrawable_getMark(ddDrawable* d) { return (d->flags & DRAWABLE_MARK); } #endif /* DD_PLAYER_DRAWABLE_H_INCLUDED */
#define VMA_IMPLEMENTATION #ifdef _DEBUG # define VMA_DEBUG_INITIALIZE_ALLOCATIONS 1 # define VMA_DEBUG_MARGIN 16 # define VMA_DEBUG_DETECT_CORRUPTION 1 #endif #include <vk_mem_alloc.h> #ifdef __WINDOWS__ #include <windows.h> #include <vulkan/vulkan_win32.h> #else #define GLFW_INCLUDE_NONE #define GLFW_INCLUDE_VULKAN #include <GLFW/glfw3.h> #endif #include "vk-utils.h" #include <array> #include <iostream> uint32_t getFormatSize(VkFormat format, uint32_t& outDivisor) { outDivisor = 1; switch (format) { case VK_FORMAT_BC1_RGBA_UNORM_BLOCK: case VK_FORMAT_BC1_RGBA_SRGB_BLOCK: case VK_FORMAT_BC1_RGB_UNORM_BLOCK: case VK_FORMAT_BC1_RGB_SRGB_BLOCK: case VK_FORMAT_BC4_UNORM_BLOCK: case VK_FORMAT_BC4_SNORM_BLOCK: outDivisor = 2; return 1; case VK_FORMAT_BC2_UNORM_BLOCK: case VK_FORMAT_BC2_SRGB_BLOCK: case VK_FORMAT_BC3_UNORM_BLOCK: case VK_FORMAT_BC3_SRGB_BLOCK: case VK_FORMAT_BC5_SNORM_BLOCK: case VK_FORMAT_BC6H_UFLOAT_BLOCK: case VK_FORMAT_BC5_UNORM_BLOCK: case VK_FORMAT_BC6H_SFLOAT_BLOCK: case VK_FORMAT_BC7_UNORM_BLOCK: case VK_FORMAT_BC7_SRGB_BLOCK: return 1; case VK_FORMAT_R8G8B8_UNORM: return 3; case VK_FORMAT_R8G8B8A8_UNORM: return 4; case VK_FORMAT_R32G32_SFLOAT: return 8; case VK_FORMAT_R32G32B32_SFLOAT: return 12; default: assert(false); // please implement me. return 0; }; } uint32_t getFormatSize(VkFormat format) { uint32_t unused; return getFormatSize(format, unused); } bool hasStencilComponent(VkFormat format) { switch (format) { case VK_FORMAT_D32_SFLOAT_S8_UINT: case VK_FORMAT_D24_UNORM_S8_UINT: return true; default: return false; } } bool hasDepthComponent(VkFormat format) { switch (format) { case VK_FORMAT_D32_SFLOAT: case VK_FORMAT_D32_SFLOAT_S8_UINT: case VK_FORMAT_D24_UNORM_S8_UINT: return true; default: return false; } } uint32_t findMemoryType(VkPhysicalDevice device, uint32_t typeFilter, VkMemoryPropertyFlags properties) { VkPhysicalDeviceMemoryProperties memProperties; vkGetPhysicalDeviceMemoryProperties(device, &memProperties); for (uint32_t i = 0ul; i < memProperties.memoryTypeCount; i++) if ((typeFilter & (1 << i)) && (memProperties.memoryTypes[i].propertyFlags & properties)) return i; return 0; } VkFormat findSupportedFormat(VkPhysicalDevice device, const std::vector<VkFormat>& candidates, VkImageTiling tiling, VkFormatFeatureFlags features) { for (VkFormat format : candidates) { VkFormatProperties props; vkGetPhysicalDeviceFormatProperties(device, format, &props); if (tiling == VK_IMAGE_TILING_LINEAR && (props.linearTilingFeatures & features) == features) return format; else if (tiling == VK_IMAGE_TILING_OPTIMAL && (props.optimalTilingFeatures & features) == features) return format; } return VK_FORMAT_UNDEFINED; } VkCommandPool createCommandPool(VkDevice device, VkCommandPoolCreateFlags flags, int queueFamilyIndex) { VkCommandPoolCreateInfo cmdPoolInfo = {VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO}; cmdPoolInfo.flags = flags; cmdPoolInfo.queueFamilyIndex = queueFamilyIndex; VkCommandPool outPool; VK_CHECK(vkCreateCommandPool(device, &cmdPoolInfo, nullptr, &outPool)); return outPool; } std::vector<VkCommandBuffer> allocateCommandBuffers(VkDevice device, VkCommandPool pool, VkCommandBufferLevel level, uint32_t count) { std::vector<VkCommandBuffer> commandBuffers(count); VkCommandBufferAllocateInfo cmdInfo = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO}; cmdInfo.commandPool = pool; cmdInfo.level = level; cmdInfo.commandBufferCount = count; VK_CHECK(vkAllocateCommandBuffers(device, &cmdInfo, commandBuffers.data())); return commandBuffers; } VkDescriptorSet 
allocateDescriptorSet(VkDevice device, VkDescriptorPool pool, VkDescriptorSetLayout layout) { VkDescriptorSetAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO }; allocInfo.descriptorPool = pool; allocInfo.descriptorSetCount = 1; allocInfo.pSetLayouts = &layout; VkDescriptorSet outDescriptorSet; VK_CHECK(vkAllocateDescriptorSets(device, &allocInfo, &outDescriptorSet)); return outDescriptorSet; } std::vector<VkDescriptorSet> allocateDescriptorSets(VkDevice device, VkDescriptorPool pool, const VkDescriptorSetLayout* layouts, uint32_t layoutCount) { VkDescriptorSetAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO }; allocInfo.descriptorPool = pool; allocInfo.descriptorSetCount = layoutCount; allocInfo.pSetLayouts = layouts; std::vector<VkDescriptorSet> outDescriptorSets(layoutCount); VK_CHECK(vkAllocateDescriptorSets(device, &allocInfo, outDescriptorSets.data())); return outDescriptorSets; } VkShaderModule createShaderModule(VkDevice device, size_t codeSize, const uint32_t* codePtr) { VkShaderModuleCreateInfo info = { VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO }; info.codeSize = codeSize; info.pCode = codePtr; VkShaderModule vkShaderModule; VK_CHECK(vkCreateShaderModule(device, &info, nullptr, &vkShaderModule)); return vkShaderModule; }; VkDescriptorSetLayout createDescriptorSetLayout( VkDevice device, VkDescriptorSetLayoutCreateFlags flags, const VkDescriptorSetLayoutBinding* bindings, const VkDescriptorBindingFlags* bindingFlags, uint32_t bindingCount) { VkDescriptorSetLayoutBindingFlagsCreateInfo bindingFlagsInfo = { VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO }; bindingFlagsInfo.bindingCount = bindingCount; bindingFlagsInfo.pBindingFlags = bindingFlags; VkDescriptorSetLayoutCreateInfo layoutInfo = { VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO }; layoutInfo.pNext = &bindingFlagsInfo; layoutInfo.flags = flags; layoutInfo.bindingCount = bindingCount; layoutInfo.pBindings = bindings; VkDescriptorSetLayout layout; VK_CHECK(vkCreateDescriptorSetLayout(device, &layoutInfo, nullptr, &layout)); return layout; } VkDescriptorUpdateTemplate createDescriptorUpdateTemplate( VkDevice device, const VkDescriptorUpdateTemplateCreateInfo& createInfo) { VkDescriptorUpdateTemplate descriptorTemplate; vkCreateDescriptorUpdateTemplate(device, &createInfo, nullptr, &descriptorTemplate); return descriptorTemplate; } void copyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer, VkDeviceSize size) { VkBufferCopy copyRegion = {}; copyRegion.srcOffset = 0ull; copyRegion.dstOffset = 0ull; copyRegion.size = size; vkCmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, 1, &copyRegion); } std::tuple<VkBuffer, VmaAllocation> createBuffer( VmaAllocator allocator, VkDeviceSize size, VkBufferUsageFlags usage, VkMemoryPropertyFlags flags, const char* debugName) { VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; bufferInfo.size = size; bufferInfo.usage = usage; bufferInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE; VmaAllocationCreateInfo allocInfo = {}; allocInfo.flags = VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT; allocInfo.usage = (flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) ? 
VMA_MEMORY_USAGE_GPU_ONLY : VMA_MEMORY_USAGE_UNKNOWN; allocInfo.requiredFlags = flags; allocInfo.memoryTypeBits = 0ul; // memRequirements.memoryTypeBits; allocInfo.pUserData = (void*)debugName; VkBuffer outBuffer; VmaAllocation outBufferMemory; VK_CHECK(vmaCreateBuffer( allocator, &bufferInfo, &allocInfo, &outBuffer, &outBufferMemory, nullptr)); return std::make_tuple(outBuffer, outBufferMemory); } std::tuple<VkBuffer, VmaAllocation> createBuffer( VkCommandBuffer commandBuffer, VmaAllocator allocator, VkBuffer stagingBuffer, VkDeviceSize bufferSize, VkBufferUsageFlags usage, VkMemoryPropertyFlags memoryFlags, const char* debugName) { assert(bufferSize > 0); VkBuffer outBuffer; VmaAllocation outBufferMemory; if (stagingBuffer) { std::tie(outBuffer, outBufferMemory) = createBuffer( allocator, bufferSize, usage | VK_BUFFER_USAGE_TRANSFER_DST_BIT, memoryFlags, debugName); copyBuffer(commandBuffer, stagingBuffer, outBuffer, bufferSize); } else { std::tie(outBuffer, outBufferMemory) = createBuffer( allocator, bufferSize, usage, memoryFlags, debugName); } return std::make_tuple(outBuffer, outBufferMemory); } std::tuple<VkBuffer, VmaAllocation> createStagingBuffer( VmaAllocator allocator, const void* srcData, size_t srcDataSize, const char* debugName) { auto bufferData = createBuffer( allocator, srcDataSize, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, debugName); auto& [bufferHandle, memoryHandle] = bufferData; void* data; VK_CHECK(vmaMapMemory(allocator, memoryHandle, &data)); memcpy(data, srcData, srcDataSize); vmaUnmapMemory(allocator, memoryHandle); return bufferData; } VkBufferView createBufferView(VkDevice device, VkBuffer buffer, VkBufferViewCreateFlags flags, VkFormat format, VkDeviceSize offset, VkDeviceSize range) { VkBufferViewCreateInfo viewInfo = { VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO }; viewInfo.flags = flags; viewInfo.buffer = buffer; viewInfo.format = format; viewInfo.offset = offset; viewInfo.range = range; VkBufferView outBufferView; VK_CHECK(vkCreateBufferView(device, &viewInfo, nullptr, &outBufferView)); return outBufferView; } void transitionImageLayout(VkCommandBuffer commandBuffer, VkImage image, VkFormat format, VkImageLayout oldLayout, VkImageLayout newLayout, uint32_t mipLevels) { VkImageMemoryBarrier barrier = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER }; barrier.oldLayout = oldLayout; barrier.newLayout = newLayout; barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; barrier.image = image; if (hasDepthComponent(format)) { barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; if (hasStencilComponent(format)) barrier.subresourceRange.aspectMask |= VK_IMAGE_ASPECT_STENCIL_BIT; } else { barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; } barrier.subresourceRange.baseMipLevel = 0ul; barrier.subresourceRange.levelCount = mipLevels; barrier.subresourceRange.baseArrayLayer = 0ul; barrier.subresourceRange.layerCount = 1; VkPipelineStageFlags sourceStage = {}; VkPipelineStageFlags destinationStage = {}; if (oldLayout == VK_IMAGE_LAYOUT_UNDEFINED && newLayout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) { barrier.srcAccessMask = 0ul; barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; sourceStage = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT; destinationStage = VK_PIPELINE_STAGE_TRANSFER_BIT; } else if ( oldLayout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL && newLayout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) { barrier.srcAccessMask = 
VK_ACCESS_TRANSFER_WRITE_BIT; barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT; sourceStage = VK_PIPELINE_STAGE_TRANSFER_BIT; destinationStage = VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | // VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT | // VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | // VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT; } else if ( oldLayout == VK_IMAGE_LAYOUT_UNDEFINED && newLayout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL) { barrier.srcAccessMask = 0ul; barrier.dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT; sourceStage = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT; destinationStage = VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT; } else if ( oldLayout == VK_IMAGE_LAYOUT_GENERAL && newLayout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL) { barrier.srcAccessMask = VK_ACCESS_INDIRECT_COMMAND_READ_BIT | VK_ACCESS_INDEX_READ_BIT | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | VK_ACCESS_UNIFORM_READ_BIT | VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_HOST_READ_BIT | VK_ACCESS_HOST_WRITE_BIT; barrier.dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT; sourceStage = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT; destinationStage = VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT; } else if ( oldLayout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL && newLayout == VK_IMAGE_LAYOUT_GENERAL) { barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; barrier.dstAccessMask = VK_ACCESS_INDIRECT_COMMAND_READ_BIT | VK_ACCESS_INDEX_READ_BIT | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | VK_ACCESS_UNIFORM_READ_BIT | VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_HOST_READ_BIT | VK_ACCESS_HOST_WRITE_BIT; sourceStage = VK_PIPELINE_STAGE_TRANSFER_BIT; destinationStage = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT; } else if ( oldLayout == VK_IMAGE_LAYOUT_UNDEFINED && newLayout == VK_IMAGE_LAYOUT_GENERAL) { barrier.srcAccessMask = 0ul; barrier.dstAccessMask = VK_ACCESS_INDIRECT_COMMAND_READ_BIT | VK_ACCESS_INDEX_READ_BIT | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | VK_ACCESS_UNIFORM_READ_BIT | VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_HOST_READ_BIT | VK_ACCESS_HOST_WRITE_BIT; sourceStage = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT; destinationStage = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT; } else if ( oldLayout == VK_IMAGE_LAYOUT_UNDEFINED && newLayout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) { barrier.srcAccessMask = 0ul; barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT; sourceStage = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT; destinationStage = VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | // 
VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT | // VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | // VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT; } else if ( oldLayout == VK_IMAGE_LAYOUT_GENERAL && newLayout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) { barrier.srcAccessMask = VK_ACCESS_INDIRECT_COMMAND_READ_BIT | VK_ACCESS_INDEX_READ_BIT | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | VK_ACCESS_UNIFORM_READ_BIT | VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_HOST_READ_BIT | VK_ACCESS_HOST_WRITE_BIT; barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT; sourceStage = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT; destinationStage = VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | // VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT | // VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | // VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT; } else if ( oldLayout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL && newLayout == VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) { barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT; sourceStage = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; destinationStage = VK_PIPELINE_STAGE_TRANSFER_BIT; } else if ( oldLayout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL && newLayout == VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) { barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; barrier.dstAccessMask = VK_ACCESS_MEMORY_READ_BIT; sourceStage = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; destinationStage = VK_PIPELINE_STAGE_TRANSFER_BIT; } else if ( oldLayout == VK_IMAGE_LAYOUT_UNDEFINED && newLayout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) { barrier.srcAccessMask = 0ul; barrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; sourceStage = VK_PIPELINE_STAGE_TRANSFER_BIT; destinationStage = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; } else if ( oldLayout == VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL && newLayout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) { barrier.srcAccessMask = VK_ACCESS_TRANSFER_READ_BIT; barrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; sourceStage = VK_PIPELINE_STAGE_TRANSFER_BIT; destinationStage = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; } else if ( oldLayout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL && newLayout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) { barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; barrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; sourceStage = VK_PIPELINE_STAGE_TRANSFER_BIT; destinationStage = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; } else if ( oldLayout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL && newLayout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) { barrier.srcAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT ; barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; sourceStage = VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT; destinationStage = 
VK_PIPELINE_STAGE_TRANSFER_BIT ; } else if ( oldLayout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL && newLayout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL) { barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; barrier.dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT ; sourceStage = VK_PIPELINE_STAGE_TRANSFER_BIT; destinationStage = VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT ; } else if ( oldLayout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL && newLayout == VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) { barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; barrier.dstAccessMask = VK_ACCESS_MEMORY_READ_BIT; sourceStage = VK_PIPELINE_STAGE_TRANSFER_BIT; destinationStage = VK_PIPELINE_STAGE_TRANSFER_BIT; } else if ( oldLayout == VK_IMAGE_LAYOUT_PRESENT_SRC_KHR && newLayout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) { barrier.srcAccessMask = VK_ACCESS_MEMORY_READ_BIT; barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; sourceStage = VK_PIPELINE_STAGE_TRANSFER_BIT; destinationStage = VK_PIPELINE_STAGE_TRANSFER_BIT; } else { assert(false); // not implemented yet barrier.srcAccessMask = 0ul; barrier.dstAccessMask = 0ul; } vkCmdPipelineBarrier( commandBuffer, sourceStage, destinationStage, 0, 0, nullptr, 0, nullptr, 1, &barrier); } void copyBufferToImage( VkCommandBuffer commandBuffer, VkBuffer buffer, VkImage image, uint32_t width, uint32_t height, uint32_t mipLevels, const uint32_t* mipOffsets, uint32_t mipOffsetsStride) { std::vector<VkBufferImageCopy> regions(mipLevels); for (uint32_t mipIt = 0ul; mipIt < mipLevels; mipIt++) { uint32_t mipWidth = width >> mipIt; uint32_t mipHeight = height >> mipIt; auto& region = regions[mipIt]; region.bufferOffset = *(mipOffsets + mipIt * mipOffsetsStride); region.bufferRowLength = 0ul; region.bufferImageHeight = 0ul; region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; region.imageSubresource.mipLevel = mipIt; region.imageSubresource.baseArrayLayer = 0ul; region.imageSubresource.layerCount = 1; region.imageOffset = {0, 0, 0}; region.imageExtent = {mipWidth, mipHeight, 1}; } vkCmdCopyBufferToImage( commandBuffer, buffer, image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, regions.size(), regions.data()); } std::tuple<VkImage, VmaAllocation> createImage2D( VmaAllocator allocator, uint32_t width, uint32_t height, uint32_t mipLevels, VkFormat format, VkImageTiling tiling, VkImageUsageFlags usage, VkMemoryPropertyFlags memoryFlags, const char* debugName, VkImageLayout initialLayout) { VkImageCreateInfo imageInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO }; imageInfo.imageType = VK_IMAGE_TYPE_2D; imageInfo.extent.width = width; imageInfo.extent.height = height; imageInfo.extent.depth = 1; imageInfo.mipLevels = mipLevels; imageInfo.arrayLayers = 1; imageInfo.format = format; imageInfo.tiling = tiling; imageInfo.usage = usage; imageInfo.initialLayout = initialLayout; imageInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE; imageInfo.samples = VK_SAMPLE_COUNT_1_BIT; imageInfo.flags = {}; VmaAllocationCreateInfo allocInfo = {}; allocInfo.flags = VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT; allocInfo.usage = (memoryFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) ? 
VMA_MEMORY_USAGE_GPU_ONLY : VMA_MEMORY_USAGE_UNKNOWN; allocInfo.requiredFlags = memoryFlags; allocInfo.memoryTypeBits = 0ul; // memRequirements.memoryTypeBits; allocInfo.pUserData = (void*)debugName; VkImage outImage; VmaAllocation outImageMemory; VmaAllocationInfo outAllocInfo; VK_CHECK(vmaCreateImage( allocator, &imageInfo, &allocInfo, &outImage, &outImageMemory, &outAllocInfo)); return std::make_tuple(outImage, outImageMemory); } std::tuple<VkImage, VmaAllocation> createImage2D( VkCommandBuffer commandBuffer, VmaAllocator allocator, VkBuffer stagingBuffer, uint32_t width, uint32_t height, uint32_t mipLevels, const uint32_t* mipOffsets, uint32_t mipOffsetsStride, VkFormat format, VkImageTiling tiling, VkImageUsageFlags usage, VkMemoryPropertyFlags memoryFlags, const char* debugName, VkImageLayout initialLayout) { assert(stagingBuffer); auto result = createImage2D( allocator, width, height, mipLevels, format, tiling, usage | VK_IMAGE_USAGE_TRANSFER_DST_BIT, memoryFlags, debugName, initialLayout); const auto& [outImage, outImageMemory] = result; transitionImageLayout( commandBuffer, outImage, format, initialLayout, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, mipLevels); copyBufferToImage(commandBuffer, stagingBuffer, outImage, width, height, mipLevels, mipOffsets, mipOffsetsStride); return result; } VkImageView createImageView2D( VkDevice device, VkImageViewCreateFlags flags, VkImage image, VkFormat format, VkImageAspectFlags aspectFlags, uint32_t mipLevels) { VkImageViewCreateInfo viewInfo = { VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO }; viewInfo.flags = flags; viewInfo.image = image; viewInfo.viewType = VK_IMAGE_VIEW_TYPE_2D; viewInfo.format = format; viewInfo.subresourceRange.aspectMask = aspectFlags; viewInfo.subresourceRange.baseMipLevel = 0ul; viewInfo.subresourceRange.levelCount = mipLevels; viewInfo.subresourceRange.baseArrayLayer = 0ul; viewInfo.subresourceRange.layerCount = 1; viewInfo.components.r = VK_COMPONENT_SWIZZLE_IDENTITY; viewInfo.components.g = VK_COMPONENT_SWIZZLE_IDENTITY; viewInfo.components.b = VK_COMPONENT_SWIZZLE_IDENTITY; viewInfo.components.a = VK_COMPONENT_SWIZZLE_IDENTITY; VkImageView outImageView; VK_CHECK(vkCreateImageView(device, &viewInfo, nullptr, &outImageView)); return outImageView; } VkSampler createSampler(VkDevice device, const VkSamplerCreateInfo& createInfo) { VkSampler outSampler; VK_CHECK(vkCreateSampler(device, &createInfo, nullptr, &outSampler)); return outSampler; } std::vector<VkSampler> createSamplers(VkDevice device, const std::vector<VkSamplerCreateInfo>& createInfos) { std::vector<VkSampler> outSamplers; outSamplers.reserve(createInfos.size()); for (const auto& createInfo : createInfos) outSamplers.emplace_back(createSampler(device, createInfo)); return outSamplers; } VkFramebuffer createFramebuffer( VkDevice device, VkRenderPass renderPass, uint32_t attachmentCount, const VkImageView* attachments, uint32_t width, uint32_t height, uint32_t layers) { VkFramebufferCreateInfo info = { VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO }; info.renderPass = renderPass; info.attachmentCount = attachmentCount; info.pAttachments = attachments; info.width = width; info.height = height; info.layers = layers; VkFramebuffer outFramebuffer; VK_CHECK(vkCreateFramebuffer(device, &info, nullptr, &outFramebuffer)); return outFramebuffer; } VkRenderPass createRenderPass( VkDevice device, const std::vector<VkAttachmentDescription>& attachments, const std::vector<VkSubpassDescription>& subpasses, const std::vector<VkSubpassDependency>& subpassDependencies) { 
VkRenderPassCreateInfo renderPassInfo = { VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO }; renderPassInfo.attachmentCount = static_cast<uint32_t>(attachments.size()); renderPassInfo.pAttachments = attachments.data(); renderPassInfo.subpassCount = static_cast<uint32_t>(subpasses.size()); renderPassInfo.pSubpasses = subpasses.data(); renderPassInfo.dependencyCount = static_cast<uint32_t>(subpassDependencies.size()); renderPassInfo.pDependencies = subpassDependencies.data(); VkRenderPass outRenderPass; VK_CHECK(vkCreateRenderPass(device, &renderPassInfo, nullptr, &outRenderPass)); return outRenderPass; } VkRenderPass createRenderPass( VkDevice device, VkPipelineBindPoint bindPoint, VkFormat colorFormat, VkAttachmentLoadOp colorLoadOp, VkAttachmentStoreOp colorStoreOp, VkImageLayout colorInitialLayout, VkImageLayout colorFinalLayout, VkFormat depthFormat, VkAttachmentLoadOp depthLoadOp, VkAttachmentStoreOp depthStoreOp, VkImageLayout depthInitialLayout, VkImageLayout depthFinalLayout) { std::vector<VkAttachmentDescription> attachments; VkAttachmentDescription& colorAttachment = attachments.emplace_back(); colorAttachment.format = colorFormat; colorAttachment.samples = VK_SAMPLE_COUNT_1_BIT; colorAttachment.loadOp = colorLoadOp; colorAttachment.storeOp = colorStoreOp; colorAttachment.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; colorAttachment.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE; colorAttachment.initialLayout = colorInitialLayout; colorAttachment.finalLayout = colorFinalLayout; VkAttachmentReference colorAttachmentRef = {}; colorAttachmentRef.attachment = 0ul; colorAttachmentRef.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; VkSubpassDescription subpass = {}; subpass.pipelineBindPoint = bindPoint; subpass.colorAttachmentCount = 1; subpass.pColorAttachments = &colorAttachmentRef; VkAttachmentReference depthAttachmentRef = {}; if (depthFormat != VK_FORMAT_UNDEFINED) { VkAttachmentDescription& depthAttachment = attachments.emplace_back(); depthAttachment.format = depthFormat; depthAttachment.samples = VK_SAMPLE_COUNT_1_BIT; depthAttachment.loadOp = depthLoadOp; depthAttachment.storeOp = depthStoreOp; depthAttachment.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; depthAttachment.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE; depthAttachment.initialLayout = depthInitialLayout; depthAttachment.finalLayout = depthFinalLayout; depthAttachmentRef.attachment = 1; depthAttachmentRef.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; subpass.pDepthStencilAttachment = &depthAttachmentRef; } VkSubpassDependency dependency = {}; dependency.srcSubpass = VK_SUBPASS_EXTERNAL; dependency.dstSubpass = 0ul; dependency.srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; dependency.dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; dependency.srcAccessMask = {}; dependency.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; /* std::make_vector is not standard C++; build the single-element vectors explicitly. */ return createRenderPass(device, attachments, std::vector<VkSubpassDescription>{subpass}, std::vector<VkSubpassDependency>{dependency}); } VkPipelineLayout createPipelineLayout( VkDevice device, const VkDescriptorSetLayout* descriptorSetLayouts, uint32_t descriptorSetLayoutCount, const VkPushConstantRange* pushConstantRanges, uint32_t pushConstantRangeCount) { VkPipelineLayoutCreateInfo pipelineLayoutInfo = { VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO }; pipelineLayoutInfo.setLayoutCount = descriptorSetLayoutCount; pipelineLayoutInfo.pSetLayouts = descriptorSetLayouts; pipelineLayoutInfo.pushConstantRangeCount = pushConstantRangeCount; pipelineLayoutInfo.pPushConstantRanges 
= pushConstantRanges; VkPipelineLayout layout; VK_CHECK(vkCreatePipelineLayout(device, &pipelineLayoutInfo, nullptr, &layout)); return layout; } VkSurfaceKHR createSurface(VkInstance instance, void* view) { VkSurfaceKHR surface; #ifdef __WINDOWS__ auto vkCreateWin32SurfaceKHR = reinterpret_cast<PFN_vkCreateWin32SurfaceKHR>( vkGetInstanceProcAddr(instance, "vkCreateWin32SurfaceKHR")); assert(vkCreateWin32SurfaceKHR); VkWin32SurfaceCreateInfoKHR surfaceCreateInfo = { VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR }; surfaceCreateInfo.hinstance = GetModuleHandle(NULL); surfaceCreateInfo.hwnd = *reinterpret_cast<HWND*>(view); VK_CHECK(vkCreateWin32SurfaceKHR(instance, &surfaceCreateInfo, nullptr, &surface)); #else VK_CHECK(glfwCreateWindowSurface(instance, reinterpret_cast<GLFWwindow*>(view), nullptr, &surface)); #endif return surface; } VmaAllocator createAllocator(VkInstance instance, VkDevice device, VkPhysicalDevice physicalDevice, VkFlags flags) { auto vkGetBufferMemoryRequirements2KHR = (PFN_vkGetBufferMemoryRequirements2KHR)vkGetInstanceProcAddr( instance, "vkGetBufferMemoryRequirements2KHR"); assert(vkGetBufferMemoryRequirements2KHR != nullptr); auto vkGetImageMemoryRequirements2KHR = (PFN_vkGetImageMemoryRequirements2KHR)vkGetInstanceProcAddr( instance, "vkGetImageMemoryRequirements2KHR"); assert(vkGetImageMemoryRequirements2KHR != nullptr); VmaVulkanFunctions functions = {}; functions.vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties; functions.vkGetPhysicalDeviceMemoryProperties = vkGetPhysicalDeviceMemoryProperties; functions.vkAllocateMemory = vkAllocateMemory; functions.vkFreeMemory = vkFreeMemory; functions.vkMapMemory = vkMapMemory; functions.vkUnmapMemory = vkUnmapMemory; functions.vkFlushMappedMemoryRanges = vkFlushMappedMemoryRanges; functions.vkInvalidateMappedMemoryRanges = vkInvalidateMappedMemoryRanges; functions.vkBindBufferMemory = vkBindBufferMemory; functions.vkBindImageMemory = vkBindImageMemory; functions.vkGetBufferMemoryRequirements = vkGetBufferMemoryRequirements; functions.vkGetImageMemoryRequirements = vkGetImageMemoryRequirements; functions.vkCreateBuffer = vkCreateBuffer; functions.vkDestroyBuffer = vkDestroyBuffer; functions.vkCreateImage = vkCreateImage; functions.vkDestroyImage = vkDestroyImage; functions.vkGetBufferMemoryRequirements2KHR = vkGetBufferMemoryRequirements2KHR; functions.vkGetImageMemoryRequirements2KHR = vkGetImageMemoryRequirements2KHR; VmaAllocator allocator; VmaAllocatorCreateInfo allocatorInfo = {}; allocatorInfo.flags = flags; allocatorInfo.physicalDevice = physicalDevice; allocatorInfo.device = device; allocatorInfo.pVulkanFunctions = &functions; allocatorInfo.instance = instance; vmaCreateAllocator(&allocatorInfo, &allocator); return allocator; } VkDescriptorPool createDescriptorPool(VkDevice device) { constexpr uint32_t maxDescriptorCount = 128; constexpr uint32_t maxInlineBlockSizeBytes = 64; VkDescriptorPoolSize poolSizes[] = { {VK_DESCRIPTOR_TYPE_SAMPLER, maxDescriptorCount}, {VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, maxDescriptorCount}, {VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, maxDescriptorCount}, {VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, maxDescriptorCount}, {VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, maxDescriptorCount}, {VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, maxDescriptorCount}, {VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, maxDescriptorCount}, {VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, maxDescriptorCount}, {VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, maxDescriptorCount}, {VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, maxDescriptorCount}, 
{VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, maxDescriptorCount}, {VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT, maxInlineBlockSizeBytes}}; VkDescriptorPoolCreateInfo poolInfo = { VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO }; poolInfo.poolSizeCount = static_cast<uint32_t>(sizeof_array(poolSizes)); poolInfo.pPoolSizes = poolSizes; poolInfo.maxSets = maxDescriptorCount * static_cast<uint32_t>(sizeof_array(poolSizes)); poolInfo.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT; // VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT // VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT VkDescriptorPool outDescriptorPool; VK_CHECK(vkCreateDescriptorPool(device, &poolInfo, nullptr, &outDescriptorPool)); return outDescriptorPool; } VkResult checkFlipOrPresentResult(VkResult result) { switch (result) { case VK_SUCCESS: break; case VK_SUBOPTIMAL_KHR: std::cout << "warning: flip/present returned VK_SUBOPTIMAL_KHR"; break; case VK_ERROR_OUT_OF_DATE_KHR: std::cout << "warning: flip/present returned VK_ERROR_OUT_OF_DATE_KHR"; break; default: throw std::runtime_error("Invalid error code."); } return result; }
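// The helpers above are thin wrappers around the corresponding vkCreate* calls.
// The sketch below shows one way they might be combined during renderer setup;
// it is an illustration only, not part of the original source. The device
// handle, the swapchain/depth formats and the descriptor set layout passed in
// are placeholder assumptions, and setupMainPass/setupPipelineLayout are
// hypothetical names -- only createRenderPass and createPipelineLayout come
// from the code above.
VkRenderPass setupMainPass(VkDevice device, VkFormat swapchainFormat, VkFormat depthFormat)
{
    // Clear color and depth on load, keep the color result and hand it to the
    // presentation engine; depth contents can be discarded after the pass.
    return createRenderPass(device,
                            VK_PIPELINE_BIND_POINT_GRAPHICS,
                            swapchainFormat,
                            VK_ATTACHMENT_LOAD_OP_CLEAR,
                            VK_ATTACHMENT_STORE_OP_STORE,
                            VK_IMAGE_LAYOUT_UNDEFINED,
                            VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
                            depthFormat,
                            VK_ATTACHMENT_LOAD_OP_CLEAR,
                            VK_ATTACHMENT_STORE_OP_DONT_CARE,
                            VK_IMAGE_LAYOUT_UNDEFINED,
                            VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
}

VkPipelineLayout setupPipelineLayout(VkDevice device, VkDescriptorSetLayout setLayout)
{
    // One descriptor set layout plus a small push-constant block (a 4x4 matrix
    // for the vertex stage) is a common minimal configuration.
    VkPushConstantRange pushRange = {};
    pushRange.stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
    pushRange.offset = 0;
    pushRange.size = sizeof(float) * 16;
    return createPipelineLayout(device, &setLayout, 1, &pushRange, 1);
}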
import React, { FC } from "react";
import { PluginProps } from "white-web-sdk";

/** simple wrapper to locate the element on the whiteboard */
export const Transformer: FC<Pick<PluginProps<unknown, unknown>, "size" | "scale">> = ({
    size: { width, height },
    scale,
    children,
}) => {
    scale = scale || 1;
    return (
        <div
            style={{
                width: width / scale,
                height: height / scale,
                transform: `scale(${scale})`,
                display: "flex",
                transformOrigin: "top left",
            }}
        >
            {children}
        </div>
    );
};
Sequential broncho-alveolar lavages reflect distinct pulmonary compartments: clinical and research implications in lung transplantation Background Bronchoalveolar lavage (BAL) has proven to be very useful to monitor the lung allograft after transplantation. In addition to allowing detection of infections, multiple BAL analytes have been proposed as potential biomarkers of lung allograft rejection or dysfunction. However, BAL collection is not well standardized and differences in BAL collection represent an important source of variation. We hypothesized that there are systematic differences between sequential BALs that are relevant to BAL analysis. Methods As part of 126 consecutive bronchoscopies in lung transplant recipients, two sequential BALs (BAL1 and BAL2) were performed in one location during each bronchoscopy by instilling and suctioning 50 ml of normal saline twice into separate containers. Cell concentration, viability and differentials, Surfactant Protein-D (SP-D), Club Cell Secretory Protein (CCSP), and levels of CXCL10, IL-10, CCL2, CCL5, VEGF-C, RAGE, CXCL9, CXCL1, IL-17A, IL-21, PDGF, and GCSF were compared between BAL1 and BAL2. Results Total cell concentration did not differ between BAL1 and BAL2; however, compared to BAL2, BAL1 had more dead cells, epithelial cells, neutrophils, and higher concentrations of airway epithelium-derived CCSP and inflammatory markers. BAL2 had a higher concentration of SP-D compared to BAL1. Conclusion In this study performed in lung transplant recipients, we show that sequential BALs represent different lung compartments and have distinct compositions. BAL1 represents the airway compartment with more epithelial cells, neutrophils, and epithelium-derived CCSP. Conversely, BAL2 samples preferentially the distal bronchoalveolar space with greater cell viability and higher SP-D. Our findings illustrate how the method of BAL collection can influence analyte concentrations and further emphasize the need for a standardized approach in translational research involving BAL samples. Electronic supplementary material The online version of this article (10.1186/s12931-018-0786-z) contains supplementary material, which is available to authorized users. Background Bronchoalveolar lavage (BAL) is a technique widely used in pulmonary medicine and lung transplantation to diagnose lung infections and other processes or evaluate treatment effects . Examination of the cellular composition and protein constituents in the BAL provides a unique window into the microenvironment of the lung. In lung transplantation, BAL proteins have been proposed as potential biomarkers of acute rejection and chronic lung allograft dysfunction (CLAD) . However, small sample sizes, lack of control for potential confounders and lack of standardization related to BAL collection and handling have all been proposed as sources of variability between studies . Although BAL has been used as a research tool in lung transplantation for decades, the technique varies markedly between centers. In an informal survey conducted by our group among 25 lung transplant centers from 14 countries, BAL collection ranged from 1 to 6 sequential lavages of 20-100 ml each with inconsistent pooling prior to analysis (Additional file 1: Table S1). 
In an attempt to create a common approach to BAL collection, BAL standardization guidelines were published by the European Respiratory Society in 1999 , and guidelines specific to patients with interstitial lung diseases were put forth by the American Thoracic Society in 2012 . While these documents set an important precedent, they leave room for significant variability in BAL collection and processing. Neither the optimal total volume nor the number of aliquots to be instilled has been established. Patient selection and sample collection This is a retrospective single-center cohort study based on prospectively collected BAL samples and clinical information, approved by the Institutional Research Ethics Board. The study population consisted of all consented lung transplant recipients at Toronto General Hospital who underwent a bronchoscopy between August and October 2015. Immunosuppression, antimicrobial prophylaxis, and treatment of acute rejection were administered for all patients in accordance with the Toronto Lung Transplant Program protocol as described previously . CLAD was defined as a sustained (at least 3 weeks) and irreversible decline in FEV1 to ≤80% of the post-transplant baseline, which was itself defined as the average of the two highest FEV1 values at least 3 weeks apart, in the absence of other etiologies . BAL samples from 126 consecutive bronchoscopies were collected and analyzed as detailed below. Bronchoscopy procedure and BAL collection At our center, scheduled surveillance bronchoscopies are carried out at 0.5, 1.5, 3, 6, 9, 12, 18 and 24 months posttransplant. Additional diagnostic bronchoscopies are performed if clinically indicated. Bronchoscopies were conducted via the oropharyngeal route under conscious sedation. Supplemental oxygen was provided to maintain an oxygen saturation of > 90% (intubation was not routinely performed). Pharyngeal anesthesia with 4% lidocaine was applied to the upper airways prior to the bronchoscopy, and intravenous Midazolam and Fentanyl were administered prior to the bronchoscopy for sedation. In addition, 1% lidocaine was administered to the trachea and mainstem airways during the bronchoscopy for local anesthesia of the airway mucosa. Blood pressure, heart rate, oxygen saturation, electrocardiogram and consciousness level of the patient were continuously monitored. After passing through the upper airways, avoiding suctioning as much as possible so as not to contaminate the bronchoscope, an initial airway inspection was carried out. As part of our institutional protocol aimed to measure markers of aspiration in the airways, a large airway bronchial wash (LABW) was performed in the mainstem bronchus with instillation and subsequent suctioning of 20 ml of normal saline (this sample was not assessed in this study). The bronchoscope was then placed in a wedged position within the targeted segment; Per protocol, when no particular location was targeted, BAL sampling was conducted in the right middle lobe or left upper lobe (preferentially the lingula) of the lung allograft, as suggested by the ATS/ERS guidelines . In the case of localized disease processes, the targeted segment was chosen based on radiological examination or visual inspection. After achieving a wedged position with the bronchoscope, 50 ml of normal saline were instilled and then suctioned while maintaining the wedged position (BAL1). This procedure was immediately repeated once again (BAL2). 
BAL1 and BAL2 samples were collected into separate containers, and the return volumes were recorded. From BAL1, 10 ml was sent for general clinical microbiologic analysis. From BAL2, 10 ml was sent for clinical cytology analysis. All remaining fluid was processed and stored for research. Transbronchial biopsies (if any) were performed after BALs. Suctioning throughout the procedure was performed using a wall-mounted suction system (see protocol in Fig. 1). Processing of BAL samples After separation of clinical samples, as described above, BAL1 and BAL2 were placed on ice and processed within three hours of sample collection. Cell concentrations, differentials, and cell viability were measured as described below, in aliquots of fresh whole (raw) BAL samples that were separated prior to further processing. Subsequently, BAL1 and BAL2 samples were centrifuged for 20 min at 3184G at 4°C. The supernatant was carefully transferred into separate tubes and stored at − 80°C. The cell pellets were stored as part of our ongoing biobanking and not used in this study. Assessment of cell counts Cell concentration for all 126 BAL1-BAL2 sample pairs were assessed using an automatic Vi-Cell XR Cell Viability Analyzer (Beckman Coulter, Mississauga, ON, Canada). For confirmation, a manual cell count using trypan blue dead cell exclusion on a hemocytometer was performed on nine randomly selected sample pairs with quantification of ciliated epithelial cells. Cytospin preparations of 6 of these sample pairs were made: 150uL of the whole (raw) BAL was loaded into a Cytospin filter attached to a charged glass slide. The Cytospin filters with glass slides were then centrifuged at 800 rpm for 3 min (Shandon Cytospin 4 Centrifuge, ThermoFisher). The slides were then air dried and stained with HARLECO® Hemacolor® (EMD Chemicals, NJ, USA). Percentages of polymorphonuclear granulocytes, eosinophils, macrophages, and lymphocytes were quantified. All manual sample analyses were performed by experienced operators blinded to sample group. BAL protein analysis The first 20 paired BALs of the cohort were arbitrarily chosen for analysis of CCSP and SP-D. Stored frozen BAL supernatants were thawed on ice. CCSP and SP-D were measured by enzyme-linked immunosorbent assay (ELISA) (DuoSet® ELISA; R&D Systems, Minneapolis, MN, USA). All samples and standards were run in duplicate according to manufacturers' protocols. The 15 pairs of BAL that were collected sequentially after the first 20 (and were therefore not previously thawed) were used to assess inflammatory markers: A custom multiplex bead kit (R&D Systems) was used to measure CXCL10, IL-10, CCL2, CCL5, VEGF-C, RAGE, CXCL9, CXCL1, IL-17A, IL-21, PDGF and GCSF based on manufacturer's instructions. Samples and standards were run in duplicate. Biomarker concentrations were obtained using a Bio-Plex® MAGPIX™ Multiplex reader (Bio-Rad Laboratories, Hercules, CA). For all analytes, any value falling below the lower limit of detection was assigned a level of 0 ng/ml. Statistical analyses Comparisons of the paired BAL samples were performed using a non-parametric Wilcoxon matched-pairs signed rank test. Results are shown as median . Spearman correlation was used to evaluate the pairing of BAL1 and BAL2, yielding a Spearman's correlation coefficient (r). All statistical analyses were performed with GraphPad Prism version 5.01 software (GraphPad Software, La Jolla, CA, USA). A p-value < 0.05 was considered statistically significant. 
Characteristics of patients and bronchoscopies One hundred twenty-two patients underwent 126 sequential bronchoscopies. Baseline characteristics are detailed in Table 1 and clinical characteristics at the time of the bronchoscopies are outlined in Table 2. Data for the overall cohort as well as for each sub-analysis cohort is shown separately. The majority of the BAL samples were obtained during surveillance bronchoscopies and were obtained from the RML. These proportions were similar between the different sub-cohorts. Less than a quarter of the BALs were infected. After initial airway inspection, a bronchoalveolar lavage (BAL) is performed by wedging the bronchoscope in a lung segment, instilling 50 mL of saline followed by aspiration (BAL1), and repeating the instillation of another 50 mL of saline followed by aspiration (BAL2). BAL1 and BAL2 samples are collected into separate containers. 10 mL of each BAL1 and BAL2 were sent to the clinical laboratory. The remaining volume of each of BAL1 and BAL2 was transferred to the research laboratory for further analysis The volume of recovered BAL fluid was higher for BAL2 than for BAL 1 Fluid aspirated after the second 50 ml instillation was significantly higher compared to the first instillation (15.5 ml (13.4, 18.4) vs. 25 ml (21.0, 30.0), for BAL1 and BAL2 respectively, p < 0.0001) (Fig. 2). In light of the potential impact of CLAD status on volume recovery, we investigated differences between BAL1 and BAL2 separately among CLAD and No CLAD patients. Once again, BAL2 demonstrated higher return volume compared to BAL1 in both group subsets (CLAD: 14 ml (12.0, 18.5) vs. 22 (20,30) for BAL1 and BAL2 respectively, p < 0.0001) (Fig. 2). Differential levels of inflammatory markers were shown in the two BAL fractions In a separate random set of paired BAL fractions, inflammatory markers CXCL10, IL-10, CCL2, CCL5, VEGF-C, RAGE, CXCL9, CXCL1, IL-17A, IL-21, PDGF, and GCSF were shown to be overall lower in BAL2 with different proteins following distinct patterns (Additional file 1: Table S3 and Fig. 6). Furthermore, the level of variability between samples appeared considerably lower in BAL2. As is frequently seen with BAL cytokine levels, analyte values were below detection in a subset of samples: concentrations of CCL5, CXCL9, IL-17A, IL-21 and PDGF were undetectable in greater than 50% of the samples and interpretation of this data is therefore limited. Nevertheless, all analytes were detectable in at least some samples. The percentage of undetectable analytes did not differ between BAL1 and BAL2. Almost all markers were lower in BAL2 compared to BAL1 for all paired samples, except for one, which consistently followed the opposite pattern. Although we cannot prove this, the outlier BAL pair may have resulted from an inadvertent switch of the two BAL fractions between sample collection and analysis. Given this rationale, while we included all BAL pairs in our primary analysis, we also compared BAL1 to BAL2 after exclusion of the outlier: when excluding the outlier, levels of VEGF-C, CXCL9, IL-17A, and IL-21 were statistically lower in BAL2, compared to BAL1. Correlation between BAL1 and BAL2 We assessed the correlation between BAL1 and BAL2 for all parameters. For example, there was a statistically significant correlation between BAL1 and BAL2 volumes (r = 0.58, p < 0.0001), meaning that if a patient had a high volume return in BAL1, they were likely to have a high return in BAL2 as well. 
Similarly, there was a statistically significant correlation between BAL1 and BAL2 total cell concentrations, cell viability, macrophage concentration, CCSP, SP-D and most of the other measured proteins. For each analysis, r and p values are reported in Additional file 1: Tables S2 and S3.

Discussion

In this study, we assessed the cellular and soluble protein composition of sequential BAL fractions in lung transplant recipients. We showed that there are systematic differences between BAL1 and BAL2, with BAL1 preferentially reflecting the airway compartment and BAL2 composition being consistent with the distal bronchoalveolar space. To the best of our knowledge, this is the first study delineating unique compositions of sequential BALs in lung transplantation. BAL cells and proteins have long been studied in the context of pulmonary diseases. In lung transplantation, in particular, BAL parameters have been shown to correlate with allograft dysfunction. However, concerns exist regarding the lack of standardization of BAL techniques, which hinders the potential clinical utility of measured BAL components and limits comparison between studies.

Fig. 3 Cell concentration and cell viability in sequential bronchoalveolar lavages performed in lung transplant recipients. Two sequential bronchoalveolar lavages (BALs) were performed in consecutive lung transplant recipients. Cell concentration was measured using an automated cell counter. Cell concentrations did not differ between BAL1 and BAL2 (p = 0.09), which was also confirmed by manual cell count in nine paired BALs (p = 0.2) (a, b); however, cell viability was significantly higher in BAL2 (p < 0.05) (c).

The results of this study demonstrate reproducible differences between sequential fractions of BAL. The volume recovery was consistently higher for BAL2 independent of other factors such as CLAD status, known to influence volume recovery. Further, BAL1 is enriched with dead cells, airway epithelial cells, and neutrophils. This suggests that the first fraction may be a good representation of the airway compartment. BAL2 appears to better represent the more distal part of the lung, including the alveoli: it contains fewer airway epithelial cells and more macrophages (although the macrophage difference is not statistically significant). The concept of sequential BALs representing distinct compartments of the lung has been brought forth in the past in a study by Kelly et al., who were able to directly visualize the increasingly distal anatomical distribution of three sequential 60 ml aliquots of saline using a radio-opaque dye in a subtraction imaging technique. This is further supported by our finding of higher CCSP levels and lower SP-D levels in BAL1 compared to BAL2. CCSP is a major secretory product of club cells found in the airway epithelium, making it a useful marker of the airway compartment. Conversely, SP-D is primarily synthesized and secreted by alveolar type II cells that line the alveolar spaces, potentially explaining the higher level found in BAL2. Unlike CCSP and SP-D, CXCL10, IL-10, CCL2, CCL5, VEGF-C, RAGE, CXCL9, CXCL1, IL-17A, IL-21, PDGF, and GCSF, which have previously been shown to correlate with clinical outcomes in lung transplant recipients, are not known to be secreted preferentially in the proximal airways or distal alveolar space. The higher levels of these proteins in BAL1 may reflect a more significant contribution of the distal airways to their production or merely a dilution effect in BAL2.
Of note, the protein analyses were performed on a small and randomly selected subset of samples and are not representative of any specific post lung-transplant conditions. Our observations regarding the cellular and soluble components of sequential BALs are consistent with several studies carried out in non-transplant populations in the 1980's . A small volume lavage (less than 20 ml) in the mainstem bronchus or a segmental bronchus recovered more epithelial cells and neutrophils, while a larger lavage volume of 20-100 ml in a segmental bronchus recovered more alveolar macrophages , which is in line with our data. Unlike the higher level of alveolar lymphocytes that was described by Lam et al. in BAL from a segmental bronchus , we did not detect any particular trend in the proportion of lymphocytes between BAL1 and BAL2. As in some of the prior studies in nontransplant patients , our results show a decrease in protein concentration in successive BAL fractions. A possible explanation could be the influence of dilution and volume returns , as volumes recovered from BAL2 were consistently higher than BAL1. However, a preferential sampling of different lung compartments also likely plays a role as SP-D levels were higher in BAL2. Our results add to the literature by validating and expanding findings from earlier smaller studies in a large cohort of lung transplant patients undergoing mostly surveillance bronchoscopies. Furthermore, these findings are directly applicable to the care of both lung transplant recipients as well as to patients with other pulmonary conditions. The significant differences in cellular composition and variations in soluble proteins between BAL1 and BAL2 lead to the important conclusion that sequential BALs should not be used interchangeably. Inconsistent data collection (i.e., one study using BAL1 for analysis and the other using BAL2 or pooled sequential BALs) would make comparisons between studies unreliable. Standardization of collection, processing and reporting methods is essential for clear communication among medical professionals and researchers. Several limitations warrant review in discussion of this work. The results described in this study should be interpreted in the context of a center-specific protocolized BAL collection method as described herein. We were not able to assess the impact of other BAL practices such as instillation of a higher number of fractions or different volumes, pooled BAL1 and 2 as opposed to BAL1 and BAL2, the use of different BAL locations within the same lung or between lungs, or variability in sample handling, on the difference in composition between BAL1 and BAL2. In addition, according to our protocol a large airway bronchial wash (LABW) of the mainstem bronchus is performed prior to BAL. As shown by others, the LABW composition differs from BAL in cell and protein composition . It is possible that that performing a LABW prior to retrieving BAL1 may influence the composition of BAL1 by introducing more dead cells and higher protein levels. A separate study to compare BAL composition in the presence or absence of LABW is necessary to address this question. Furthermore, since microbiology analysis is performed only on BAL1 at our center, we were not able to compare the pathogen recovery between BAL1 and BAL2, which could be valuable information. An important issue in BAL protein analysis is the normalization of proteins diluted by saline relative to the return. 
Different methods have been proposed such as normalization to albumin or urea in BAL versus serum; published data suggests that albumin is not a reliable marker while urea may be useful for that purpose . Since there is no consensus about an optimal normalization strategy and given that the majority of publications on BAL proteins in lung transplantation do not use a normalization approach, we reported simple unadjusted concentrations of the protein elements and did not employ any correction methods to account for changes in dilutions between BAL1 and BAL2. In light of the critical importance that dilution may have on BAL composition, a standardized dilution should be used consistently as a part of a BAL collection protocol. We acknowledge that biochemical analyses in this study were done on small sample size. In spite of the low patient numbers in some analyses, we were able to detect changes between BAL1 and BAL2 which are consistent with previous reports described in biochemical analyses of sequential bronchial lavages from healthy volunteers . Another point deserving consideration is that this study population only included lung transplant patients: The study design did not include a comparison population (patients with other pulmonary conditions or alternatively healthy volunteers), which hinders generalizations of our observations to other patient populations. Additionally, we acknowledge that specific post-transplant complications, such as CLAD status, presence of acute rejection or infection, degree of immunosuppression, and others, may alter cell and protein composition as well as BAL fluid recovery. Our study was not focused on assessing the relationship between these factors and the BAL analytes. Given our paired study design with subjects acting as their own controls, comparing BAL1 to BAL2 in each individual subject, we were able to minimize the potential confounding effects of clinical variables on the primary analysis. Conclusion BAL cell composition and protein concentrations in lung transplant recipients are influenced by regional sampling and dilution factors that characterize different sequential BAL fractions. The increasing interest in BAL as a research tool in pulmonary translational research merits standardization of its collection, processing, bio-banking, and thorough description in manuscripts. Consensus guidelines for the collection and processing of BAL are needed for greater uniformity in future study protocol development.
/* * Licensed to Elasticsearch B.V. under one or more contributor * license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright * ownership. Elasticsearch B.V. licenses this file to you under * the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ import React from 'react'; import { BehaviorSubject } from 'rxjs'; import { createMemoryHistory, History, createHashHistory } from 'history'; import { AppRouter, AppNotFound } from '../ui'; import { EitherApp, MockedMounterMap, MockedMounterTuple } from '../test_types'; import { createRenderer, createAppMounter, createLegacyAppMounter, getUnmounter } from './utils'; import { AppStatus } from '../types'; describe('AppContainer', () => { let mounters: MockedMounterMap<EitherApp>; let history: History; let appStatuses$: BehaviorSubject<Map<string, AppStatus>>; let update: ReturnType<typeof createRenderer>; const navigate = (path: string) => { history.push(path); return update(); }; const mockMountersToMounters = () => new Map([...mounters].map(([appId, { mounter }]) => [appId, mounter])); const setAppLeaveHandlerMock = () => undefined; const mountersToAppStatus$ = () => { return new BehaviorSubject( new Map( [...mounters.keys()].map(id => [ id, id.startsWith('disabled') ? AppStatus.inaccessible : AppStatus.accessible, ]) ) ); }; beforeEach(() => { mounters = new Map([ createAppMounter('app1', '<span>App 1</span>'), createLegacyAppMounter('legacyApp1', jest.fn()), createAppMounter('app2', '<div>App 2</div>'), createLegacyAppMounter('baseApp:legacyApp2', jest.fn()), createAppMounter('app3', '<div>Chromeless A</div>', '/chromeless-a/path'), createAppMounter('app4', '<div>Chromeless B</div>', '/chromeless-b/path'), createAppMounter('disabledApp', '<div>Disabled app</div>'), createLegacyAppMounter('disabledLegacyApp', jest.fn()), ] as Array<MockedMounterTuple<EitherApp>>); history = createMemoryHistory(); appStatuses$ = mountersToAppStatus$(); update = createRenderer( <AppRouter history={history} mounters={mockMountersToMounters()} appStatuses$={appStatuses$} setAppLeaveHandler={setAppLeaveHandlerMock} /> ); }); it('calls mount handler and returned unmount function when navigating between apps', async () => { const app1 = mounters.get('app1')!; const app2 = mounters.get('app2')!; let dom = await navigate('/app/app1'); expect(app1.mounter.mount).toHaveBeenCalled(); expect(dom?.html()).toMatchInlineSnapshot(` "<div><div> basename: /app/app1 html: <span>App 1</span> </div></div>" `); const app1Unmount = await getUnmounter(app1); dom = await navigate('/app/app2'); expect(app1Unmount).toHaveBeenCalled(); expect(app2.mounter.mount).toHaveBeenCalled(); expect(dom?.html()).toMatchInlineSnapshot(` "<div><div> basename: /app/app2 html: <div>App 2</div> </div></div>" `); }); it('can navigate between standard application and one with custom appRoute', async () => { const standardApp = mounters.get('app1')!; const chromelessApp = mounters.get('app3')!; let dom = await navigate('/app/app1'); expect(standardApp.mounter.mount).toHaveBeenCalled(); 
expect(dom?.html()).toMatchInlineSnapshot(` "<div><div> basename: /app/app1 html: <span>App 1</span> </div></div>" `); const standardAppUnmount = await getUnmounter(standardApp); dom = await navigate('/chromeless-a/path'); expect(standardAppUnmount).toHaveBeenCalled(); expect(chromelessApp.mounter.mount).toHaveBeenCalled(); expect(dom?.html()).toMatchInlineSnapshot(` "<div><div> basename: /chromeless-a/path html: <div>Chromeless A</div> </div></div>" `); const chromelessAppUnmount = await getUnmounter(standardApp); dom = await navigate('/app/app1'); expect(chromelessAppUnmount).toHaveBeenCalled(); expect(standardApp.mounter.mount).toHaveBeenCalledTimes(2); expect(dom?.html()).toMatchInlineSnapshot(` "<div><div> basename: /app/app1 html: <span>App 1</span> </div></div>" `); }); it('can navigate between two applications with custom appRoutes', async () => { const chromelessAppA = mounters.get('app3')!; const chromelessAppB = mounters.get('app4')!; let dom = await navigate('/chromeless-a/path'); expect(chromelessAppA.mounter.mount).toHaveBeenCalled(); expect(dom?.html()).toMatchInlineSnapshot(` "<div><div> basename: /chromeless-a/path html: <div>Chromeless A</div> </div></div>" `); const chromelessAppAUnmount = await getUnmounter(chromelessAppA); dom = await navigate('/chromeless-b/path'); expect(chromelessAppAUnmount).toHaveBeenCalled(); expect(chromelessAppB.mounter.mount).toHaveBeenCalled(); expect(dom?.html()).toMatchInlineSnapshot(` "<div><div> basename: /chromeless-b/path html: <div>Chromeless B</div> </div></div>" `); const chromelessAppBUnmount = await getUnmounter(chromelessAppB); dom = await navigate('/chromeless-a/path'); expect(chromelessAppBUnmount).toHaveBeenCalled(); expect(chromelessAppA.mounter.mount).toHaveBeenCalledTimes(2); expect(dom?.html()).toMatchInlineSnapshot(` "<div><div> basename: /chromeless-a/path html: <div>Chromeless A</div> </div></div>" `); }); it('should not mount when partial route path matches', async () => { mounters.set(...createAppMounter('spaces', '<div>Custom Space</div>', '/spaces/fake-login')); mounters.set(...createAppMounter('login', '<div>Login Page</div>', '/fake-login')); history = createMemoryHistory(); update = createRenderer( <AppRouter history={history} mounters={mockMountersToMounters()} appStatuses$={mountersToAppStatus$()} setAppLeaveHandler={setAppLeaveHandlerMock} /> ); await navigate('/fake-login'); expect(mounters.get('spaces')!.mounter.mount).not.toHaveBeenCalled(); expect(mounters.get('login')!.mounter.mount).toHaveBeenCalled(); }); it('should not mount when partial route path has higher specificity', async () => { mounters.set(...createAppMounter('login', '<div>Login Page</div>', '/fake-login')); mounters.set(...createAppMounter('spaces', '<div>Custom Space</div>', '/spaces/fake-login')); history = createMemoryHistory(); update = createRenderer( <AppRouter history={history} mounters={mockMountersToMounters()} appStatuses$={mountersToAppStatus$()} setAppLeaveHandler={setAppLeaveHandlerMock} /> ); await navigate('/spaces/fake-login'); expect(mounters.get('spaces')!.mounter.mount).toHaveBeenCalled(); expect(mounters.get('login')!.mounter.mount).not.toHaveBeenCalled(); }); it('should not remount when changing pages within app', async () => { const { mounter, unmount } = mounters.get('app1')!; await navigate('/app/app1/page1'); expect(mounter.mount).toHaveBeenCalledTimes(1); // Navigating to page within app does not trigger re-render await navigate('/app/app1/page2'); expect(mounter.mount).toHaveBeenCalledTimes(1); 
expect(unmount).not.toHaveBeenCalled(); }); it('should not remount when going back within app', async () => { const { mounter, unmount } = mounters.get('app1')!; await navigate('/app/app1/page1'); expect(mounter.mount).toHaveBeenCalledTimes(1); // Hitting back button within app does not trigger re-render await navigate('/app/app1/page2'); history.goBack(); await update(); expect(mounter.mount).toHaveBeenCalledTimes(1); expect(unmount).not.toHaveBeenCalled(); }); it('should not remount when when changing pages within app using hash history', async () => { history = createHashHistory(); update = createRenderer( <AppRouter history={history} mounters={mockMountersToMounters()} appStatuses$={mountersToAppStatus$()} setAppLeaveHandler={setAppLeaveHandlerMock} /> ); const { mounter, unmount } = mounters.get('app1')!; await navigate('/app/app1/page1'); expect(mounter.mount).toHaveBeenCalledTimes(1); // Changing hash history does not trigger re-render await navigate('/app/app1/page2'); expect(mounter.mount).toHaveBeenCalledTimes(1); expect(unmount).not.toHaveBeenCalled(); }); it('should unmount when changing between apps', async () => { const { mounter, unmount } = mounters.get('app1')!; await navigate('/app/app1/page1'); expect(mounter.mount).toHaveBeenCalledTimes(1); // Navigating to other app triggers unmount await navigate('/app/app2/page1'); expect(unmount).toHaveBeenCalledTimes(1); }); it('calls legacy mount handler', async () => { await navigate('/app/legacyApp1'); expect(mounters.get('legacyApp1')!.mounter.mount.mock.calls[0]).toMatchInlineSnapshot(` Array [ Object { "appBasePath": "/app/legacyApp1", "element": <div />, "onAppLeave": [Function], }, ] `); }); it('handles legacy apps with subapps', async () => { await navigate('/app/baseApp'); expect(mounters.get('baseApp:legacyApp2')!.mounter.mount.mock.calls[0]).toMatchInlineSnapshot(` Array [ Object { "appBasePath": "/app/baseApp", "element": <div />, "onAppLeave": [Function], }, ] `); }); it('displays error page if no app is found', async () => { const dom = await navigate('/app/unknown'); expect(dom?.exists(AppNotFound)).toBe(true); }); it('displays error page if app is inaccessible', async () => { const dom = await navigate('/app/disabledApp'); expect(dom?.exists(AppNotFound)).toBe(true); }); it('displays error page if legacy app is inaccessible', async () => { const dom = await navigate('/app/disabledLegacyApp'); expect(dom?.exists(AppNotFound)).toBe(true); }); });
<filename>openjdk/jdk/test/java/lang/management/ThreadMXBean/ThreadExecutionSynchronizer.java /* * Copyright (c) 2004, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. */ /* * * @summary Thiseclass is used to synchronize execution off two threads. * @author <NAME> */ import java.util.concurrent.Semaphore; public class ThreadExecutionSynchronizer { private boolean waiting; private Semaphore semaphore; public ThreadExecutionSynchronizer() { semaphore = new Semaphore(1); waiting = false; } // Synchronizes two threads execution points. // Basically any thread could get scheduled to run and // it is not possible to know which thread reaches expected // execution point. So whichever thread reaches a execution // point first wait for the second thread. When the second thread // reaches the expected execution point will wake up // the thread which is waiting here. void stopOrGo() { semaphore.acquireUninterruptibly(); // Thread can get blocked. if (!waiting) { waiting = true; // Wait for second thread to enter this method. while(!semaphore.hasQueuedThreads()) { try { Thread.sleep(20); } catch (InterruptedException xx) {} } semaphore.release(); } else { waiting = false; semaphore.release(); } } // Wrapper function just for code readability. void waitForSignal() { stopOrGo(); goSleep(50); } void signal() { stopOrGo(); goSleep(50); } private static void goSleep(long ms) { try { Thread.sleep(ms); } catch (InterruptedException e) { e.printStackTrace(); System.out.println("Unexpected exception."); } } }
    def snapshot(self) -> types.Labfile:
        lf = self.file.copy(deep=True)
        return lf
<gh_stars>1-10 module Jaml.Generator.Ast ( astToJS ) where import Text.Regex.Posix import Jaml.Types import Jaml.Generator.Util (bite) import Jaml.Generator.Node (nodeToJS) {- All Generator funs generate (return) Strings of JavaScript. -} astToJS :: String -> Ast -> String astToJS idPrefix (Ast depth node kids) = case node of CodeComment _ -> "" otherwise -> concat [ indent , nodeToJS idPrefix node , postOpen node kids , "\n" , concat $ map (astToJS idPrefix) kids , close node indent kids ] where indent = take (2*depth) (repeat ' ') {--------------------------} {- At end of node's line. -} postOpen :: Node -> [Ast] -> String -- Beginning of JS function. postOpen (TemplateTag _ _ _) _ = " {\n var _J=Jaml(), _j=new _J();" -- Whether to put whitespace around innerHTML. postOpen (Tag _ _ (_,InGator) _) _ = bite postOpen (Tag _ _ _ _) _ = "" -- If SINGLE line of JS lacks final ';', add one. postOpen (SilentJS s) [] = if s =~ ";[:blank:]*$" then "" else ";" -- If start of MULTIPLE lines of JS, and lacks final '{', add one. postOpen (SilentJS s) _ = if s =~ "\\{[:blank:]*$" then "" else " {" -- *Comment, NoisyJs, Text postOpen _ _ = "" {-------------------------------------} {- Close node (unless self-closing). -} close :: Node -> String -> [Ast] -> String -- End of JS function. close (TemplateTag _ _ _) _ _ = " return _j.v();\n};\n" -- If MULTIPLE lines of JS, close the block. close (SilentJS _) _ [] = "" close (SilentJS _) indent _ = indent ++ "}\n" -- HTML comments can be multiline. close (HtmlComment _) indent _ = indent ++ "_j.s('-->');\n" close (IeConditionalComment _) indent _ = indent ++ "_j.s('<![endif]-->');\n" -- Add gator "bite(s)" if necessary. close (Tag _ _ (OutGator,_) SelfClosing) _ _ = bite close (Tag _ _ _ SelfClosing) _ _ = "" close (Tag name _ (outG,inG) _ ) indent _ = concat [ indent , if inG == InGator then bite else "" , "_j.ns('</" ++ name ++ ">');" , if outG == OutGator then bite else "" , "\n" ] -- CodeComment, NoisyJs, Text close _ _ _ = ""
A year ago Opec decided to maintain market share to battle competition from North America and the strategy worked, but not without some short-term pain When the Organization of Petroleum Exporting Countries (Opec) meets Friday in Vienna, the cartel will be celebrating a victory of sorts, one that has cost its members dearly. A year ago Opec, whose members include Saudi Arabia, United Arab Emirates and Venezuela, was facing increasing competition from North America. Alarmed by the growing supply of oil from non-traditional producers using fracking to access oil in shale deposits like the Bakken field in North Dakota, Opec was determined to stop this competition. Instead of trying to support prices, it decided to maintain market share. Oil price falls as Saudi Arabia pushes Opec cartel to hold production levels Read more The theory was a sharp price drop would kill these expensive-to-operate shale-oil producers and Opec would regain its crown as supplier of choice. The strategy certainly helped bring down prices. By 31 December 2014, prices for both Brent and the West Texas Intermediate crude oil slid about 50% from their 2014 high. In 2015, prices continued falling. Now they hover just off 2009 lows, around $44 a barrel for Brent and $41 for WTI. The problem for Opec, and oil producers in general, is prices remained weak for much longer than expected. Only recently has US shale-oil production slowed, which is why most oil-market watchers expect Opec to maintain its focus and continue with its expensive plan to undermine the competition. “Is Saudi Arabia now going take a U-turn just as it is about to see its strategy work? Given the fact their motives behind it was to get rid of the high-cost producers out there … [it is] unlikely they will take a U-turn at this time,” said Abhishek Deshpande, chief oil market analyst at Natixis. In other words, if Saudi Arabia wants to play the long game and smother the competition, it has to take more short-term pain. There’s been a 60% fall in the number of oil rigs, but firms are still operating the most productive wells. That’s why output is down only 500,000 barrels a day from the April peak, averaging 9.1m daily in October, said the Energy Information Agency, part of the US Energy Department. However, that’s still above the 8.7m pumped daily in 2014 and they estimate 2016 fourth-quarter average production around 8.8m daily. The slow reduction in US oil production was a surprise to many, said Rob Haworth, senior investment strategist at US Bank Wealth Management, one reason why prices are still down. Opec’s decision reverberated worldwide. News reports cite about 250,000 layoffs globally, and the EIA said international and US oil producers wrote down $38bn in assets in the third quarter of 2015, the largest for any quarter since at least 2008 for the 46 companies they track. In a research note, UBS analysts said capital expenditures in the US energy exploration and production sector fell about 40% year-over-year in 2015 and are expected to fall another 28% year-over-year in 2016. Opec members also felt the sting, as many of them use their oil money to support social programs, Deshpande said. Saudi Arabia’s cost of production is probably as low as $10 a barrel, but he said they need prices closer to $100 a barrel to fund their budget. In July, the Saudis issued domestic bonds for the first time since 2007 to help plug a budget deficit that’s around $100bn, he said. Economically weaker Opec members like Venezuela and Nigeria are in worse straits. 
US production is slowing, but Opec members are contributing to a global supply glut. Their stated production target is 30m barrels daily, but the International Energy Agency said they pumped 31.76m barrels in October. That extra output accounts for much of the 1.5m barrels of oil that exceeds demand. That target may be raised slightly to accommodate the return of Indonesia, which produces about 900,000 barrels a day, but considering Opec has consistently overproduced, the target is relatively meaningless. The global glut may worsen in the next few months, said Bart Melek, head of commodity strategy at TD Securities. Demand slows in the winter and next year, once sanctions on Iran are lifted, they’ll start exporting more oil, he said. Consensus estimates suggest Iran could pump as much as 500,000 barrels daily. That won’t make Saudi Arabia happy, and gives them even more reason to want to maintain market share, he said. Persistent low prices into 2016 may make it harder for shale-oil producers to restart because of funding problems, even if prices rebound, Melek said. “I’m speculating, but I imagine Opec would like to see that industry’s ability to [find] funding erode a bit more,” he said. For consumers, low oil values keeps gas prices subdued. The EIA said US retail regular-grade gasoline prices averaged $2.09 per gallon as of 23 November, the lowest since 2008. With oil prices expected to stay low, “the good news is that drivers will continue to pay less”, Haworth said. Consumers, for once, are the winners in this global clash of oil powers.
// WithEmptyAzureCredentials sets azure credentials to empty values
func (sc *InstallationDataCreator) WithEmptyAzureCredentials() *InstallationDataCreator {
	sc.installationData.AzureBrokerClientID = ""
	sc.installationData.AzureBrokerClientSecret = ""
	sc.installationData.AzureBrokerSubscriptionID = ""
	sc.installationData.AzureBrokerTenantID = ""

	return sc
}
/**
 * Waits on sigint to be called then deals with it properly
 */
void* wait_on_sigint(void* thisServer) {
    struct ServerData* server = (struct ServerData*) thisServer;
    while (1) {
        while (sem_wait(&sigintCalled)) {
            if (errno == EINTR) {
                errno = 0;
            } else {
                break;
            }
        }
        initialise_server(server, server->statFile);
    }
}
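/*
 * A minimal companion sketch (not part of the original source) showing the
 * other half of this pattern: a SIGINT handler that posts the semaphore
 * wait_on_sigint() blocks on. sem_post() is async-signal-safe, which is why a
 * semaphore is a reasonable hand-off mechanism here. The extern declaration
 * and the function names below are assumptions; only the sigintCalled
 * semaphore itself is taken from the code above.
 */
#include <signal.h>
#include <semaphore.h>
#include <string.h>

extern sem_t sigintCalled; /* assumed to be defined alongside wait_on_sigint() */

static void handle_sigint(int signum) {
    (void) signum;
    sem_post(&sigintCalled); /* wakes the sem_wait() loop in wait_on_sigint() */
}

static void install_sigint_handler(void) {
    struct sigaction action;
    memset(&action, 0, sizeof(action));
    sigemptyset(&action.sa_mask);
    action.sa_handler = handle_sigint;
    /* No SA_RESTART: wait_on_sigint() already retries sem_wait() on EINTR. */
    sigaction(SIGINT, &action, NULL);
}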
/** * PUT to update an existing entity. * * @param projectCode the code of the project in which to work * @param code the code of the entity to update * @param dtoToCreateOrUpdate the entity to update * @return the ResponseEntity with status 200 (OK) and with body the updated entity, or with status 400 (Bad Request) if the entity is not * valid, or with status 500 (Internal Server Error) if the entity couldn't be updated */ @PutMapping("/{code}") @Timed public ResponseEntity<SeverityDTO> createOrUpdate(@PathVariable String projectCode, @PathVariable String code, @Valid @RequestBody SeverityDTO dtoToCreateOrUpdate) { dtoToCreateOrUpdate.setCode(code); try { final UpsertResultDTO<SeverityDTO> result = service.createOrUpdate(projectService.toId(projectCode), dtoToCreateOrUpdate); final boolean isNew = result.getOperation() == Upsert.INSERT; final String newCode = result.getUpsertedDto().getCode(); return ResponseEntity .status(isNew ? HttpStatus.CREATED : HttpStatus.OK) .headers(isNew ? HeaderUtil.entityCreated(NAME, newCode) : HeaderUtil.entityUpdated(NAME, newCode)) .body(result.getUpsertedDto()); } catch (BadRequestException e) { return ResponseUtil.handle(e); } }
<filename>src/Logic/Expression/Internal.hs<gh_stars>0 {-# LANGUAGE NoImplicitPrelude #-} {-# LANGUAGE BangPatterns #-} {-# LANGUAGE TupleSections #-} {-# LANGUAGE ViewPatterns #-} module Logic.Expression.Internal( Internal, true, false, variable, xor, and, ands, xors, or, equals, implies, not, ors, -- Satisfiability isSat, interpretations, assign, identifiers, ) where import qualified Data.Set as S import Data.Maybe import qualified Data.Vector as V import Data.Bits hiding (xor) import qualified Data.Bits as B import Prelude hiding (and,or,not) import qualified Prelude as P {- | The internal representation of a logical expression encodes expresions in algebraic normal form as a vector of integers. Each element in the Vector represents a distinct term in an exclusive disjunction clause. Each term is encodes a conjunction of variables as an integer, indicating the presence of variable i as a factor by setting the ith bit in the integer. The literal True is encoded as any vector consisting of all zeros, while false is encoded as the empty vector. -} type Internal = V.Vector Integer -- | O(1) - construct the expression representing logical falsity. false :: Internal {-# INLINE false #-} false = V.empty -- | O(1) - construct the expression representing logical truth. true :: Internal {-# INLINE true #-} true = V.singleton 0 -- | O(1) - construct an expression representing a logical variable given the -- integer identifier for the symbol. variable :: Int -> Internal {-# INLINE variable #-} variable = V.singleton . (2^) -- | O(n^2) - construct the disjunction of two expressions. Terms will -- be arranged in descending term order. or :: Internal -> Internal -> Internal {-# INLINE or #-} or p q = (p `and` q) `xor` p `xor` q -- | O(n) - construct the material equivalence of two expressions. Terms will -- be arranged in descending term order. equals :: Internal -> Internal -> Internal {-# INLINE equals #-} equals p q = not (p `xor` q) -- | O(n^2) - construct the material equivalence of two expressions. Terms will -- be arranged in descending term order. implies :: Internal -> Internal -> Internal {-# INLINE implies #-} implies p q = (p `and` q) `xor` p `xor` true -- | O(n) - construct the logical negation of an expression. Terms will -- be arranged in descending term order. not :: Internal -> Internal {-# INLINE not #-} not p = p `xor` true -- | O(n^2) - construct the n-ary disjunctin of a list of expressions. Terms -- will be expressed in descending term order. ors :: [ Internal ] -> Internal {-# INLINE ors #-} ors = foldr or false -- | O(n) - construct the exclusive disjunction of two expressions. Terms will -- be arranged in descending term order. xor :: Internal -> Internal -> Internal {-# INLINE xor #-} xor p q = case V.null p of True -> q _ -> case V.null q of True -> p _ -> case compare x y of GT -> x `V.cons` xor (V.unsafeTail p) q LT -> y `V.cons` xor p (V.unsafeTail q) EQ -> xor (V.unsafeTail p) (V.unsafeTail q) where x = V.unsafeHead p y = V.unsafeHead q -- | O(n^2) - construct the n-ary exclusive disjunction of a list of expressions xors :: [Internal] -> Internal {-# INLINE xors #-} xors = foldr xor false -- | O(n^2) - construct the logical conjunction of two expressions and :: Internal -> Internal -> Internal {-# INLINE and #-} and p q = case V.null p of True -> V.empty _ -> case V.null q of True -> V.empty _ -> V.reverse (heapMul Nothing (V.empty) (V.reverse p) q) where -- Alternately (non-strictly) storing and retreiving elements -- in an intermediate heap. 
Elements can be retrieved when -- either there are no more elements left to store, or when -- the element being stored is greater than the least element -- in the heap. --heapMul :: Maybe BitVec -> M.Map BitVec Int -> V.Vector BitVec -> V.Vector BitVec -> V.Vector BitVec {-# INLINE heapMul #-} heapMul z h xxs ys = if V.null xxs then flush h else case (Just x > z && P.not (isNothing z)) of True -> case retrieve h of Left (!a,z',h') -> a `V.cons` heapMul z' h' xxs ys Right (z', h') -> heapMul z' h' xxs ys False -> let (z',h') = store h ys x in heapMul z' h' xs ys where x = V.unsafeHead xxs xs = V.unsafeTail xxs -- Multiply each term in ys by x, storing the term products in the heap. -- update the least element store h ys !x = let h' = V.foldr (\y z -> let m = y .|. x -- multiplying a two monomials can be achieved by -- bitwise or of their integer encodings in alter (\ zz -> case zz of Nothing -> Just 1 Just cnt -> Just (cnt+1)) m z ) h ys -- find the new least element in the heap. it should be -- either the least term in ys * x, or the previous least element z' = fst . V.unsafeHead $ h' -- O(log n) in (Just z', h') -- O(log(n)) Retrieve the least element from the heap and update the -- least elem retrieve retrieve h = let (m,cnt) = V.unsafeHead h h' = V.unsafeTail h z' = if V.null h' then Nothing else Just . fst $ V.unsafeHead h' -- O(log(n)) in case (cnt `P.mod` 2) of 0 -> Right (z',h') _ -> Left (m, z', h') -- O(n) Flush the elements from the heap. Keep only the elements where -- the incidence is odd because (x `xor` x = false). flush h =V.map fst . (V.filter (\(_,cnt) -> cnt `P.mod` 2 == 1)) $ h -- | O(n^3) - construct the n-ary logical conjunction of a list of expressions ands :: [Internal] -> Internal {-# INLINE ands #-} ands = foldr and true -------------------------------------------------------------------------------- -- BOOLEAN SATISFIABILITY -------------------------------------------------------------------------------- {- | O(2^n). Test for satisfiability. -} isSat :: [ Internal ] -> Bool isSat = P.not . S.null . interpretations {- | Replace all occurances of a set of variables with a boolean literal. -} assign :: Internal -> Integer -> Bool -> Internal assign expr vars True = V.foldr (xor . V.singleton) false $ V.map (\ x -> (x .&. vars) `B.xor` x) expr assign expr vars False = V.filter ((==0) . (.&. vars)) expr {- | O(2^n) Given an expression, p, obtain a list xs of expressions such that @or xs == p@ and for each x in xs then @x and p@ is true. This function uses the DPLL algorithm. -} interpretations :: [Internal] -> S.Set (S.Set (Int,Bool)) interpretations (S.fromList->exprs0 ) = aux exprs0 where aux :: S.Set Internal -> S.Set (S.Set (Int,Bool)) aux exprs = if S.null exprs then S.singleton S.empty else case V.length expr of 0 -> S.empty -- The expression is False. Nothing will satisfy it. 1 -> -- If there is only one term, then all the identifiers in -- it must be true. let poss = S.map (assumeMany (V.unsafeHead expr)) exprs' zs = S.fromList $ fmap (,True) $ identifiers expr in S.map (S.union zs) $ aux poss _ -> let ident = getIdent expr poss = S.map (assume ident True) exprs negs = S.map (assume ident False) exprs in S.map (S.insert (ident,False)) (aux negs) `S.union` S.map (S.insert (ident,True)) (aux poss) where (expr,exprs') = S.deleteFindMin exprs -- find an identifier to test. 
getIdent :: Internal -> Int getIdent x = go (V.unsafeHead x) 0 where go n i = if n `testBit` i then i else go n (i+1) {- | O(n) Substitute a boolean literal for all occurances of a variable within a proposition. -} assume :: Int -> Bool -> Internal -> Internal assume !ident !b expr = if b then foldr xor false (fmap (V.singleton.(`clearBit` ident)) expr) else V.filter (P.not.(`testBit` ident)) expr {- | O(n) Set a bunch of variables to true at the same time. This is faster than setting them individually with assume. -} assumeMany :: Integer -> Internal -> Internal assumeMany !idents expr = foldr xor false (fmap (V.singleton.(.&. complement idents)) expr) -------------------------------------------------------------------------------- -- Bound variables -------------------------------------------------------------------------------- {- | O(n). Return an integer whose bits represent the indicies of the bound variables an expression. -} variables :: Internal -> Integer variables = V.foldr (.|.) 0 {- | O(n). Return a list whose elements are the indicies of the bound variables in an expression. The list will be in ascending order. -} identifiers :: Internal -> [Int] identifiers expr = let vars = variables expr in [ident | ident <- takeWhile (\i->vars>=2^i) [0..], testBit vars ident] -- | O(log n) - insert, delete or modify an element in an ascending list of -- key value pairs. This function is used by "and" to store intermediate values -- but is not exported from this module. alter :: (Maybe Int -> Maybe Int) -> Integer -> V.Vector (Integer,Int) -> V.Vector (Integer,Int) {-# INLINE alter #-} alter f k0 vs = case V.null vs of -- Special case for the empty vector. We know that the element wont be -- found, so we don't have to search. True -> case f Nothing of Just !v' -> V.singleton (k0,v') _ -> vs -- If the vector is not empty, then we need to search for an existing element -- with the specified monomial key and perform the desired alteration on that -- element. _ -> go 0 (V.length vs - 1) where {-# INLINE [0] go #-} go !l !h = case l==h of -- When the upper and lower bounds are equal, then our search is over. True -> let (k,v) = vs `V.unsafeIndex` l -- does the found key, k, match the search key, k0. If it does -- then alter the value stored at that location, otherwise, -- possibly add a new element in case compare k0 k of GT -> case f Nothing of -- The search key is less than the found key. Since we -- are maintining the list in DESCENDING order of the keys, -- we will either add the element after the found element -- or do nothing. Just v' -> let (ls,rs) = V.splitAt (l+1) vs in ls V.++ (k0,v') `V.cons` rs _ -> vs EQ -> case f $ Just v of Just v' -> let (ls,rs) = V.splitAt (l) vs in ls V.++ (k0,v') `V.cons` (V.tail rs) Nothing -> let (ls,rs) = V.splitAt (l) vs in ls V.++ V.unsafeTail rs LT -> case f Nothing of Just v' -> let (ls,rs) = V.splitAt (l) vs in ls V.++ (k0,v') `V.cons` rs _ -> vs False -> let i = (l+h) `div` 2 in case compare k0 (fst $ vs V.! i) of LT -> go l (max (i-1) l) EQ -> go i i GT -> go (min (i+1) h) h
package entities

// Character header data is an abbreviated version of the character sheet
type Character struct {
	*Entity

	Name        string `json:"name"`
	Description string `json:"description"`
	Race        Race   `json:"race"`
	Class       Class  `json:"class"`
}

// NewCharacter creates a new character
func NewCharacter() *Character {
	return &Character{}
}
CD4 T lymphocyte proliferative responses to hepatitis C virus (HCV) antigens in patients coinfected with HCV and human immunodeficiency virus who responded to anti-HCV treatment. CD4 T lymphocyte proliferative responses to hepatitis C virus (HCV) antigens were evaluated before and during an anti-HCV regimen (interferon-alpha2a and ribavirin) in 36 patients coinfected with HCV and human immunodeficiency virus (HIV), to determine whether immune responses against HCV antigens are present in such patients, whether these responses are modified by anti-HCV treatment, and whether they are correlated with treatment efficacy. The CD4 responses against HCV antigens (primarily core antigens) detected at study entry in one-half of the patients did not correlate with anti-HCV treatment efficacy. Of 36 patients, 8 had patterns of persistent immune response to infection by genotypes 3 or 4 that were significantly correlated with sustained virologic response. Persistent immunologic reactivity and sustained virologic response coexisted only in patients infected with genotype 3. These findings suggest that HCV genotype may influence specific immune response, which, in turn, is implicated in virologic control.
// BumpMinor bumps the minor version of the caller func (v *Version) BumpMinor() { v.Minor++ v.Patch = 0 v.PreRelease = "" v.BuildMetaData = "" }
/*
 * Copyright (c) 2017 BayLibre, SAS
 * Author: <NAME> <<EMAIL>>
 *
 * SPDX-License-Identifier: GPL-2.0+
 */
#ifndef __GXBB_AOCLKC_H
#define __GXBB_AOCLKC_H

/* AO Configuration Clock registers offsets */
#define AO_RTI_PWR_CNTL_REG1	0x0c
#define AO_RTI_PWR_CNTL_REG0	0x10
#define AO_RTI_GEN_CNTL_REG0	0x40
#define AO_OSCIN_CNTL		0x58
#define AO_CRT_CLK_CNTL1	0x68
#define AO_RTC_ALT_CLK_CNTL0	0x94
#define AO_RTC_ALT_CLK_CNTL1	0x98

struct aoclk_gate_regmap {
	struct clk_hw hw;
	unsigned bit_idx;
	struct regmap *regmap;
	spinlock_t *lock;
};

#define to_aoclk_gate_regmap(_hw) \
	container_of(_hw, struct aoclk_gate_regmap, hw)

extern const struct clk_ops meson_aoclk_gate_regmap_ops;

struct aoclk_cec_32k {
	struct clk_hw hw;
	struct regmap *regmap;
	spinlock_t *lock;
};

#define to_aoclk_cec_32k(_hw) container_of(_hw, struct aoclk_cec_32k, hw)

extern const struct clk_ops meson_aoclk_cec_32k_ops;

#endif /* __GXBB_AOCLKC_H */
def _get_narratives_from_wsobjs(self, params, token): params = self._process_parameters(params) ws_narrs = self.metrics_dbi.list_ws_narratives( minT=params['minTime'], maxT=params['maxTime']) ws_ids = [wnarr['workspace_id'] for wnarr in ws_narrs] wsobjs = self.metrics_dbi.list_user_objects_from_wsobjs( params['minTime'], params['maxTime'], ws_ids) ws_narrs1 = [] for wsn in ws_narrs: for obj in wsobjs: if wsn['workspace_id'] == obj['workspace_id']: if wsn['name'] == obj['object_name']: wsn['object_id'] = obj['object_id'] wsn['object_version'] = obj['object_version'] break elif ':' in wsn['name']: wts = wsn['name'].split(':')[1] if '_' in wts: wts = wts.split('_')[1] p = re.compile(wts, re.IGNORECASE) if p.search(obj['object_name']): wsn['object_id'] = obj['object_id'] wsn['object_version'] = obj['object_version'] break if wsn.get('object_id'): wsn['last_saved_by'] = wsn.pop('username') ws_nm, wsn['nice_name'], wsn['n_ver'] = \ self._map_ws_narr_names(wsn['workspace_id']) wsn.pop('narr_keys') wsn.pop('narr_values') ws_narrs1.append(wsn) return {'metrics_result': ws_narrs1}
// GetAction retrieves the action specified in manual override if any func (s *ExperimentSpec) GetAction() ActionType { if s.ManualOverride != nil { return s.ManualOverride.Action } return ActionType("") }
/** * This class abstract the 'step' related processing and let * task script class process task specific logic. * * @author wangqi * */ public class TaskStep { private static final Logger logger = LoggerFactory.getLogger(TaskStep.class); /** * The Task script should call this method if the user's action * is compatible with task's target. This method will check if * current step reaches the limit and do the rest thing. * * @param task * @param user * @return */ public static boolean step(TaskPojo task, User user) { return step(task, user, 1); } /** * The Task script should call this method if the user's action * is compatible with task's target. This method will check if * current step reaches the limit and do the rest thing. * * @param task * @param user * @return */ public static boolean step(TaskPojo task, User user, int stepUnit) { return step(task, user, stepUnit, false); } /** * The Task script should call this method if the user's action * is compatible with task's target. This method will check if * current step reaches the limit and do the rest thing. * 'absolute' means if the stepUnit is a absolute value rather than a relative value. * * @param task * @param user * @param stepUnit * @param absolue * @return */ public static boolean step(TaskPojo task, User user, int stepUnit, boolean absolute) { boolean result = false; String taskId = task.getId(); TaskManager manager = TaskManager.getInstance(); int totalStep = task.getStep(); int currentStep = stepUnit; if ( !absolute ) { String currentStepString = manager.queryTaskSpecificData(user, taskId, Field.STEP); if ( currentStepString != null ) { currentStep = StringUtil.toInt(currentStepString, 1) + stepUnit; } } if ( currentStep < totalStep ) { manager.storeTaskSpecificData(user, taskId, Field.STEP, currentStep); } else { manager.finishTask(user, task.getId()); result = true; } if ( logger.isDebugEnabled() ) { logger.debug("User {} task: id:{},name:{} step: {}, total:{}", new Object[]{user.getRoleName(), task.getId(), task.getName(), currentStep, totalStep}); } sendNotifyToClient(user, task, currentStep, totalStep); return result; } /** * For the rank type achievement, the step value is reversed. Rank 1 is * the highest number to reach. * * @param task * @param user * @param stepUnit * @return */ public static boolean reverseStep(TaskPojo task, User user, int stepUnit) { boolean result = false; String taskId = task.getId(); TaskManager manager = TaskManager.getInstance(); int totalStep = task.getStep(); int currentStep = stepUnit; if ( currentStep > totalStep ) { /** * Check if the task step changes. * 2012-08-07 */ String currentStepString = manager.queryTaskSpecificData(user, taskId, Field.STEP); if ( !String.valueOf(currentStep).equals(currentStepString) ) { manager.storeTaskSpecificData(user, taskId, Field.STEP, currentStep); sendNotifyToClient(user, task, currentStep, totalStep); } } else { manager.finishTask(user, task.getId()); result = true; sendNotifyToClient(user, task, totalStep, totalStep); } if ( logger.isDebugEnabled() ) { logger.debug("User {} reverse task: id:{},name:{} step: {}, total:{}", new Object[]{user.getRoleName(), task.getId(), task.getName(), currentStep, totalStep}); } return result; } /** * Check if the user reach current level. It is a pure utility method. * and does not call finishTask method. 
* * @param task * @param user * @param currentLevel * @return */ public static boolean level(TaskPojo task, User user, int currentLevel ) { int targetLevel = task.getCondition1(); if ( currentLevel == targetLevel ) { return true; } else { return false; } } /** * Send the task or achievement notification to clients. * @param task * @param currentStep * @param totalStep * @param reverse */ public static void sendNotifyToClient(User user, TaskPojo task, int currentStep, int totalStep ) { if ( task.getType() == TaskType.TASK_ACHIVEMENT ) { //notify the achievement progress BseFinishAchievement.Builder achievement = BseFinishAchievement.newBuilder(); achievement.setId(task.getId()); //int percent = Math.round(currentStep * 1.0f / totalStep * 100); //if ( percent > 100 ) percent = 100; //achievement.setPercent(percent); achievement.setPercent(currentStep); GameContext.getInstance().writeResponse(user.getSessionKey(), achievement.build()); } else { BseModiTask.Builder modiBuilder = BseModiTask.newBuilder(); modiBuilder.setTaskID(StringUtil.toInt(task.getId(), 0)); modiBuilder.setStep(currentStep); XinqiMessage xinqiMsg = new XinqiMessage(); xinqiMsg.payload = modiBuilder.build(); GameContext.getInstance().writeResponse(user.getSessionKey(), xinqiMsg); } } }
The idea that the multiverse is more akin to an art project than a science experiment (or an art experiment, if you’re so inclined) is one of those Occult themes that typically gets dismissed by both overly scientific and religious types alike, even though it quite inarguably resonates now more than ever. One of the stranger aspects of human psychology that we essentially avoid touching in typical academic or spiritual discourse involves the fact that your average person now consumes roughly a hundred thousand times more art in a given year than they did even a mere century ago. We used to rely on mediums like galleries, plays, symphonies, and libraries to dispense our art, most of which weren’t super accessible to people who weren’t wealthy or close to an urban center. Now the fact that the internet and cable television beam recreational distractions into our homes 24/7 seems almost like a trivial afterthought. You can debate the quality of what in my mind are crappy creative endeavors like staged reality television, but you can’t deny the fact that even TV commercials are getting increasingly surreal. More to the point: even the most logically wired hard-nosed materialist probably spends most of his time working a boring job while fantasizing about catching up on whatever show he’s been neglecting on Netflix the second he has a free minute to relax (or staring at Facebook; friend me). So why is that? Why is it that we’re increasingly and quite unconsciously abandoning the boring confines of the material world in favor of immersive fantasy realms? Why do our lives now involve things like rock concerts, marathon TV binges, video game addiction, and movie star crushes? Why do religious people continually oppose the evolution of these mediums? Why do scientists often view the world from the creepy lens of unconscious matter while spending their free time meticulously planning outfits for the next Comicon? When you look at the universe as an art experiment, all of these things begin to fall into place. Art was the purpose all along. You can gawk at the horrors of the modern world in abject disgust, but what you can’t deny is that it IS freaking entertaining. It’s great art, pure and simple. Unpredictable. Mystifying. Bizarre. Never a dull fucking moment. Earlier this year, I read the piece Carl Sagan penned in defense of marijuana and was sort of shocked at his admission that he didn’t truly understand art until he started getting high. And that would be the significance of things like psychedelic drugs. They have the ability to place even non-creative types in the surrealist headspaces of the higher realms, to help them understand the hyper-liminal states of consciousness associated with creation itself. Of course, I’m an a musician as well as an admitted music geek, and debating what makes an album truly psychedelic is a discussion that can go on forever and hints at the subjective nature of reality itself (another topic we all love to avoid). To clear the air, all of these albums were obviously at least partially inspired by things like psilocybin, LSD, and weed, regardless of genre. For the first time this year, I actually included the signifier of “recreational” or “ritualistic” with hallucinogens, and when relating to albums, they can be used in either capacity. I personally use them for both. A recreational album would be one that you’re going to throw on while taking bong rips and relaxing. 
A ritualistic one might be something you’d use to calculatedly come into contact with the larger spiritual mysteries of the universe. 2014 was yet another year packed with so many mind-fryingly awesome jams that I couldn’t even begin to keep track of them all in a hundred lifetimes; so with that in mind, here were 15 of my faves in easy-to-digest listicle format. (Editor’s Note: The focus of this article represents only one writer’s opinion on the multi-varied world of psychedelic music, with a focus on his tastes in metal and hip-hop. We dare not otherwise quantify what complements psychedelic states in a more over-arching absolute way! Enjoy, and you can also see past installments of Psychoactive Soundscapes here!) &nsbp; 15. Helms Alee – Sleepwalking Sailors (Sargent House) __ Recreational Despite being one of the best live bands in Seattle, I must confess that Helms Alee’s previous two outings were only really great in small doses, which I tend to geek out on 3 or 4 songs before getting bored and moving on. Granted, those 3 or 4 songs are genius – but this is their first album that slays all the way through, and weirdly, it works not necessarily because of the monumental amount of fuzz cascading through Ben Verellen’s self-made amplifiers, but more because of the increased focus on songwriting. And that’d be the thing; you’ve just got to give points for originality. I honestly don’t know what other band sounds even sort of like Helms Alee. It’s like sludged out stoner surf rocky indie metal/pop with 3 sets of alternating male and female vocals who all growl and croon in equal measure. There are even some melodic Pollardian micro-songs tying the more brutal cuts together. Nothing like it out there, and it gets a few bonus points for: a. having the hottest/most talented drummer in the known universe (who also sings and plays guitar in the band Lozen) and b. because Verellen is also a drummer who put out another fantastically debauched album in 2014 with Constant Lovers, on the predictably solid Good to Die label. Also worth checking out. 14. Ex Astra – Ex Astra (Self-Released) __ Ritualistic I know very little about New York’s Ex Astra, other than that I had a conversation with one of the guys in the band early in 2014, and he forwarded me a link to a bunch of Dr. Strange comics from the ’70s. That always wins points, but lots of people forward me their music and I only write about the shit that’s both excellent and trippy, which Ex Astra certainly is in equal measures. Great music to wig out to, and not the sort of strain that compels the listener to ignore their dark side, but rather the sort that inspires growth-inducing shadow work. While not brutal, metally, or confrontational in any way, there’s a dark beauty coursing through this disc’s Middle Eastern scale work, ethereal female vocals, echoing guitars, and table-heavy drum meditations. It approximates the aura of a star-filled winter night sky rather than a sunny summer day at the beach. Certain minor key, Eastern-influenced sounds effectively invoke a sensation of spiritual awe or what King Missile appropriately referred to as “Mystical Shit” back in the day. This is an entire album of that feeling – like you’re continually on the verge of piercing through the veil of higher understanding. If I was going to take bong rips and meditate while sitting in the lotus position, I’d just throw these jams on and lose myself to the feeling. Come to think of it, I’m not sure why I’m not doing that right now. &nsbp; 13. 
Garek Jon Druss – The Celestial Din (Debacle Records) __ Ritualistic Ahhh, drone. The genre that is almost always at least somewhat cool by virtue of fucking with the time space perception of the listener. But how do you separate the wheat from the chaff when it comes to drone music? Answer: I have no fucking clue. What I do know, though, is that this is the most interesting drone release that found its way onto my computer this year, partially because of what makes it different. It starts out in typically elongated synth tone territory and eventually throws some tastefully delayed minimal beats into the picture, which slowly get mind-fuckingly tribal and then subside while the underlying drone remains. The next two songs are remixes of the initial track by other notable drone practitioners. Pete Swanson’s pulsates in a hypnotic/robotic manner while Ben Chisholm’s twists artificial string arrangements into various multi-dimensional cut-up configurations. All of this is sufficiently headtrippy. Yet another out there record from the Debacle imprint, which has become my local Seattle go-to when the hankering for something electronic and abnormal strikes my fancy. Extra props to 2014’s Debacle Fest, which was the strangest and best-attended experimental shows I’ve been to in quite some time. If someone were to ask me what the common theme was with all the acts at the 2014 Debacle Fest, the only thing I could come up with would be: well, they were all fucking weird. No, really, that seemed to be the theme. Mad props. &nsbp; 12. Run the Jewels – Run the Jewels 2 (Mass Appeal) __ Recreational I’ve said it once, and I’ll say it a million times; El-P basically ruined hip-hop for me back in 2002 when he dropped Fantastic Damage. It sounded roughly a decade ahead of its time both lyrically and production-wise and made all the bling rap bullshit surrounding it in the mainstream seem beyond embarrassing. As El rightfully declares on RTJ2, “Every bar of that bitch shit you spit is your fucking prison.” Word. 2014 was the year that the mainstream finally caught up to Jaime Meline, and thank fucking god. It’s incredibly appropriate that Rage Against the Machine’s Zach De La Rocha guests here because this album often gives off the exact same vibes that made RATM so great back in the ’90s: preaching about the inherent corruption of our economic system while simultaneously making your head nod uncontrollably. Just try and listen to RTJ2 (or “The Battle of Los Angeles”, for that matter) without involuntarily popping your neck back and forth and raising your hand in a Black Panther gesture of solidarity. It’s impossible. The saddest thing about Rage is that they broke up right before W. weaseled his way into office (i.e. at the exact moment we needed them most). The good news is the RTJ made the perfect protest album at the perfect time, which seemed almost prescient. The fact that they ended up in on tour in St. Louis on the very night the bullshit grand jury verdict for Darren Wilson was delivered is beyond synchronous. Now the bad news: As high as this album soars at times (with say, “Oh My Darling Don’t Cry”, which is one of the best tracks ever made period), I personally didn’t find it to have the replay value of previously produced El-P LP’s. It sort of pumps you up with the first four tracks and then fails to maintain the same energy throughout, but this probably also has to do with the frequency with which I listened to Cancer 4 Cure, R.A.P. Music, and RTJ1 in the last 2 years. 
I suppose that’s my way of telling you to go pick up all of those albums stat. And does it lose a few points for that “dick in her mouth all day” song? Yeah, it absolutely does. Sorry. 11. Earth – Primitive and Deadly (Southern Lord Records) __ Ritualistic To tell you the truth, Earth’s last few mellow albums were both an incredible execution of vision while simultaneously not being something I found myself wanting to spin very often. For longtime heroin addicts, I had to respect the effect they created, which was very much in tune with the psychoactive properties of that particular drug in that they gave you the auditory sensation of nodding off blissfully without any of the painful withdrawals. But therein lies the problem. After about the third time I put on The Bees Made Honey in the Lion’s Skull and found myself falling asleep within 15 minutes, I had to resolve to not listen to it in any circumstances where I had to get anything done. It just isn’t actually very often when I want to willfully fall asleep while listening to music. Which is why this album’s return to heavier, more distorted territory works so well as it keeps the glacial pacing of their tracks from unintentionally sending me off to dreamsville. Oh, and vocals, by people like the legendary Mark Lanegan and the crazy talented Rabia Shaheen Qazi from Rose Windows, who threw down on the album’s finest track, “From the Zodiacal Light”. It’s amazing what a little bit of songwriting chops can bring to the table, and it wasn’t until I bought this that I realized, wow, I now have like 7 Earth albums in my collection. I can resoundingly say that this is the best of those albums, which is quite a statement from a band that’s been around over 20 years and done enough drugs to kill a small planet. I’m sure Randall Dunn’s production (he always seems to have his hand in at least something on this list) has a great deal to do with how excellent this all turned out. 10. Mastodon – Once More ‘Round the Sun (Reprise Records) __ Recreational 2014 will also go down as the year metal gods Mastodon released their finest album to date while simultaneously alienating half their fanbase. You know what I think Mastodon’s second best album is? The Hunter. Me and the ‘Don clearly have similar tastes, which I think could probably be best demonstrated by the unbelievably awesome album art they chose for this beast (Editor’s Note: Created by the amazing Skinner; see our interview with him here), as well as the fact that their music has been continually progressing towards more melodic/trippy territory for years. The problem with that, of course, is that a ton of their fans would love to see them get bleaker and more growly. I’ve even seen people on Facebook straight-up bash their new direction and boldly point out that they peaked with 2004’s Leviathan. Funny story on that. Because I was grooving on this album so much, I threw on Leviathan at some point in 2014 and found myself pretty “meh” about the whole endeavor. Lord, the whole “80’s thrash metal is awesome” thing that was going on in the early 2000s. No, that shit always sort of sucked. Sorry. It’s white trashier than most country and nu metal being terrible wasn’t really a reason to resurrect it, but Mastodon got their start riding that train and thankfully decided to move onward to bigger and better things. I don’t blame them at all. 
Having played in an angry metal band for 4 years and spending a ton of my time focusing my psychic energy on all the things that pissed me off about society, I can decidedly say that the whole exercise was demonstrably terrible for my mental health. Maybe Mastodon started sensing that as well. Your art is an extension of your persona and from a magickal perspective, songs with a chorus of, “This time, This time, Things will turn out just fine” could actually be used as a banishing ritual for a clever mage. It’d work particularly well for the person who wrote it. Anywho, I do find it more than odd that the metal police have turned on these guys for gasp, writing better songs. It’s not like Once More’ Round the Sun isn’t heavy. It’s pulverizingly freaking heavy. It’s just that they also thrash out in some major keys and you can sing along to it too. How terrible. Death to knuckle-dragging meatheads who say dumb shit like, “Death to false metal”. 9. Saiga – Steppenlord (Self Released) __ Recreational I can’t believe I’m actually saying this, but the stoner and doom metal genres have gotten so saturated in the last decade that even I’m sort of getting to the point where I’m a bit bored with it all. Don’t get me wrong; I’ll gladly listen to even a run-of-the-mill doom or stoner metal band over pretty much anything considered “indie rock” in this day and age, but simultaneously, there’s certainly a bit of undeniable stagnation going on in those scenes. Which is why Saiga is a breath of fresh air, as according to their Bandcamp page, their music “contains lots of genres, all of them stoned” — which I have to admit is a pretty fair assessment. It’s not like these dudes are reinventing the wheel with their weeded out instrumetal, but they do it with an undeniable verve lacking in most of what could be considered stoner rock these days. The fact of the matter is, this shit destroys from the jump. Choppier than a sailboat ride in a hurricane, and you know what, it’s also just fucking fun. The prototypical rehashed Sleep riffs are all there, but contorted in clever new configurations that never would have occurred to your average Sabbath-worshipping basement dweller. The guitar tone is sick. The rhythm section air tight. Everything fits together exactly as it should, and you know what, I’m glad there’s no dude yelping about dragons and wizards and bunch of other D&D bullshit on top of it. The guitars continually bending spaceward are more than enough to compensate for the lack of vocals. This is one of the more promising debuts I’ve heard in a while, and only time will tell whether their unique brand of psychedelic Prague rock can be kept in Czech… or if they’ll find a wider audience outside their home country. I apologize heartily to anyone that just read that last sentence. Steppenlord by Saiga &nsbp; Radio Vril – Prom Ocean (Self-Released) __ Ritualisic A lot of musicians these days cop an Occult pose as a marketing gimmick while lacking even a basic understanding of the actual concepts underlying the craft. Then there’s acts like Radio Vril out of beautiful Battle Creek, Michigan, who not only make Occult art, but live the shit for real. It’s funny because years before I ever started playing around with things like sex magick, my first explorations into hyper-liminal states of consciousness involved making super freaky sampler driven electro weirdness designed specifically to fuck with my own head. Radio Vril lives to explore these states with a devotion known by a rare few. 
As far as I can tell, the guy put out what, like 5 different albums this year? I couldn’t even keep track of them all, but of the few I had time for, this was the most compelling. It starts off with pummeling house beats and samples referencing Choronzon, the guardian of the abyss, then just keeps the super freak train rolling for the duration, wisely detouring into minimal ambient patches to mix things up from time to time. What’s great about this is that in true Occult fashion (rather than the Satan-worshipping hard pose typically associated with poseur Occult nonsense), most of the Vril’s stuff that I’ve heard strikes a great balance between the dark and the light, the chaos and the order. This shit isn’t bummer vibes at all, but rather just trippy trance music designed to calculatedly elevate one’s consciousness. A lot of the non-stop keyboard sequencing action is major key and rather uplifting, in all honesty, making it maybe one of the better albums to ritualistically trip out to offered here. Guy clearly digs through the internets to find mystic-related samples and drops them with expert precision. He even takes the Addams Family theme and somehow loops it into a compelling groove, which isn’t something I would think possible, but it totally works. Proudly proclaiming himself witch house, which is a genre I thought sort of died years ago, Vril thankfully puts the witchy vibes to the forefront of that equation. Take the right drugs, recite the proper incantations, and this album will take you to the stars. Auditory sorcery at its finest. &nsbp; Guided By Voices – Cool Planet / Motivational Jumpsuit Guided By Voices Inc. __ Recreational Yeah, yeah. I know, Guided By Voices aren’t a psychedelic band by most forms of conventional measurement. True — but at the same time Robert Pollard is weirder than probably everyone else on this list combined. Let’s review. Since reforming the “classic lineup” of GBV in 2012, they managed to put out 6 full-length albums and an EP before breaking up again a mere two years later. And the thing is, excluding the inaugural inconsistency of Let’s Go Eat the Factory, they’re all pretty solid. The common knock is that these guys peaked 20 years ago, but I don’t really see it. I dig the new shit just as much and fuck, what else do you want from a band you’re a fan of? 6 goddamn new albums. Watching Bob talk shit about this astounding feat on stage in Seattle last summer was priceless. An absolute master of the cut-up approach to making art, Pollard’s hyper-productivity has spawned a whole new level of meta-strangeness in that nearly all his hardcore fans make cut-up mixes of their favorite tracks, which I had never actually done before 2012. 2014 marked the year I made my second 23 song album of GBV hits solely from their output from 2013-2014 alone, called English Motivational Planet. Of course, this is the second greatest hits album of their stuff I’ve made now, so in just what these guys did from 2012-2014, I’ve now assembled 2 separate 23 song greatest hits collages just to make sense of it all. And it’s not like these dudes are programming beats; they’re writing Surrealist pop songs. Unbelievable, and 2014 also marked the year Pollard released EAT 10, the tenth and best edition of his collage art in graphic novel format. Listen to Motivational Jumpsuit and Cool Planet (Cool Planet is slightly better in my mind) back-to-back while thumbing through that slowly, and you’ll be wandering into a foreign dimension of exquisite inner weirdness, I promise. 
Seven That Spells – The Death and Resurrection of Krautrock: IO (Sulatron Records) __ Ritualistic Seven That Spells is a band that I don’t honestly know that much about other than that they’re simultaneously amazing, from Croatia, and describe themselves by saying shit like: “Beyond. We are the dogs of the western Jazz society looking for dope.” Despite the fact that this album is touting itself as krautrock, what makes it so exceptional is that they’re not just trying to rehash Can or Neu! albums like a lot of their retro-minded contemporaries. While not really metal in any discernable way, they do seem to be a bit metal-ish just in terms of the chops necessary to pull this shit off. The first tune starts with a Middle Eastern-hued wormhole riff pattern that repeats itself a thousand or so times while the rhythm section does calculated acrobatics behind it. A lot of psychedelic rock and culture in general gets pegged as being lazy (which it often is), and what makes this great is its extreme level of focus and precision. Even when they’re veering into ambient chanting passages, nothing lingers too long or seems out of place. It’s just a manner of guiding the listener’s trip through various roller coaster dives into the blissful enchantment that lives where prog and trance collide. This album is so good I actually bought another one of theirs, which almost reminds me of a Boredoms drum circle noise extravaganza. One circular track feeding on itself like a serpent eating its tale for nearly an hour. Ahh, you’ve got to love the information age. These guys have roughly 5 other full lengths I’m now going to have to procure at some point, and they seem like the type of act that’s just going to continue to flood the market. Krautrock is over; Croat rock’s the new thing. I thought you people knew. &nsbp; Shabazz Palaces – Lese Majesty (Sub Pop Records) __ Recreational If you want to talk about acts that exist in their own constellations entirely, Shabazz Palaces would certainly be one that goes to the top of the list. It’s like hip-hop channeled from the trippiest regions of the astral plane and then reassembled meticulously in the boring confines of skin world after 30 consecutive bong rips. The evolution of trip-hop, really. So much rap puts too much focus on the MC and sometimes the coolest shit lets the uniqueness of the beats do more of the talking. This whole album just exudes such a chill vibe of stoned out visionary transmission. “Mimicking Gods”, as they put it at one point, elucidating hidden and ancient Occult concepts underlying the mystery of the entire creative process. While most rappers are spitting about how hard they are, Shabazz rap about how cool they dress and dance and being from outer space in general. Dude’s come off like they live on such a higher plane that they simply could not give a fuck if they tried. We could all probably learn something by looking up to the heavens at their dope-beat-powered UFO every now and again and aspiring to ascend to that level of not-give-a-fuckness. &nsbp; 4. Hail Mary Mallon – Bestiary (Rhymesayers) __ Recreational Also coming from a dimension comprised entirely from batshit lyricism and tripped out sci-fi effects come Hail Mary Mallon, the pairing of Aesop Rock, Rob Sonic, and DJ Wiz. Shit is so far beyond mind blowing its essentially indescribable. The funny thing is, I never would have known this bastion of divine madness even existed if not for following Aes on Twitter (@Thad_McKraken; follow me) and seeing some posts. 
Never even heard of Rob Sonic before, but wow, do the 2 make a formidable duo who finish each other’s absurdist non-sequiturs with a preternatural ease. There are so many arcane references and inside jokes running through this beast you’ll take away new mesmerizing trinkets on every run through, and that’d be the thing. I went from liking this to having it suck me in and consume my entire unconscious process for a period of about 3 weeks straight. Powerful hypnotic linguistics afoot in the HMM camp, to say the least. The scratching clinic put on by DJ Wiz puts the whole package well over the edge of incredible. In my mind, this is the best thing Aesop Rock has ever done, and I went out and bought a Rob Sonic disc because of it as well. While cool, what separates this project is the level of hilarity and self-deprecation, which works well for both of MC’s. Most rappers throw down about money and women; these guys talk smack about how they can out weird you any day of the week, or as Sonic puts it at one point: “protocol overall is lederhosen.” Even the skit that ties the disc together, which I normally hate in rap albums, is brief and funny enough that it adds to the flow rather than dragging it down. Also, just look at that album cover. I’ve seen that exact shit on mushrooms. Same color scheme and everything. Just sayin’. Earthling Society – England Have My Bones (Riot Season) __ Ritualistic Any band that starts off an album with an 11-minute track named after a channeled discarnate entity (Aiwass) is going to win points with me. A band that starts off an album with a track named after a freaky discarnate entity that actually sounds like something you’d play to summon a freaky discarnate entity is going to end up being one of my faves of the year, no problem. England’s Earthling Society: yet another band who have apparently put out a ton of records and been around for over a decade that I’m just now catching on to. I can’t speak for their entire discography, but this album is all kinds of awesome. It’s not really much more than prototypical neo psych jam band stuff, but thankfully, they don’t skimp on things like Sonic Youth-style noise guitar blasts and overly affected vocals. The thing that’s really odd about this album actually is that while being mainly in the background and indecipherable, the vocals really do add an atmospheric layer of import to the package that elevates it to the higher stratospheres of pleasantly disorienting. A lot of variety, too. No two tracks truly sound that much alike, but they all go on forever and give off the exact vibes of ritualistic higher dimensional spirit summoning. If you were to say, set aside a day to take massive amounts of hallucinogens in an attempt to make contact with your holy guardian alien, this album will get you about as close as it gets. &nsbp; 2. Anthroprophh – Outside the Circle (Rocket Recordings) __ Ritualistic I actually tried to structure this list so the super freakiest albums got extra pull in the ratings, which is why you’re finding this at number 2. Some bands dabble in psychedelia; acts like Anthroprophh go all in. Shit takes things to an entirely new level of flagrantly bananas. Songs start up in garage rock sputters and quickly head Hawkwind inner spaceward, just to vanish as soon as they started in an echoing vaporized puff of smoke, delicious smoke. New structures warp into your headspace replacing them, this time in extended bleats of deliciously over the top pulsating noise. 
All vocals are run through way more FX pedals than normal people would ever consider tasteful and end up sounding like Captain Beefheart from the year 3000. Guitars crackle and yelp in possessed wah solos from the other side. Is that some sort of backwards masked EVP radio interlude tying the hooks together? Yep. Again, this is not something to like mellow out to and chill to. This is something to force your headspace into another mode entirely. As far as fucking up your program, Anthroprophh are the real deal and just when you think the rapid fire cut up structure is getting a bit overwhelming, they lock into an extend groove or trip you out with some acoustic guitars and your consciousness is somehow stretched even further. No band went more out of their way to bend your shit heavenward than these dudes did here in 2014, end of story. &nsbp; The Future Sound of London – Environment five (FSOLdigital) __ Ritualistic Remember when people were getting all excited about that new Daft Punk album and I was like, what, that mediocre group from the ’90s? Then people were freaking out about the new Aphex Twin, and don’t get me wrong, I love the Richard D. James album and the Selective Ambient Works stuff as much as the next guy, but I also have a few other albums by him that are passable at best. Haven’t even picked up Syro yet because of that. On the other hand, the one electronic act from the ’90s that I hold in almost godlike regard is The Future Sound of London. Seriously, if you like psychedelic music of any variety and don’t own the “holy trinity” of FSOL albums that is Lifeforms, ISDN, and Dead Cities, just go download those now. No really. Absolute freaking classics. Untouchable stuff. As a matter of fact, 2014 marked the 20th anniversary of the aforementioned Lifeforms, and me randomly stumbling on an article about that lead me to the ontologically jarring news that they were releasing an entire album of new material a month later, their first since Dead Cities. What I eventually found out however, is that this being their first album of new material is a bit of misnomer, as it’s called Environment 5, after all. Yeah, it’s the 5th in a series but the others all included at least some radical reconfigurations of previously released material or something to that effect. I don’t entirely get it either, but what I do get is that 2014 was essentially the year I realized that one of my all-time favorite artists had 5 albums I’d never heard. But wait, there’s more. Yeah, I also found out that what I thought was their final disc, The Isness, was actually only released as an FSOL album in the US for marketing purposes and was supposed to come out under their alter ego Amorphous Androgynous, which explains a lot. as it didn’t sound much like their prior stuff. And oh hey, they released 2 albums under the AA name in 2013 as well, so yeah, essentially 7 new albums by these guys since 2007 after a 5-year hiatus. There are reasons bands have record labels, and me not knowing about any of this until a few months ago is that reason. Is Environment five good? Yeah, it’s freaking amazing, but so are all the Environments discs. So are the other 2 Amorphous Androgynous albums I didn’t know existed, but the reason this is the number 1 album of the year actually has to do with how it caps the series. One day, I was working on some writing and made a nearly 5-hour playlist of Environments 1-5 in order. 
What's fascinating is how 5 not only fits in, but simultaneously sort of pulls all its predecessors together like the rug that tied Lebowski's room together. I wasn't actually expecting that at all, but when it finally came on, I was mesmerized. Man, listening to the Environments series from start to finish in 1 or 2 sittings is a surefire way to transport yourself to surreal realms of inner contemplative astonishment. Highly recommended, and what I mean by that is: get high and try it when you have 5 free hours to zone out. 20 years after dropping the ambient masterpiece that is Lifeforms, FSOL still sound a few decades ahead of their contemporaries, thus living up to their cocky moniker by continuing to make raw shit for the heads for all the right reasons. The more things change, the more they stay the same, I suppose. The main difference is that now these guys are far more productive than they ever were in their supposed ’90s heyday. Who knew? www.fsoldigital.com
<filename>src/image.rs<gh_stars>10-100 use crate::*; pub struct Image { pub width: u32, pub height: u32, pub format: vk::Format, pub usage: vk::ImageUsageFlags, pub aspect_flags: vk::ImageAspectFlags, pub vk_image: vk::Image, pub image_view: vk::ImageView, pub opt_device_memory: Option<vk::DeviceMemory>, // None if we didn't manually allocate memory, e.g. in the case of swapchain images pub name: String, pub device: ash::Device, } impl Drop for Image { fn drop(&mut self) { unsafe { self.device.destroy_image_view(self.image_view, None); if let Some(mem) = self.opt_device_memory { self.device.destroy_image(self.vk_image, None); // Only destroy the image if we allocated it in the first place self.device.free_memory(mem, None); } } } } impl Image { #[allow(clippy::too_many_arguments)] pub fn new( name: &str, width: u32, height: u32, format: vk::Format, usage: vk::ImageUsageFlags, aspect_flags: vk::ImageAspectFlags, gpu: &Gpu, debug_utils: &DebugUtils, ) -> Image { let device = gpu.device.clone(); let image_create_info = vk::ImageCreateInfo::builder() .image_type(vk::ImageType::TYPE_2D) .format(format) .mip_levels(1) .array_layers(1) .samples(vk::SampleCountFlags::TYPE_1) .tiling(vk::ImageTiling::OPTIMAL) .usage(usage) .sharing_mode(vk::SharingMode::EXCLUSIVE) .extent(vk::Extent3D { width, height, depth: 1, }); let vk_image = unsafe { device .create_image(&image_create_info, None) .expect("Failed to create image.") }; let image_memory_requirement = unsafe { device.get_image_memory_requirements(vk_image) }; let memory_type_index = gpu .memory_properties .memory_types .iter() .enumerate() .position(|(i, &memory_type)| { (image_memory_requirement.memory_type_bits & (1 << i)) > 0 && memory_type .property_flags .contains(vk::MemoryPropertyFlags::DEVICE_LOCAL) }) .expect("Failed to find suitable memory type.") as u32; let memory_allocate_info = vk::MemoryAllocateInfo::builder() .allocation_size(image_memory_requirement.size) .memory_type_index(memory_type_index); let device_memory = unsafe { device .allocate_memory(&memory_allocate_info, None) .expect("Failed to allocate image memory.") }; unsafe { device .bind_image_memory(vk_image, device_memory, 0) .expect("Failed to bind image memory."); } let image_view = { let imageview_create_info = vk::ImageViewCreateInfo::builder() .view_type(vk::ImageViewType::TYPE_2D) .format(format) .subresource_range(vk::ImageSubresourceRange { aspect_mask: aspect_flags, base_mip_level: 0, level_count: 1, base_array_layer: 0, layer_count: 1, }) .image(vk_image); unsafe { gpu.device .create_image_view(&imageview_create_info, None) .expect("Failed to create Image View!") } }; debug_utils.set_image_name(vk_image, name); Image { width, height, format, usage, aspect_flags, vk_image, image_view, opt_device_memory: Some(device_memory), device, name: String::from(name), } } pub fn transition_image_layout( &self, old_layout: vk::ImageLayout, new_layout: vk::ImageLayout, command_buffer: vk::CommandBuffer, ) { let src_access_mask; let dst_access_mask; let source_stage; let destination_stage; if old_layout == vk::ImageLayout::UNDEFINED && new_layout == vk::ImageLayout::TRANSFER_DST_OPTIMAL { src_access_mask = vk::AccessFlags::empty(); dst_access_mask = vk::AccessFlags::TRANSFER_WRITE; source_stage = vk::PipelineStageFlags::TOP_OF_PIPE; destination_stage = vk::PipelineStageFlags::TRANSFER; } else if (old_layout == vk::ImageLayout::TRANSFER_DST_OPTIMAL || old_layout == vk::ImageLayout::UNDEFINED) && new_layout == vk::ImageLayout::SHADER_READ_ONLY_OPTIMAL { src_access_mask = 
vk::AccessFlags::TRANSFER_WRITE; dst_access_mask = vk::AccessFlags::SHADER_READ; source_stage = vk::PipelineStageFlags::TRANSFER; destination_stage = vk::PipelineStageFlags::FRAGMENT_SHADER; } else { panic!("Unsupported layout transition!") } let image_barriers = [vk::ImageMemoryBarrier { s_type: vk::StructureType::IMAGE_MEMORY_BARRIER, p_next: ptr::null(), src_access_mask, dst_access_mask, old_layout, new_layout, src_queue_family_index: vk::QUEUE_FAMILY_IGNORED, dst_queue_family_index: vk::QUEUE_FAMILY_IGNORED, image: self.vk_image, subresource_range: vk::ImageSubresourceRange { aspect_mask: vk::ImageAspectFlags::COLOR, base_mip_level: 0, level_count: 1, base_array_layer: 0, layer_count: 1, }, }]; unsafe { self.device.cmd_pipeline_barrier( command_buffer, source_stage, destination_stage, vk::DependencyFlags::empty(), &[], &[], &image_barriers, ); } } pub fn new_from_image( gpu: &Gpu, path: &std::path::Path, command_pool: vk::CommandPool, name: &str, debug_utils: &DebugUtils, ) -> Image { use ::image::GenericImageView; let mut image_object = ::image::open(path).unwrap(); image_object = image_object.flipv(); let (image_width, image_height) = (image_object.width(), image_object.height()); let image_size = std::mem::size_of::<u8>() * image_width as usize * image_height as usize * 4; let image_data = image_object.to_rgba().into_raw(); if image_size == 0 { panic!("Failed to load image.") } let image = Image::new( name, image_width, image_height, vk::Format::R8G8B8A8_UNORM, // TODO: Derive format from file or take as an argument vk::ImageUsageFlags::TRANSFER_DST | vk::ImageUsageFlags::SAMPLED, vk::ImageAspectFlags::COLOR, gpu, debug_utils, ); let staging_buffer = HostVisibleBuffer::new( "image_staging_buffer", image_size, vk::BufferUsageFlags::TRANSFER_SRC, gpu, debug_utils, ); staging_buffer.upload_data(&image_data, 0); let command_buffer = begin_single_use_command_buffer(&gpu.device, command_pool); image.transition_image_layout( vk::ImageLayout::UNDEFINED, vk::ImageLayout::TRANSFER_DST_OPTIMAL, command_buffer, ); // Copy buffer to image { let buffer_image_regions = [vk::BufferImageCopy { image_subresource: vk::ImageSubresourceLayers { aspect_mask: vk::ImageAspectFlags::COLOR, mip_level: 0, base_array_layer: 0, layer_count: 1, }, image_extent: vk::Extent3D { width: image_width, height: image_height, depth: 1, }, buffer_offset: 0, buffer_image_height: 0, buffer_row_length: 0, image_offset: vk::Offset3D { x: 0, y: 0, z: 0 }, }]; unsafe { gpu.device.cmd_copy_buffer_to_image( command_buffer, staging_buffer.vk_buffer, image.vk_image, vk::ImageLayout::TRANSFER_DST_OPTIMAL, &buffer_image_regions, ); } } image.transition_image_layout( vk::ImageLayout::TRANSFER_DST_OPTIMAL, vk::ImageLayout::SHADER_READ_ONLY_OPTIMAL, command_buffer, ); end_single_use_command_buffer(command_buffer, command_pool, gpu); image } }
// Copyright 2022 <NAME>. // All rights reserved. package main import ( "reflect" "sort" "testing" ) func TestLookupTable(t *testing.T) { table := newLookupTable() for _, f := range []struct { id fileID fprint []uint32 }{ {1, []uint32{0x44442222, 0x44441111, 0x33332222, 0x55553333}}, {2, []uint32{0x44442222, 0x44442222, 0x44441111, 0x55553333}}, {3, []uint32{0x33332222, 0x33331111, 0x33334444, 0x44442222}}, } { table.add(f.id, f.fprint) } for _, tc := range []struct { fprint []uint32 thresh int want []fileID }{ {[]uint32{0x44442222, 0x44441111, 0x33332222, 0x55553333}, 4, []fileID{1}}, {[]uint32{0x44441111, 0x44448888, 0x33331111, 0x55551111}, 4, []fileID{1}}, {[]uint32{0x44442222, 0x44441111, 0x33332222, 0x55553333}, 3, []fileID{1, 2}}, {[]uint32{0x44442222, 0x44441111, 0x33332222, 0x55553333}, 2, []fileID{1, 2, 3}}, {[]uint32{0x44442222, 0x44442222, 0x44442222, 0x44442222}, 4, []fileID{}}, {[]uint32{0x44442222, 0x44442222, 0x44442222, 0x44442222}, 3, []fileID{2}}, {[]uint32{0x99999999, 0x99999999, 0x99999999, 0x99999999}, 1, []fileID{}}, {[]uint32{0x33333333, 0x33333333, 0x33333333, 0x33333333}, 4, []fileID{}}, {[]uint32{0x33333333, 0x33333333, 0x33333333, 0x33333333}, 3, []fileID{3}}, {[]uint32{0x33333333, 0x33333333, 0x33333333, 0x33333333}, 2, []fileID{3}}, {[]uint32{0x33333333, 0x33333333, 0x33333333, 0x33333333}, 1, []fileID{1, 3}}, } { got := table.find(tc.fprint, tc.thresh) sort.Slice(got, func(i, j int) bool { return got[i] < got[j] }) if !reflect.DeepEqual(got, tc.want) { t.Errorf("find(%v, %d) = %v; want %v", tc.fprint, tc.thresh, got, tc.want) } } }
// String returns a textual (brief) representation of the Game func (g Game) String() string { return fmt.Sprintf("GameID:%d Champion Played: %d Won:%v KDA:%d/%d/%d", g.ID, g.Champion, g.Stats.Win, g.Stats.Kills, g.Stats.Death, g.Stats.Assists) }
/* * Reset the TODR based on the time value; used when the TODR * has a preposterous value and also when the time is reset * by the stime system call. Also called when the TODR goes past * TODRZERO + 100*(SECYEAR+2*SECDAY) (e.g. on Jan 2 just after midnight) * to wrap the TODR around. */ void resettodr() { #if NRTC > 0 struct clock_ymdhms dt; rtc_t rtc; int s; if (!clockinitted) return; clock_secs_to_ymdhms(time.tv_sec, &dt); rtc.rtc_century = dt.dt_year / 100; rtc.rtc_year = dt.dt_year % 100; rtc.rtc_month = dt.dt_mon; rtc.rtc_day = dt.dt_day; rtc.rtc_hour = dt.dt_hour; rtc.rtc_minute = dt.dt_min; rtc.rtc_second = dt.dt_sec; #if defined(DEBUG) && 0 printf("resettodr: %02d%02d/%02d/%02d %02d:%02d:%02d\n", rtc.rtc_century, rtc.rtc_year, rtc.rtc_month, rtc.rtc_day, rtc.rtc_hour, rtc.rtc_minute, rtc.rtc_second); #endif s = splclock(); rtc_write(&rtc); (void)splx(s); #endif }
/** * Find and select the next searchable matching text. * * @param reverse look forwards or backwards * @param pos the starting index to start finding from * @return the location of the next selected, or -1 if not found */ private static int findNext(boolean reverse, int pos) { boolean backwards = IS_BACKWARDS_CHECKBOX.isSelected(); backwards = backwards ? !reverse : reverse; String pattern = (String) FIND_FIELD.getSelectedItem(); if (pattern != null && pattern.length() > 0) { try { Document doc = textComponent.getDocument(); doc.getText(0, doc.getLength(), SEGMENT); } catch (Exception e) { e.printStackTrace(); } pos += textComponent.getSelectedText() == null ? (backwards ? -1 : 1) : 0; char first = backwards ? pattern.charAt(pattern.length() - 1) : pattern.charAt(0); char oppFirst = Character.isUpperCase(first) ? Character.toLowerCase(first) : Character.toUpperCase(first); int start = pos; boolean wrapped = WRAP_SEARCH_CHECKBOX.isSelected(); int end = backwards ? 0 : SEGMENT.getEndIndex(); pos += backwards ? -1 : 1; int length = textComponent.getDocument().getLength(); if (pos > length) { pos = wrapped ? 0 : length; } boolean found = false; while (!found && (backwards ? pos > end : pos < end)) { found = !MATCH_CASE_CHECKBOX.isSelected() && SEGMENT.array[pos] == oppFirst; found = found ? found : SEGMENT.array[pos] == first; if (found) { pos += backwards ? -(pattern.length() - 1) : 0; for (int i = 0; found && i < pattern.length(); i++) { char c = pattern.charAt(i); found = SEGMENT.array[pos + i] == c; if (!MATCH_CASE_CHECKBOX.isSelected() && !found) { c = Character.isUpperCase(c) ? Character.toLowerCase(c) : Character.toUpperCase(c); found = SEGMENT.array[pos + i] == c; } } } if (!found) { pos += backwards ? -1 : 1; if (pos == end && wrapped) { pos = backwards ? SEGMENT.getEndIndex() : 0; end = start; wrapped = false; } } } pos = found ? pos : -1; } return pos; }
Investigation of Boundary-Layer Separation for Lifting Surfaces Flow separation from lifting surfaces such as airfoils is undesirable as it deteriorates performance. For example, when airfoils that are designed for large Reynolds numbers are operated at smaller off-design Reynolds numbers, laminar separation can occur. Laminar separation typically leads to transition and reattachment. Transition is influenced by factors, such as free-stream turbulence and wall roughness. Transition and reattachment affect the circulation and, thereby, separation itself. We are employing computational fluid dynamics for investigating the fundamental mechanisms of separation and transition for lifting surfaces. Using highly-resolved direct numerical simulations, we are investigating fundamental aspects of separation and transition in the presence of free-stream turbulence for canonical separation bubbles. In parallel, we are carrying out hybrid turbulence model simulations of an entire airfoil at a larger chord Reynolds number. The combined approach will advance both physical understanding and modeling capabilities, and thus provide a solid platform for the development of separation control strategies for practical applications.
def extract_tree(infile, folder):
    """Mirror the directory tree rooted at infile into the given folder object."""
    root = infile
    # os.walk (unlike the old os.path.walk) is a generator, so iterate over it
    # explicitly instead of passing a callback and an argument dict.
    for path, dirs, files in os.walk(infile):
        # os.path.walk handed the callback the full directory listing, so visit
        # both sub-directories and files here to preserve that behaviour.
        for filename in dirs + files:
            fullpath = os.path.join(path, filename)
            relpath = os.path.relpath(fullpath, root)
            if os.path.isdir(fullpath):
                # Make sure the enclosing sub-folder exists for directories.
                if os.path.dirname(relpath) != '':
                    folder.get_subfolder(os.path.dirname(relpath) + os.sep, create=True)
            elif not os.path.isfile(fullpath):
                continue
            if os.path.dirname(relpath) != '':
                folder.get_subfolder(os.path.dirname(relpath) + os.sep, create=True)
            folder.insert_path(os.path.abspath(fullpath), relpath)
import { Hook } from '@oclif/config'
import { getPluginInfo } from '../../commands/plugins/available'
import chalk from 'chalk'

const hook: Hook<'prerun'> = async function (opts) {
  // Only for test purposes, to avoid an error on an undefined object
  if (!opts.Command || !opts.argv) return

  // Only plugins commands are affected by this hook
  if (!opts.Command.id.startsWith('plugins')) return
  if (opts.argv.length === 0) return

  if (['plugins:install', 'plugins:uninstall'].includes(opts.Command.id)) {
    let index = -1
    let plugin

    const found = opts.argv.some(a => {
      index++
      if (a.startsWith('-')) return false
      const p = getPluginInfo(a)
      if (p === undefined) this.error(`Unknown Commerce Layer CLI plugin: ${chalk.redBright(a)}: execute command '${chalk.italic(`${this.config.bin} plugins:available`)}' to get a list of all available plugins`)
      else plugin = p.plugin as string
      return true
    })

    // Check tag flag
    if (found && plugin) {
      const tgIndex = opts.argv.indexOf('--tag')
      if (tgIndex > -1) {
        opts.argv[index] = plugin + '@' + opts.argv[tgIndex + 1]
        opts.argv.splice(tgIndex, 2)
      } else opts.argv[index] = plugin
    }
  }
}

export default hook
/** * Utility functions for doing superstep striping. * * We need to make sure that partitioning (which uses mod for distributing * data across workers) is independent from striping itself. So we are using * fastHash function below, taken from https://code.google.com/p/fast-hash/. */ public class StripingUtils { private StripingUtils() { } /* The MIT License Copyright (C) 2012 Zilong Tan ([email protected]) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ /** * Returns 32-bit hash of a given value. * * Fast and generally good hashing function, adapted from C++ implementation: * https://code.google.com/p/fast-hash/ */ public static int fastHash(long h) { h ^= h >> 23; h *= 0x2127599bf4325c37L; h ^= h >> 47; return ((int) (h - (h >> 32))) & 0x7fffffff; } /** * Returns number in [0, stripes) range, from given input {@code value}. */ public static int fastStripe(long value, int stripes) { return fastHash(value) % stripes; } /** * Fast hash-based striping for LongWritable IDs, returns a function * that for a given ID returns it's stripe index. */ public static Obj2IntFunction<LongWritable> fastHashStriping(final int stripes) { return new Obj2IntFunction<LongWritable>() { @Override public int apply(LongWritable id) { return fastStripe(id.get(), stripes); } }; } /** * Fast hash-based striping for LongWritable IDs, returns a function * that for a given stripe index returns a predicate checking whether ID is * in that stripe. */ public static Int2ObjFunction<Predicate<LongWritable>> fastHashStripingPredicate( final int stripes) { return new Int2ObjFunction<Predicate<LongWritable>>() { @Override public Predicate<LongWritable> apply(final int stripe) { return new Predicate<LongWritable>() { @Override public boolean apply(LongWritable id) { return fastStripe(id.get(), stripes) == stripe; } }; } }; } /** * Generate striped block, with given number of {@code stripes}, * using given {@code blockGenerator} to generate block for each stripe. 
* * @param stripes Number of stripes * @param blockGenerator Function given predicate representing whether * ID is in current stripe, should return Block * for current stripe * @return Resulting block */ public static Block generateStripedBlock( int stripes, Function<Predicate<LongWritable>, Block> blockGenerator) { return generateStripedBlockImpl( stripes, blockGenerator, StripingUtils.fastHashStripingPredicate(stripes)); } /** * Generate striped block, with given number of {@code stripes}, * using given {@code blockGenerator} to generate block for each stripe, * and using striping based on given {@code stripeSupplier}. * * @param stripes Number of stripes * @param blockGenerator Function given predicate representing whether * ID is in current stripe, should return Block * for current stripe * @param stripeSupplier Function given number of stripes, * generates a function that given stripe index, * returns predicate checking whether ID is in that * stripe. * @return Resulting block */ public static <I extends WritableComparable> Block generateStripedBlock( int stripes, Function<Predicate<I>, Block> blockGenerator, Int2ObjFunction<Int2ObjFunction<Predicate<I>>> stripeSupplier) { return generateStripedBlockImpl( stripes, blockGenerator, stripeSupplier.apply(stripes)); } /** * Stripe given block, by calling vertexSend only in it's corresponding * stripe. All other methods are called number of stripes times. * * @param stripes Number of stripes * @param block Block to stripe * @return Resulting block */ public static Block stripeBlockBySenders( int stripes, Block block) { return generateStripedBlockImpl( stripes, StripingUtils.<LongWritable>createSingleStripeBySendersFunction(block), StripingUtils.fastHashStripingPredicate(stripes)); } /** * Given a block, creates a function that will given a predicate filter * calls to vertexSend function based on that predicate. * * Useful to be combined with generateStripedBlock to stripe blocks. */ public static <I extends WritableComparable> Function<Predicate<I>, Block> createSingleStripeBySendersFunction(final Block block) { return new Function<Predicate<I>, Block>() { @Override public Block apply(final Predicate<I> stripePredicate) { return FilteringBlock.createSendFiltering( new SupplierFromVertex<I, Writable, Writable, Boolean>() { @Override public Boolean get(Vertex<I, Writable, Writable> vertex) { return stripePredicate.apply(vertex.getId()); } }, block); } }; } private static <I extends WritableComparable> Block generateStripedBlockImpl( int stripes, Function<Predicate<I>, Block> blockGenerator, Int2ObjFunction<Predicate<I>> stripeSupplier) { Preconditions.checkArgument(stripes >= 1); if (stripes == 1) { return blockGenerator.apply(new Predicate<I>() { @Override public boolean apply(I input) { return true; } }); } Block[] blocks = new Block[stripes]; for (int i = 0; i < stripes; i++) { blocks[i] = blockGenerator.apply(stripeSupplier.apply(i)); } return new SequenceBlock(blocks); } }
<reponame>LastKing/core package kitmw import ( "context" "errors" "fmt" "google.golang.org/grpc/codes" "github.com/DoNewsCode/core/unierr" "github.com/go-kit/kit/endpoint" ) // ErrorOption is an option that tunes the middleware returned by // MakeErrorConversionMiddleware type ErrorOption struct { AlwaysHTTP200 bool ShouldRecover bool } // MakeErrorConversionMiddleware returns a middleware that wraps the error returned // by the next handler with a *unierr.Error. If a successful response is // returned from the next handler, this is a no-op. If the error returned by the next // handler is already a *unierr.Error, this decorates the *unierr.Error based on // ErrorOption. func MakeErrorConversionMiddleware(opt ErrorOption) endpoint.Middleware { return func(e endpoint.Endpoint) endpoint.Endpoint { return func(ctx context.Context, request interface{}) (response interface{}, err error) { defer func() { if !opt.ShouldRecover { return } if er := recover(); er != nil { err = unierr.InternalErr(fmt.Errorf("panic: %s", er)) } }() response, err = e(ctx, request) if err != nil { var serverError *unierr.Error if !errors.As(err, &serverError) { serverError = unierr.UnknownErr(err) } if opt.AlwaysHTTP200 { serverError.HttpStatusCodeFunc = func(code codes.Code) int { return 200 } } // Bring the *unierr.Error to the uppermost level return response, serverError } return response, nil } } }
<reponame>vipoo/msxrc2014 #define __Z88DK_R2L_CALLING_CONVENTION #include "spike-ide.h" #include "msxdos.h" #include "partition.h" #include <stdarg.h> #include <stddef.h> #include <stdio.h> #include <stdlib.h> #include <string.h> ioBuf buf; int8_t result; GPartInfo gResult; void main() { printf("spike-ide - ver: 0.4\r\n"); gResult.sectorCount = 0; uint8_t error = msxdosGpart(0x8F, 2, 1, 1, 0, false, &gResult); printf("type: %d, status: %d, size: %lu, start %lu, %lu, %d\r\n", gResult.typeCode, gResult.status, gResult.sectorCount, gResult.startingSector, gResult.partitionSector, sizeof(GPartInfo)); error = msxdosGpart(0x8F, 2, 1, 2, 0, false, &gResult); printf("type: %d, status: %d, size: %lu, start %lu, %lu, %d\r\n", gResult.typeCode, gResult.status, gResult.sectorCount, gResult.startingSector, gResult.partitionSector, sizeof(GPartInfo)); buf.lba0 = 0; buf.lba1 = 0; buf.lba2 = 0; buf.lba3 = 0; result = cfReadTest(&buf); if (result != 0) { printf("cfReadTest error %d\r\n", result); exit(0); } printf("--------------\r\n"); // partition entry 0 for (int i = 446; i < 446 + 16; i++) { printf("%02X ", buf.data[i]); } printf("\r\n"); // partition entry 1 for (int i = 446 + 16; i < 446 + 32; i++) { printf("%02X ", buf.data[i]); } // printf("Retriving Identity info\r\n"); // const uint8_t d = cfProbe(); // if (d == 0) { // printf("Compact Flash Module not detected\r\n"); // exit(0); // } // cfInit(); // result = cfReadIdentity(buf.data); // if (result != 0) { // printf("cfReadIdentity error %d\r\n", result); // exit(0); // } // IdeIdentity *x = (IdeIdentity *)buf.data; // x->ModelNumber[39] = 0; // x->SerialNumber[19] = 0; // printf("General Configuration %04X\r\n", x->GeneralConfiguration); // printf("Serial Number: %s\r\n", x->SerialNumber); // printf("Model Number: %s\r\n", x->ModelNumber); // printf("CurrentSectorsPerTrack %u\r\n", x->CurrentSectorsPerTrack); // printf("CurrentSectorCapacity %lu\r\n", x->CurrentSectorCapacity); // printf("Storage Capacity: %lu mega bytes\r\n", x->CurrentSectorCapacity * 512 / 1024 / 1024); // printf("CurrentSectorCapacity Offset: %d\r\n", offsetof(struct _IdeIdentity, CurrentSectorCapacity)); // printf("ModelNumber Offset: %d\r\n", offsetof(struct _IdeIdentity, ModelNumber)); // const uint32_t lastSector = x->CurrentSectorCapacity - 1; // buf.lba0 = lastSector; // buf.lba1 = (lastSector >> 8) & 0xFF; // buf.lba2 = (lastSector >> 16) & 0xFF; // buf.lba3 = (lastSector >> 24) & 0xFF; // printf("lba: %02X %02X %02X %02X\r\n", buf.lba0, buf.lba1, buf.lba2, buf.lba3); // result = cfReadTest(&buf); // if (result != 0) { // printf("cfReadTest error %d\r\n", result); // exit(0); // } // printf("--------------\r\n"); // for (int i = 0; i < 128; i++) { // if (i % 16 == 0) // printf("\r\n"); // printf("%02X ", buf.data[i]); // } // for (int i = 0; i < 512; i++) // buf.data[i] = buf.data[i] + 1; // result = cfWriteTest(&buf); // if (result != 0) { // printf("cfWriteTest error %d\r\n", result); // exit(0); // } }
Gad Rausing: Prehistoric boats and ships of North Western Europe. University of Lund: Institute of Archaeology, 1984. (Obtainable Liber Förlag, 205 10 Malmö, Sweden.) 144 pp., 45 figs. £5.50.

Neither the purpose of the book nor its intended audience is specifically stated. Careful study of the volume does not throw much light on these questions. It is thus difficult to offer constructive criticism of Gillon’s courageous attempt to provide an overview of African art in chronological perspective. A major problem is that his treatment of different subject-areas is very uneven. Rock art receives a 20-page chapter of its own, complete with a schematized and misleading distribution map. There are 12 illustrations of Saharan rock art, 3 from southern Africa and none from anywhere else. The impression is given that all rock art is ‘neolithic’, which is not so, and which means that no attempt is made to compare the later rock art with other contemporaneous art-forms. The important work of Vinnicombe and of Lewis-Williams on the interpretation of southern African rock paintings is cited in footnotes but not properly discussed. There is also a chapter on ancient Nubian art. The rest of the book deals with the art of sub-Saharan Africa’s iron-using peoples from the mid-1st millennium BC to recent times. West Africa receives 195 pages, arranged in nine chapters. The rest of the continent gets very short shrift: single chapters on ‘the southern Savanna’ (including the whole of the Congo basin) in 40 pages, Eastern Africa in 22 pages and Southern Africa in a mere 12 pages, very largely on the Lydenburg heads and Great Zimbabwe. The extent to which authorities are cited also varies enormously. As a result some important areas escape notice altogether, the Zande for example, while the unfortunate Lunda are dismissed as ‘not an art-producing people’ (p. 286). Art in this, the main section of the book, is considered within narrowly defined terms: very largely those of the western collector. Architecture is noted occasionally, but only in terms of large, permanent structures in Morocco, East Africa or the western Sudan. Pottery and body-decoration receive token mention, but most of the material considered is 3-dimensional sculpture. There is a strong tendency to emphasize the ‘fine’ or exceptional piece, the criteria for selection not being clearly evaluated. Some sort of selection is of course essential in a work of this kind, but the absence of stated criteria could seriously mislead the reader. Perhaps a greater problem is due to Gillon’s conceptual framework. This consists of a rigid division into compartments, the bounds of which are assumed to extend an unspecified distance into the past. Professor Roy Sieber, in his foreword, points out the difficulties implicit in a view of ‘cultural timelessness’ and the concept of ‘tribal style’, but Gillon gives little evidence of seriously facing up to either problem. Likewise, Sieber draws attention to the presence of two systems of art among the Mossi, belonging respectively to the indigenous population and to their rulers (and a very similar situation exists in northern Zambia), but this important point is not brought out in Gillon’s text. Cross-referencing between chapters is almost completely absent, so there is no demonstration of characteristics which transgress ‘tribal’ divisions.
Function is discussed repeatedly but only in very general terms: ‘they are traditional carvings for ancestor and spirit cults, for initiation, medical and divination purposes’ (p. 292). Likewise, the whole question of symbolism receives little more than a hint. There are an unfortunate number of woolly statements, where one suspects the author has not wished to adjudicate between conflicting published views. For example: the ‘presence in southern Africa is said to date back to the middle or late Stone Age or to the beginning of the Iron Age’ (p. 51); ‘the use of beads as ornaments dates back to antiquity in Africa and other continents’ (p. 239). Despite some interesting arguments and some splendid illustrations, this is a disappointing book.
I say consensus, because among scientists there is no debate. Climate change is real, it is caused by us and it will cause devastating changes to our world if we don't take decisive action. Canada's leaders all accept that climate change is real, and they speak a good game on the need to address it. But their actions, and their commitments on key projects, are oftentimes diametrically opposed to their supposed understanding of the issues.

Scientific consensus missing from Canada's election campaign

The day after the debate, star NDP candidate Linda McQuaig caused controversy and was accused of going against party policy when she told CBC's Power & Politics that "a lot of the oilsands oil may have to stay in the ground if we're going to meet our climate change targets." But all she did was paraphrase the latest scientific research. A study in the journal Nature looked at which carbon deposits worldwide need to be left in the ground to avoid "dangerous" climate change, and concluded that many known reserves, including 85 per cent of Canada's tar sands, cannot be exploited. In October 2014 Mark Carney, former governor of the Bank of Canada and now governor of the Bank of England, told a World Bank seminar that the "vast majority of reserves are unburnable" if global warming is to be held below 2C. That's the scientific reality, which all the electoral leaders, with the exception of the Greens' Elizabeth May, did their best to ignore on Thursday night. There was much debate over Canada's environmental assessment process, which both the NDP and Liberals say the Conservative government has gutted and left incapable of producing a fair evaluation. But the thrust of remarks by both NDP leader Tom Mulcair and Liberal leader Justin Trudeau was that they would reform the environmental assessment process and thus win Canadians' support for pipeline projects. That would be fine if the only concern of Canadians were the localized environmental impact of pipelines. But people are concerned about the climate impact of expanding the tar sands (the output of which the Canadian Association of Petroleum Producers forecasts will increase by 140 per cent over the next 15 years), and expect leadership on the issue from the contenders to replace Prime Minister Stephen Harper. A 2014 poll showed Canadians split in half on pipeline projects such as Energy East and Keystone XL. Public opposition appears to be growing rapidly, as a poll released in April of this year found that support for Energy East had dropped to 36 per cent nationally. Nevertheless, none of the main party leaders are willing to reject these projects, despite the fact that their economic impact is overstated, their contribution to climate change is irrefutable and they are supported by only a third of the population.

Taking a position is hard; promising an assessment is easy

When I interviewed Bill McKibben, the founder of climate advocacy group 350.org, in June, he told me he considers U.S. President Barack Obama a climate denier. "He accepts the climate science, says all the right things, but then he grants Shell a permit to go drill in the Arctic. That's a denial of the need to take dramatic, quick action." I was reminded of his words on Thursday, when leaders of Canada's political parties spoke of the urgency of acting on climate change, then proceeded to promise to do the exact opposite.
“The fact is we need to restore public trust in our ability as a government to create a level playing field upon which proponents of a project can acquire social license, can gain the public trust from the communities it’ll touch,” said Trudeau. When needled by May on his refusal to take a clear position on pipelines, Mulcair shot back that “opposing these pipelines systematically in advance is just as wrong as supporting them in advance because, in both cases, what you need is an objective study.” That sounds reasonable, doesn’t it? And it is. An objective study would make sense if we wanted to assess an unknown like, say, the impact of a pipeline on the land it would cross. But we already have that rarest of commodities within the scientific community: consensus. Our best scientists, many of whom have been working on nothing else for decades, agree that the urgent need to transition off of fossil fuels means we can’t exploit and burn the majority of known reserves. The tar sands are at the top of the list of what can’t be burned, because they are more carbon intensive than other sources of oil. So when Trudeau argues that “the job of the prime minister is to get those resources to market and in the 21st century that means being smart and responsible about the environment,” he’s denying the scientific reality. Being smart and responsible about the environment means choosing not to get all of that particular resource to market. Canadians vastly overestimate the economic value of the tar sands to the economy, a 2014 poll showed. Non-conventional oil production accounts for only 2 per cent of national output, while manufacturing represents over 10 per cent, according to Statistics Canada. The poll found that “41 percent of Canadians think the oilsands contribution to the economy is between 6 and 24 times higher than it is.” Watching their performance in the debate, one wonders if the leaders of the three main federal parties are included in that 41 per cent.

Ostriches with heads in the sand

In an op-ed written for the Toronto Star in 2009, Gerald Butts, then president of WWF Canada and now a senior advisor to Liberal leader Justin Trudeau, called out “tar-sands supporters” and argued that the views of the national political leaders on the tar sands were “myopic.” “While the rest of the world searches for a low-carbon path to growth,” wrote Butts, “we are betting the national economy on a carbon footprint deeper than even conventional fossil fuels.” Like Butts, Trudeau and Mulcair are smart people and I have no doubt that they can read the science. They know that we can’t build more pipelines without expanding the tar sands to fill them, which scientists think will wreck our climate. They also know that any damage to the economy from restricting the expansion of the tar sands can be offset by transferring economic subsidies given to oil and gas companies to the renewable energy sector and joining countries such as Germany in positioning our economy to take advantage of the transition to renewable energy. But instead, all three leaders are acting like ostriches, planting their heads firmly in the sand and accusing anyone who raises the science of heresy. Which brings us to the fourth leader on stage Thursday, Elizabeth May. Ricochet’s editorial board live-tweeted the debate, and our consensus was that she won. She repeatedly grilled the other leaders on their contradictions and made clear and concise interventions throughout the evening.
She showed that she belonged and should be invited to all remaining debates. But she leads a party with no chance of forming government, and on Friday clarified that she didn’t want to shut down the tar sands, and instead advocated refining the oil at home. There’s an argument to be made for doing so on economic grounds, but it doesn’t address the climate impact. Both Trudeau and Mulcair have spent months trying to speak out of both sides of their mouth on pipelines. Both have given French-language interviews that were interpreted to indicate their opposition to the Energy East pipeline, and both scrambled to clarify in English that they do not oppose the pipeline, although they don’t support it either. The fact of the matter is they have no position. Arguing that you’ll reserve judgement on the most scientifically studied subject on earth until you conduct a new and improved environmental assessment is disingenuous. The job of politicians is to take positions on controversial issues and then argue their case in an election campaign. By refusing to take a position on the most serious threat this country faces, our leaders are abdicating their responsibility to lead and denying the scientific reality of climate change.
<filename>src/token/TokenType.java package token; public enum TokenType { // Single-character tokens. LEFT_PAREN, RIGHT_PAREN, LEFT_BRACE, RIGHT_BRACE, COMMA, DOT, MINUS, PLUS, SEMICOLON, SLASH, STAR, // One or two character tokens. BANG, BANG_EQUAL, EQUAL, EQUAL_EQUAL, GREATER, GREATER_EQUAL, LESS, LESS_EQUAL, PLUS_PLUS, MINUS_MINUS, //Exponent STAR_STAR, //Special assignments PLUS_EQUAL, MINUS_EQUAL, STAR_EQUAL, SLASH_EQUAL, STAR_STAR_EQUAL, //Bitwise operations SHIFT_RIGHT, SHIFT_LEFT, LOGICAL_SHIFT_RIGHT, //Conditional QUESTION_MARK, COLON,ELVIS, // Literals. IDENTIFIER, STRING, NUMBER, CHAR, LIST, MODULE, NATIVE, // Keywords. CLASS, ELSE, FALSE, FUN, FOR, REPEAT, IF, NONE, OR, AND, XOR, EXTENDS, PRINT, RETURN, SUPER, THIS, TRUE, VAR, DO, WHILE, BREAK, CONTINUE, NIL, TEST, EOF, ARRAY, ARRAY_OPEN, ARRAY_CLOSE }
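/*
 * Illustrative sketch (not part of the original enum): one way a scanner might map the
 * multi-character operator lexemes onto the token types above. The Map, the lexeme
 * spellings and the assumption that this lives alongside the enum in package "token"
 * are all illustrative, not defined by the enum itself.
 */
class TokenTypeOperatorTable {
  static final java.util.Map<String, TokenType> OPERATORS = java.util.Map.of(
      "**", TokenType.STAR_STAR,        // exponent
      "**=", TokenType.STAR_STAR_EQUAL, // exponent-assign
      ">>", TokenType.SHIFT_RIGHT,
      ">>>", TokenType.LOGICAL_SHIFT_RIGHT,
      "?:", TokenType.ELVIS,
      "++", TokenType.PLUS_PLUS,
      "--", TokenType.MINUS_MINUS);
}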
/** * Class for creating the printer that writes the converted content. * @author Cosmin Duna * */ public class ContentPrinterCreater { /** * Private constructor. */ private ContentPrinterCreater() { throw new IllegalStateException("Utility class"); } /** * Create a ContentPrinter according to the given converter type. * @param converterType The type of converter. * @return The contentPrinter */ public static ContentPrinter create(String converterType){ if(ConverterTypes.XML_TO_JSON.equals(converterType) || ConverterTypes.YAML_TO_JSON.equals(converterType) || ConverterTypes.JSON_TO_YAML.equals(converterType)){ //return a printer that doesn't edit (indent) the content of the conversion. return new SimpleContentPrinterImpl(); }else { //return a printer that prettifies the content. return new PrettyContentPrinterImpl(); } } }
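/*
 * Usage sketch (not part of the original class): choosing a printer for an XML-to-JSON
 * conversion. ConverterTypes and ContentPrinter come from the same project as the
 * factory above; only the create call is taken from the code shown here.
 */
class ContentPrinterCreaterExample {
  static ContentPrinter forXmlToJson() {
    // XML_TO_JSON is one of the types that receives the non-indenting printer.
    return ContentPrinterCreater.create(ConverterTypes.XML_TO_JSON);
  }
}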
// process the dhcp packet before sending to server private void processDhcpPacket(PacketContext context, DHCP dhcpPayload) { ConnectPoint inPort = context.inPacket().receivedFrom(); Set<Interface> clientServerInterfaces = interfaceService.getInterfacesByPort(inPort); if (clientServerInterfaces.isEmpty()) { log.warn("Virtual interface is not configured on {}", inPort); return; } checkNotNull(dhcpPayload, "Can't find DHCP payload"); Ethernet packet = context.inPacket().parsed(); DHCP.MsgType incomingPacketType = dhcpPayload.getOptions().stream() .filter(dhcpOption -> dhcpOption.getCode() == OptionCode_MessageType.getValue()) .map(DhcpOption::getData) .map(data -> DHCP.MsgType.getType(data[0])) .findFirst() .orElse(null); checkNotNull(incomingPacketType, "Can't get message type from DHCP payload {}", dhcpPayload); switch (incomingPacketType) { case DHCPDISCOVER: Ethernet ethernetPacketDiscover = processDhcpPacketFromClient(context, packet, clientServerInterfaces); if (ethernetPacketDiscover != null) { writeRequestDhcpRecord(inPort, packet, dhcpPayload); handleDhcpDiscoverAndRequest(ethernetPacketDiscover); } break; case DHCPOFFER: Ethernet ethernetPacketOffer = processDhcpPacketFromServer(packet); if (ethernetPacketOffer != null) { writeResponseDhcpRecord(ethernetPacketOffer, dhcpPayload); handleDhcpOffer(ethernetPacketOffer, dhcpPayload); } break; case DHCPREQUEST: Ethernet ethernetPacketRequest = processDhcpPacketFromClient(context, packet, clientServerInterfaces); if (ethernetPacketRequest != null) { writeRequestDhcpRecord(inPort, packet, dhcpPayload); handleDhcpDiscoverAndRequest(ethernetPacketRequest); } break; case DHCPACK: Ethernet ethernetPacketAck = processDhcpPacketFromServer(packet); if (ethernetPacketAck != null) { writeResponseDhcpRecord(ethernetPacketAck, dhcpPayload); handleDhcpAck(ethernetPacketAck, dhcpPayload); } break; case DHCPRELEASE: break; default: break; } }
/** * @fileName : EditClassStudentPresenter.java * * @description : * * * @version : 1.0 * * @date: 03-Jul-2015 * * @Author tumbalam * * @Reviewer: */ public class EditClassStudentPresenter extends PresenterWidget<IsEditClassStudentView> implements EditClassStudentViewUiHandler{ private SimpleAsyncCallback<Map<String, String>> shareUrlGenerationAsyncCallback; private SimpleAsyncCallback<StudentsAssociatedListDo> collabAsyncCallback; private SimpleAsyncCallback<StudentsAssociatedListDo> membersActiveAsyncCallback; private SimpleAsyncCallback<ArrayList<CollaboratorsDo>> addMembersAsyncCallback; private int pageSize = 20; private int activeListPageNum=0; @Inject private ClasspageServiceAsync classpageServiceAsync; StudentsAssociatedListDo studentsAssociatedListDo; List<String> emailId; @Inject public EditClassStudentPresenter(EventBus eventBus,IsEditClassStudentView view){ super(eventBus, view); getView().setUiHandlers(this); } @Override public void onBind() { super.onBind(); setShareUrlGenerationAsyncCallback(new SimpleAsyncCallback<Map<String, String>>() { @Override public void onSuccess(Map<String, String> shortenUrl) { getView().setShortenUrl(shortenUrl); } }); setAddMembersAsyncCallback(new SimpleAsyncCallback<ArrayList<CollaboratorsDo>>() { @Override public void onSuccess(ArrayList<CollaboratorsDo> result) { getView().displayInvitationSuccessPopUp(result.size()); //If the same user email id is invited the result size will be "0" at that time we are enabling the invite button. for(int i=0;i<emailId.size();i++){ CollaboratorsDo collaboratorsDo =new CollaboratorsDo(); collaboratorsDo.setEmailId(emailId.get(i).toString().replaceAll("\"","")); collaboratorsDo.setEmail(emailId.get(i).toString().replaceAll("\"","")); result.add(collaboratorsDo); } if(result.size()==0){ getView().createAutoSuggestBox(); getView().getLblPleaseWait().setVisible(false); getView().getInviteButton().setEnabled(true); getView().getInviteButton().getElement().removeClassName("disabled"); getView().getInviteButton().setVisible(true); //getView().getPendingMembersList(); }else{ //Display pending members list. 
getView().displayPendingMembersList(result, true, result.size(),false,true); } } }); } @Override public void onReveal() { /*String classId = AppClientFactory.getPlaceManager().getRequestParameter(UrlNavigationTokens.CLASSPAGEID); if(classId != null){ generateShareLink(classId); getActiveMembersListByCollectionId(classId,pageSize*activeListPageNum, pageSize, "active",true,true,false); }*/ /*String pageType = AppClientFactory.getPlaceManager().getRequestParameter(UrlNavigationTokens.TEACHER_CLASS_SUBPAGE_VIEW,""); if(pageType.equalsIgnoreCase(UrlNavigationTokens.TEACHER_CLASS_CONTENT_SUB_REPORTS)) { getView().setReportView(); } else { getView().setRoasterView(); String classId = AppClientFactory.getPlaceManager().getRequestParameter(UrlNavigationTokens.CLASSPAGEID); if(classId != null){ generateShareLink(classId); getActiveMembersListByCollectionId(classId,pageSize*activeListPageNum, pageSize, "active",true,true,false); } }*/ } @Override public void onReset() { /*String pageType = AppClientFactory.getPlaceManager().getRequestParameter(UrlNavigationTokens.TEACHER_CLASS_SUBPAGE_VIEW,""); if(pageType.equalsIgnoreCase(UrlNavigationTokens.TEACHER_CLASS_CONTENT_SUB_REPORTS)) { getView().setReportView(); } else { getView().setRoasterView(); }*/ } @Override protected void onHide() { super.onHide(); } public void setClassDetails(ClasspageDo classpageDo) { getView().setClassView(classpageDo); } /** * This method is to get the shareUrlGenerationAsyncCallback */ public SimpleAsyncCallback<Map<String, String>> getShareUrlGenerationAsyncCallback() { return shareUrlGenerationAsyncCallback; } /** * This method is to set the shareUrlGenerationAsyncCallback */ public void setShareUrlGenerationAsyncCallback( SimpleAsyncCallback<Map<String, String>> shareUrlGenerationAsyncCallback) { this.shareUrlGenerationAsyncCallback = shareUrlGenerationAsyncCallback; } @Override public void generateShareLink(String classpageId) { try{ String courseId = AppClientFactory.getPlaceManager().getRequestParameter(UrlNavigationTokens.STUDENT_CLASSPAGE_COURSE_ID); Map<String, String> params = new HashMap<String, String>(); params.put("type", AppClientFactory.getPlaceManager().getCurrentPlaceRequest().getNameToken()); if(courseId != null){ params.put(UrlNavigationTokens.STUDENT_CLASSPAGE_COURSE_ID, courseId); } AppClientFactory.getInjector().getSearchService().getShortenShareUrl(classpageId, params, getShareUrlGenerationAsyncCallback()); }catch(Exception e){ AppClientFactory.printSevereLogger("EditClassStudentPresenter : generateShareLink :"+e.getMessage()); } } /* (non-Javadoc) * @see org.ednovo.gooru.client.mvp.classpage.teach.edit.student.EditClassStudentViewUiHandler#addStudents(java.lang.String, java.util.List) */ @Override public void addStudents(String classpageId, List<String> emailIds) { emailId=emailIds; getClasspageServiceAsync().inviteStudentToClass(classpageId, emailIds, getAddMembersAsyncCallback()); } /** * This method is to get the collabAsyncCallback */ public SimpleAsyncCallback<StudentsAssociatedListDo> getCollabAsyncCallback() { return collabAsyncCallback; } /** * This method is to set the collabAsyncCallback */ public void setCollabAsyncCallback(SimpleAsyncCallback<StudentsAssociatedListDo> collabAsyncCallback) { this.collabAsyncCallback = collabAsyncCallback; } /** * This method is to get the membersActiveAsyncCallback */ public SimpleAsyncCallback<StudentsAssociatedListDo> getMembersActiveAsyncCallback() { return membersActiveAsyncCallback; } /** * This method is to set the membersActiveAsyncCallback */ public 
void setMembersActiveAsyncCallback( SimpleAsyncCallback<StudentsAssociatedListDo> membersActiveAsyncCallback) { this.membersActiveAsyncCallback = membersActiveAsyncCallback; } /** * This method is to get the addMembersAsyncCallback */ public SimpleAsyncCallback<ArrayList<CollaboratorsDo>> getAddMembersAsyncCallback() { return addMembersAsyncCallback; } /** * This method is to set the addMembersAsyncCallback */ public void setAddMembersAsyncCallback(SimpleAsyncCallback<ArrayList<CollaboratorsDo>> addMembersAsyncCallback) { this.addMembersAsyncCallback = addMembersAsyncCallback; } /** * This method is to get the classpageServiceAsync */ public ClasspageServiceAsync getClasspageServiceAsync() { return classpageServiceAsync; } /** * This method is to set the classpageServiceAsync */ public void setClasspageServiceAsync(ClasspageServiceAsync classpageServiceAsync) { this.classpageServiceAsync = classpageServiceAsync; } /* (non-Javadoc) * @see org.ednovo.gooru.client.mvp.classpage.teach.edit.student.EditClassStudentViewUiHandler#getActiveMembersListByCollectionId(java.lang.String, int, int, java.lang.String, boolean, boolean) */ @Override public void getActiveMembersListByCollectionId(String classCode,int offSet, int pageSize, String statusType,final boolean increasePageNum, final boolean getPendingMembers,final boolean isNew) { getClasspageServiceAsync().getActiveAssociatedStudentInClassListByCode(classCode, offSet, pageSize, statusType, new SimpleAsyncCallback<StudentsAssociatedListDo>() { @Override public void onSuccess(StudentsAssociatedListDo result) { //Display all members in active list. studentsAssociatedListDo=result; getView().displayActiveMembersList(result.getSearchResult(), isNew, result.getTotalHitCount(),increasePageNum); if(getPendingMembers){ getView().getPendingMembersList(); } } }); } @Override public void getMembersListByCollectionId(String classCode, int offSet, int pageSize, String statusType,final boolean increasePageNum,final boolean isNew) { getClasspageServiceAsync().getAssociatedPendingStudentListByCode(classCode, offSet, pageSize, statusType, new SimpleAsyncCallback<StudentsAssociatedListDo>() { @Override public void onSuccess(StudentsAssociatedListDo result) { //Display all members in pending list. 
getView().displayPendingMembersList(result.getSearchResult(),isNew, result.getTotalHitCount(),increasePageNum,false); } }); } /* (non-Javadoc) * @see org.ednovo.gooru.client.mvp.classpage.teach.edit.student.EditClassStudentViewUiHandler#removePendingUserFromCalss(org.ednovo.gooru.application.shared.model.content.ClasspageDo, java.lang.String, int, boolean, org.ednovo.gooru.client.mvp.classpage.teach.edit.student.MembersViewVc) */ @Override public void removePendingUserFromCalss(final ClasspageDo classpageDo, String emailId, final int pendingOffSet, final boolean pendingFlag,final MembersViewVc membersViewVc) { AppClientFactory.getInjector().getClasspageService().removePendingStudentFromClass(classpageDo.getClassUid(), classpageDo.isVisibility(), emailId, new SimpleAsyncCallback<Void>() { @Override public void onSuccess(Void result) { getView().removePendingUserWidget(membersViewVc,pendingFlag); } }); } /* (non-Javadoc) * @see org.ednovo.gooru.client.mvp.classpage.teach.edit.student.EditClassStudentViewUiHandler#removeActiveUserFromClass(org.ednovo.gooru.application.shared.model.content.ClasspageDo, java.lang.String, int, boolean, org.ednovo.gooru.client.mvp.classpage.teach.edit.student.MembersViewVc) */ @Override public void removeActiveUserFromClass(final ClasspageDo classpageDo, String emailId, int pendingOffSet, final boolean pendingFlag,final MembersViewVc membersViewVc) { AppClientFactory.getInjector().getClasspageService().removeActiveStudentFromClass(classpageDo.getClassUid(), classpageDo.isVisibility(), emailId, new SimpleAsyncCallback<Void>() { @Override public void onSuccess(Void result) { getView().removePendingUserWidget(membersViewVc,pendingFlag); } }); } /* (non-Javadoc) * @see org.ednovo.gooru.client.mvp.classpage.teach.edit.student.EditClassStudentViewUiHandler#getMembersDetails() */ @Override public void getMembersDetails() { String classId = AppClientFactory.getPlaceManager().getRequestParameter(UrlNavigationTokens.CLASSPAGEID); if(classId != null){ generateShareLink(classId); getActiveMembersListByCollectionId(classId,pageSize*activeListPageNum, pageSize, "active",true,true,false); } } }
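/*
 * Illustrative call from the view layer (not part of the original presenter): the class
 * id and e-mail address are made-up values, and "presenter" stands for the injected
 * EditClassStudentPresenter acting as the view's ui handler.
 */
class EditClassStudentExample {
  static void inviteOneStudent(EditClassStudentPresenter presenter) {
    // Triggers inviteStudentToClass on the classpage service and, on success,
    // the pending-members list refresh shown in the callback above.
    presenter.addStudents("class-id-123", java.util.Arrays.asList("student@example.org"));
  }
}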
/** * Created by fhuya on 1/5/15. */ public class FollowSplineAbove extends FollowAlgorithm { private final MavLinkDrone drone; @Override public void processNewLocation(Location location) { LatLong gcsLoc = new LatLong(location.getCoord()); double speed = location.getSpeed(); double bearing = location.getBearing(); double bearingInRad = Math.toRadians(bearing); double xVel = speed * Math.cos(bearingInRad); double yVel = speed * Math.sin(bearingInRad); drone.getGuidedPoint().newGuidedCoordAndVelocity(gcsLoc, xVel, yVel, 0); } @Override public FollowModes getType() { return FollowModes.SPLINE_ABOVE; } public FollowSplineAbove(MavLinkDroneManager droneManager, Handler handler) { super(droneManager, handler); drone = droneManager.getDrone(); } }
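/*
 * Worked example of the speed/bearing decomposition used in processNewLocation above
 * (illustrative values only, not part of the original class): a target moving at
 * 5 m/s on a 90-degree bearing yields a velocity command that lies entirely on the
 * axis paired with sin(bearing) in the code above.
 */
class FollowSplineAboveExample {
  static double[] velocityFor(double speedMps, double bearingDeg) {
    double bearingRad = Math.toRadians(bearingDeg);
    double xVel = speedMps * Math.cos(bearingRad); // 5 m/s @ 90 deg -> ~0.0
    double yVel = speedMps * Math.sin(bearingRad); // 5 m/s @ 90 deg -> 5.0
    return new double[] {xVel, yVel};
  }
}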
/** Returns highest element in tree and moves it to the root */ @NotNull private SplayTreeNode last() { if (right == null) { splay(); return this; } return right.last(); }
/** * Event to keep making sure player names are highlighted red in clan chat, since the red name goes away frequently */ @Subscribe public void onWidgetHiddenChanged(WidgetHiddenChanged widgetHiddenChanged) { if (client.getGameState() != GameState.LOGGED_IN || client.getWidget(WidgetInfo.LOGIN_CLICK_TO_PLAY_SCREEN) != null || client.getViewportWidget() == null || client.getWidget(WidgetInfo.CLAN_CHAT) == null || !config.highlightInClan()) { return; } clientThread.invokeLater(() -> { if (!client.getWidget(WidgetInfo.CLAN_CHAT).isHidden()) { highlightRedInCC(); } }); }
<reponame>AstorgaIndustries/viernes export class Ciudades { private ciudades: Ciudad[] = [ { name: 'Santiago', lng: '-70.6666667', lat: '-33.4500000', }, { name: '<NAME>', lng: '-70.7166667', lat: '-33.4166667', }, { name: '<NAME>', lng: '-70.7000000', lat: '-33.5666667', }, { name: 'Huechuraba', lng: '-70.6666667', lat: '-33.3500000', }, { name: '<NAME>', lng: '-70.6833333', lat: '-33.5500000', }, { name: '<NAME>', lng: '-70.5833333', lat: '-33.5833333', }, { name: '<NAME>', lng: '-70.5500000', lat: '-33.4500000', }, { name: '<NAME>', lng: '-70.5166667', lat: '-33.3500000', }, { name: '<NAME>', lng: '-70.7166667', lat: '-33.4333333', }, { name: 'Maip\u00fa', lng: '-70.7666667', lat: '-33.5166667', }, { name: '<NAME>', lng: '-70.6780860', lat: '-33.4924550', }, { name: 'Providencia', lng: '-70.6166667', lat: '-33.4333333', }, { name: 'Quilicura', lng: '-70.7500000', lat: '-33.3666667', }, { name: 'Recoleta', lng: '-33.4081480', lat: '-70.6391920', }, { name: '<NAME>\u00edn', lng: '-70.6166667', lat: '-33.5000000', }, { name: '<NAME>\u00f3n', lng: '-70.5000000', lat: '-33.4500000', }, { name: '<NAME>', lng: '-70.5833333', lat: '-33.6166667', }, { name: '<NAME>', lng: '-70.8333333', lat: '-33.5666667', }, { name: '<NAME>', lng: '-71.0166667', lat: '-33.6833333', }, { name: '<NAME>', lng: '-71.4666667', lat: '-33.9000000', }, { name: 'Curacav\u00ed', lng: '-71.1500000', lat: '-33.4000000', }, { name: 'Melipilla', lng: '-71.2166667', lat: '-33.7000000', }, { name: '<NAME>', lng: '-70.8166667', lat: '-33.6500000', }, { name: '<NAME>', lng: '-70.7166667', lat: '-33.6000000', }, { name: 'Lampa', lng: '-70.9000000', lat: '-33.2833333', }, { name: '<NAME>\u00e<NAME>', lng: '-70.3666667', lat: '-33.6333333', }, { name: 'Pe\u00f1aflor', lng: '-70.9166667', lat: '-33.6166667', }, { name: '<NAME>', lng: '-70.9000000', lat: '-33.7500000', }, { name: 'Talagante', lng: '-70.9333333', lat: '-33.6666667', }, { name: 'Mar\u00<NAME>', lng: '-71.1333333', lat: '-33.5333333', }, { name: 'Paine', lng: '-70.7500000', lat: '-33.8166667', }, { name: 'Buin', lng: '-70.7500000', lat: '-33.7333333', }, { name: 'Tiltil', lng: '-70.9333333', lat: '-33.0833333', }, { name: 'Colina', lng: '-70.6833333', lat: '-33.2000000', }, { name: 'Pirque', lng: '-70.5500000', lat: '-33.6333333', }, { name: 'Vitacura', lng: '-70.6000000', lat: '-33.4000000', }, { name: '<NAME>', lng: '-70.6666667', lat: '-33.5000000', }, { name: 'Renca', lng: '-70.7333333', lat: '-33.4000000', }, { name: '<NAME>', lng: '-70.7000000', lat: '-33.4500000', }, { name: 'Pudahuel', lng: '-70.7166667', lat: '-33.4333333', }, { name: 'Pe\u00f1alol\u00e9n', lng: '-70.5333333', lat: '-33.4833333', }, { name: '\u00d1u\u00f1oa', lng: '-70.6000000', lat: '-33.4666667', }, { name: 'Macul', lng: '-70.5666667', lat: '-33.5000000', }, { name: '<NAME>', lng: '-70.7166667', lat: '-33.5333333', }, { name: '<NAME>', lng: '-70.5833333', lat: '-33.4166667', }, { name: '<NAME>', lng: '-70.6166667', lat: '-33.5833333', }, { name: 'La Florida', lng: '-70.5666667', lat: '-33.5500000', }, { name: 'Independencia', lng: '-70.6549320', lat: '-33.4219880', }, { name: 'Estaci\u00f3<NAME>', lng: '-70.7029760', lat: '-33.4633150', }, { name: 'Conchal\u00ed', lng: '-70.6166667', lat: '-33.3500000', }, { name: 'Cerrillos', lng: '-70.7000000', lat: '-33.4833333', }, { name: 'Arica', lng: '-70.3144444', lat: '-18.4750000', }, { name: 'Camarones', lng: '-69.8666667', lat: '-19.0166667', }, { name: 'Putre', lng: '-69.5977778', lat: '-18.1916667', }, { name: '<NAME>', lng: '-69.5000000', lat: 
'-17.5666667', }, { name: 'Iquique', lng: '-70.1666667', lat: '-20.2166667', }, { name: '<NAME>', lng: '-70.1166667', lat: '-20.2500000', }, { name: '<NAME>', lng: '-69.7833333', lat: '-20.2666667', }, { name: 'Cami\u00f1a', lng: '-69.4166667', lat: '-19.3000000', }, { name: 'Colchane', lng: '-68.6166667', lat: '-19.2666667', }, { name: 'Huara', lng: '-69.7666667', lat: '-19.9666667', }, { name: 'Pica', lng: '-69.3333333', lat: '-20.5000000', }, { name: 'Antofagasta', lng: '-70.4000000', lat: '-23.6333333', }, { name: 'Mejillones', lng: '-70.4500000', lat: '-23.1000000', }, { name: '<NAME>', lng: '-69.3166667', lat: '-22.8833333', }, { name: 'Taltal', lng: '-69.7666667', lat: '-25.2833333', }, { name: 'Calama', lng: '-68.9166667', lat: '-22.4666667', }, { name: 'Ollague', lng: '-68.2666667', lat: '-21.2166667', }, { name: '<NAME>', lng: '-68.2166667', lat: '-22.9166667', }, { name: 'Mar\u00<NAME>', lng: '-69.6666667', lat: '-22.3500000', }, { name: 'Tocopilla', lng: '-70.2000000', lat: '-22.0666667', }, { name: 'Copiap\u00f3', lng: '-70.3166667', lat: '-27.3666667', }, { name: 'Caldera', lng: '-70.8166667', lat: '-27.0666667', }, { name: '<NAME>', lng: '-70.2666667', lat: '-27.4666667', }, { name: 'Cha\u00f1aral', lng: '-70.6000000', lat: '-26.3333333', }, { name: '<NAME>', lng: '-70.0500000', lat: '-26.3666667', }, { name: 'Vallenar', lng: '-70.7500000', lat: '-28.5666667', }, { name: '<NAME>', lng: '-70.4622222', lat: '-28.9336111', }, { name: 'Freirina', lng: '-71.0666667', lat: '-28.5000000', }, { name: 'Huasco', lng: '-71.2166667', lat: '-28.4500000', }, { name: 'R\u00<NAME>', lng: '-70.7000000', lat: '-30.2666667', }, { name: '<NAME>', lng: '-70.9333333', lat: '-30.6833333', }, { name: 'Ovalle', lng: '-71.2000000', lat: '-30.5833333', }, { name: '<NAME>', lng: '-71.5166667', lat: '-31.9000000', }, { name: 'Illapel', lng: '-71.1500000', lat: '-31.6166667', }, { name: 'Paiguano', lng: '-70.5166667', lat: '-30.0166667', }, { name: 'Andacollo', lng: '-71.0833333', lat: '-30.2166667', }, { name: '<NAME>', lng: '-71.2500000', lat: '-29.9000000', }, { name: 'Punitaqui', lng: '-71.2666667', lat: '-30.9000000', }, { name: 'Combarbal\u00e1', lng: '-71.0500000', lat: '-31.1666667', }, { name: 'Salamanca', lng: '-70.9666667', lat: '-31.7666667', }, { name: 'Canela', lng: '-71.4500000', lat: '-31.4000000', }, { name: 'Vicu\u00f1a', lng: '-70.7000000', lat: '-30.0166667', }, { name: '<NAME>', lng: '-71.2666667', lat: '-29.5000000', }, { name: 'Coquimbo', lng: '-71.3333333', lat: '-29.9500000', }, { name: 'Valpara\u00edso', lng: '-71.6163889', lat: '-33.0458333', }, { name: 'Conc\u00f3n', lng: '-71.5166667', lat: '-32.9166667', }, { name: 'Puchuncav\u00ed', lng: '-71.4166667', lat: '-32.7333333', }, { name: '<NAME>', lng: '-70.6166667', lat: '-32.8166667', }, { name: 'Vi\u0<NAME>', lng: '-71.6196749', lat: '-33.0458456', }, { name: 'Rinconada', lng: '-70.7000000', lat: '-32.8333333', }, { name: '<NAME>', lng: '-71.2166667', lat: '-32.4500000', }, { name: 'Papudo', lng: '-71.4500000', lat: '-32.5166667', }, { name: 'Zapallar', lng: '-71.4666667', lat: '-32.5333333', }, { name: 'Calera', lng: '-71.2166667', lat: '-32.7833333', }, { name: '<NAME>', lng: '-71.6166667', lat: '-33.6000000', }, { name: 'Cartagena', lng: '-71.6000000', lat: '-33.5500000', }, { name: '<NAME>', lng: '-71.6666667', lat: '-33.4500000', }, { name: '<NAME>', lng: '-70.7333333', lat: '-32.7500000', }, { name: 'Llaillay', lng: '-70.9666667', lat: '-32.8500000', }, { name: '<NAME>', lng: '-71.2333333', lat: '-32.8166667', }, { 
name: '<NAME>', lng: '-71.3666667', lat: '-33.0500000', }, { name: 'Limache', lng: '-71.2833333', lat: '-32.9833333', }, { name: 'Putaendo', lng: '-70.7333333', lat: '-32.6333333', }, { name: 'Olmu\u00e9', lng: '-71.2000000', lat: '-33.0000000', }, { name: 'Quilpu\u00e9', lng: '-71.4500000', lat: '-33.0500000', }, { name: '<NAME>\u00eda', lng: '-70.6666667', lat: '-32.7500000', }, { name: 'Panquehue', lng: '-70.8333333', lat: '-32.8000000', }, { name: 'Catemu', lng: '-71.0333333', lat: '-32.6333333', }, { name: '<NAME>', lng: '-71.6500000', lat: '-33.6333333', }, { name: '<NAME>', lng: '-71.7000000', lat: '-33.4000000', }, { name: 'Algarrobo', lng: '-71.6927778', lat: '-33.3911111', }, { name: 'Nogales', lng: '-71.2333333', lat: '-32.7166667', }, { name: 'Hijuelas', lng: '-71.1666667', lat: '-32.8000000', }, { name: 'Quillota', lng: '-71.2666667', lat: '-32.8833333', }, { name: 'Petorca', lng: '-70.9333333', lat: '-32.2500000', }, { name: 'Cabildo', lng: '-71.1333333', lat: '-32.4166667', }, { name: '<NAME>', lng: '-70.5833333', lat: '-32.8000000', }, { name: '<NAME>', lng: '-70.6333333', lat: '-32.8500000', }, { name: '<NAME>', lng: '-109.3750000', lat: '-27.0833333', }, { name: 'Quintero', lng: '-71.5333333', lat: '-32.7833333', }, { name: '<NAME>\u00e1ndez', lng: '-78.8666667', lat: '-33.6166667', }, { name: 'Casablanca', lng: '-71.4166667', lat: '-33.3166667', }, { name: 'Rancagua', lng: '-70.7397222', lat: '-34.1652778', }, { name: 'Coinco', lng: '-70.9666667', lat: '-34.2666667', }, { name: 'Do\u00f1ihue', lng: '-70.9666667', lat: '-34.2333333', }, { name: '<NAME>', lng: '-71.3166667', lat: '-34.3000000', }, { name: 'Malloa', lng: '-70.9500000', lat: '-34.4500000', }, { name: 'Olivar', lng: '-70.8175000', lat: '-34.2100000', }, { name: '<NAME>', lng: '-71.1333333', lat: '-34.5000000', }, { name: 'Marchihue', lng: '-71.6333333', lat: '-34.4000000', }, { name: 'Paredones', lng: '-71.1666667', lat: '-34.7833333', }, { name: 'Ch\u00e9pica', lng: '-71.2833333', lat: '-34.7333333', }, { name: 'Lolol', lng: '-71.6447222', lat: '-34.7286111', }, { name: 'Palmilla', lng: '-71.3666667', lat: '-34.6000000', }, { name: '<NAME>', lng: '-71.3666667', lat: '-34.6333333', }, { name: 'Placilla', lng: '-71.1166667', lat: '-34.6333333', }, { name: '<NAME>', lng: '-71.6666667', lat: '-34.2000000', }, { name: 'Rengo', lng: '-70.8666667', lat: '-34.4166667', }, { name: 'Pichidegua', lng: '-71.3000000', lat: '-34.3500000', }, { name: 'Pumanque', lng: '-71.6666667', lat: '-34.6000000', }, { name: 'Peralillo', lng: '-71.4833333', lat: '-34.4833333', }, { name: 'Nancagua', lng: '-71.2166667', lat: '-34.6666667', }, { name: 'Chimbarongo', lng: '-71.0500000', lat: '-34.7000000', }, { name: '<NAME>', lng: '-70.9666667', lat: '-34.5833333', }, { name: 'Navidad', lng: '-71.8333333', lat: '-33.9333333', }, { name: 'Litueche', lng: '-71.7333333', lat: '-34.1166667', }, { name: 'Pichilemu', lng: '-72.0000000', lat: '-34.3833333', }, { name: 'Requ\u00ednoa', lng: '-70.8333333', lat: '-34.2833333', }, { name: '<NAME>', lng: '-70.9833333', lat: '-34.3500000', }, { name: 'Peumo', lng: '-71.1666667', lat: '-34.4000000', }, { name: 'Mostazal', lng: '-70.7000000', lat: '-33.9833333', }, { name: 'Machal\u00ed', lng: '-70.6511111', lat: '-34.1825000', }, { name: 'Graneros', lng: '-70.7266667', lat: '-34.0647222', }, { name: 'Coltauco', lng: '-71.0857230', lat: '34.2872290', }, { name: 'Codegua', lng: '-70.6666667', lat: '-34.0333333', }, { name: 'Talca', lng: '-71.6666667', lat: '-35.4333333', }, { name: 'Curepto', lng: 
'-72.0166667', lat: '-35.0833333', }, { name: 'Maule', lng: '-71.7000000', lat: '-35.5333333', }, { name: 'Pencahue', lng: '-71.8166667', lat: '-35.4000000', }, { name: '<NAME>', lng: '-71.4833333', lat: '-35.5500000', }, { name: 'Cauquenes', lng: '-72.3500000', lat: '-35.9666667', }, { name: 'Pelluhue', lng: '-72.6333333', lat: '-35.8333333', }, { name: 'Huala\u00f1\u00e9', lng: '-71.8047222', lat: '-34.9766667', }, { name: 'Molina', lng: '-71.2833333', lat: '-34.1166667', }, { name: 'Romeral', lng: '-71.1333333', lat: '-34.9666667', }, { name: 'Teno', lng: '-71.1833333', lat: '-34.8666667', }, { name: 'Linares', lng: '-71.6000000', lat: '-35.8500000', }, { name: 'Longav\u00ed', lng: '-71.6833333', lat: '-35.9666667', }, { name: 'Retiro', lng: '-71.7666667', lat: '-36.0500000', }, { name: '<NAME>', lng: '-71.7500000', lat: '-35.6666667', }, { name: 'Constituci\u00f3n', lng: '-72.4166667', lat: '-35.3333333', }, { name: 'Empedrado', lng: '-72.2833333', lat: '-35.6000000', }, { name: 'Pelarco', lng: '-71.4500000', lat: '-35.3833333', }, { name: 'R\u00<NAME>', lng: '-71.2666667', lat: '-35.2833333', }, { name: '<NAME>', lng: '-71.5333333', lat: '-35.3166667', }, { name: 'Curic\u00f3', lng: '-71.2333333', lat: '-34.9833333', }, { name: 'Chanco', lng: '-72.5333333', lat: '-35.7333333', }, { name: 'Licant\u00e9n', lng: '-72.0000000', lat: '-34.9833333', }, { name: 'Rauco', lng: '-71.3166667', lat: '-34.9333333', }, { name: '<NAME>', lng: '-71.3833333', lat: '-35.0000000', }, { name: 'Vichuqu\u00e9n', lng: '-72.0000000', lat: '-34.8833333', }, { name: 'Colb\u00fan', lng: '-71.4166667', lat: '-35.7000000', }, { name: 'Parral', lng: '-71.8333333', lat: '-36.1500000', }, { name: '<NAME>', lng: '-71.7500000', lat: '-35.6000000', }, { name: '<NAME>', lng: '-71.5833333', lat: '-35.7500000', }, { name: 'Concepci\u00f3n', lng: '-73.0500000', lat: '-36.8333333', }, { name: 'Chiguayante', lng: '-73.0166667', lat: '-36.9166667', }, { name: 'Hualqui', lng: '-72.9333333', lat: '-36.9666667', }, { name: 'Penco', lng: '-72.9833333', lat: '-36.7333333', }, { name: '<NAME>', lng: '-72.9333333', lat: '-37.1666667', }, { name: 'Tom\u00e9', lng: '-72.9500000', lat: '-36.6166667', }, { name: 'Lebu', lng: '-73.6500000', lat: '-37.6166667', }, { name: 'Ca\u00f1ete', lng: '-73.3833333', lat: '-37.8000000', }, { name: 'Curanilahue', lng: '-73.3500000', lat: '-37.4666667', }, { name: 'Tir\u00faa', lng: '-73.5000000', lat: '-38.3333333', }, { name: 'Antuco', lng: '-71.6833333', lat: '-37.3333333', }, { name: 'Laja', lng: '-72.7000000', lat: '-37.2666667', }, { name: 'Nacimiento', lng: '-72.6666667', lat: '-37.5000000', }, { name: 'Quilaco', lng: '-71.9833333', lat: '-37.6666667', }, { name: '<NAME>', lng: '-72.7166667', lat: '-37.2666667', }, { name: 'Tucapel', lng: '-71.9500000', lat: '-37.2833333', }, { name: '<NAME>\u00edo', lng: '-71.3166667', lat: '-38.0500000', }, { name: 'Bulnes', lng: '-72.3014290', lat: '-36.7419870', }, { name: 'Coelemu', lng: '-72.7000000', lat: '-36.4833333', }, { name: 'Chill\u00e1<NAME>', lng: '-72.1333333', lat: '-36.6166667', }, { name: 'Ninhue', lng: '-72.4000000', lat: '-36.4000000', }, { name: 'Pemuco', lng: '-72.1000000', lat: '-36.9666667', }, { name: 'Portezuelo', lng: '-72.4333333', lat: '-36.5333333', }, { name: 'Quirihue', lng: '-72.5333333', lat: '-36.2833333', }, { name: 'Treguaco', lng: '-72.6666667', lat: '-36.4333333', }, { name: '<NAME>', lng: '-72.0333333', lat: '-36.8000000', }, { name: '<NAME>', lng: '-71.9580556', lat: '-36.4247222', }, { name: 'Yungay', lng: 
'-72.0166667', lat: '-37.1166667', }, { name: '<NAME>\u00e1s', lng: '-72.2166667', lat: '-36.5000000', }, { name: '<NAME>\u00e1n', lng: '-71.5500000', lat: '-36.5500000', }, { name: 'R\u00e1nquil', lng: '-72.5500000', lat: '-36.6500000', }, { name: 'Quill\u00f3n', lng: '-72.4666667', lat: '-36.7333333', }, { name: 'Pinto', lng: '-71.9000000', lat: '-36.7000000', }, { name: '\u00d1iqu\u00e9n', lng: '-71.9000000', lat: '-36.3000000', }, { name: '<NAME>', lng: '-72.0323130', lat: '-36.8994440', }, { name: 'Coihueco', lng: '-71.8333333', lat: '-36.6166667', }, { name: 'Cobquecura', lng: '-72.7833333', lat: '-36.1333333', }, { name: 'Chill\u00e1n', lng: '-72.1166667', lat: '-36.6000000', }, { name: 'Yumbel', lng: '-72.5333333', lat: '-37.1333333', }, { name: '<NAME>\u00e1rbara', lng: '-72.0166667', lat: '-37.6666667', }, { name: 'Quilleco', lng: '-71.9666667', lat: '-37.4666667', }, { name: 'Negrete', lng: '-72.5166667', lat: '-37.5833333', }, { name: 'Mulch\u00e9n', lng: '-72.2333333', lat: '-37.7166667', }, { name: 'Cabrero', lng: '-72.4000000', lat: '-37.0333333', }, { name: 'Los Angeles', lng: '-72.3500000', lat: '-37.4666667', }, { name: 'Los Alamos', lng: '-73.4666667', lat: '-37.6166667', }, { name: 'Contulmo', lng: '-73.2333333', lat: '-38.0000000', }, { name: 'Arauco', lng: '-73.3166667', lat: '-37.2500000', }, { name: 'Hualp\u00e9n', lng: '-73.0833333', lat: '-36.7833333', }, { name: 'Talcahuano', lng: '-73.1166667', lat: '-36.7166667', }, { name: '<NAME>', lng: '-73.1166667', lat: '-36.8333333', }, { name: 'Lota', lng: '-73.1560560', lat: '-37.0870730', }, { name: 'Florida', lng: '-72.6666667', lat: '-36.8166667', }, { name: 'Coronel', lng: '-73.1333333', lat: '-37.0166667', }, { name: 'Temuco', lng: '-72.6666667', lat: '-38.7500000', }, { name: 'Cunco', lng: '-72.0333333', lat: '-38.9166667', }, { name: 'Freire', lng: '-72.6333333', lat: '-38.9500000', }, { name: 'Gorbea', lng: '-72.6833333', lat: '-39.1000000', }, { name: 'Loncoche', lng: '-72.6333333', lat: '-39.3666667', }, { name: '<NAME>', lng: '-72.9500000', lat: '-38.7333333', }, { name: 'Perquenco', lng: '-72.3833333', lat: '-38.4166667', }, { name: 'Puc\u00f3n', lng: '-71.9666667', lat: '-39.2666667', }, { name: '<NAME>', lng: '-73.0500000', lat: '-38.9666667', }, { name: 'Vilc\u00fan', lng: '-72.3794444', lat: '-39.1183333', }, { name: 'Cholchol', lng: '-72.8500000', lat: '-38.6000000', }, { name: 'Collipulli', lng: '-72.4333333', lat: '-37.9500000', }, { name: 'Ercilla', lng: '-72.3833333', lat: '-38.0500000', }, { name: '<NAME>', lng: '-72.8333333', lat: '-37.9666667', }, { name: 'Pur\u00e9n', lng: '-73.0833333', lat: '-38.0166667', }, { name: 'Traigu\u00e9n', lng: '-72.6833333', lat: '-38.2500000', }, { name: 'Carahue', lng: '-73.1666667', lat: '-38.7000000', }, { name: 'Curarrehue', lng: '-71.5833333', lat: '-39.3500000', }, { name: 'Galvarino', lng: '-72.7833333', lat: '-38.4000000', }, { name: 'Lautaro', lng: '-72.4350000', lat: '-38.5291667', }, { name: '<NAME>', lng: '-72.6000000', lat: '-38.7666667', }, { name: 'Pitrufqu\u00e9n', lng: '-72.6500000', lat: '-38.9833333', }, { name: 'Tolt\u00e9n', lng: '-73.2333333', lat: '-39.2166667', }, { name: 'Villarrica', lng: '-72.2166667', lat: '-39.2666667', }, { name: 'Angol', lng: '-72.7166667', lat: '-37.8000000', }, { name: 'Curacaut\u00edn', lng: '-71.8833333', lat: '-38.4333333', }, { name: 'Lonquimay', lng: '-71.2333333', lat: '-38.4333333', }, { name: 'Lumaco', lng: '-72.9166667', lat: '-38.1500000', }, { name: 'Renaico', lng: '-72.5833333', lat: '-37.6666667', }, { 
name: 'Victoria', lng: '-72.3333333', lat: '-38.2166667', }, { name: 'Saavedra', lng: '-73.4000000', lat: '-38.7833333', }, { name: 'Melipeuco', lng: '-71.7000000', lat: '-38.8500000', }, { name: 'Valdivia', lng: '-73.2333333', lat: '-39.8000000', }, { name: 'Corral', lng: '-73.4333333', lat: '-39.8666667', }, { name: 'Lanco', lng: '-72.7666667', lat: '-39.4333333', }, { name: '<NAME>', lng: '-72.8333333', lat: '-39.8500000', }, { name: 'M\u00e1fil', lng: '-72.9500000', lat: '-39.6500000', }, { name: 'Mariquina', lng: '-72.9666667', lat: '-39.5166667', }, { name: 'Paillaco', lng: '-72.8833333', lat: '-40.0666667', }, { name: 'Panguipulli', lng: '-72.3333333', lat: '-39.6333333', }, { name: '<NAME>\u00f3n', lng: '-73.0833333', lat: '-40.2833333', }, { name: 'Futrono', lng: '-72.4000000', lat: '-40.1333333', }, { name: '<NAME>', lng: '-72.5000000', lat: '-40.3166667', }, { name: '<NAME>', lng: '-72.9666667', lat: '-40.3166667', }, { name: '<NAME>', lng: '-72.9333333', lat: '-41.4666667', }, { name: 'Cocham\u00f3', lng: '-72.3166667', lat: '-41.5000000', }, { name: 'Frutillar', lng: '-73.1000000', lat: '-41.1166667', }, { name: '<NAME>', lng: '-72.9833333', lat: '-41.3166667', }, { name: 'Ancud', lng: '-73.8333333', lat: '-41.8666667', }, { name: '<NAME>\u00e9lez', lng: '-73.5833333', lat: '-42.4333333', }, { name: 'Puqueld\u00f3n', lng: '-73.6333333', lat: '-42.5833333', }, { name: 'Quell\u00f3n', lng: '-73.6000000', lat: '-43.1000000', }, { name: 'Quinchao', lng: '-73.4166667', lat: '-42.5333333', }, { name: '<NAME>', lng: '-72.9000000', lat: '-40.9666667', }, { name: 'Puyehue', lng: '-72.6166667', lat: '-40.6666667', }, { name: 'Hualaihu\u00e9', lng: '-72.6833333', lat: '-42.0166667', }, { name: 'Chait\u00e9n', lng: '-72.7088889', lat: '-42.9194444', }, { name: '<NAME>', lng: '-73.4000000', lat: '-40.5166667', }, { name: 'Llanquihue', lng: '-73.0166667', lat: '-41.2500000', }, { name: 'Calbuco', lng: '-73.1333333', lat: '-41.7666667', }, { name: 'Fresia', lng: '-73.4500000', lat: '-41.1500000', }, { name: '<NAME>', lng: '-73.4833333', lat: '-41.4000000', }, { name: 'Maull\u00edn', lng: '-73.6000000', lat: '-41.6166667', }, { name: 'Castro', lng: '-73.8000000', lat: '-42.4666667', }, { name: 'Queil\u00e9n', lng: '-73.4666667', lat: '-42.8666667', }, { name: 'Quemchi', lng: '-73.5166667', lat: '-42.1333333', }, { name: 'Osorno', lng: '-73.1500000', lat: '-40.5666667', }, { name: 'Purranque', lng: '-73.1666667', lat: '-40.9166667', }, { name: 'R\u00<NAME>', lng: '-73.2333333', lat: '-40.7833333', }, { name: '<NAME>', lng: '-73.0166667', lat: '-40.4000000', }, { name: 'Futaleuf\u00fa', lng: '-71.8500000', lat: '-43.1666667', }, { name: 'Palena', lng: '-71.8000000', lat: '-43.6166667', }, { name: 'Dalcahue', lng: '-73.7000000', lat: '-42.3666667', }, { name: 'Chonchi', lng: '-73.8166667', lat: '-42.6166667', }, { name: 'Coyhaique', lng: '-72.0666667', lat: '-45.5666667', }, { name: 'Ais\u00e9n', lng: '-72.7000000', lat: '-45.4000000', }, { name: 'Guaitecas', lng: '-73.7333333', lat: '-43.8833333', }, { name: "O'Higgins", lng: '-72.5666667', lat: '-48.4666667', }, { name: '<NAME>', lng: '-71.7333333', lat: '-46.5500000', }, { name: 'Verde', lng: '-71.8333333', lat: '-44.2333333', }, { name: 'Cisnes', lng: '-72.7000000', lat: '-44.7500000', }, { name: 'Cochrane', lng: '-72.5500000', lat: '-47.2666667', }, { name: 'Tortel', lng: '-73.5666667', lat: '-47.8333333', }, { name: 'R\u00<NAME>\u00e1\u00f1ez', lng: '-71.9333333', lat: '-46.3000000', }, { name: '<NAME>', lng: '-70.9336111', lat: 
'-53.1669444', }, { name: 'R\u00<NAME>', lng: '-71.4833333', lat: '-52.6500000', }, { name: '<NAME> (Ex-Navarino)', lng: '-67.6166667', lat: '-54.9333333', }, { name: 'Porvenir', lng: '-70.3666667', lat: '-53.3000000', }, { name: 'Timaukel', lng: '-69.9000000', lat: '-53.6666667', }, { name: '<NAME>', lng: '-72.3500000', lat: '-51.2666667', }, { name: 'Natales', lng: '-72.5166667', lat: '-51.7333333', }, { name: 'Primavera', lng: '-69.2500000', lat: '-52.7166667', }, { name: 'Ant\u00e1rtica', lng: '-71.5000000', lat: '-75.0000000', }, { name: '<NAME>', lng: '-69.6833333', lat: '-52.3166667', }, { name: '<NAME>', lng: '-71.9166667', lat: '-52.2500000', }, ]; constructor() {} getCiudades(): Ciudad[] { return this.ciudades; } } export interface Ciudad { name: string; lng: string; lat: string; }
<reponame>josedr120/Java-Practice package Assigments1; import java.util.Scanner; interface manipulate { void get_total(); void DisplayMarklist(); } public class Practice6 { public static void main(String[] args) { new MarkSheet().DisplayMarklist(); } } class Student { private int Student_id; private String Student_name; public int getStudent_id() { return Student_id; } public void setStudent_id(int student_id) { Student_id = student_id; } public String getStudent_name() { return Student_name; } public void setStudent_name(String student_name) { Student_name = student_name; } } class MarkSheet implements manipulate { Scanner in = new Scanner(System.in); Student obj = new Student(); int id = obj.getStudent_id(); String name = obj.getStudent_name(); int mark1, mark2, mark3, mark4; @Override public void DisplayMarklist() { System.out.print("Enter ID: "); id = in.nextInt(); System.out.print("Enter Name: "); name = in.next(); System.out.print("Enter Mark 1: "); mark1 = in.nextInt(); System.out.print("Enter Mark 2: "); mark2 = in.nextInt(); System.out.print("Enter Mark 3: "); mark3 = in.nextInt(); System.out.print("Enter Mark 4: "); mark4 = in.nextInt(); System.out.println("Student ID: " + id); System.out.println("Student Name: " + name); System.out.println("Subject 1: " + mark1); System.out.println("Subject 2: " + mark2); System.out.println("Subject 3: " + mark3); System.out.println("Subject 4: " + mark4); get_total(); } @Override public void get_total() { int total = mark1 + mark2 + mark3 + mark4; System.out.println("Total: " + total); } }
package org.jboss.resteasy.plugins.providers.jaxb.json; import org.jboss.resteasy.plugins.providers.jaxb.JAXBUnmarshalException; import org.jboss.resteasy.plugins.providers.jaxb.json.i18n.Messages; import java.io.IOException; import java.io.Reader; /** * @author <a href="mailto:<EMAIL>"><NAME></a> * @version $Revision: 1 $ */ public class JsonParsing { public static String extractJsonMapString(Reader reader) throws IOException { int openBrace = 1; boolean quote = false; boolean backslash = false; int i = reader.read(); char c = (char) i; StringBuffer buffer = new StringBuffer(); if (c != '{') throw new JAXBUnmarshalException(Messages.MESSAGES.expectingLeftBraceJsonMap()); buffer.append(c); do { i = reader.read(); if (i == -1) throw new JAXBUnmarshalException(Messages.MESSAGES.unexpectedEndOfStream()); c = (char) i; buffer.append(c); if (backslash) { backslash = false; } else { switch (c) { case '"': { quote = !quote; break; } case '{': { if (!quote) openBrace++; break; } case '}': { if (!quote) openBrace--; break; } case '\\': { backslash = true; break; } } } } while (openBrace > 0); return buffer.toString(); } public static String getJsonString(Reader reader) throws IOException { boolean quote = true; boolean backslash = false; int i = reader.read(); char c = (char) i; StringBuffer buffer = new StringBuffer(); if (c != '"') throw new JAXBUnmarshalException(Messages.MESSAGES.expectingQuote()); do { i = reader.read(); if (i == -1) throw new JAXBUnmarshalException(Messages.MESSAGES.unexpectedEndOfStream()); c = (char) i; if (backslash) { buffer.append(c); backslash = false; } else { switch (c) { case '"': { quote = false; break; } case '\\': { backslash = true; break; } default: buffer.append(c); break; } } } while (quote); return buffer.toString(); } protected static char eatWhitspace(Reader buffer, boolean reset) throws IOException { int i; char c; do { buffer.mark(2); i = buffer.read(); if (i == -1) throw new JAXBUnmarshalException(Messages.MESSAGES.unexpectedEndOfJsonInput()); c = (char) i; } while (Character.isWhitespace(c)); if (reset) buffer.reset(); return c; } }
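/*
 * Usage sketch (not part of the original provider): extracting the first balanced JSON
 * object from a Reader with the helper above. The input literal is only an illustration.
 */
class JsonParsingExample {
  public static void main(String[] args) throws java.io.IOException {
    java.io.Reader in = new java.io.StringReader("{\"name\":{\"inner\":\"}\"}} trailing data");
    // Braces inside quoted strings are ignored, so the full outer object is returned:
    // {"name":{"inner":"}"}}
    System.out.println(JsonParsing.extractJsonMapString(in));
  }
}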
// ListCollectionNames implements the IDatabase.ListCollectionNames method.
func (d *Database) ListCollectionNames(ctx context.Context, filter interface{}, opts ...*options.ListCollectionsOptions) ([]string, error) {
	res, err := d.ListCollections(ctx, filter, opts...)
	if err != nil {
		return nil, err
	}

	csr := res.(*Cursor)

	names := make([]string, 0)
	for _, doc := range csr.list {
		names = append(names, bsonkit.Get(doc, "name").(string))
	}

	return names, nil
}
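// --- Hypothetical usage sketch (not part of the original file). ---
// Assumptions: this helper would live in the same package as Database, the empty
// bson.M{} filter follows the official driver convention (import
// "go.mongodb.org/mongo-driver/bson"), and "context" and "fmt" are imported.
func printCollectionNames(ctx context.Context, db *Database) error {
	// An empty filter document matches every collection in the database.
	names, err := db.ListCollectionNames(ctx, bson.M{})
	if err != nil {
		return err
	}
	for _, name := range names {
		fmt.Println(name)
	}
	return nil
}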
package descriptions

// UptimeDesc :
const UptimeDesc = `SSHes into a node and runs "uptime"

EXAMPLES
========

$ waggle uptime -n 004
{
  "edge processor": [
    " 17:05:43 up 8 days, 5 min, 1 user, load average: 0.81, 0.51, 0.40"
  ],
  "node controller": [
    " 17:05:41 up 54 days, 1:57, 1 user, load average: 0.32, 0.34, 0.32"
  ],
  "wagman": [
    "10356146"
  ]
}

$ waggle uptime -n 004 -s nc
{
  "node controller": [
    " 17:06:19 up 54 days, 1:57, 1 user, load average: 0.18, 0.30, 0.31"
  ]
}
`
def _do_getversion(self, client, helper=False):
    cachedVersion = None
    if not self.opts.noGetVersionCache:
        cachedVersion = self._get_cached_getversion(client)
    if self.opts.noGetVersionCache or cachedVersion is None or \
            (self.opts.GetVersionCacheOldestDate and cachedVersion['timestamp'] < self.opts.GetVersionCacheOldestDate):
        self.logger.debug("Actually calling GetVersion")
        if self.opts.noGetVersionCache:
            self.logger.debug(" ... opts.noGetVersionCache set")
        elif cachedVersion is None:
            self.logger.debug(" ... cachedVersion was None")
        failMsg = "GetVersion at %s" % (str(client.str))
        if helper:
            failMsg = "Check AM properties at %s" % (str(client.str))
        if self.opts.api_version >= 2:
            options = self._build_options("GetVersion", None, None)
            if len(options.keys()) == 0:
                (thisVersion, message) = _do_ssl(self.framework, None, failMsg, client.GetVersion)
            else:
                (thisVersion, message) = _do_ssl(self.framework, None, failMsg, client.GetVersion, options)
        else:
            (thisVersion, message) = _do_ssl(self.framework, None, failMsg, client.GetVersion)
        message = _append_geni_error_output(thisVersion, message)
        self._cache_getversion(client, thisVersion, message)
    else:
        self.logger.debug("Pulling GetVersion from cache")
        thisVersion = cachedVersion['version']
        message = "From cached result from %s" % cachedVersion['timestamp']
    return (thisVersion, message)
package shared

import (
	"reflect"

	kapi "k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/cache"
	"k8s.io/kubernetes/pkg/runtime"
	"k8s.io/kubernetes/pkg/watch"

	ocache "github.com/openshift/origin/pkg/client/cache"
	quotaapi "github.com/openshift/origin/pkg/quota/api"
)

type ClusterResourceQuotaInformer interface {
	Informer() cache.SharedIndexInformer
	// still use an indexer, no telling what someone will want to index on someday
	Indexer() cache.Indexer
	Lister() *ocache.IndexerToClusterResourceQuotaLister
}

// clusterResourceQuotaInformer is a core informer because quota needs to be working before the "ensure"
// steps in the API server can complete
type clusterResourceQuotaInformer struct {
	*sharedInformerFactory
}

func (f *clusterResourceQuotaInformer) Informer() cache.SharedIndexInformer {
	f.lock.Lock()
	defer f.lock.Unlock()

	informerObj := &quotaapi.ClusterResourceQuota{}
	informerType := reflect.TypeOf(informerObj)
	informer, exists := f.coreInformers[informerType]
	if exists {
		return informer
	}

	lw := f.customListerWatchers.GetListerWatcher(kapi.Resource("clusterresourcequotas"))
	if lw == nil {
		lw = &cache.ListWatch{
			ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
				return f.originClient.ClusterResourceQuotas().List(options)
			},
			WatchFunc: func(options kapi.ListOptions) (watch.Interface, error) {
				return f.originClient.ClusterResourceQuotas().Watch(options)
			},
		}
	}

	informer = cache.NewSharedIndexInformer(
		lw,
		informerObj,
		f.defaultResync,
		cache.Indexers{},
	)
	f.coreInformers[informerType] = informer

	return informer
}

func (f *clusterResourceQuotaInformer) Indexer() cache.Indexer {
	informer := f.Informer()
	return informer.GetIndexer()
}

func (f *clusterResourceQuotaInformer) Lister() *ocache.IndexerToClusterResourceQuotaLister {
	return &ocache.IndexerToClusterResourceQuotaLister{Indexer: f.Indexer()}
}
import functools from typing import Union, Optional, Tuple import dace from dace import SDFGState, nodes as nd, SDFG, dtypes, data as dt from daceml.onnx import environments from daceml.onnx.converters import clean_onnx_name from daceml.onnx.forward_implementation_abc import ONNXForward from daceml.onnx.nodes import onnx_op from daceml.onnx.op_implementations import op_implementation, empty_sdfg_for_node from daceml.util import in_desc_with_name, out_desc_with_name, remove_output_connector def _prod(sequence): return functools.reduce(lambda a, b: a * b, sequence, 1) def _iterables_equal(a, b) -> bool: if len(a) != len(b): return False return all(x == y for x, y in zip(a, b)) def _get_tensor_layout(desc: dt.Array) -> Optional[str]: """ Detect the layout of a 4d tensor. :param desc: the tensor. :return: "NCHW", "NHWC" or None. """ if len(desc.shape) == 1: # just return anything return "NCHW" if len(desc.shape) != 4: raise ValueError("Tensor with dimension != 4 is not supported") # in ONNX, tensor the dimensions are ordered N C H W # strides that the contiguous tensor would have cont_strides = [_prod(desc.shape[i + 1:]) for i in range(4)] nhwc_shape = [desc.shape[0], desc.shape[3], desc.shape[1], desc.shape[2]] # strides that a nhwc tensor would have if it was contiguous nhwc_contiguous_strides = [_prod(nhwc_shape[i + 1:]) for i in range(4)] # strides that the nhwc tensor would have if viewed as a nchw tensor nhwc_reshaped_strides = [ nhwc_contiguous_strides[0], nhwc_contiguous_strides[3], nhwc_contiguous_strides[1], nhwc_contiguous_strides[2] ] if _iterables_equal(desc.strides, cont_strides): return "NCHW" elif _iterables_equal(desc.strides, nhwc_reshaped_strides): return "NHWC" else: return None def _cudnn_tensor_descriptor_code(desc: dt.Array, state_field_name: str, filter: bool) -> Tuple[str, str]: """ Emit the cudnn code for the tensor descriptor for a given dace descriptor. :param desc: the descriptor of the dace tensor. :param state_field_name: the name of the pointer variable where the descriptor should be stored. :param filter: True if the tensor is a filter. :return: the init and exit code """ # detect layout layout = _get_tensor_layout(desc) if len(desc.shape) == 4: shape = desc.shape elif len(desc.shape) < 4: shape = list(desc.shape) + [1] * (4 - len(desc.shape)) else: raise ValueError("Tensor with dimension > 4 is not supported") assert layout is not None, "layout changed after can_be_applied" f_or_t_str = 'Filter' if filter else 'Tensor' layout_str = f"CUDNN_TENSOR_{layout}" dtype_str = _DACE_DTYPE_TO_CUDNN_DTYPE[desc.dtype] init_code = f""" __state->{state_field_name} = new cudnn{f_or_t_str}Descriptor_t; daceml::cudnn::CheckCudnnError(cudnnCreate{f_or_t_str}Descriptor(__state->{state_field_name})); daceml::cudnn::CheckCudnnError(cudnnSet{f_or_t_str}4dDescriptor( *__state->{state_field_name}, {dtype_str if filter else layout_str}, {layout_str if filter else dtype_str}, {",".join(str(s) for s in shape)} )); """ exit_code = f"""\ daceml::cudnn::CheckCudnnError(cudnnDestroy{f_or_t_str}Descriptor(*__state->{state_field_name})); delete __state->{state_field_name}; """ return init_code, exit_code _DACE_DTYPE_TO_CUDNN_DTYPE = { dace.float32: "CUDNN_DATA_FLOAT", dace.float64: "CUDNN_DATA_DOUBLE", dace.uint8: "CUDNN_DATA_UINT8", dace.int8: "CUDNN_DATA_INT8", dace.int32: "CUDNN_DATA_INT32", } @op_implementation(op="Conv", name="cuDNN") class CudnnConvolution(ONNXForward): """ Convolution implementation that uses cuDNN. 
This node will check for the existence of a _algorithm attribute on the ONNXConv node it is expanding. If this attribute does not exist, it will use `CudnnConvolution.default_algorithm`. """ default_algorithm = "gemm" # choices for algorithms algorithms = [ "implicit_gemm", "implicit_precomp_gemm", "gemm", "direct", "fft", "fft_tiling", "winograd", "winograd_nonfused", ] environments = [] @staticmethod def forward_can_be_applied(node: onnx_op.ONNXOp, state: SDFGState, sdfg: SDFG) -> bool: descs = [("X", in_desc_with_name(node, state, sdfg, "X")), ("W", in_desc_with_name(node, state, sdfg, "W")), ("Y", out_desc_with_name(node, state, sdfg, "Y"))] if "B" in node.in_connectors: descs.append(("B", in_desc_with_name(node, state, sdfg, "B"))) for name, desc in descs: # check that the dtype is supported by cudnn if desc.dtype not in [ dace.float32, dace.float64, dace.uint8, dace.int8, dace.int32 ]: return False # only 2d convs for now; ONNX supports N dimensional if name != "B" and len(desc.shape) != 4: return False if not isinstance(desc, dt.Array): return False # check that the layout is supported by cudnn if name != "B" and _get_tensor_layout(desc) is None: return False # padding must be symmetric if node.pads[0] != node.pads[2]: return False if node.pads[1] != node.pads[3]: return False return True @staticmethod def forward(node: onnx_op.ONNXOp, state: SDFGState, sdfg: SDFG) -> Union[nd.Node, SDFG]: nsdfg, nstate, inputs, outputs = empty_sdfg_for_node(sdfg, state, node) if "B" in inputs: nstate.remove_node(inputs["B"]) Y = out_desc_with_name(node, state, sdfg, "Y") # add broadcast state init_state = nsdfg.add_state_before(nstate, label="broadcast_bias") # yapf: disable init_state.add_mapped_tasklet("broadcast_bias", map_ranges={ "i{}".format(i): "0:{}".format(s) for i, s in enumerate(Y.shape) }, inputs=dict( b=dace.Memlet("B[i1]") ), code="y = b".format(), outputs=dict( y=dace.Memlet("Y[{}]".format( ", ".join("i{}".format(i) for i, _ in enumerate(Y.shape)))) ), external_edges=True) # yapf: enable X_desc = in_desc_with_name(node, state, sdfg, "X") T = X_desc.dtype unique_id = "{}_{}_{}_{}".format(clean_onnx_name(node.name), sdfg.sdfg_id, sdfg.node_id(state), state.node_id(node)) class Environment: cmake_minimum_version = None cmake_packages = [] cmake_variables = {} cmake_includes = [] cmake_libraries = [] cmake_compile_flags = [] cmake_link_flags = [] cmake_files = [] state_fields = [ f"cudnnConvolutionDescriptor_t *{unique_id}_conv_desc;", f"cudnnTensorDescriptor_t *{unique_id}_Y_desc;", f"cudnnTensorDescriptor_t *{unique_id}_X_desc;", f"cudnnFilterDescriptor_t *{unique_id}_W_desc;", f"float *{unique_id}_workspace;", f"size_t *{unique_id}_workspace_size;" ] dependencies = [environments.cuDNN] headers = [] init_code = "" finalize_code = "" Environment.__name__ = unique_id + "_environment" dace.library.environment(Environment) CudnnConvolution.environments = [Environment] # setup tensor descriptors for edge, is_input in node.iter_edges(state): conn = edge.dst_conn if is_input else edge.src_conn desc = in_desc_with_name(node, state, sdfg, conn) if is_input else out_desc_with_name( node, state, sdfg, conn) assert isinstance(desc, dt.Array) if conn == "B": # bias will be handled separately continue is_filter = conn == "W" init, exit = _cudnn_tensor_descriptor_code( desc, f"{unique_id}_{conn}_desc", is_filter) Environment.init_code += init Environment.finalize_code += exit if hasattr(node, "_algorithm"): algo = node._algorithm else: algo = CudnnConvolution.default_algorithm # setup conv descriptor 
# we know padding is symmetric pad_h, pad_w = node.pads[0], node.pads[1] stride_h, stride_w = node.strides dilation_h, dilation_w = node.dilations Environment.init_code += f""" __state->{unique_id}_conv_desc = new cudnnConvolutionDescriptor_t; daceml::cudnn::CheckCudnnError(cudnnCreateConvolutionDescriptor(__state->{unique_id}_conv_desc)); daceml::cudnn::CheckCudnnError(cudnnSetConvolution2dDescriptor( *__state->{unique_id}_conv_desc, {pad_h}, {pad_w}, {stride_h}, {stride_w}, {dilation_h}, {dilation_w}, CUDNN_CROSS_CORRELATION, {_DACE_DTYPE_TO_CUDNN_DTYPE[T]})); """ Environment.finalize_code += f""" daceml::cudnn::CheckCudnnError(cudnnDestroyConvolutionDescriptor(*__state->{unique_id}_conv_desc)); delete __state->{unique_id}_conv_desc; """ # setup workspace Environment.init_code += \ f""" {environments.cuDNN.handle_setup_code(node, init_stream=False)} // Setup workspace for {unique_id} size_t ws_size; daceml::cudnn::CheckCudnnError(cudnnGetConvolutionForwardWorkspaceSize( __dace_cudnn_handle, *__state->{unique_id}_X_desc, *__state->{unique_id}_W_desc, *__state->{unique_id}_conv_desc, *__state->{unique_id}_Y_desc, CUDNN_CONVOLUTION_FWD_ALGO_{algo.upper()}, &ws_size)); __state->{unique_id}_workspace_size = new size_t; *__state->{unique_id}_workspace_size = ws_size; cudaMalloc(&__state->{unique_id}_workspace, ws_size); """ Environment.finalize_code += f""" cudaFree(__state->{unique_id}_workspace); delete __state->{unique_id}_workspace_size; """ tasklet_code = f""" {environments.cuDNN.handle_setup_code(node)} float alpha = 1.f; float beta = {"1.f" if "B" in inputs else "0.f"}; daceml::cudnn::CheckCudnnError(cudnnConvolutionForward( __dace_cudnn_handle, &alpha, *__state->{unique_id}_X_desc, _X, *__state->{unique_id}_W_desc, _W, *__state->{unique_id}_conv_desc, CUDNN_CONVOLUTION_FWD_ALGO_{algo.upper()}, __state->{unique_id}_workspace, *__state->{unique_id}_workspace_size, &beta, *__state->{unique_id}_Y_desc, _Y )); """ tasklet = nstate.add_tasklet(unique_id, { "_X": dace.pointer(T), "_W": dace.pointer(T) }, {"_Y": dace.pointer(T)}, tasklet_code, dtypes.Language.CPP) nstate.add_edge(inputs["X"], None, tasklet, "_X", nsdfg.make_array_memlet("X")) nstate.add_edge(inputs["W"], None, tasklet, "_W", nsdfg.make_array_memlet("W")) nstate.add_edge(tasklet, "_Y", outputs["Y"], None, nsdfg.make_array_memlet("Y")) return nsdfg @op_implementation(op="BatchNormalization", name="cuDNN") class CudnnBatchNormalizationTraining(ONNXForward): environments = [] @staticmethod def forward_can_be_applied(node: onnx_op.ONNXOp, state: SDFGState, sdfg: SDFG) -> bool: X = in_desc_with_name(node, state, sdfg, "X") if len(X.shape) != 4: return False # only for training for now if not {"out_mean", "out_var", "saved_mean", "saved_var"}.issubset( node.out_connectors): return False if not {"scale", "B"}.issubset(node.in_connectors): return False return True @staticmethod def forward(node: onnx_op.ONNXOp, state: SDFGState, sdfg: SDFG) -> Union[nd.Node, SDFG]: nsdfg, nstate, inputs, outputs = empty_sdfg_for_node(sdfg, state, node) X_desc = in_desc_with_name(node, state, sdfg, "X") T = X_desc.dtype unique_id = "{}_{}_{}_{}".format(clean_onnx_name(node.name), sdfg.sdfg_id, sdfg.node_id(state), state.node_id(node)) class Environment: cmake_minimum_version = None cmake_packages = [] cmake_variables = {} cmake_includes = [] cmake_libraries = [] cmake_compile_flags = [] cmake_link_flags = [] cmake_files = [] state_fields = [ f"cudnnTensorDescriptor_t *{unique_id}_Y_desc;", f"cudnnTensorDescriptor_t *{unique_id}_X_desc;", 
f"cudnnTensorDescriptor_t *{unique_id}_scale_desc;", f"float *{unique_id}_workspace;", f"size_t *{unique_id}_workspace_size;" f"float *{unique_id}_reserved;", f"size_t *{unique_id}_reserved_size;" ] dependencies = [environments.cuDNN] headers = [] init_code = "" finalize_code = "" Environment.__name__ = unique_id + "_environment" dace.library.environment(Environment) CudnnBatchNormalizationTraining.environments = [Environment] init, exit = _cudnn_tensor_descriptor_code(inputs["X"].desc(nsdfg), f"{unique_id}_X_desc", False) Environment.init_code += init Environment.finalize_code += exit init, exit = _cudnn_tensor_descriptor_code(outputs["Y"].desc(nsdfg), f"{unique_id}_Y_desc", False) Environment.init_code += init Environment.finalize_code += exit # setup scale descriptor Environment.init_code += f""" __state->{unique_id}_scale_desc = new cudnnTensorDescriptor_t; daceml::cudnn::CheckCudnnError(cudnnCreateTensorDescriptor(__state->{unique_id}_scale_desc)); daceml::cudnn::CheckCudnnError(cudnnDeriveBNTensorDescriptor( *__state->{unique_id}_scale_desc, *__state->{unique_id}_X_desc, CUDNN_BATCHNORM_SPATIAL)); """ Environment.finalize_code += f""" daceml::cudnn::CheckCudnnError(cudnnDestroyTensorDescriptor(*__state->{unique_id}_scale_desc)); delete __state->{unique_id}_scale_desc; """ # setup workspace and reserve space Environment.init_code += \ f""" {environments.cuDNN.handle_setup_code(node, init_stream=False)} // Setup workspace and reserved space for {unique_id} size_t ws_size; daceml::cudnn::CheckCudnnError(cudnnGetBatchNormalizationForwardTrainingExWorkspaceSize( __dace_cudnn_handle, CUDNN_BATCHNORM_SPATIAL, CUDNN_BATCHNORM_OPS_BN, *__state->{unique_id}_X_desc, nullptr, *__state->{unique_id}_Y_desc, *__state->{unique_id}_scale_desc, nullptr, &ws_size)); __state->{unique_id}_workspace_size = new size_t; *__state->{unique_id}_workspace_size = ws_size; cudaMalloc(&__state->{unique_id}_workspace, ws_size); size_t rs_size; daceml::cudnn::CheckCudnnError(cudnnGetBatchNormalizationTrainingExReserveSpaceSize( __dace_cudnn_handle, CUDNN_BATCHNORM_SPATIAL, CUDNN_BATCHNORM_OPS_BN, nullptr, *__state->{unique_id}_X_desc, &rs_size)); __state->{unique_id}_reserved_size = new size_t; *__state->{unique_id}_reserved_size = rs_size; cudaMalloc(&__state->{unique_id}_reserved, rs_size); """ Environment.finalize_code += f""" cudaFree(__state->{unique_id}_workspace); cudaFree(__state->{unique_id}_reserved); delete __state->{unique_id}_reserved_size; delete __state->{unique_id}_workspace_size; """ tasklet_code = f""" {environments.cuDNN.handle_setup_code(node)} float alpha = 1.f; float beta = 0.f; daceml::cudnn::CheckCudnnError(cudnnBatchNormalizationForwardTrainingEx( __dace_cudnn_handle, CUDNN_BATCHNORM_SPATIAL, CUDNN_BATCHNORM_OPS_BN, &alpha, &beta, *__state->{unique_id}_X_desc, _X, *__state->{unique_id}_X_desc, nullptr, *__state->{unique_id}_Y_desc, _Y, *__state->{unique_id}_scale_desc, _scale, _B, {1 - node.momentum}, _in_mean, _in_var, {node.epsilon}, _saved_mean, _saved_var, nullptr, __state->{unique_id}_workspace, *__state->{unique_id}_workspace_size, __state->{unique_id}_reserved, *__state->{unique_id}_reserved_size)); """ in_connectors = ["X", "B", "scale", "in_mean", "in_var"] out_connectors = ["Y", "saved_mean", "saved_var"] tasklet = nstate.add_tasklet( unique_id, {f"_{i}": dace.pointer(T) for i in in_connectors}, {f"_{i}": dace.pointer(T) for i in out_connectors}, tasklet_code, dtypes.Language.CPP) for inp in in_connectors: nstate.add_edge(inputs[inp], None, tasklet, f"_{inp}", 
nsdfg.make_array_memlet(inp)) for inp, anode in inputs.items(): if f"_{inp}" not in tasklet.in_connectors: nstate.remove_node(anode) del node.in_connectors[inp] for outp in out_connectors: nstate.add_edge(tasklet, f"_{outp}", outputs[outp], None, nsdfg.make_array_memlet(outp)) # remove out_mean and out_var. We write these out to the same pointers as the inputs remove_output_connector(sdfg, state, node, "out_mean") remove_output_connector(sdfg, state, node, "out_var") del nsdfg.arrays["out_mean"] del nsdfg.arrays["out_var"] for outp, anode in outputs.items(): if f"_{outp}" not in tasklet.out_connectors: nstate.remove_node(anode) del node.out_connectors[outp] return nsdfg
<reponame>Alan-love/football-1 use crate::club::Club; use chrono::prelude::*; use chrono::Duration; use chrono::NaiveDate; use crate::league::{LeagueSettings, Season}; use super::DayMonthPeriod; use actix_web::web::to; use std::collections::{HashSet, HashMap}; use rand::prelude::SliceRandom; use rand::thread_rng; use crate::utils::DateUtils; #[derive(Debug, Clone)] pub struct ScheduleTour { pub items: Vec<ScheduleItem>, pub played: bool, } impl ScheduleTour { pub fn new(games_count: usize) -> Self { ScheduleTour { items: Vec::with_capacity(games_count), played: false, } } } #[derive(Debug)] pub struct ScheduleManager { pub tours: Vec<ScheduleTour> } #[derive(Debug, Clone)] pub struct ScheduleItem { pub id: String, pub date: NaiveDateTime, pub home_club_id: u32, pub away_club_id: u32, pub home_goals: Option<u8>, pub away_goals: Option<u8>, } impl ScheduleItem { pub fn new(date: NaiveDateTime, home_club_id: u32, away_club_id: u32) -> Self { let id = format!("{}{}{}", date, home_club_id, away_club_id); ScheduleItem { id, date, home_club_id, away_club_id, home_goals: None, away_goals: None, } } } impl Default for ScheduleItem { fn default() -> Self { ScheduleItem{ id: "".to_string(), date: NaiveDateTime::from_timestamp(0, 0), home_club_id: 0, away_club_id: 0, home_goals: None, away_goals: None } } } const DAY_PLAYING_TIMES: [(u8, u8); 4] = [(13, 0), (14, 0), (16, 0), (18, 0)]; impl ScheduleManager { pub fn new() -> Self { ScheduleManager { tours: Vec::new() } } pub fn exists(&self) -> bool { !self.tours.is_empty() } pub fn generate(&mut self, season: Season, clubs: &[Club], tours_count: usize, league_settings: &LeagueSettings) { self.tours = Vec::with_capacity((clubs.len() / 2) * tours_count); let (season_year_start, season_year_end) = match season { Season::OneYear(year) => (year, year), Season::TwoYear(start_year, end_year) => (start_year, end_year) }; let mut club_ids: Vec<u32> = clubs.iter().map(|c| c.id).collect(); for item in self.generate_for_period(&league_settings.season_starting_half, season_year_start, &club_ids, tours_count / 2) { self.tours.push(item); } club_ids.reverse(); for item in self.generate_for_period(&league_settings.season_ending_half, season_year_end, &club_ids, tours_count / 2) { self.tours.push(item); } } fn generate_for_period(&mut self, period: &DayMonthPeriod, year: u16, club_ids: &[u32], tours_count: usize) -> Vec<ScheduleTour> { let mut current_date = DateUtils::get_next_saturday( NaiveDate::from_ymd(year as i32, period.from_month as u32, period.from_day as u32)); let club_len = club_ids.len(); let club_half_len = club_len / 2; let items_count = (club_len / 2) * tours_count; let mut result = Vec::with_capacity(items_count); for _ in 0..tours_count { result.push(ScheduleTour::new(club_half_len)) } let mut rival_map = HashMap::with_capacity(club_half_len); for tour in 0..tours_count { let current_tour = &mut result[tour]; for club_idx in 0..club_half_len - 1 { let rival_idx = rival_map.entry(club_idx).or_insert(club_half_len + club_idx); let home_club_id = club_ids[club_idx]; let away_club_id = club_ids[*rival_idx]; *rival_idx += 1; //*rival_idx %= club_len; current_tour.items.push(ScheduleItem::new( current_date, home_club_id, away_club_id)); } current_tour.items.shuffle(&mut thread_rng()); current_date += Duration::days(7); } result } pub fn update_match_result(&mut self, id: &str, home_goals: u8, away_goals: u8) { for tour in &mut self.tours { if tour.played { continue; } if let Some(item) = tour.items.iter_mut().find(|i| i.id == id) { item.home_goals = 
Some(home_goals); item.away_goals = Some(away_goals); if tour.items.iter().all(|i| i.home_goals.is_some() && i.away_goals.is_some()) { tour.played = true; } } } } pub fn get_matches(&self, date: NaiveDateTime) -> Vec<&ScheduleItem> { self.tours.iter() .flat_map(|t| &t.items) .filter(|s| s.date == date) .collect() } } #[cfg(test)] mod tests { use super::*; use crate::club::ClubMood; use crate::ClubBoard; #[test] fn generate_is_correct() { //let mut clubs = Vec::new(); // // clubs.push(Club { // id: 1, // name: "1".to_string(), // mood: ClubMood::default(), // board: ClubBoard::new(), // players: PlayerCollection::new(vec![]), // staffs: StaffCollection::new(vec![]), // tactics: None, // transfer_list: vec![], // match_history: vec![] // }); // // clubs.push(Club { // id: 2, // name: "1".to_string(), // mood: ClubMood::default(), // board: ClubBoard::new(), // players: PlayerCollection::new(vec![]), // staffs: StaffCollection::new(vec![]), // tactics: None, // transfer_list: vec![], // match_history: vec![] // }); // // clubs.push(Club { // id: 3, // name: "1".to_string(), // mood: ClubMood::default(), // board: ClubBoard::new(), // players: PlayerCollection::new(vec![]), // staffs: StaffCollection::new(vec![]), // tactics: None, // transfer_list: vec![], // match_history: vec![] // }); // // let schedule = Schedule::generate(&clubs, NaiveDate::from_ymd(2020, 3, 1)).unwrap(); //sassert_eq!(2, schedule.items.len()); } }
/**
 * @return Entry point to manage TenantAccess.
 */
public TenantAccess tenantAccess() {
    if (this.tenantAccess == null) {
        this.tenantAccess = new TenantAccessImpl(this);
    }
    return this.tenantAccess;
}
#include <bits/stdc++.h>
using namespace std;

#define GO ios_base::sync_with_stdio(false);cin.tie(NULL);cout.tie(NULL);
//using ll = int64_t; //pipe sign |
#define nln "\n"

typedef long long ll;
typedef unsigned long long ull;

int main()
{
    GO
    int n;
    cin >> n;
    int cnt = 0, num;
    if (n >= 100) {
        num = n / 100;
        cnt += num;
        n -= (100 * num);
    }
    if (n >= 20) {
        num = n / 20;
        cnt += num;
        n -= (20 * num);
    }
    if (n >= 10) {
        num = n / 10;
        cnt += num;
        n -= (10 * num);
    }
    if (n >= 5) {
        num = n / 5;
        cnt += num;
        n -= (5 * num);
    }
    cout << cnt + n;
    return 0;
}
Penetrating thoracic great vessel injury: impact of admission hemodynamics and preoperative imaging.

BACKGROUND
The management of penetrating great vessel (PGV) injury is challenging. Patients in shock require rapid evaluation, whereas in stable patients, imaging studies may optimize the surgical approach. We reviewed our experience with PGV injury to determine the impact of admission blood pressure and accuracy of imaging studies, both angiography and computed tomographic angiography (CTA).

METHODS
Retrospective review of the trauma registry from 2001 to 2007 identifying patients with PGV injury. Demographics, admission systolic blood pressure, imaging studies, specific injuries, incision, methods of repair, hospital and intensive care length of stay, complications, and mortality were recorded. Shock was defined as systolic blood pressure <90 mm Hg.

RESULTS
Thirty-six consecutive patients were identified, average age was 28 (+/-10) years, of whom 20 (56%) presented in shock. Those in shock had more combined arterial-venous injuries (60% vs. 25%), concomitant thoracic injuries requiring resection (45% vs. 19%), and units of packed red blood cells (5.8 +/- 2 vs. 2.7 +/- 1.5), p < 0.01. For those in shock, the mean time to the operating room was 27 minutes +/- 9 minutes and 75% had sternotomy. Among stable patients, 56% had a periclavicular approach and 31% partial sternotomy. All 16 stable patients had imaging; angiography in 3 patients and CTA in 7 patients. In six patients who had both angiography and CTA, the results were concordant; therefore, CTA accurately diagnosed arterial injury in all 13 patients. Imaging changed the choice of incision in 4 (25%). Intensive care length of stay was significantly longer in the shock group 3.1 (+/-2.1) days versus 1.4 (+/-1.6) days (p = 0.01). There were 5 (14%) complications and no deaths.

CONCLUSION
Patients in shock require rapid evaluation. Sternotomy affords excellent exposure to all PGV injuries, and partial sternotomy is useful in stable patients. In stable patients, CTA can be valuable in defining the injury and may influence the surgical approach. Surgical results are surprisingly good, even in unstable patients and may be related to rapid transport and operation.
def delete_node(node_name):
    nodes = python.force_list(node_name)
    objects_to_delete = list()
    for node_found in nodes:
        if not python.is_string(node_found):
            node_found = node.get_name(node_found, fullname=True)
        objects_to_delete.append(node_found)

    return maya.cmds.delete(objects_to_delete)
package com.example.template4fx.control.dialog;

import com.example.template4fx.control.FXControl;
import com.example.template4fx.fx.Popup;
import javafx.beans.property.BooleanProperty;
import javafx.beans.property.SimpleBooleanProperty;
import javafx.beans.property.SimpleStringProperty;
import javafx.beans.property.StringProperty;
import javafx.scene.Node;
import javafx.scene.input.KeyEvent;

public abstract class AbstractDialog extends FXControl implements Popup {

    AbstractDialog( String header, String svg ) {
        setHeader( header );
        setSvg( svg );

        getStyleClass().add( "dialog" );
    }

    @Override
    public Node getNode() {
        return this;
    }

    @Override
    public void handleKeyEvent( KeyEvent event ) {
    }

    private final BooleanProperty closed = new SimpleBooleanProperty();

    public boolean isClosed() {
        return closed.get();
    }

    @Override
    public BooleanProperty closedProperty() {
        return closed;
    }

    public void setClosed( boolean closed ) {
        this.closed.set( closed );
    }

    private final StringProperty header = new SimpleStringProperty();

    public String getHeader() {
        return header.get();
    }

    public StringProperty headerProperty() {
        return header;
    }

    public void setHeader( String header ) {
        this.header.set( header );
    }

    private final StringProperty svg = new SimpleStringProperty();

    public String getSvg() {
        return svg.get();
    }

    public StringProperty svgProperty() {
        return svg;
    }

    public void setSvg( String svg ) {
        this.svg.set( svg );
    }
}
// Copyright 2014, Additive Regularization of Topic Models. #include "artm/core/master_component.h" #include <algorithm> #include <fstream> // NOLINT #include <vector> #include <set> #include <sstream> #include <utility> #include "boost/algorithm/string.hpp" #include "boost/algorithm/string/predicate.hpp" #include "boost/thread.hpp" #include "boost/uuid/uuid_io.hpp" #include "boost/uuid/uuid_generators.hpp" #include "glog/logging.h" #include "artm/regularizer_interface.h" #include "artm/score_calculator_interface.h" #include "artm/core/exceptions.h" #include "artm/core/helpers.h" #include "artm/core/batch_manager.h" #include "artm/core/cache_manager.h" #include "artm/core/check_messages.h" #include "artm/core/instance.h" #include "artm/core/processor.h" #include "artm/core/phi_matrix_operations.h" #include "artm/core/score_manager.h" #include "artm/core/dense_phi_matrix.h" #include "artm/core/template_manager.h" namespace artm { namespace core { static void HandleExternalTopicModelRequest(::artm::TopicModel* topic_model, std::string* lm) { lm->resize(sizeof(float) * topic_model->token_size() * topic_model->num_topics()); char* lm_ptr = &(*lm)[0]; float* lm_float = reinterpret_cast<float*>(lm_ptr); for (int token_index = 0; token_index < topic_model->token_size(); ++token_index) { for (int topic_index = 0; topic_index < topic_model->num_topics(); ++topic_index) { int index = token_index * topic_model->num_topics() + topic_index; lm_float[index] = topic_model->token_weights(token_index).value(topic_index); } } topic_model->clear_token_weights(); } static void HandleExternalThetaMatrixRequest(::artm::ThetaMatrix* theta_matrix, std::string* lm) { lm->resize(sizeof(float) * theta_matrix->item_id_size() * theta_matrix->num_topics()); char* lm_ptr = &(*lm)[0]; float* lm_float = reinterpret_cast<float*>(lm_ptr); for (int topic_index = 0; topic_index < theta_matrix->num_topics(); ++topic_index) { for (int item_index = 0; item_index < theta_matrix->item_id_size(); ++item_index) { int index = item_index * theta_matrix->num_topics() + topic_index; lm_float[index] = theta_matrix->item_weights(item_index).value(topic_index); } } theta_matrix->clear_item_weights(); } void MasterComponent::CreateOrReconfigureMasterComponent(const MasterModelConfig& config, bool reconfigure) { if (!reconfigure) instance_ = std::make_shared<Instance>(config); else instance_->Reconfigure(config); if (reconfigure) { // remove all regularizers instance_->regularizers()->clear(); } // create (or re-create the regularizers) for (int i = 0; i < config.regularizer_config_size(); ++i) CreateOrReconfigureRegularizer(config.regularizer_config(i)); } MasterComponent::MasterComponent(const MasterModelConfig& config) : instance_(nullptr) { CreateOrReconfigureMasterComponent(config, /*reconfigure =*/ false); } MasterComponent::MasterComponent(const MasterComponent& rhs) : instance_(rhs.instance_->Duplicate()) { } MasterComponent::~MasterComponent() {} std::shared_ptr<MasterComponent> MasterComponent::Duplicate() const { return std::shared_ptr<MasterComponent>(new MasterComponent(*this)); } std::shared_ptr<MasterModelConfig> MasterComponent::config() const { return instance_->config(); } void MasterComponent::DisposeModel(const std::string& name) { instance_->DisposeModel(name); } void MasterComponent::ClearThetaCache(const ClearThetaCacheArgs& args) { instance_->cache_manager()->Clear(); } void MasterComponent::ClearScoreCache(const ClearScoreCacheArgs& args) { instance_->score_manager()->Clear(); } void 
MasterComponent::ClearScoreArrayCache(const ClearScoreArrayCacheArgs& args) { instance_->score_tracker()->Clear(); } void MasterComponent::CreateOrReconfigureRegularizer(const RegularizerConfig& config) { instance_->CreateOrReconfigureRegularizer(config); } void MasterComponent::DisposeRegularizer(const std::string& name) { instance_->DisposeRegularizer(name); } void MasterComponent::CreateDictionary(const DictionaryData& data) { DisposeDictionary(data.name()); auto dictionary = std::make_shared<Dictionary>(data); instance_->dictionaries()->set(data.name(), dictionary); } void MasterComponent::AppendDictionary(const DictionaryData& data) { auto dict_ptr = instance_->dictionaries()->get(data.name()); if (dict_ptr == nullptr) BOOST_THROW_EXCEPTION(InvalidOperation("Dictionary " + data.name() + " does not exist")); dict_ptr->Append(data); } void MasterComponent::DisposeDictionary(const std::string& name) { if (name.empty()) instance_->dictionaries()->clear(); else instance_->dictionaries()->erase(name); } void MasterComponent::ExportDictionary(const ExportDictionaryArgs& args) { Dictionary::Export(args, instance_->dictionaries()); } void MasterComponent::ImportDictionary(const ImportDictionaryArgs& args) { auto import_data = Dictionary::ImportData(args); int token_size = import_data.front()->token_size(); if (token_size <= 0) BOOST_THROW_EXCEPTION(CorruptedMessageException("Unable to read from " + args.file_name())); import_data.front()->set_name(args.dictionary_name()); CreateDictionary(*(import_data.front())); for (int i = 1; i < import_data.size(); ++i) { import_data.at(i)->set_name(args.dictionary_name()); AppendDictionary(*import_data.at(i)); } LOG(INFO) << "Import completed, token_size = " << token_size; } void MasterComponent::Request(::artm::MasterModelConfig* result) { std::shared_ptr<MasterModelConfig> config = instance_->config(); if (config == nullptr) BOOST_THROW_EXCEPTION(InvalidOperation( "Invalid master_id; use ArtmCreateMasterModel instead of ArtmCreateMasterComponent")); result->CopyFrom(*config); } void MasterComponent::Request(const GetDictionaryArgs& args, DictionaryData* result) { std::shared_ptr<Dictionary> dict_ptr = instance_->dictionaries()->get(args.dictionary_name()); if (dict_ptr == nullptr) BOOST_THROW_EXCEPTION(InvalidOperation("Dictionary " + args.dictionary_name() + " does not exist or has no tokens")); dict_ptr->StoreIntoDictionaryData(result); result->set_name(args.dictionary_name()); } void MasterComponent::ImportBatches(const ImportBatchesArgs& args) { for (int i = 0; i < args.batch_size(); ++i) { std::shared_ptr<Batch> batch = std::make_shared<Batch>(args.batch(i)); FixAndValidateMessage(batch.get(), /* throw_error =*/ true); instance_->batches()->set(batch->id(), batch); } } void MasterComponent::DisposeBatch(const std::string& name) { instance_->batches()->erase(name); } void MasterComponent::ExportModel(const ExportModelArgs& args) { std::shared_ptr<MasterModelConfig> config = instance_->config(); if (config != nullptr) if (!args.has_model_name()) const_cast<ExportModelArgs*>(&args)->set_model_name(config->pwt_name()); if (boost::filesystem::exists(args.file_name())) BOOST_THROW_EXCEPTION(DiskWriteException("File already exists: " + args.file_name())); std::ofstream fout(args.file_name(), std::ofstream::binary); if (!fout.is_open()) BOOST_THROW_EXCEPTION(DiskWriteException("Unable to create file " + args.file_name())); std::shared_ptr<const PhiMatrix> phi_matrix = instance_->GetPhiMatrixSafe(args.model_name()); const PhiMatrix& n_wt = *phi_matrix; 
LOG(INFO) << "Exporting model " << args.model_name() << " to " << args.file_name(); const int token_size = n_wt.token_size(); if (token_size == 0) BOOST_THROW_EXCEPTION(InvalidOperation("Model " + args.model_name() + " has no tokens, export failed")); int tokens_per_chunk = std::min<int>(token_size, 100 * 1024 * 1024 / n_wt.topic_size()); ::artm::GetTopicModelArgs get_topic_model_args; get_topic_model_args.set_model_name(args.model_name()); get_topic_model_args.set_matrix_layout(::artm::MatrixLayout_Sparse); get_topic_model_args.mutable_token()->Reserve(tokens_per_chunk); get_topic_model_args.mutable_class_id()->Reserve(tokens_per_chunk); const char version = 0; fout << version; for (int token_id = 0; token_id < token_size; ++token_id) { Token token = n_wt.token(token_id); get_topic_model_args.add_token(token.keyword); get_topic_model_args.add_class_id(token.class_id); if (((token_id + 1) == token_size) || (get_topic_model_args.token_size() >= tokens_per_chunk)) { ::artm::TopicModel external_topic_model; PhiMatrixOperations::RetrieveExternalTopicModel(n_wt, get_topic_model_args, &external_topic_model); std::string str = external_topic_model.SerializeAsString(); fout << str.size(); fout << str; get_topic_model_args.clear_class_id(); get_topic_model_args.clear_token(); } } fout.close(); LOG(INFO) << "Export completed, token_size = " << n_wt.token_size() << ", topic_size = " << n_wt.topic_size(); } void MasterComponent::ImportModel(const ImportModelArgs& args) { std::shared_ptr<MasterModelConfig> config = instance_->config(); if (config != nullptr) if (!args.has_model_name()) const_cast<ImportModelArgs*>(&args)->set_model_name(config->pwt_name()); std::ifstream fin(args.file_name(), std::ifstream::binary); if (!fin.is_open()) BOOST_THROW_EXCEPTION(DiskReadException("Unable to open file " + args.file_name())); LOG(INFO) << "Importing model " << args.model_name() << " from " << args.file_name(); char version; fin >> version; if (version != 0) { std::stringstream ss; ss << "Unsupported fromat version: " << static_cast<int>(version); BOOST_THROW_EXCEPTION(DiskReadException(ss.str())); } std::shared_ptr<DensePhiMatrix> target; while (!fin.eof()) { int length; fin >> length; if (fin.eof()) break; if (length <= 0) BOOST_THROW_EXCEPTION(CorruptedMessageException("Unable to read from " + args.file_name())); std::string buffer(length, '\0'); fin.read(&buffer[0], length); ::artm::TopicModel topic_model; if (!topic_model.ParseFromArray(buffer.c_str(), length)) BOOST_THROW_EXCEPTION(CorruptedMessageException("Unable to read from " + args.file_name())); topic_model.set_name(args.model_name()); if (target == nullptr) target = std::make_shared<DensePhiMatrix>(args.model_name(), topic_model.topic_name()); PhiMatrixOperations::ApplyTopicModelOperation(topic_model, 1.0f, target.get()); } fin.close(); if (target == nullptr) BOOST_THROW_EXCEPTION(CorruptedMessageException("Unable to read from " + args.file_name())); instance_->SetPhiMatrix(args.model_name(), target); LOG(INFO) << "Import completed, token_size = " << target->token_size() << ", topic_size = " << target->topic_size(); } void MasterComponent::AttachModel(const AttachModelArgs& args, int address_length, float* address) { ModelName model_name = args.model_name(); LOG(INFO) << "Attaching model " << model_name << " to " << address << " (" << address_length << " bytes)"; std::shared_ptr<const PhiMatrix> phi_matrix = instance_->GetPhiMatrixSafe(model_name); PhiMatrixFrame* frame = dynamic_cast<PhiMatrixFrame*>(const_cast<PhiMatrix*>(phi_matrix.get())); if 
(frame == nullptr) BOOST_THROW_EXCEPTION(InvalidOperation("Unable to attach to model " + model_name)); std::shared_ptr<AttachedPhiMatrix> attached = std::make_shared<AttachedPhiMatrix>(address_length, address, frame); instance_->SetPhiMatrix(model_name, attached); } void MasterComponent::InitializeModel(const InitializeModelArgs& args) { std::shared_ptr<MasterModelConfig> config = instance_->config(); if (config != nullptr) { InitializeModelArgs* mutable_args = const_cast<InitializeModelArgs*>(&args); if (!args.has_model_name()) mutable_args->set_model_name(config->pwt_name()); if (args.topic_name_size() == 0) mutable_args->mutable_topic_name()->CopyFrom(config->topic_name()); FixMessage(mutable_args); } auto dict = instance_->dictionaries()->get(args.dictionary_name()); if (dict == nullptr) { std::stringstream ss; ss << "Dictionary '" << args.dictionary_name() << "' does not exist"; BOOST_THROW_EXCEPTION(InvalidOperation(ss.str())); } if (dict->size() == 0) { std::stringstream ss; ss << "Dictionary '" << args.dictionary_name() << "' has no entries"; BOOST_THROW_EXCEPTION(InvalidOperation(ss.str())); } LOG(INFO) << "InitializeModel() with " << args.topic_name_size() << " topics and " << dict->size() << " tokens"; auto new_ttm = std::make_shared< ::artm::core::DensePhiMatrix>(args.model_name(), args.topic_name()); for (int index = 0; index < dict->size(); ++index) { ::artm::core::Token token = dict->entry(index)->token(); std::vector<float> vec = Helpers::GenerateRandomVector(new_ttm->topic_size(), token, args.seed()); int token_id = new_ttm->AddToken(token); new_ttm->increase(token_id, vec); } PhiMatrixOperations::FindPwt(*new_ttm, new_ttm.get()); instance_->SetPhiMatrix(args.model_name(), new_ttm); } void MasterComponent::FilterDictionary(const FilterDictionaryArgs& args) { auto data = Dictionary::Filter(args, instance_->dictionaries()); CreateDictionary(*(data.first)); if (data.second->cooc_first_index_size() > 0) AppendDictionary(*(data.second)); } void MasterComponent::GatherDictionary(const GatherDictionaryArgs& args) { auto data = Dictionary::Gather(args, *instance_->batches()); CreateDictionary(*(data.first)); if (data.second->cooc_first_index_size() > 0) AppendDictionary(*(data.second)); } void MasterComponent::ReconfigureMasterModel(const MasterModelConfig& config) { CreateOrReconfigureMasterComponent(config, /*reconfigure = */ true); } void MasterComponent::Request(const GetTopicModelArgs& args, ::artm::TopicModel* result) { std::shared_ptr<MasterModelConfig> config = instance_->config(); if (config != nullptr) if (!args.has_model_name()) const_cast<GetTopicModelArgs*>(&args)->set_model_name(config->pwt_name()); auto phi_matrix = instance_->GetPhiMatrixSafe(args.model_name()); PhiMatrixOperations::RetrieveExternalTopicModel(*phi_matrix, args, result); } void MasterComponent::Request(const GetTopicModelArgs& args, ::artm::TopicModel* result, std::string* external) { if (args.matrix_layout() != artm::MatrixLayout_Dense) BOOST_THROW_EXCEPTION(InvalidOperation("Dense matrix format is required for ArtmRequestTopicModelExternal")); Request(args, result); HandleExternalTopicModelRequest(result, external); } void MasterComponent::Request(const GetScoreValueArgs& args, ScoreData* result) { instance_->score_manager()->RequestScore(args.score_name(), result); } void MasterComponent::Request(const GetScoreArrayArgs& args, ScoreArray* result) { instance_->score_tracker()->RequestScoreArray(args, result); } void MasterComponent::Request(const GetMasterComponentInfoArgs& /*args*/, 
MasterComponentInfo* result) { this->instance_->RequestMasterComponentInfo(result); } void MasterComponent::Request(const ProcessBatchesArgs& args, ProcessBatchesResult* result) { BatchManager batch_manager; RequestProcessBatchesImpl(args, &batch_manager, /* async =*/ false, nullptr, result->mutable_theta_matrix()); instance_->score_manager()->RequestAllScores(result->mutable_score_data()); } void MasterComponent::Request(const ProcessBatchesArgs& args, ProcessBatchesResult* result, std::string* external) { const bool is_dense_theta = args.theta_matrix_type() == artm::ThetaMatrixType_Dense; const bool is_dense_ptdw = args.theta_matrix_type() == artm::ThetaMatrixType_DensePtdw; if (!is_dense_theta && !is_dense_ptdw) BOOST_THROW_EXCEPTION(InvalidOperation("Dense matrix format is required for ArtmRequestProcessBatchesExternal")); Request(args, result); HandleExternalThetaMatrixRequest(result->mutable_theta_matrix(), external); } void MasterComponent::AsyncRequestProcessBatches(const ProcessBatchesArgs& process_batches_args, BatchManager *batch_manager) { RequestProcessBatchesImpl(process_batches_args, batch_manager, /* async =*/ true, /*score_manager=*/ nullptr, /* theta_matrix=*/ nullptr); } void MasterComponent::RequestProcessBatchesImpl(const ProcessBatchesArgs& process_batches_args, BatchManager* batch_manager, bool async, ScoreManager* score_manager, ::artm::ThetaMatrix* theta_matrix) { const ProcessBatchesArgs& args = process_batches_args; // short notation ModelName model_name = args.pwt_source_name(); if (instance_->processor_size() <= 0) BOOST_THROW_EXCEPTION(InvalidOperation( "Can't process batches because there are no processors. Check your MasterModelConfig.num_processors setting.")); std::shared_ptr<const PhiMatrix> phi_matrix = instance_->GetPhiMatrixSafe(model_name); const PhiMatrix& p_wt = *phi_matrix; const_cast<ProcessBatchesArgs*>(&args)->mutable_topic_name()->CopyFrom(p_wt.topic_name()); if (args.has_nwt_target_name()) { if (args.nwt_target_name() == args.pwt_source_name()) BOOST_THROW_EXCEPTION(InvalidOperation( "ProcessBatchesArgs.pwt_source_name == ProcessBatchesArgs.nwt_target_name")); auto nwt_target(std::make_shared<DensePhiMatrix>(args.nwt_target_name(), p_wt.topic_name())); nwt_target->Reshape(p_wt); instance_->SetPhiMatrix(args.nwt_target_name(), nwt_target); } if (async && args.theta_matrix_type() != ThetaMatrixType_None) BOOST_THROW_EXCEPTION(InvalidOperation( "ArtmAsyncProcessBatches require ProcessBatchesArgs.theta_matrix_type to be set to None")); // The code below must not use cache_manger in async mode. // Since cache_manager lives on stack it will be destroyed once we return from this function. // Therefore, no pointers to cache_manager should exist upon return from RequestProcessBatchesImpl. 
CacheManager cache_manager; bool return_theta = false; bool return_ptdw = false; CacheManager* ptdw_cache_manager_ptr = nullptr; CacheManager* theta_cache_manager_ptr = nullptr; switch (args.theta_matrix_type()) { case ThetaMatrixType_Cache: if (instance_->config()->cache_theta()) theta_cache_manager_ptr = instance_->cache_manager(); break; case ThetaMatrixType_Dense: case ThetaMatrixType_Sparse: theta_cache_manager_ptr = &cache_manager; return_theta = true; break; case ThetaMatrixType_DensePtdw: case ThetaMatrixType_SparsePtdw: ptdw_cache_manager_ptr = &cache_manager; return_ptdw = true; } if (args.batch_filename_size() < instance_->processor_size()) { LOG_FIRST_N(INFO, 1) << "Batches count (=" << args.batch_filename_size() << ") is smaller than num processors (=" << instance_->processor_size() << "), which may cause suboptimal performance."; } auto createProcessorInput = [&](){ // NOLINT boost::uuids::uuid task_id = boost::uuids::random_generator()(); batch_manager->Add(task_id); auto pi = std::make_shared<ProcessorInput>(); pi->set_batch_manager(batch_manager); pi->set_score_manager(score_manager); pi->set_cache_manager(theta_cache_manager_ptr); pi->set_ptdw_cache_manager(ptdw_cache_manager_ptr); pi->set_model_name(model_name); pi->mutable_args()->CopyFrom(args); pi->set_task_id(task_id); if (args.reuse_theta()) pi->set_reuse_theta_cache_manager(instance_->cache_manager()); if (args.has_nwt_target_name()) pi->set_nwt_target_name(args.nwt_target_name()); return pi; }; // Enqueue tasks based on args.batch_filename for (int batch_index = 0; batch_index < args.batch_filename_size(); ++batch_index) { auto pi = createProcessorInput(); pi->set_batch_filename(args.batch_filename(batch_index)); pi->set_batch_weight(args.batch_weight(batch_index)); instance_->processor_queue()->push(pi); } // Enqueue tasks based on args.batch for (int batch_index = 0; batch_index < args.batch_size(); ++batch_index) { auto pi = createProcessorInput(); pi->mutable_batch()->CopyFrom(args.batch(batch_index)); pi->set_batch_weight(args.batch_weight(batch_index)); instance_->processor_queue()->push(pi); } if (async) return; while (!batch_manager->IsEverythingProcessed()) { boost::this_thread::sleep(boost::posix_time::milliseconds(kIdleLoopFrequency)); } GetThetaMatrixArgs get_theta_matrix_args; switch (args.theta_matrix_type()) { case ThetaMatrixType_Dense: case ThetaMatrixType_DensePtdw: get_theta_matrix_args.set_matrix_layout(MatrixLayout_Dense); break; case ThetaMatrixType_Sparse: case ThetaMatrixType_SparsePtdw: get_theta_matrix_args.set_matrix_layout(MatrixLayout_Sparse); break; } if (theta_matrix != nullptr && args.has_theta_matrix_type()) cache_manager.RequestThetaMatrix(get_theta_matrix_args, theta_matrix); } void MasterComponent::MergeModel(const MergeModelArgs& merge_model_args) { VLOG(0) << "MasterComponent: start merging models"; if (merge_model_args.nwt_source_name_size() == 0) BOOST_THROW_EXCEPTION(InvalidOperation("MergeModelArgs.nwt_source_name must not be empty")); if (merge_model_args.nwt_source_name_size() != merge_model_args.source_weight_size()) BOOST_THROW_EXCEPTION(InvalidOperation( "MergeModelArgs.nwt_source_name_size() != MergeModelArgs.source_weight_size()")); std::shared_ptr<DensePhiMatrix> nwt_target; std::stringstream ss; for (int i = 0; i < merge_model_args.nwt_source_name_size(); ++i) { ModelName model_name = merge_model_args.nwt_source_name(i); ss << (i == 0 ? 
"" : ", ") << model_name; float weight = merge_model_args.source_weight(i); std::shared_ptr<const PhiMatrix> phi_matrix = instance_->GetPhiMatrix(model_name); if (phi_matrix == nullptr) { LOG(WARNING) << "Model " << model_name << " does not exist"; continue; } const PhiMatrix& n_wt = *phi_matrix; if (nwt_target == nullptr) { nwt_target = std::make_shared<DensePhiMatrix>( merge_model_args.nwt_target_name(), merge_model_args.topic_name_size() != 0 ? merge_model_args.topic_name() : n_wt.topic_name()); } if (n_wt.token_size() > 0) { ::artm::TopicModel topic_model; PhiMatrixOperations::RetrieveExternalTopicModel(n_wt, GetTopicModelArgs(), &topic_model); PhiMatrixOperations::ApplyTopicModelOperation(topic_model, weight, nwt_target.get()); } } if (nwt_target == nullptr) BOOST_THROW_EXCEPTION(InvalidOperation( "ArtmMergeModel() have not found any models to merge. " "Verify that at least one of the following models exist: " + ss.str())); instance_->SetPhiMatrix(merge_model_args.nwt_target_name(), nwt_target); VLOG(0) << "MasterComponent: complete merging models"; } void MasterComponent::RegularizeModel(const RegularizeModelArgs& regularize_model_args) { VLOG(0) << "MasterComponent: start regularizing model " << regularize_model_args.pwt_source_name(); const std::string& pwt_source_name = regularize_model_args.pwt_source_name(); const std::string& nwt_source_name = regularize_model_args.nwt_source_name(); const std::string& rwt_target_name = regularize_model_args.rwt_target_name(); if (!regularize_model_args.has_pwt_source_name()) BOOST_THROW_EXCEPTION(InvalidOperation("RegularizeModelArgs.pwt_source_name is missing")); if (!regularize_model_args.has_nwt_source_name()) BOOST_THROW_EXCEPTION(InvalidOperation("RegularizeModelArgs.nwt_source_name is missing")); if (!regularize_model_args.has_rwt_target_name()) BOOST_THROW_EXCEPTION(InvalidOperation("RegularizeModelArgs.rwt_target_name is missing")); std::shared_ptr<const PhiMatrix> nwt_phi_matrix = instance_->GetPhiMatrixSafe(nwt_source_name); const PhiMatrix& n_wt = *nwt_phi_matrix; std::shared_ptr<const PhiMatrix> pwt_phi_matrix = instance_->GetPhiMatrixSafe(pwt_source_name); const PhiMatrix& p_wt = *pwt_phi_matrix; auto rwt_target(std::make_shared<DensePhiMatrix>(rwt_target_name, nwt_phi_matrix->topic_name())); rwt_target->Reshape(*nwt_phi_matrix); PhiMatrixOperations::InvokePhiRegularizers(instance_.get(), regularize_model_args.regularizer_settings(), p_wt, n_wt, rwt_target.get()); instance_->SetPhiMatrix(rwt_target_name, rwt_target); VLOG(0) << "MasterComponent: complete regularizing model " << regularize_model_args.pwt_source_name(); } void MasterComponent::NormalizeModel(const NormalizeModelArgs& normalize_model_args) { VLOG(0) << "MasterComponent: start normalizing model " << normalize_model_args.nwt_source_name(); const std::string& pwt_target_name = normalize_model_args.pwt_target_name(); const std::string& nwt_source_name = normalize_model_args.nwt_source_name(); const std::string& rwt_source_name = normalize_model_args.rwt_source_name(); if (!normalize_model_args.has_pwt_target_name()) BOOST_THROW_EXCEPTION(InvalidOperation("NormalizeModelArgs.pwt_target_name is missing")); if (!normalize_model_args.has_nwt_source_name()) BOOST_THROW_EXCEPTION(InvalidOperation("NormalizeModelArgs.pwt_target_name is missing")); std::shared_ptr<const PhiMatrix> nwt_phi_matrix = instance_->GetPhiMatrixSafe(nwt_source_name); const PhiMatrix& n_wt = *nwt_phi_matrix; std::shared_ptr<const PhiMatrix> rwt_phi_matrix; if (normalize_model_args.has_rwt_source_name()) 
  rwt_phi_matrix = instance_->GetPhiMatrixSafe(rwt_source_name);

  auto pwt_target(std::make_shared<DensePhiMatrix>(pwt_target_name, n_wt.topic_name()));
  pwt_target->Reshape(n_wt);
  if (rwt_phi_matrix == nullptr)
    PhiMatrixOperations::FindPwt(n_wt, pwt_target.get());
  else
    PhiMatrixOperations::FindPwt(n_wt, *rwt_phi_matrix, pwt_target.get());

  instance_->SetPhiMatrix(pwt_target_name, pwt_target);
  VLOG(0) << "MasterComponent: complete normalizing model " << normalize_model_args.nwt_source_name();
}

void MasterComponent::OverwriteTopicModel(const ::artm::TopicModel& args) {
  std::shared_ptr<MasterModelConfig> config = instance_->config();
  if (config != nullptr)
    if (!args.has_name())
      const_cast< ::artm::TopicModel*>(&args)->set_name(config->pwt_name());

  auto target = std::make_shared<DensePhiMatrix>(args.name(), args.topic_name());
  PhiMatrixOperations::ApplyTopicModelOperation(args, 1.0f, target.get());
  instance_->SetPhiMatrix(args.name(), target);
}

void MasterComponent::Request(const GetThetaMatrixArgs& args, ::artm::ThetaMatrix* result) {
  instance_->cache_manager()->RequestThetaMatrix(args, result);
}

void MasterComponent::Request(const GetThetaMatrixArgs& args,
                              ::artm::ThetaMatrix* result,
                              std::string* external) {
  if (args.matrix_layout() != artm::MatrixLayout_Dense)
    BOOST_THROW_EXCEPTION(InvalidOperation("Dense matrix format is required for ArtmRequestThetaMatrixExternal"));

  Request(args, result);
  HandleExternalThetaMatrixRequest(result, external);
}

// ToDo(sashafrey): what should be the default cache policy for TransformMasterModel?
// Currently it saves the result in the cache. The result is then empty...
void MasterComponent::Request(const TransformMasterModelArgs& args, ::artm::ThetaMatrix* result) {
  std::shared_ptr<MasterModelConfig> config = instance_->config();
  if (config == nullptr)
    BOOST_THROW_EXCEPTION(InvalidOperation(
      "Invalid master_id; use ArtmCreateMasterModel instead of ArtmCreateMasterComponent"));

  if (args.theta_matrix_type() == ThetaMatrixType_Cache)
    ClearThetaCache(ClearThetaCacheArgs());
  ClearScoreCache(ClearScoreCacheArgs());

  ProcessBatchesArgs process_batches_args;
  process_batches_args.mutable_batch_filename()->CopyFrom(args.batch_filename());
  process_batches_args.mutable_batch()->CopyFrom(args.batch());
  process_batches_args.set_pwt_source_name(config->pwt_name());
  if (config->has_num_document_passes())
    process_batches_args.set_num_document_passes(config->num_document_passes());

  for (auto& regularizer : config->regularizer_config()) {
    process_batches_args.add_regularizer_name(regularizer.name());
    process_batches_args.add_regularizer_tau(regularizer.tau());
  }

  if (config->has_opt_for_avx())
    process_batches_args.set_opt_for_avx(config->opt_for_avx());
  if (config->has_reuse_theta())
    process_batches_args.set_reuse_theta(config->reuse_theta());
  process_batches_args.mutable_class_id()->CopyFrom(config->class_id());
  process_batches_args.mutable_class_weight()->CopyFrom(config->class_weight());
  process_batches_args.set_theta_matrix_type(args.theta_matrix_type());
  if (args.has_predict_class_id())
    process_batches_args.set_predict_class_id(args.predict_class_id());
  FixMessage(&process_batches_args);

  BatchManager batch_manager;
  RequestProcessBatchesImpl(process_batches_args, &batch_manager,
                            /* async =*/ false, /*score_manager =*/ nullptr, result);
}

void MasterComponent::Request(const TransformMasterModelArgs& args,
                              ::artm::ThetaMatrix* result,
                              std::string* external) {
  const bool is_dense_theta = args.theta_matrix_type() == artm::ThetaMatrixType_Dense;
  const bool is_dense_ptdw = args.theta_matrix_type() == artm::ThetaMatrixType_DensePtdw;
  if (!is_dense_theta && !is_dense_ptdw)
    BOOST_THROW_EXCEPTION(InvalidOperation("Dense matrix format is required for ArtmRequestProcessBatchesExternal"));

  Request(args, result);
  HandleExternalThetaMatrixRequest(result, external);
}

// Base interface for feeding batches into ProcessBatchesArgs.
class BatchesIterator {
 public:
  virtual ~BatchesIterator() {}
  virtual void move(ProcessBatchesArgs* args) = 0;
};

// Offline mode: every call to move() exposes the full set of batches.
class OfflineBatchesIterator : public BatchesIterator {
 public:
  OfflineBatchesIterator(const ::google::protobuf::RepeatedPtrField<std::string>& batch_filename,
                         const ::google::protobuf::RepeatedField<float>& batch_weight)
      : batch_filename_(batch_filename),
        batch_weight_(batch_weight) {}

  virtual ~OfflineBatchesIterator() {}

 private:
  const ::google::protobuf::RepeatedPtrField<std::string>& batch_filename_;
  const ::google::protobuf::RepeatedField<float>& batch_weight_;

  virtual void move(ProcessBatchesArgs* args) {
    args->mutable_batch_filename()->CopyFrom(batch_filename_);
    args->mutable_batch_weight()->CopyFrom(batch_weight_);
  }
};

// Online mode: each call to move() exposes the next window of batches,
// where the window boundaries are given by the update_after array.
class OnlineBatchesIterator : public BatchesIterator {
 public:
  OnlineBatchesIterator(const ::google::protobuf::RepeatedPtrField<std::string>& batch_filename,
                        const ::google::protobuf::RepeatedField<float>& batch_weight,
                        const ::google::protobuf::RepeatedField<int>& update_after,
                        const ::google::protobuf::RepeatedField<float>& apply_weight,
                        const ::google::protobuf::RepeatedField<float>& decay_weight)
      : batch_filename_(batch_filename),
        batch_weight_(batch_weight),
        update_after_(update_after),
        apply_weight_(apply_weight),
        decay_weight_(decay_weight),
        current_(0) {}

  virtual ~OnlineBatchesIterator() {}

  bool more() const { return current_ < static_cast<int>(update_after_.size()); }

  virtual void move(ProcessBatchesArgs* args) {
    args->clear_batch_filename();
    args->clear_batch_weight();
    if (static_cast<int>(current_) >= update_after_.size())
      return;

    unsigned first = (current_ == 0) ? 0 : update_after_.Get(current_ - 1);
    unsigned last = update_after_.Get(current_);
    for (unsigned i = first; i < last; ++i) {
      args->add_batch_filename(batch_filename_.Get(i));
      args->add_batch_weight(batch_weight_.Get(i));
    }

    current_++;
  }

  float apply_weight() { return apply_weight_.Get(current_); }
  float decay_weight() { return decay_weight_.Get(current_); }
  int update_after() { return update_after_.Get(current_); }

  float apply_weight(int index) { return apply_weight_.Get(index); }
  float decay_weight(int index) { return decay_weight_.Get(index); }
  int update_after(int index) { return update_after_.Get(index); }

  void reset() { current_ = 0; }

 private:
  const ::google::protobuf::RepeatedPtrField<std::string>& batch_filename_;
  const ::google::protobuf::RepeatedField<float>& batch_weight_;
  const ::google::protobuf::RepeatedField<int>& update_after_;
  const ::google::protobuf::RepeatedField<float>& apply_weight_;
  const ::google::protobuf::RepeatedField<float>& decay_weight_;
  unsigned current_;  // index in update_after_ array
};

// Small helper that renders an indexed model name such as "nwt_hat0", "nwt_hat1", ...
class StringIndex {
 public:
  explicit StringIndex(std::string prefix) : i_(0), prefix_(prefix) {}
  StringIndex(std::string prefix, int i) : i_(i), prefix_(prefix) {}
  int get_index() { return i_; }
  operator std::string() const { return prefix_ + boost::lexical_cast<std::string>(i_); }
  StringIndex operator+(int offset) { return StringIndex(prefix_, i_ + offset); }
  StringIndex operator-(int offset) { return StringIndex(prefix_, i_ - offset); }
  int operator++() { return ++i_; }
  int operator++(int) { return i_++; }

 private:
  int i_;
  std::string prefix_;
};

// Drives offline and online fitting by issuing ProcessBatches / Merge /
// Regularize / Normalize requests against the MasterComponent.
class ArtmExecutor {
 public:
  ArtmExecutor(const MasterModelConfig& master_model_config,
               MasterComponent* master_component)
      : master_model_config_(master_model_config),
        pwt_name_(master_model_config.pwt_name()),
        nwt_name_(master_model_config.nwt_name()),
        master_component_(master_component) {
    if (master_model_config.has_num_document_passes())
      process_batches_args_.set_num_document_passes(master_model_config.num_document_passes());
    process_batches_args_.mutable_class_id()->CopyFrom(master_model_config.class_id());
    process_batches_args_.mutable_class_weight()->CopyFrom(master_model_config.class_weight());

    for (auto& regularizer : master_model_config.regularizer_config()) {
      process_batches_args_.add_regularizer_name(regularizer.name());
      process_batches_args_.add_regularizer_tau(regularizer.tau());
    }

    for (auto& regularizer : master_model_config.regularizer_config()) {
      RegularizerSettings* settings = regularize_model_args_.add_regularizer_settings();
      settings->set_tau(regularizer.tau());
      settings->set_name(regularizer.name());
      if (regularizer.has_gamma())
        settings->set_gamma(regularizer.gamma());
    }

    if (master_model_config.has_opt_for_avx())
      process_batches_args_.set_opt_for_avx(master_model_config.opt_for_avx());
    if (master_model_config.has_reuse_theta())
      process_batches_args_.set_reuse_theta(master_model_config.reuse_theta());
  }

  void ExecuteOfflineAlgorithm(int num_collection_passes, OfflineBatchesIterator* iter) {
    const std::string rwt_name = "rwt";
    master_component_->ClearScoreCache(ClearScoreCacheArgs());
    for (int pass = 0; pass < num_collection_passes; ++pass) {
      ::artm::core::ScoreManager score_manager(master_component_->instance_.get());
      ProcessBatches(pwt_name_, nwt_name_, iter, &score_manager);
      Regularize(pwt_name_, nwt_name_, rwt_name);
      Normalize(pwt_name_, nwt_name_, rwt_name);
      StoreScores(&score_manager);
    }

    Dispose(rwt_name);
  }

  void ExecuteOnlineAlgorithm(OnlineBatchesIterator* iter) {
    const std::string rwt_name = "rwt";
    StringIndex nwt_hat_index("nwt_hat");
    master_component_->ClearScoreCache(ClearScoreCacheArgs());
    while (iter->more()) {
      float apply_weight = iter->apply_weight();
      float decay_weight = iter->decay_weight();

      ::artm::core::ScoreManager score_manager(master_component_->instance_.get());
      ProcessBatches(pwt_name_, nwt_hat_index, iter, &score_manager);
      Merge(nwt_name_, decay_weight, nwt_hat_index, apply_weight);
      Dispose(nwt_hat_index);
      Regularize(pwt_name_, nwt_name_, rwt_name);
      Normalize(pwt_name_, nwt_name_, rwt_name);
      StoreScores(&score_manager);
      nwt_hat_index++;
    }  // while (iter->more())

    iter->reset();
  }

  void ExecuteAsyncOnlineAlgorithm(OnlineBatchesIterator* iter) {
    /**************************************************
    1. Enough batches.
      i = 0: process(b1, pwt, nwt0)
      i = 1: process(b2, pwt, nwt1)
             wait(nwt0) merge(nwt, nwt0) dispose(nwt0)
             regularize(pwt, nwt, rwt) normalize(nwt, rwt, pwt2) dispose(pwt0)
      i = 2: process(b3, pwt2, nwt2)
             wait(nwt1) merge(nwt, nwt1) dispose(nwt1)
             regularize(pwt2, nwt, rwt) normalize(nwt, rwt, pwt3) dispose(pwt1)
      i = 3: process(b4, pwt3, nwt3)
             wait(nwt2) merge(nwt, nwt2) dispose(nwt2)
             regularize(pwt3, nwt, rwt) normalize(nwt, rwt, pwt4) dispose(pwt2)
      i = 4: process(b5, pwt4, nwt4)
             wait(nwt3) merge(nwt, nwt3) dispose(nwt3)
             regularize(pwt4, nwt, rwt) normalize(nwt, rwt, pwt5) dispose(pwt3)
      i = 5: wait(nwt4) merge(nwt, nwt4) dispose(nwt4)
             regularize(pwt5, nwt, rwt) normalize(nwt, rwt, pwt)
             dispose(pwt4) dispose(pwt5)

    2. Not enough batches -- same code works just fine.
      i = 0: process(b1, pwt, nwt0)
      i = 1: wait(nwt0) merge(nwt, nwt0) dispose(nwt0)
             regularize(pwt, nwt, rwt) normalize(nwt, rwt, pwt)
             dispose(pwt0) dispose(pwt1)
    **************************************************/
    const std::string rwt_name = "rwt";
    std::string pwt_active = pwt_name_;
    StringIndex pwt_index("pwt");
    StringIndex nwt_hat_index("nwt_hat");
    master_component_->ClearScoreCache(ClearScoreCacheArgs());

    int op_id = AsyncProcessBatches(pwt_active, nwt_hat_index, iter);
    while (true) {
      bool is_last = !iter->more();
      pwt_index++;
      nwt_hat_index++;

      float apply_weight = iter->apply_weight(op_id);
      float decay_weight = iter->decay_weight(op_id);

      int temp_op_id = op_id;
      if (!is_last)
        op_id = AsyncProcessBatches(pwt_active, nwt_hat_index, iter);
      Await(temp_op_id);
      Merge(nwt_name_, decay_weight, nwt_hat_index - 1, apply_weight);
      Dispose(nwt_hat_index - 1);
      Regularize(pwt_active, nwt_name_, rwt_name);
      pwt_active = is_last ? pwt_name_ : std::string(pwt_index + 1);
      Normalize(pwt_active, nwt_name_, rwt_name);
      Dispose(pwt_index - 1);
      if (is_last) Dispose(pwt_index);
      if (is_last) break;
    }

    iter->reset();
  }

 private:
  const MasterModelConfig& master_model_config_;
  const std::string& pwt_name_;
  const std::string& nwt_name_;
  MasterComponent* master_component_;
  ProcessBatchesArgs process_batches_args_;
  RegularizeModelArgs regularize_model_args_;
  std::vector<std::shared_ptr<BatchManager>> async_;

  void ProcessBatches(std::string pwt, std::string nwt, BatchesIterator* iter,
                      ScoreManager* score_manager) {
    process_batches_args_.set_pwt_source_name(pwt);
    process_batches_args_.set_nwt_target_name(nwt);
    iter->move(&process_batches_args_);

    BatchManager batch_manager;
    LOG(INFO) << DescribeMessage(process_batches_args_);
    master_component_->RequestProcessBatchesImpl(process_batches_args_, &batch_manager,
                                                 /* async =*/ false,
                                                 /* score_manager =*/ score_manager,
                                                 /* theta_matrix*/ nullptr);
    process_batches_args_.clear_batch_filename();
  }

  int AsyncProcessBatches(std::string pwt, std::string nwt, BatchesIterator* iter) {
    process_batches_args_.set_pwt_source_name(pwt);
    process_batches_args_.set_nwt_target_name(nwt);
    process_batches_args_.set_theta_matrix_type(ThetaMatrixType_None);
    iter->move(&process_batches_args_);

    int operation_id = static_cast<int>(async_.size());
    async_.push_back(std::make_shared<BatchManager>());
    LOG(INFO) << DescribeMessage(process_batches_args_);
    master_component_->RequestProcessBatchesImpl(process_batches_args_, async_.back().get(),
                                                 /* async =*/ true,
                                                 /* score_manager =*/ nullptr,
                                                 /* theta_matrix*/ nullptr);
    process_batches_args_.clear_batch_filename();
    return operation_id;
  }

  void Await(int operation_id) {
    while (!async_[operation_id]->IsEverythingProcessed()) {
      boost::this_thread::sleep(boost::posix_time::milliseconds(kIdleLoopFrequency));
    }
  }

  void Regularize(std::string pwt, std::string nwt, std::string rwt) {
    if (regularize_model_args_.regularizer_settings_size() > 0) {
      regularize_model_args_.set_nwt_source_name(nwt);
      regularize_model_args_.set_pwt_source_name(pwt);
      regularize_model_args_.set_rwt_target_name(rwt);
      LOG(INFO) << DescribeMessage(regularize_model_args_);
      master_component_->RegularizeModel(regularize_model_args_);
    }
  }

  void Normalize(std::string pwt, std::string nwt, std::string rwt) {
    NormalizeModelArgs normalize_model_args;
    if (regularize_model_args_.regularizer_settings_size() > 0)
      normalize_model_args.set_rwt_source_name(rwt);
    normalize_model_args.set_nwt_source_name(nwt);
    normalize_model_args.set_pwt_target_name(pwt);
    LOG(INFO) << DescribeMessage(normalize_model_args);
    master_component_->NormalizeModel(normalize_model_args);
  }

  void StoreScores(::artm::core::ScoreManager* score_manager) {
    auto config = master_component_->config();
    for (auto& score_config : config->score_config()) {
      ScoreData* score_data = master_component_->instance_->score_tracker()->Add();
      score_manager->RequestScore(score_config.name(), score_data);
    }
  }

  void Merge(std::string nwt, double decay_weight, std::string nwt_hat, double apply_weight) {
    MergeModelArgs merge_model_args;
    merge_model_args.add_nwt_source_name(nwt);
    merge_model_args.add_source_weight(decay_weight);
    merge_model_args.add_nwt_source_name(nwt_hat);
    merge_model_args.add_source_weight(apply_weight);
    merge_model_args.set_nwt_target_name(nwt);
    LOG(INFO) << DescribeMessage(merge_model_args);
    master_component_->MergeModel(merge_model_args);
  }

  void Dispose(std::string model_name) {
    LOG(INFO) << "DisposeModel " << model_name;
    master_component_->DisposeModel(model_name);
  }
};

void MasterComponent::FitOnline(const FitOnlineMasterModelArgs& args) {
  std::shared_ptr<MasterModelConfig> config = instance_->config();
  if (config == nullptr)
    BOOST_THROW_EXCEPTION(InvalidOperation(
      "Invalid master_id; use ArtmCreateMasterModel instead of ArtmCreateMasterComponent"));

  ArtmExecutor artm_executor(*config, this);
  OnlineBatchesIterator iter(args.batch_filename(), args.batch_weight(),
                             args.update_after(), args.apply_weight(), args.decay_weight());
  if (args.async()) {
    artm_executor.ExecuteAsyncOnlineAlgorithm(&iter);
  } else {
    artm_executor.ExecuteOnlineAlgorithm(&iter);
  }
}

void MasterComponent::FitOffline(const FitOfflineMasterModelArgs& args) {
  std::shared_ptr<MasterModelConfig> config = instance_->config();
  if (config == nullptr)
    BOOST_THROW_EXCEPTION(InvalidOperation(
      "Invalid master_id; use ArtmCreateMasterModel instead of ArtmCreateMasterComponent"));

  if (args.batch_filename_size() == 0) {
    std::vector<std::string> batch_names;
    if (!args.has_batch_folder()) {
      // Default to processing all in-memory batches
      batch_names = instance_->batches()->keys();
      if (batch_names.empty()) {
        BOOST_THROW_EXCEPTION(InvalidOperation(
          "FitOfflineMasterModelArgs.batch_filename is empty. "
          "Populate this field or provide batches via ArtmImportBatches API"));
      }
    } else {
      for (auto& batch_path : artm::core::Helpers::ListAllBatches(args.batch_folder()))
        batch_names.push_back(batch_path.string());

      if (batch_names.empty())
        BOOST_THROW_EXCEPTION(InvalidOperation("No batches found in " + args.batch_folder() + " folder"));
    }

    FitOfflineMasterModelArgs* mutable_args = const_cast<FitOfflineMasterModelArgs*>(&args);
    for (auto& batch_name : batch_names)
      mutable_args->add_batch_filename(batch_name);
    FixMessage(mutable_args);
  }

  ArtmExecutor artm_executor(*config, this);
  OfflineBatchesIterator iter(args.batch_filename(), args.batch_weight());
  artm_executor.ExecuteOfflineAlgorithm(args.num_collection_passes(), &iter);
}

}  // namespace core
}  // namespace artm
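The online and async fitting loops above repeatedly merge the per-window counts (nwt_hat) into the running nwt model using decay/apply weights and then renormalize to obtain pwt. The following is a rough, self-contained sketch of that arithmetic only; it is not part of the library code above, and the dense vector-of-vectors matrix type, function names, and weights are hypothetical stand-ins for the PhiMatrix / MergeModel / NormalizeModel machinery.

// Illustrative sketch (assumed names, not library code):
//   n_wt <- decay_weight * n_wt + apply_weight * n_wt_hat, then
//   p_wt is n_wt with each topic column rescaled to sum to one.
#include <cstddef>
#include <iostream>
#include <vector>

using Matrix = std::vector<std::vector<double>>;  // [token][topic]

// n_wt <- decay_weight * n_wt + apply_weight * n_wt_hat
void MergeCounts(Matrix* n_wt, const Matrix& n_wt_hat,
                 double decay_weight, double apply_weight) {
  for (std::size_t w = 0; w < n_wt->size(); ++w)
    for (std::size_t t = 0; t < (*n_wt)[w].size(); ++t)
      (*n_wt)[w][t] = decay_weight * (*n_wt)[w][t] + apply_weight * n_wt_hat[w][t];
}

// p_wt: each topic column of n_wt rescaled to sum to one.
Matrix NormalizeColumns(const Matrix& n_wt) {
  Matrix p_wt = n_wt;
  if (p_wt.empty()) return p_wt;
  for (std::size_t t = 0; t < p_wt[0].size(); ++t) {
    double sum = 0.0;
    for (const auto& row : p_wt) sum += row[t];
    if (sum > 0.0)
      for (auto& row : p_wt) row[t] /= sum;
  }
  return p_wt;
}

int main() {
  Matrix n_wt = {{4.0, 1.0}, {2.0, 3.0}};      // two tokens, two topics
  Matrix n_wt_hat = {{1.0, 0.0}, {0.0, 1.0}};  // counts from the latest window
  MergeCounts(&n_wt, n_wt_hat, /*decay_weight=*/0.9, /*apply_weight=*/1.0);
  Matrix p_wt = NormalizeColumns(n_wt);
  for (const auto& row : p_wt) std::cout << row[0] << " " << row[1] << "\n";
  return 0;
}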
def suspensions(self):
    # Count entries in the 'susp' list that are exactly True.
    return sum(1 for susp in self.data['susp'] if susp is True)
<filename>enderio-decoration/src/main/java/com/enderio/decoration/common/block/painted/PaintedFenceGateBlock.java
package com.enderio.decoration.common.block.painted;

import com.enderio.decoration.common.init.DecorBlockEntities;
import net.minecraft.core.BlockPos;
import net.minecraft.world.level.block.EntityBlock;
import net.minecraft.world.level.block.FenceGateBlock;
import net.minecraft.world.level.block.entity.BlockEntity;
import net.minecraft.world.level.block.state.BlockState;

import javax.annotation.Nonnull;
import javax.annotation.Nullable;

public class PaintedFenceGateBlock extends FenceGateBlock implements EntityBlock {

    public PaintedFenceGateBlock(Properties properties) {
        super(properties);
    }

    @Nullable
    @Override
    public BlockEntity newBlockEntity(@Nonnull BlockPos pos, @Nonnull BlockState state) {
        return DecorBlockEntities.SINGLE_PAINTED.create(pos, state);
    }
}
// GetRIB writes the contents of the RIBs specified in the filter to msgCh. filter is a map,
// keyed by the gRIBI AFTType enumeration; if the value is set to true, the AFT is written
// to msgCh, otherwise it is skipped. The contents of the RIB are returned as gRIBI
// GetResponse messages which are written to the supplied msgCh. stopCh is a channel that
// indicates that the GetRIB method should stop its work and return immediately.
//
// An error is returned if the RIB cannot be returned.
func (r *RIBHolder) GetRIB(filter map[spb.AFTType]bool, msgCh chan *spb.GetResponse, stopCh chan struct{}) error {
    // There are two options for locking here:
    //  * we take the NI-level lock: in the incoming master case, the client can ensure that
    //    they wait for the Get to complete before writing ==> there is no convergence impact.
    //    In the multi-master (or even consistency-check) case, we impact convergence.
    //  * we take a more granular lock, in this case we do not impact convergence for any
    //    other entity than that individual entry.
    // The latter is a better choice for a high-performance implementation, but it's not clear
    // that we need to worry about this for this implementation *yet*. In the future we should
    // consider a fine-grained per-entry lock.
    r.mu.RLock()
    defer r.mu.RUnlock()

    // Rewrite ALL to the values that we support.
    if filter[spb.AFTType_ALL] {
        filter = map[spb.AFTType]bool{
            spb.AFTType_IPV4:          true,
            spb.AFTType_NEXTHOP:       true,
            spb.AFTType_NEXTHOP_GROUP: true,
        }
    }

    if filter[spb.AFTType_IPV4] {
        for pfx, e := range r.r.Afts.Ipv4Entry {
            select {
            case <-stopCh:
                return nil
            default:
                p, err := concreteIPv4Proto(e)
                if err != nil {
                    return status.Errorf(codes.Internal, "cannot marshal IPv4Entry for %s into GetResponse, %v", pfx, err)
                }
                msgCh <- &spb.GetResponse{
                    Entry: []*spb.AFTEntry{{
                        NetworkInstance: r.name,
                        Entry: &spb.AFTEntry_Ipv4{
                            Ipv4: p,
                        },
                    }},
                }
            }
        }
    }

    if filter[spb.AFTType_NEXTHOP_GROUP] {
        for index, e := range r.r.Afts.NextHopGroup {
            select {
            case <-stopCh:
                return nil
            default:
                p, err := concreteNextHopGroupProto(e)
                if err != nil {
                    return status.Errorf(codes.Internal, "cannot marshal NextHopGroupEntry for index %d into GetResponse, %v", index, err)
                }
                msgCh <- &spb.GetResponse{
                    Entry: []*spb.AFTEntry{{
                        NetworkInstance: r.name,
                        Entry: &spb.AFTEntry_NextHopGroup{
                            NextHopGroup: p,
                        },
                    }},
                }
            }
        }
    }

    if filter[spb.AFTType_NEXTHOP] {
        for id, e := range r.r.Afts.NextHop {
            select {
            case <-stopCh:
                return nil
            default:
                p, err := concreteNextHopProto(e)
                if err != nil {
                    return status.Errorf(codes.Internal, "cannot marshal NextHopEntry for ID %d into GetResponse, %v", id, err)
                }
                msgCh <- &spb.GetResponse{
                    Entry: []*spb.AFTEntry{{
                        NetworkInstance: r.name,
                        Entry: &spb.AFTEntry_NextHop{
                            NextHop: p,
                        },
                    }},
                }
            }
        }
    }

    return nil
}