// src/formatters/index.ts
export { exportAsCSV } from './csv'
export { exportAsJSON } from './json'
/* * Copyright © 2021-2022 <NAME>, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package io.cdap.cdap.security.authorization.ldap.role.permission; import com.fasterxml.jackson.annotation.JsonProperty; /** * Enum with Permissions for roles */ public enum RolePermission { // Namespace @JsonProperty("Create Namespace") CREATE_NAMESPACE, @JsonProperty("View Namespace") VIEW_NAMESPACE, @JsonProperty("Modify Namespace") MODIFY_NAMESPACE, @JsonProperty("Delete Namespace") DELETE_NAMESPACE, // Pipeline @JsonProperty("Create Pipeline") CREATE_PIPELINE, @JsonProperty("Deploy Pipeline") DEPLOY_PIPELINE, @JsonProperty("Execute Pipeline") EXECUTE_PIPELINE, @JsonProperty("View Pipeline") VIEW_PIPELINE, @JsonProperty("Preview Pipeline") PREVIEW_PIPELINE, @JsonProperty("Delete Pipeline") DELETE_PIPELINE, @JsonProperty("Modify Pipeline") MODIFY_PIPELINE, // Schedule @JsonProperty("Create Schedule") CREATE_SCHEDULE, @JsonProperty("Change Schedule") CHANGE_SCHEDULE, // Schedule @JsonProperty("Create Triggers") CREATE_TRIGGERS, @JsonProperty("Set Triggers") SET_TRIGGERS, // Schedule @JsonProperty("Create Tag") CREATE_TAG, @JsonProperty("View Tags") VIEW_TAGS, @JsonProperty("Delete Tag") DELETE_TAG, @JsonProperty("View Logs") VIEW_LOGS, @JsonProperty("View Metadata") VIEW_METADATA, // Artifacts @JsonProperty("Deploy Artifacts") DEPLOY_ARTIFACTS, @JsonProperty("Deploy Drivers") DEPLOY_DRIVERS, // Studio @JsonProperty("Use Studio") USE_STUDIO, // Wrangle @JsonProperty("Use Wrangler") USE_WRANGLER, // Compute Profile @JsonProperty("Create Compute Profile") CREATE_COMPUTE_PROFILE, @JsonProperty("View Compute Profile") VIEW_COMPUTE_PROFILE, @JsonProperty("Modify Compute Profile") MODIFY_COMPUTE_PROFILE, @JsonProperty("Delete Compute Profile") DELETE_COMPUTE_PROFILE, // Secret Key @JsonProperty("Manage Secure Key") MANAGE_SECURE_KEY, @JsonProperty("View Secure Key") VIEW_SECURE_KEY, // System Preferences @JsonProperty("Manage System Preferences") MANAGE_SYSTEM_PREFERENCES, @JsonProperty("View System Services") VIEW_SYSTEM_SERVICES, }
import { Component, OnInit } from '@angular/core'; import { ActivatedRoute, NavigationEnd, Router } from '@angular/router'; import {MatDialog} from '@angular/material/dialog'; import { NotifierService } from 'angular-notifier'; import { filter } from 'rxjs/operators'; @Component({ selector: 'app-root', templateUrl: './app.component.html', styleUrls: ['./app.component.css'] }) export class AppComponent implements OnInit { busy: boolean; authenticated: boolean; user: string; administrator: boolean; sideopen: boolean; i: number; private notifier: NotifierService; constructor(private router: Router, public dialog: MatDialog, notifier: NotifierService, private route: ActivatedRoute) { this.notifier = notifier; } public showNotification(type: string, message: string): void{ this.notifier.notify(type, message); } ngOnInit(): void { // Called after the constructor, initializing input properties, and the first call to ngOnChanges. // Add 'implements OnInit' to the class. if (localStorage.getItem('user') && localStorage.getItem('jwt')){ this.authenticated = true; this.user = localStorage.getItem('user'); if (localStorage.getItem('role') === 'Admin'){ this.administrator = true; } } } onLogin(): void{ if (localStorage.getItem('jwt') && localStorage.getItem('user')){ this.authenticated = true; this.user = localStorage.getItem('user'); } this.busy = false; if (localStorage.getItem('role') === 'Admin'){ this.administrator = true; this.showNotification('info', 'Autentificat ca administrator'); } } routeOn(ev: any): void{ // console.log(ev); this.busy = true; } routeOff(ev: any): void{ this.busy = false; this.onLogin(); } logout(): void{ localStorage.clear(); this.user = null; this.authenticated = false; this.busy = false; this.administrator = false; this.router.navigate(['/']); } }
package convert

import (
	"errors"
	"reflect"
)

type (
	// ActualValue resolves the actual value for a conversion target, returning an error on failure.
	ActualValue func(*Info) error
	// ActualValueMs maps a reflect.Kind to its ActualValue handler.
	ActualValueMs map[reflect.Kind]ActualValue
)

type ActualValuer interface {
	AC(reflect.Kind) ActualValue
}

// AC returns the handler registered for kind; if none exists, it falls back
// to the handler stored under reflect.Invalid.
func (ms ActualValueMs) AC(kind reflect.Kind) ActualValue {
	if ms[kind] != nil {
		return ms[kind]
	}
	return ms[reflect.Invalid]
}

// Clone returns a shallow copy of the handler map.
func (ms ActualValueMs) Clone() ActualValueMs {
	c := ActualValueMs{}
	for kind, actualValuer := range ms {
		c[kind] = actualValuer
	}
	return c
}

var (
	acv ActualValueMs = map[reflect.Kind]ActualValue{}

	invalidValue  = errors.New("invalid target value")    // originally: 目标数据无效
	invalidMethod = errors.New("invalid target function") // originally: 目标函数无效
)

// AcDefaultActualValuer returns the package-level default ActualValuer.
func AcDefaultActualValuer() ActualValuer {
	return acv
}
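The lookup in AC treats the entry registered under reflect.Invalid as a default slot: any kind without its own handler falls back to it. Below is a minimal, self-contained sketch of that fallback pattern; the Info struct and the two handlers are hypothetical stand-ins for illustration only, not part of the package above.

package main

import (
	"fmt"
	"reflect"
)

// Info is a hypothetical stand-in for the package's *Info argument.
type Info struct{ Kind reflect.Kind }

type (
	ActualValue   func(*Info) error
	ActualValueMs map[reflect.Kind]ActualValue
)

// AC returns the handler for kind, falling back to the reflect.Invalid entry.
func (ms ActualValueMs) AC(kind reflect.Kind) ActualValue {
	if ms[kind] != nil {
		return ms[kind]
	}
	return ms[reflect.Invalid]
}

func main() {
	ms := ActualValueMs{
		reflect.String:  func(*Info) error { fmt.Println("string handler"); return nil },
		reflect.Invalid: func(*Info) error { fmt.Println("default handler"); return nil },
	}

	_ = ms.AC(reflect.String)(&Info{Kind: reflect.String}) // prints "string handler"
	_ = ms.AC(reflect.Int)(&Info{Kind: reflect.Int})       // no Int entry, prints "default handler"
}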
The only color in the landscape for miles, Salvation Mountain looms like a mirage on the horizon. The three-story, three-decade work of artist Leonard Knight, who died yesterday at the age of 82, is a piece of brightly painted, hand-sculpted California desert, like an impossibly scaled cathedral made from Play-Doh. Knight built his 50-foot-tall, 150-foot-wide mountain in the desert about an hour east of Palm Springs, near the edge of the Salton Sea, a vast inland ocean created when an engineering failure allowed the Colorado River to flow into a nearby valley. In this sparsely populated basin, Knight began creating a hillside with scriptures and religious affirmations back in 1984.

After Knight began work on the project, Salvation Mountain quickly became a must-see for tourists making a pilgrimage to the eerie beauty of the Salton Sea. An estimated 40 to 100 people stop by per day as part of an itinerary that includes a dying body of water ringed with fish skeletons, vacant resorts dotting the once-posh shoreline, and Slab City, an informal community of free-spirited squatters and seasonal snowbirds. Knight was a squatter himself: the land Salvation Mountain sits on is owned by the state. Salvation Mountain is such a part of Southern California culture that a version made an appearance in Grand Theft Auto V, and it had a cameo in the film Into the Wild, where Knight played himself.

The technicolor, DIY aesthetic often draws comparisons to Watts Towers in South Los Angeles, another "outsider art" project that was largely the work of a single man. Knight's construction methods included stacking hay bales like igloos, binding together tree branches with nylon cording, and mixing his own adobe clay, which he'd pack into shapes and slather over with paint like frosting on a giant birthday cake.

According to the Los Angeles Times, Knight grew up in Vermont, where he was a "welder, handyman, guitar teacher, painter and body-and-fender man." After a spiritual epiphany, he decided to devote his life to spreading messages of devotion and prayer. He came to Slab City with a hot-air balloon emblazoned with the words "God Is Love" that he planned to fly across the country. After the heat and sun destroyed the balloon's nylon, he turned his attention to an outcropping of rocks where he planned to paint his message. His first attempt at building the monument collapsed because he had used too much sand in his adobe.

Knight didn't charge admission to visit (although there is a donation box); instead, fans often left cans of paint to contribute to the project, along with other offerings like brushes and tools, and found objects like trophies, which he'd add to his collections. Salvation Mountain was named a National Folk Art Site by the National Folk Art Society of America in 2010, and several documentaries and television shows have featured Knight's work. Senator Barbara Boxer spoke at length about Knight in a 2002 Congressional address. There's a website created for the project, and a group of volunteers on Facebook who appear to be maintaining the artwork (which I'd think would need to be repainted regularly to save it from the bleaching sun).

Until a few years ago, Knight would receive visitors in person, invariably wearing a paint-spattered shirt and a wide smile as he gave tours, played guitar, or worked diligently in the searing desert sun.
He lived in his truck on the property until he was moved to a convalescent home in San Diego, where he died peacefully, and hopefully well-assured that his message had indeed been received. [Los Angeles Times]

Photos by: Softly Lit Studios, MythicSeabass, sandwichgirl, bdearth, shindoverse, Jennifer Joy Jameson, hmcharg, kellyludwig, Joe Bielawa

Gizmodo's Landmark Status examines the strange and surprising structures that our cities have chosen to protect or save. Discover something interesting that's been landmarked near you? Drop us a tip in the comments.
#include "../../BetterEdit.hpp" #include "../CustomKeybinds/KeybindManager.hpp" #include <InputNode.hpp> #include "KeybindingsLayer.hpp" #include "KeybindListView.hpp" #include "Scrollbar.hpp" #include "KeybindSettingsLayer.hpp" #include "KeymapLayer.hpp" #include "../../utils/BGLabel.hpp" static constexpr const int KBLDELEGATE_TAG = 0x44432; static constexpr const int KBLLIST_TAG = 0x44431; static constexpr const int KBLSCROLLBAR_TAG = 0x44430; static constexpr const int KBLINPUT_TAG = 0x44429; static constexpr const int KBLCANCELSELECT_TAG = 0x44428; static constexpr const int KBLSELECTLABEL_TAG = 0x44427; std::string searchQuery = ""; bool g_bSelectMode = false; Keybind g_obSelectKeybind; std::map<std::string, bool> KeybindingsLayerDelegate::m_mFoldedCategories = std::map<std::string, bool>(); void KeybindingsLayerDelegate::textChanged(CCTextInputNode* input) { if (input->getString() && strlen(input->getString())) searchQuery = input->getString(); else searchQuery = ""; this->m_pLayer->reloadList(); } void KeybindingsLayerDelegate::FLAlert_Clicked(FLAlertLayer*, bool btn2) { if (btn2) { KeybindManager::get()->resetAllToDefaults(); this->m_pLayer->reloadList(); } } KeybindingsLayerDelegate* KeybindingsLayerDelegate::create(KeybindingsLayer_CB* layer) { auto ret = new KeybindingsLayerDelegate; if (ret && ret->init()) { ret->setTag(KBLDELEGATE_TAG); ret->m_pLayer = layer; ret->autorelease(); return ret; } CC_SAFE_DELETE(ret); return nullptr; } bool matchSearchQuery(KeybindType type, KeybindCallback* bind) { if (stringToLower(bind->name).find(stringToLower(searchQuery)) != std::string::npos) return true; std::string keyStr = ""; for (auto const& key : KeybindManager::get()->getKeybindsForCallback(type, bind)) keyStr += key.toString() + ", "; return stringToLower(keyStr).find(stringToLower(searchQuery)) != std::string::npos; } void KeybindingsLayer_CB::reloadList() { auto oldList = as<KeybindListView*>(this->m_pLayer->getChildByTag(KBLLIST_TAG)); auto y = 0.0f; if (oldList) { y = oldList->m_pTableView->getMinY() - oldList->m_pTableView->m_pContentLayer->getPositionY(); oldList->removeFromParent(); } auto winSize = CCDirector::sharedDirector()->getWinSize(); auto delegate = as<KeybindingsLayerDelegate*>(this->getChildByTag(KBLDELEGATE_TAG)); auto arr = CCArray::create(); arr->addObject(new KeybindItem("Gameplay", delegate)); if (!delegate->m_mFoldedCategories["Gameplay"]) for (auto bind : KeybindManager::get()->getCallbacks(kKBPlayLayer)) { if ( !searchQuery.size() || matchSearchQuery(kKBPlayLayer, bind) ) arr->addObject(new KeybindItem( bind, kKBPlayLayer, delegate, g_bSelectMode, g_obSelectKeybind )); } arr->addObject(new KeybindItem("Editor", delegate)); if (!delegate->m_mFoldedCategories["Editor"]) for (auto const& [cat, binds] : KeybindManager::get()->getCallbacksSorted(kKBEditor)) { arr->addObject( new KeybindItem( KeybindManager::get()->getCategoryName(cat).c_str(), delegate ) ); if (!delegate->m_mFoldedCategories[ KeybindManager::get()->getCategoryName(cat) ]) for (auto const& bind : binds) { if (!searchQuery.size() || matchSearchQuery(kKBEditor, bind)) arr->addObject(new KeybindItem( bind, kKBEditor, delegate, g_bSelectMode, g_obSelectKeybind )); } } auto list = KeybindListView::create(arr, 340.0f, 180.0f); list->setPosition(winSize / 2 - CCPoint { 170.0f, 120.0f }); list->setTag(KBLLIST_TAG); y = list->m_pTableView->getMinY() - y; if (y > list->m_pTableView->getMaxY()) y = list->m_pTableView->getMaxY(); list->m_pTableView->m_pContentLayer->setPositionY(y); 
this->m_pLayer->addChild(list); CATCH_NULL(as<Scrollbar*>(this->m_pLayer->getChildByTag(KBLSCROLLBAR_TAG))) ->setList(list); } void KeybindingsLayer_CB::onResetAll(CCObject*) { FLAlertLayer::create( as<KeybindingsLayerDelegate*>(this->getChildByTag(KBLDELEGATE_TAG)), "Reset Keybinds", "Cancel", "Reset", "Are you sure you want to <cr>reset</c> ALL <cy>keybinds</c> " "to <cl>default</c>?" )->show(); } void KeybindingsLayer_CB::onGlobalSettings(CCObject*) { this->detachInput(); KeybindSettingsLayer::create()->show(); } void KeybindingsLayer_CB::onKeymap(CCObject*) { this->detachInput(); KeymapLayer::create(this)->show(); } void KeybindingsLayer_CB::detachInput() { auto input = as<InputNode*>(this->m_pLayer->getChildByTag(KBLINPUT_TAG)); if (input) { input->getInputNode()->m_pTextField->detachWithIME(); input->getInputNode()->detachWithIME(); } } void KeybindingsLayer_CB::onFinishSelect(CCObject*) { g_bSelectMode = false; auto btn = this->m_pButtonMenu->getChildByTag(KBLCANCELSELECT_TAG); if (btn) btn->setVisible(false); auto lbl = as<BGLabel*>(this->m_pLayer->getChildByTag(KBLSELECTLABEL_TAG)); if (lbl) lbl->setVisible(false); this->reloadList(); this->detachInput(); auto l = KeymapLayer::create(this); l->loadKeybind(g_obSelectKeybind); l->show(); } void KeybindingsLayer_CB::setSelectMode(bool b, Keybind const& kb) { g_bSelectMode = b; g_obSelectKeybind = kb; auto btn = this->m_pButtonMenu->getChildByTag(KBLCANCELSELECT_TAG); if (btn) btn->setVisible(b); auto lbl = as<BGLabel*>(this->m_pLayer->getChildByTag(KBLSELECTLABEL_TAG)); if (lbl) { lbl->setVisible(b); lbl->setString(g_obSelectKeybind.toString().c_str()); } this->reloadList(); } GDMAKE_HOOK(0x153670, "_ZN16KeybindingsLayer10addKeyPairEPKcS1_") CCLabelBMFont* __fastcall KeybindingsLayer_addKeyPair( KeybindingsLayer* self, char* key, char* combo ) { return nullptr; } GDMAKE_HOOK(0x153cc0, "_ZN16KeybindingsLayer10onNextPageEPN7cocos2d8CCObjectE") void __fastcall KeybindingsLayer_onNextPage( KeybindingsLayer* self, edx_t edx, CCObject* pSender ) { self->goToPage(self->m_nCurrentPage + 1); } GDMAKE_HOOK(0x153cd0, "_ZN16KeybindingsLayer10onPrevPageEPN7cocos2d8CCObjectE") void __fastcall KeybindingsLayer_onPrevPage( KeybindingsLayer* self, edx_t edx, CCObject* pSender ) { self->goToPage(self->m_nCurrentPage - 1); } GDMAKE_HOOK(0x153ce0, "_ZN16KeybindingsLayer8goToPageEi") void __fastcall KeybindingsLayer_goToPage( KeybindingsLayer* self, edx_t edx, int page ) { GDMAKE_ORIG_V(self, edx, page); // if (page < 0) // page = 0; // if (page > self->m_nPageCount) // page = self->m_nPageCount; // self->m_nCurrentPage = page; // CCARRAY_FOREACH_B_TYPE(self->m_pPages->allKeys(), key, CCDictElement) { // CCARRAY_FOREACH_B_TYPE(as<CCArray*>(key->getObject()), node, CCNode) // node->setVisible(key->getIntKey() == page); // } } GDMAKE_HOOK(0x152f40, "_ZN16KeybindingsLayer4initEv") bool __fastcall KeybindingsLayer_init(KeybindingsLayer* self) { if (!self->initWithColor({ 0, 0, 0, 105 })) return false; auto winSize = CCDirector::sharedDirector()->getWinSize(); self->m_bNoElasticity = true; self->m_pLayer = CCLayer::create(); self->addChild(self->m_pLayer); auto delegate = KeybindingsLayerDelegate::create(as<KeybindingsLayer_CB*>(self)); self->addChild(delegate); auto bg = CCScale9Sprite::create("GJ_square01.png", { 0.0f, 0.0f, 80.0f, 80.0f }); bg->setContentSize({ 420.0f, 280.0f }); bg->setPosition(winSize / 2); self->m_pLayer->addChild(bg); auto title = CCLabelBMFont::create("Key Bindings", "bigFont.fnt"); title->setPosition(winSize.width / 2, winSize.height 
/ 2 + 140.0f - 24.0f); title->setScale(.8f); self->m_pLayer->addChild(title); auto input = InputNode::create(425.0f, "Search Keybinds"); input->setPosition(title->getPosition() + CCPoint { 0.0f, -35.0f }); input->getInputNode()->setPositionX(input->getInputNode()->getPositionX() - 200.0f); input->setTag(KBLINPUT_TAG); input->setString(searchQuery.c_str()); CCARRAY_FOREACH_B_TYPE( input->getInputNode()->getChildren(), c, CCNode ) c->setAnchorPoint({ .0f, .5f }); input->setScale(.8f); input->getInputNode()->setDelegate(delegate); self->m_pLayer->addChild(input); self->m_pPages = CCDictionary::create(); self->m_pPages->retain(); self->m_pUnused = CCDictionary::create(); self->m_pUnused->retain(); self->m_pButtonMenu = cocos2d::CCMenu::create(); self->m_pLayer->addChild(self->m_pButtonMenu); auto bar = Scrollbar::create(nullptr); bar->setPosition(winSize.width / 2 + 190.0f, winSize.height / 2 - 30.0f); bar->setTag(KBLSCROLLBAR_TAG); self->m_pLayer->addChild(bar, 800); as<KeybindingsLayer_CB*>(self)->reloadList(); self->registerWithTouchDispatcher(); CCDirector::sharedDirector()->getTouchDispatcher()->incrementForcePrio(2); { // auto topGradient = CCSprite::createWithSpriteFrameName("d_gradient_c_01_001.png"); // topGradient->setPosition({ // winSize.width / 2, // winSize.height / 2 + 45.0f // }); // topGradient->setFlipY(true); // topGradient->setScaleX(11.5f); // topGradient->setColor(cc3x(0x953)); // self->m_pLayer->addChild(topGradient); // auto bottomGradient = CCSprite::createWithSpriteFrameName("d_gradient_c_01_001.png"); // bottomGradient->setPosition({ // winSize.width / 2, // winSize.height / 2 - 105.0f // }); // bottomGradient->setScaleX(11.5f); // bottomGradient->setColor(cc3x(0x953)); // self->m_pLayer->addChild(bottomGradient); auto topItem = CCSprite::createWithSpriteFrameName("GJ_commentTop_001.png"); topItem->setPosition({ winSize.width / 2, winSize.height / 2 + 55.0f }); topItem->setZOrder(500); self->m_pLayer->addChild(topItem); auto bottomItem = CCSprite::createWithSpriteFrameName("GJ_commentTop_001.png"); bottomItem->setPosition({ winSize.width / 2, winSize.height / 2 - 115.0f }); bottomItem->setZOrder(500); bottomItem->setFlipY(true); self->m_pLayer->addChild(bottomItem); auto sideItem = CCSprite::createWithSpriteFrameName("GJ_commentSide_001.png"); sideItem->setPosition({ winSize.width / 2 - 173.5f, winSize.height / 2 - 29.0f }); sideItem->setZOrder(500); sideItem->setScaleY(5.0f); self->m_pLayer->addChild(sideItem); auto sideItemRight = CCSprite::createWithSpriteFrameName("GJ_commentSide_001.png"); sideItemRight->setPosition({ winSize.width / 2 + 173.5f, winSize.height / 2 - 29.0f }); sideItemRight->setZOrder(500); sideItemRight->setScaleY(5.0f); sideItemRight->setFlipX(true); self->m_pLayer->addChild(sideItemRight); } auto selectLabel = BGLabel::create("", "goldFont.fnt"); selectLabel->setVisible(false); selectLabel->setTag(KBLSELECTLABEL_TAG); selectLabel->setPosition(winSize.width / 2, winSize.height / 2 + 120.0f); self->m_pLayer->addChild(selectLabel); auto resetBtn = CCMenuItemSpriteExtra::create( CCNodeConstructor<ButtonSprite*>() .fromButtonSprite( "Reset", "GJ_button_05.png", "bigFont.fnt" ) .scale(.6f) .done(), self, menu_selector(KeybindingsLayer_CB::onResetAll) ); resetBtn->setPosition(210.0f - 40.0f, 140.0f - 25.0f); self->m_pButtonMenu->addChild(resetBtn); auto mapBtn = CCMenuItemSpriteExtra::create( CCNodeConstructor<ButtonSprite*>() .fromButtonSprite( "Map", "GJ_button_05.png", "bigFont.fnt" ) .scale(.6f) .done(), self, 
menu_selector(KeybindingsLayer_CB::onKeymap) ); mapBtn->setPosition(210.0f, - 140.0f); self->m_pButtonMenu->addChild(mapBtn); auto selectBtn = CCMenuItemSpriteExtra::create( CCNodeConstructor<ButtonSprite*>() .fromButtonSprite( "Cancel", "GJ_button_06.png", "bigFont.fnt" ) .scale(.7f) .done(), self, menu_selector(KeybindingsLayer_CB::onFinishSelect) ); selectBtn->setPosition(0.0f, - 140.0f); selectBtn->setVisible(false); selectBtn->setTag(KBLCANCELSELECT_TAG); self->m_pButtonMenu->addChild(selectBtn); auto settingsBtn = CCMenuItemSpriteExtra::create( CCNodeConstructor() .fromFrameName("GJ_optionsBtn_001.png") .scale(.6f) .done(), self, menu_selector(KeybindingsLayer_CB::onGlobalSettings) ); settingsBtn->setPosition(- 210.0f + 5.0f, - 140.0f + 5.0f); self->m_pButtonMenu->addChild(settingsBtn); auto closeBtn = CCMenuItemSpriteExtra::create( CCSprite::createWithSpriteFrameName("GJ_closeBtn_001.png"), self, (SEL_MenuHandler)&KeybindingsLayer::onClose ); closeBtn->setPosition(-210.0f + 5.0f, 140.0f - 5.0f); closeBtn->setSizeMult(1.5f); self->m_pButtonMenu->addChild(closeBtn); MAKE_INFOBUTTON( "Keybinds", "You can <cy>customize</c> all keybinds to whatever " "shortcut you want.\n\n " "A <cg>modifier</c> is a keybind that modifies the " "behaviour of other inputs like scrolling. A modifier " "can be a modifier key alone, while normal keybinds " "can only be a <cl>key</c> or a <cl>key</c> + <cg>modifier</c>.\n\n " "<cp>Repeating</c> means that when you hold down a shortcut, " "it'll start repeating at the set <cr>interval</c> after " "the set <cb>start</c> time.", .65f, 210.0f - 90.0f, 140.0f - 25.0f, self->m_pButtonMenu ); self->setKeypadEnabled(true); self->setTouchEnabled(true); return true; }
Learning complex questions from the input

A central question in language acquisition is how children master sentence types that they have seldom, if ever, heard. Here we report the findings of a preregistered, randomized, single-blind intervention study designed to test the prediction that, for one such sentence type, complex questions (e.g., Is the crocodile who's hot eating?), children could combine schemas learned, on the basis of the input, for complex noun phrases (the [THING] who's [PROPERTY]) and simple questions (Is [THING] [ACTION]ing?) to yield a complex-question schema (Is [THING] who's [PROPERTY] [ACTION]ing?). To investigate this possibility, 122 children aged 4;2 to 6;8 years (M = 5;6, SD = 7.7 months) were trained on simple questions (e.g., Is the bird cleaning?) and either (Experimental group, N = 61) complex noun phrases (e.g., the bird who's sad) or (Control group, N = 61) matched simple noun phrases (e.g., the sad bird). On most measures, the two groups did not differ in their ability to produce novel complex questions at test. However, the Experimental group did show some evidence of generalizing a particular complex-NP schema (the [THING] who's [PROPERTY], as opposed to the [THING] that's [PROPERTY]) from training to test, a finding that is potentially compatible with constructivist, generativist and task-based explanations.
/**
 * Inner class that extends the AbstractTreeTableModel
 */
class NetworkTreeTableModel extends AbstractTreeTableModel {

	String[] columns = { "Network", "Nodes", "Edges" };
	Class[] columns_class = { TreeTableModel.class, String.class, String.class };

	public NetworkTreeTableModel(Object root) {
		super(root);
	}

	public Object getChild(Object parent, int index) {
		Enumeration tree_node_enum = ((DefaultMutableTreeNode) getRoot())
				.breadthFirstEnumeration();

		while (tree_node_enum.hasMoreElements()) {
			DefaultMutableTreeNode node = (DefaultMutableTreeNode) tree_node_enum
					.nextElement();

			if (node == parent) {
				return node.getChildAt(index);
			}
		}

		return null;
	}

	public int getChildCount(Object parent) {
		Enumeration tree_node_enum = ((DefaultMutableTreeNode) getRoot())
				.breadthFirstEnumeration();

		while (tree_node_enum.hasMoreElements()) {
			DefaultMutableTreeNode node = (DefaultMutableTreeNode) tree_node_enum
					.nextElement();

			if (node == parent) {
				return node.getChildCount();
			}
		}

		return 0;
	}

	public int getColumnCount() {
		return columns.length;
	}

	public String getColumnName(int column) {
		return columns[column];
	}

	public Class getColumnClass(int column) {
		return columns_class[column];
	}

	public Object getValueAt(Object node, int column) {
		if (column == 0)
			return ((DefaultMutableTreeNode) node).getUserObject();

		GraphView net = ((ViewTreeNode) node).getView();
		String s = "";

		if (column == 1) {
			s += "(" + net.getNodeViewCount() + ")";
			s += "(" + net.getSelectedNodes().size() + ")";
		} else if (column == 2) {
			s += "(" + net.getEdgeViewCount() + ")";
			s += "(" + net.getSelectedEdges().size() + ")";
		}

		return s;
	}
}
GOLD Coast academy graduate Brad Scheer has penned a one-year contract extension, tying him to the Suns until at least the end of 2019. The 18-year-old, who was recruited with pick No.67 in last year's NAB AFL Draft, spent most of his junior career playing in the midfield but is eyeing a regular spot in the forward line this season. Scheer said he was determined to make a name for himself at the Suns. "I'm thrilled to have re-signed with the Suns, I have been part of the club for the past five years as a member of the talent academy," Scheer told goldcoastfc.com.au on Wednesday night. "Personally, it was a straightforward decision. I didn't need to think twice, and couldn't wait to sign to stay on the Gold Coast." Scheer did not take part in the JLT Community Series after contracting a bout of pneumonia ahead of the first pre-season hit-out against the Brisbane Lions last month. The youngster will make his first appearance with the Suns in this Saturday's NEAFL practice match against Aspley. "I just want to get my fitness and things back," Scheer said. "I don't think right now I'm fit enough to play at that senior level. (I've) just been struck down with illness so I just want to work my way in the NEAFL team and hopefully get a gig sometime during the year." Scheer showed exciting signs during his time in the Suns' academy and averaged 25 possessions in last year's NAB AFL Under-18 Championships.
// Print some stuff at the end.
public void complete() {
	if (raw || env.getBoolean("quiet"))
		return;

	if (accepted == 0) {
		throw new RuntimeException(
			"0 of " + submitted + " jobs successfully submitted");
	} else {
		System.out.println("Success: " +
			accepted + " of " + submitted + " jobs successfully submitted");
	}
}
#include <stdio.h>

/*
 * For each test case: three piles A, B, C and N extra items.
 * All piles can be made equal iff the total S = A + B + C + N is divisible
 * by 3 and no pile already exceeds the target size S / 3.
 */
int main(int argc, char* argv[]) {
    int T;
    scanf("%d", &T);
    while (T--) {
        int A, B, C, N;
        scanf("%d%d%d%d", &A, &B, &C, &N);
        int S = A + B + C + N;
        puts(S % 3 != 0 || A > S / 3 || B > S / 3 || C > S / 3 ? "NO" : "YES");
    }
    return 0;
}
//@HEADER // ************************************************************************ // // Kokkos v. 4.0 // Copyright (2022) National Technology & Engineering // Solutions of Sandia, LLC (NTESS). // // Under the terms of Contract DE-NA0003525 with NTESS, // the U.S. Government retains certain rights in this software. // // Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions. // See https://kokkos.org/LICENSE for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //@HEADER #include "KokkosSparse_coo2crs.hpp" #include "KokkosSparse_crs2coo.hpp" #include "KokkosKernels_TestUtils.hpp" namespace Test { template <class CrsType, class RowType, class ColType, class DataType> CrsType vanilla_coo2crs(size_t m, size_t n, RowType row, ColType col, DataType data) { using RowIndexType = typename RowType::value_type; using ColIndexType = typename ColType::value_type; using ValueType = typename DataType::value_type; std::unordered_map<RowIndexType, std::unordered_map<ColIndexType, ValueType> *> umap; int nnz = 0; for (uint64_t i = 0; i < data.extent(0); i++) { auto r = row(i); auto c = col(i); auto d = data(i); if (r >= 0 && c >= 0) { if (umap.find(r) != umap.end()) { // exists auto my_row = umap.at(r); if (my_row->find(c) != my_row->end()) my_row->at(c) += d; else { my_row->insert(std::make_pair(c, d)); nnz++; } } else { // create a new row. auto new_row = new std::unordered_map<ColIndexType, ValueType>(); umap.insert(std::make_pair(r, new_row)); new_row->insert(std::make_pair(c, d)); nnz++; } } } typename CrsType::row_map_type::non_const_type row_map("vanilla_row_map", m + 1); typename CrsType::values_type values("vanilla_values", nnz); typename CrsType::staticcrsgraph_type::entries_type col_ids("vanilla_col_ids", nnz); typename CrsType::row_map_type::non_const_type::HostMirror row_map_h = Kokkos::create_mirror_view(row_map); typename CrsType::values_type::HostMirror values_h = Kokkos::create_mirror_view(values); typename CrsType::staticcrsgraph_type::entries_type::HostMirror col_ids_h = Kokkos::create_mirror_view(col_ids); int row_len = 0; for (uint64_t i = 0; i < m; i++) { if (umap.find(i) != umap.end()) row_len += umap.at(i)->size(); row_map_h(i + 1) = row_len; } for (uint64_t i = 0; i < m; i++) { if (umap.find(i) == umap.end()) // Fully sparse row continue; auto row_start = row_map_h(i); auto row_end = row_map_h(i + 1); auto my_row = umap.at(i); auto iter = my_row->begin(); for (auto j = row_start; j < row_end; j++, iter++) { col_ids_h(j) = iter->first; values_h(j) = iter->second; } delete my_row; } Kokkos::deep_copy(row_map, row_map_h); Kokkos::deep_copy(col_ids, col_ids_h); Kokkos::deep_copy(values, values_h); return CrsType("vanilla_coo2csr", m, n, nnz, values, row_map, col_ids); } template <class CrsType, class RowType, class ColType, class DataType> void check_crs_matrix(CrsType crsMat, RowType row, ColType col, DataType data, std::string failure_info = "no failure information!") { using value_type = typename DataType::value_type; using ats = Kokkos::ArithTraits<value_type>; // Copy coo to host typename RowType::HostMirror row_h = Kokkos::create_mirror_view(row); Kokkos::deep_copy(row_h, row); typename ColType::HostMirror col_h = Kokkos::create_mirror_view(col); Kokkos::deep_copy(col_h, col); typename DataType::HostMirror data_h = Kokkos::create_mirror_view(data); Kokkos::deep_copy(data_h, data); auto crsMatRef = vanilla_coo2crs<CrsType, typename RowType::HostMirror, typename ColType::HostMirror, typename DataType::HostMirror>( crsMat.numRows(), 
crsMat.numCols(), row_h, col_h, data_h); auto crs_col_ids_ref_d = crsMatRef.graph.entries; auto crs_row_map_ref_d = crsMatRef.graph.row_map; auto crs_vals_ref_d = crsMatRef.values; using ViewTypeCrsColIdsRef = decltype(crs_col_ids_ref_d); using ViewTypeCrsRowMapRef = decltype(crs_row_map_ref_d); using ViewTypeCrsValsRef = decltype(crs_vals_ref_d); // Copy crs to host typename ViewTypeCrsColIdsRef::HostMirror crs_col_ids_ref = Kokkos::create_mirror_view(crs_col_ids_ref_d); Kokkos::deep_copy(crs_col_ids_ref, crs_col_ids_ref_d); typename ViewTypeCrsRowMapRef::HostMirror crs_row_map_ref = Kokkos::create_mirror_view(crs_row_map_ref_d); Kokkos::deep_copy(crs_row_map_ref, crs_row_map_ref_d); typename ViewTypeCrsValsRef::HostMirror crs_vals_ref = Kokkos::create_mirror_view(crs_vals_ref_d); Kokkos::deep_copy(crs_vals_ref, crs_vals_ref_d); auto crs_col_ids_d = crsMat.graph.entries; auto crs_row_map_d = crsMat.graph.row_map; auto crs_vals_d = crsMat.values; using ViewTypeCrsColIds = decltype(crs_col_ids_d); using ViewTypeCrsRowMap = decltype(crs_row_map_d); using ViewTypeCrsVals = decltype(crs_vals_d); // Copy crs to host typename ViewTypeCrsColIds::HostMirror crs_col_ids = Kokkos::create_mirror_view(crs_col_ids_d); Kokkos::deep_copy(crs_col_ids, crs_col_ids_d); typename ViewTypeCrsRowMap::HostMirror crs_row_map = Kokkos::create_mirror_view(crs_row_map_d); Kokkos::deep_copy(crs_row_map, crs_row_map_d); typename ViewTypeCrsVals::HostMirror crs_vals = Kokkos::create_mirror_view(crs_vals_d); Kokkos::deep_copy(crs_vals, crs_vals_d); Kokkos::fence(); ASSERT_EQ(crsMatRef.nnz(), crsMat.nnz()) << failure_info; for (int i = 0; i < crsMatRef.numRows(); i++) { ASSERT_EQ(crs_row_map_ref(i), crs_row_map(i)) << "crs_row_map_ref(" << i << " = " << crs_row_map_ref(i) << " != " << "crs_row_map(" << i << " = " << crs_row_map(i) << " -- " << failure_info; } for (int i = 0; i < crsMatRef.numRows(); ++i) { auto row_start_ref = crs_row_map_ref(i); auto row_stop_ref = crs_row_map_ref(i + 1); auto row_len_ref = row_stop_ref - row_start_ref; auto row_start = crs_row_map(i); auto row_len = crs_row_map(i + 1) - row_start; ASSERT_EQ(row_start_ref, row_start); ASSERT_EQ(row_len_ref, row_len); for (auto j = row_start_ref; j < row_stop_ref; ++j) { // Look for the corresponding col_id auto col_id_ref = crs_col_ids_ref(j); std::string fail_msg = "row: " + std::to_string(i) + ", crs_col_ids_ref(" + std::to_string(j) + ") = " + std::to_string(col_id_ref); auto k = row_start_ref; for (; k < row_stop_ref; ++k) { if (crs_col_ids(k) == col_id_ref) break; } if (k == row_stop_ref) FAIL() << fail_msg << " not found in crs_col_ids!" << failure_info; // NOTE: ASSERT_EQ doesn't work -- values may be summed in different // orders We sum at most m x n values. auto eps = crsMatRef.numCols() * crsMatRef.numRows() * 10e1 * ats::epsilon(); EXPECT_NEAR_KK(crs_vals_ref(j), crs_vals(k), eps, fail_msg + " mismatched values!" 
+ failure_info); } } } template <class ScalarType, class LayoutType, class ExeSpaceType> void doCoo2Crs(size_t m, size_t n, ScalarType min_val, ScalarType max_val) { RandCooMat<ScalarType, LayoutType, ExeSpaceType> cooMat(m, n, m * n, min_val, max_val); auto randRow = cooMat.get_row(); auto randCol = cooMat.get_col(); auto randData = cooMat.get_data(); std::string failure_info = "\nBegin arguments for above failure...\n" + cooMat.info + "scalar: " + std::string(typeid(ScalarType).name()) + "\n" + "layout: " + std::string(typeid(LayoutType).name()) + "\n" + "m: " + std::to_string(m) + ", n: " + std::to_string(n) + "\n...end arguments for above failure.\n"; auto crsMat = KokkosSparse::coo2crs(m, n, randRow, randCol, randData); check_crs_matrix(crsMat, randRow, randCol, randData, failure_info); } template <class LayoutType, class ExeSpaceType> void doAllScalarsCoo2Crs(size_t m, size_t n, int min, int max) { doCoo2Crs<float, LayoutType, ExeSpaceType>(m, n, min, max); doCoo2Crs<double, LayoutType, ExeSpaceType>(m, n, min, max); doCoo2Crs<Kokkos::complex<float>, LayoutType, ExeSpaceType>(m, n, min, max); doCoo2Crs<Kokkos::complex<double>, LayoutType, ExeSpaceType>(m, n, min, max); } template <class ExeSpaceType> void doAllLayoutsCoo2Crs(size_t m, size_t n, int min, int max) { doAllScalarsCoo2Crs<Kokkos::LayoutLeft, ExeSpaceType>(m, n, min, max); doAllScalarsCoo2Crs<Kokkos::LayoutRight, ExeSpaceType>(m, n, min, max); } template <class ExeSpaceType> void doAllCoo2Crs(size_t m, size_t n) { int min = 1, max = 10; doAllLayoutsCoo2Crs<ExeSpaceType>(m, n, min, max); } TEST_F(TestCategory, sparse_coo2crs) { uint64_t ticks = std::chrono::high_resolution_clock::now().time_since_epoch().count() % UINT32_MAX; std::srand(ticks); doAllCoo2Crs<TestExecSpace>(0, 0); // Square cases for (size_t i = 1; i < 256; i *= 4) { size_t dim = (std::rand() % 511) + 1; doAllCoo2Crs<TestExecSpace>(dim, dim); } // Non-square cases for (size_t i = 1; i < 256; i *= 4) { size_t m = (std::rand() % 511) + 1; size_t n = (std::rand() % 511) + 1; while (n == m) n = (std::rand() % 511) + 1; doAllCoo2Crs<TestExecSpace>(m, n); } RandCooMat<double, Kokkos::LayoutRight, TestExecSpace> cooMat(2, 2, 2 * 2, 10, 10); auto crsMatrix = KokkosSparse::coo2crs(2, 2, cooMat.get_row(), cooMat.get_col(), cooMat.get_data()); auto cooMatrix = KokkosSparse::crs2coo(crsMatrix); check_crs_matrix(crsMatrix, cooMatrix.row(), cooMatrix.col(), cooMatrix.data()); } TEST_F(TestCategory, sparse_coo2crs_staticMatrix_edgeCases) { int m = 4; int n = 4; long long staticRow[16]{0, 1, 3, 2, 3, 2, 2, 2, 0, 0, 0, 1, 2, 0, 3, 0}; long long staticCol[16]{1, 1, 2, 3, 3, 2, 3, 2, 0, 0, 1, 3, 1, 2, 0, 0}; float staticData[16]{7.28411, 8.17991, 8.84304, 5.01788, 9.85646, 5.79404, 8.42014, 1.90238, 8.24195, 4.39955, 3.2637, 5.4546, 6.51895, 8.09302, 9.36294, 3.44206}; Kokkos::View<long long *, TestExecSpace> row("coo row", 16); Kokkos::View<long long *, TestExecSpace> col("coo col", 16); Kokkos::View<float *, TestExecSpace> data("coo data", 16); typename Kokkos::View<long long *, TestExecSpace>::HostMirror row_h = Kokkos::create_mirror_view(row); typename Kokkos::View<long long *, TestExecSpace>::HostMirror col_h = Kokkos::create_mirror_view(col); typename Kokkos::View<float *, TestExecSpace>::HostMirror data_h = Kokkos::create_mirror_view(data); for (int i = 0; i < 16; i++) { row_h(i) = staticRow[i]; col_h(i) = staticCol[i]; data_h(i) = staticData[i]; } Kokkos::deep_copy(row, row_h); Kokkos::deep_copy(col, col_h); Kokkos::deep_copy(data, data_h); // Even partitions with 
multiple threads auto crsMatTs4 = KokkosSparse::coo2crs(m, n, row, col, data); check_crs_matrix(crsMatTs4, row_h, col_h, data_h); // Even partitions, single thread, fully sparse row long long staticRowTs1[16]{0, 3, 0, 2, 2, 3, 0, 3, 2, 0, 0, 0, 0, 3, 3, 0}; long long staticColTs1[16]{3, 1, 3, 1, 2, 2, 1, 1, 2, 3, 3, 1, 1, 0, 0, 0}; float staticDataTs1[16]{6.1355, 6.53989, 8.58559, 6.37476, 4.18964, 2.41146, 1.82177, 1.4249, 1.52659, 5.50521, 8.0484, 3.98874, 6.74709, 3.35072, 7.81944, 5.83494}; for (int i = 0; i < 16; i++) { row_h(i) = staticRowTs1[i]; col_h(i) = staticColTs1[i]; data_h(i) = staticDataTs1[i]; } Kokkos::deep_copy(row, row_h); Kokkos::deep_copy(col, col_h); Kokkos::deep_copy(data, data_h); auto crsMatTs1 = KokkosSparse::coo2crs(m, n, row, col, data); check_crs_matrix(crsMatTs1, row_h, col_h, data_h); // Fully sparse for (int i = 0; i < 16; i++) { row_h(i) = -staticRowTs1[i]; col_h(i) = -staticColTs1[i]; } Kokkos::deep_copy(row, row_h); Kokkos::deep_copy(col, col_h); auto crsMatFsTs1 = KokkosSparse::coo2crs(m, n, row, col, data); check_crs_matrix(crsMatFsTs1, row_h, col_h, data); } } // namespace Test
import java.util.*; import java.io.*; import java.math.*; public class Main3 { public static boolean bg = false; public static void main(String[] args) throws Exception { BR in = new BR(); //ST in = new ST(); //LR in = new LR(); int n1 = ri(in.nx()); char[] l1 = in.nx().toCharArray(); int n2 = ri(in.nx()); IntL[] m1 = new IntL[26]; for (int i = 0; i < 26; i++){ m1[i] = new IntL(); } for (int i = 0; i < n2; i++){ int k1 = ri(in.nx()); char f1 = in.nx().charAt(0); m1[f1 - 'a'].add(k1); } IntL[] m2 = new IntL[26]; for (int i = 0; i < 26; i++){ m2[i] = new IntL(); } for (int i = 0;i<n1;i++){ for (int j = 0;j<l1.length; j++){ int id = i*l1.length+j; m2[l1[j]-'a'].add(id); } } for (int i = 0; i < 26; i++){ Shuffle.ints(m2[i].l1, 0, m2[i].size-1); } Tree<Integer>[] m3 = new Tree[26]; for (int i = 0; i < 26; i++){ m3[i] = new Tree(); for (int j = 0;j<m2[i].size;j++){ m3[i].add(m2[i].l1[j]); } } if (bg) px(m3); HashSet<Integer> del = new HashSet(); for (int i = 0; i < 26; i++){ for (int e: m1[i]){ int cur = m3[i].cont.get(m3[i].findx(e)); del.add(cur); m3[i].remove(cur); } } if (bg) px(m3); StringBuilder fin = new StringBuilder(); for (int i = 0;i<n1;i++){ for (int j = 0;j<l1.length; j++){ int id = i*l1.length+j; if (!del.contains(id)){ fin.append(l1[j]); } } } pn(fin); } private static void pn(Object o1){ System.out.println(o1); } private static void px(Object... o1) { System.err.println(Arrays.deepToString(o1)); } private static int ri(String k1){ return Integer.parseInt(k1); } private static class Shuffle { private static Random r1 = new Random(); public static void ints(int[] l1, int n1, int n2){ for (int i = 0;i<n2-n1+1;i++){ int k1 = i + r1.nextInt(n2-n1+1-i); int temp = l1[i+n1]; l1[i+n1] = l1[k1+n1]; l1[k1+n1] = temp; } } } private static class BR { BufferedReader k1 = null; StringTokenizer k2 = null; public BR(){ k1 = new BufferedReader(new InputStreamReader(System.in)); } public String nx() throws Exception { for (;;){ if (k2 == null){ String temp = k1.readLine(); if (temp == null) return null; k2 = new StringTokenizer(temp); } else if (!k2.hasMoreTokens()){ String temp = k1.readLine(); if (temp == null) return null; k2 = new StringTokenizer(temp); } else break; } return k2.nextToken(); } } private static class IntL implements Iterable<Integer>{ public int[] l1 = null; public int size = 0;; public IntL(int initialCapacity) { this.l1 = new int[initialCapacity]; } public IntL() { this(5); } public void add(int e) { if (size == l1.length){ l1 = Arrays.copyOf(l1, l1.length*3/2+1); } l1[size++] = e; } @Override public String toString(){ StringBuilder fin = new StringBuilder(); fin.append('{'); for (int i=0;i<size;i++){ if(i != 0) fin.append(", "); fin.append(l1[i]); } fin.append('}'); return fin.toString(); } @Override public Iterator<Integer> iterator() { return new It(); } public class It implements Iterator<Integer> { int ptr = 0; @Override public boolean hasNext() { if (ptr< size) return true; return false; } @Override public Integer next() { int fin = l1[ptr]; ptr++; return fin; } @Override public void remove() { } } } private static class Tree<E extends Comparable<E> > { public ArrayList<E> cont = new ArrayList(); public ArrayList<Integer> left = new ArrayList(); public ArrayList<Integer> right = new ArrayList(); public IntL no = new IntL(); public IntL child = new IntL(); public int size = 0; public void update(int id, int n1){ int old = no.l1[id]; int niu = old + n1; if (niu < 0) niu = 0; int diff = niu - old; no.l1[id] = niu; size += diff; for (int e: finds){ child.l1[e] += diff; } } 
public int newNode(E k1, int n1){ cont.add(k1); left.add(-1); right.add(-1); no.add(n1); child.add(n1); size += n1; for (int e: finds){ child.l1[e] += n1; } return cont.size()-1; } public ArrayList<Integer> finds = null; public int find(E a1){ finds = new ArrayList(); if (cont.size() == 0) return -1; return recfind(a1,0); } public int recfind(E a1, int i1){ finds.add(i1); E comp = cont.get(i1); int val = a1.compareTo(comp); if (val == 0) return i1; else if (val < 0){ int k1 = left.get(i1); if (k1 == -1) return -1; else return recfind(a1,k1); } else { int k1 = right.get(i1); if (k1 == -1) return -1; else return recfind(a1,k1); } } public int findx(int n1){ if (n1 > size) return -1; return recfindx(n1,0,0); } public int recfindx(int n1, int i1, int surplus){ int comp = child.l1[i1]; if (right.get(i1) != -1) comp -= child.l1[right.get(i1)]; int upper = surplus + comp; int lower = upper - no.l1[i1]+1; if (n1<=upper && n1>=lower) return i1; if (n1 <= upper) return recfindx(n1, left.get(i1), surplus); else return recfindx(n1, right.get(i1), upper); } public void add(E a1){ add(a1, 1); } public void remove(E a1){ add(a1, -1); } public void add (E a1, int n1){ find(a1); if (finds.size() == 0) newNode(a1, n1); else { int id = finds.get(finds.size()-1); E last = cont.get(id); int k1 = a1.compareTo(last); if (k1 == 0) update(id,n1); else if (k1 < 0){ if (n1 > 0) left.set(id, newNode(a1, n1)); } else { if (n1 > 0) right.set(id, newNode(a1, n1)); } } } @Override public String toString(){ StringBuilder fin = new StringBuilder(); fin.append('{'); for (int i = 0;i < size; i++){ if (i != 0) fin.append(", "); fin.append(cont.get(findx(i+1))); } fin.append('}'); return fin.toString(); } } }
/**
 *	offload_enqueue - add an offload packet to an SGE offload receive queue
 *	@q: the SGE response queue
 *	@skb: the packet
 *
 *	Add a new offload packet to an SGE response queue's offload packet
 *	queue. If the packet is the first on the queue it schedules the RX
 *	softirq to process the queue.
 */
static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
{
	skb->next = skb->prev = NULL;
	if (q->rx_tail)
		q->rx_tail->next = skb;
	else {
		struct sge_qset *qs = rspq_to_qset(q);

		if (__netif_rx_schedule_prep(qs->netdev))
			__netif_rx_schedule(qs->netdev);
		q->rx_head = skb;
	}
	q->rx_tail = skb;
}
Brazil as an Emerging Donor in Africa's Agricultural Sector: Comparing Two Projects

This article approaches the recent phenomenon of South–South cooperation with a focus on Brazil's interventions in the agricultural sector. It reports on the case of Brazil as a provider of technical cooperation for the African continent, based on the experience of its national agricultural research institute, the Brazilian Agricultural Research Corporation, best known by its acronym EMBRAPA. The first section provides a historical genealogy of South–South cooperation and the experience of Brazil. The second compares two projects that EMBRAPA is currently implementing in Africa. Based on this historical and comparative discussion, it concludes by assessing the potential of South–South cooperation for re-politicizing international development.
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. /// Operation shape for `CreateDeliveryStream`. /// /// This is usually constructed for you using the the fluent builder returned by /// [`create_delivery_stream`](crate::client::Client::create_delivery_stream). /// /// See [`crate::client::fluent_builders::CreateDeliveryStream`] for more details about the operation. #[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)] pub struct CreateDeliveryStream { _private: (), } impl CreateDeliveryStream { /// Creates a new builder-style object to manufacture [`CreateDeliveryStreamInput`](crate::input::CreateDeliveryStreamInput) pub fn builder() -> crate::input::create_delivery_stream_input::Builder { crate::input::create_delivery_stream_input::Builder::default() } /// Creates a new `CreateDeliveryStream` operation. pub fn new() -> Self { Self { _private: () } } } impl aws_smithy_http::response::ParseStrictResponse for CreateDeliveryStream { type Output = std::result::Result< crate::output::CreateDeliveryStreamOutput, crate::error::CreateDeliveryStreamError, >; fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output { if !response.status().is_success() && response.status().as_u16() != 200 { crate::operation_deser::parse_create_delivery_stream_error(response) } else { crate::operation_deser::parse_create_delivery_stream_response(response) } } } /// Operation shape for `DeleteDeliveryStream`. /// /// This is usually constructed for you using the the fluent builder returned by /// [`delete_delivery_stream`](crate::client::Client::delete_delivery_stream). /// /// See [`crate::client::fluent_builders::DeleteDeliveryStream`] for more details about the operation. #[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)] pub struct DeleteDeliveryStream { _private: (), } impl DeleteDeliveryStream { /// Creates a new builder-style object to manufacture [`DeleteDeliveryStreamInput`](crate::input::DeleteDeliveryStreamInput) pub fn builder() -> crate::input::delete_delivery_stream_input::Builder { crate::input::delete_delivery_stream_input::Builder::default() } /// Creates a new `DeleteDeliveryStream` operation. pub fn new() -> Self { Self { _private: () } } } impl aws_smithy_http::response::ParseStrictResponse for DeleteDeliveryStream { type Output = std::result::Result< crate::output::DeleteDeliveryStreamOutput, crate::error::DeleteDeliveryStreamError, >; fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output { if !response.status().is_success() && response.status().as_u16() != 200 { crate::operation_deser::parse_delete_delivery_stream_error(response) } else { crate::operation_deser::parse_delete_delivery_stream_response(response) } } } /// Operation shape for `DescribeDeliveryStream`. /// /// This is usually constructed for you using the the fluent builder returned by /// [`describe_delivery_stream`](crate::client::Client::describe_delivery_stream). /// /// See [`crate::client::fluent_builders::DescribeDeliveryStream`] for more details about the operation. 
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)] pub struct DescribeDeliveryStream { _private: (), } impl DescribeDeliveryStream { /// Creates a new builder-style object to manufacture [`DescribeDeliveryStreamInput`](crate::input::DescribeDeliveryStreamInput) pub fn builder() -> crate::input::describe_delivery_stream_input::Builder { crate::input::describe_delivery_stream_input::Builder::default() } /// Creates a new `DescribeDeliveryStream` operation. pub fn new() -> Self { Self { _private: () } } } impl aws_smithy_http::response::ParseStrictResponse for DescribeDeliveryStream { type Output = std::result::Result< crate::output::DescribeDeliveryStreamOutput, crate::error::DescribeDeliveryStreamError, >; fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output { if !response.status().is_success() && response.status().as_u16() != 200 { crate::operation_deser::parse_describe_delivery_stream_error(response) } else { crate::operation_deser::parse_describe_delivery_stream_response(response) } } } /// Operation shape for `ListDeliveryStreams`. /// /// This is usually constructed for you using the the fluent builder returned by /// [`list_delivery_streams`](crate::client::Client::list_delivery_streams). /// /// See [`crate::client::fluent_builders::ListDeliveryStreams`] for more details about the operation. #[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)] pub struct ListDeliveryStreams { _private: (), } impl ListDeliveryStreams { /// Creates a new builder-style object to manufacture [`ListDeliveryStreamsInput`](crate::input::ListDeliveryStreamsInput) pub fn builder() -> crate::input::list_delivery_streams_input::Builder { crate::input::list_delivery_streams_input::Builder::default() } /// Creates a new `ListDeliveryStreams` operation. pub fn new() -> Self { Self { _private: () } } } impl aws_smithy_http::response::ParseStrictResponse for ListDeliveryStreams { type Output = std::result::Result< crate::output::ListDeliveryStreamsOutput, crate::error::ListDeliveryStreamsError, >; fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output { if !response.status().is_success() && response.status().as_u16() != 200 { crate::operation_deser::parse_list_delivery_streams_error(response) } else { crate::operation_deser::parse_list_delivery_streams_response(response) } } } /// Operation shape for `ListTagsForDeliveryStream`. /// /// This is usually constructed for you using the the fluent builder returned by /// [`list_tags_for_delivery_stream`](crate::client::Client::list_tags_for_delivery_stream). /// /// See [`crate::client::fluent_builders::ListTagsForDeliveryStream`] for more details about the operation. #[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)] pub struct ListTagsForDeliveryStream { _private: (), } impl ListTagsForDeliveryStream { /// Creates a new builder-style object to manufacture [`ListTagsForDeliveryStreamInput`](crate::input::ListTagsForDeliveryStreamInput) pub fn builder() -> crate::input::list_tags_for_delivery_stream_input::Builder { crate::input::list_tags_for_delivery_stream_input::Builder::default() } /// Creates a new `ListTagsForDeliveryStream` operation. 
pub fn new() -> Self { Self { _private: () } } } impl aws_smithy_http::response::ParseStrictResponse for ListTagsForDeliveryStream { type Output = std::result::Result< crate::output::ListTagsForDeliveryStreamOutput, crate::error::ListTagsForDeliveryStreamError, >; fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output { if !response.status().is_success() && response.status().as_u16() != 200 { crate::operation_deser::parse_list_tags_for_delivery_stream_error(response) } else { crate::operation_deser::parse_list_tags_for_delivery_stream_response(response) } } } /// Operation shape for `PutRecord`. /// /// This is usually constructed for you using the the fluent builder returned by /// [`put_record`](crate::client::Client::put_record). /// /// See [`crate::client::fluent_builders::PutRecord`] for more details about the operation. #[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)] pub struct PutRecord { _private: (), } impl PutRecord { /// Creates a new builder-style object to manufacture [`PutRecordInput`](crate::input::PutRecordInput) pub fn builder() -> crate::input::put_record_input::Builder { crate::input::put_record_input::Builder::default() } /// Creates a new `PutRecord` operation. pub fn new() -> Self { Self { _private: () } } } impl aws_smithy_http::response::ParseStrictResponse for PutRecord { type Output = std::result::Result<crate::output::PutRecordOutput, crate::error::PutRecordError>; fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output { if !response.status().is_success() && response.status().as_u16() != 200 { crate::operation_deser::parse_put_record_error(response) } else { crate::operation_deser::parse_put_record_response(response) } } } /// Operation shape for `PutRecordBatch`. /// /// This is usually constructed for you using the the fluent builder returned by /// [`put_record_batch`](crate::client::Client::put_record_batch). /// /// See [`crate::client::fluent_builders::PutRecordBatch`] for more details about the operation. #[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)] pub struct PutRecordBatch { _private: (), } impl PutRecordBatch { /// Creates a new builder-style object to manufacture [`PutRecordBatchInput`](crate::input::PutRecordBatchInput) pub fn builder() -> crate::input::put_record_batch_input::Builder { crate::input::put_record_batch_input::Builder::default() } /// Creates a new `PutRecordBatch` operation. pub fn new() -> Self { Self { _private: () } } } impl aws_smithy_http::response::ParseStrictResponse for PutRecordBatch { type Output = std::result::Result<crate::output::PutRecordBatchOutput, crate::error::PutRecordBatchError>; fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output { if !response.status().is_success() && response.status().as_u16() != 200 { crate::operation_deser::parse_put_record_batch_error(response) } else { crate::operation_deser::parse_put_record_batch_response(response) } } } /// Operation shape for `StartDeliveryStreamEncryption`. /// /// This is usually constructed for you using the the fluent builder returned by /// [`start_delivery_stream_encryption`](crate::client::Client::start_delivery_stream_encryption). /// /// See [`crate::client::fluent_builders::StartDeliveryStreamEncryption`] for more details about the operation. 
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)] pub struct StartDeliveryStreamEncryption { _private: (), } impl StartDeliveryStreamEncryption { /// Creates a new builder-style object to manufacture [`StartDeliveryStreamEncryptionInput`](crate::input::StartDeliveryStreamEncryptionInput) pub fn builder() -> crate::input::start_delivery_stream_encryption_input::Builder { crate::input::start_delivery_stream_encryption_input::Builder::default() } /// Creates a new `StartDeliveryStreamEncryption` operation. pub fn new() -> Self { Self { _private: () } } } impl aws_smithy_http::response::ParseStrictResponse for StartDeliveryStreamEncryption { type Output = std::result::Result< crate::output::StartDeliveryStreamEncryptionOutput, crate::error::StartDeliveryStreamEncryptionError, >; fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output { if !response.status().is_success() && response.status().as_u16() != 200 { crate::operation_deser::parse_start_delivery_stream_encryption_error(response) } else { crate::operation_deser::parse_start_delivery_stream_encryption_response(response) } } } /// Operation shape for `StopDeliveryStreamEncryption`. /// /// This is usually constructed for you using the the fluent builder returned by /// [`stop_delivery_stream_encryption`](crate::client::Client::stop_delivery_stream_encryption). /// /// See [`crate::client::fluent_builders::StopDeliveryStreamEncryption`] for more details about the operation. #[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)] pub struct StopDeliveryStreamEncryption { _private: (), } impl StopDeliveryStreamEncryption { /// Creates a new builder-style object to manufacture [`StopDeliveryStreamEncryptionInput`](crate::input::StopDeliveryStreamEncryptionInput) pub fn builder() -> crate::input::stop_delivery_stream_encryption_input::Builder { crate::input::stop_delivery_stream_encryption_input::Builder::default() } /// Creates a new `StopDeliveryStreamEncryption` operation. pub fn new() -> Self { Self { _private: () } } } impl aws_smithy_http::response::ParseStrictResponse for StopDeliveryStreamEncryption { type Output = std::result::Result< crate::output::StopDeliveryStreamEncryptionOutput, crate::error::StopDeliveryStreamEncryptionError, >; fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output { if !response.status().is_success() && response.status().as_u16() != 200 { crate::operation_deser::parse_stop_delivery_stream_encryption_error(response) } else { crate::operation_deser::parse_stop_delivery_stream_encryption_response(response) } } } /// Operation shape for `TagDeliveryStream`. /// /// This is usually constructed for you using the the fluent builder returned by /// [`tag_delivery_stream`](crate::client::Client::tag_delivery_stream). /// /// See [`crate::client::fluent_builders::TagDeliveryStream`] for more details about the operation. #[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)] pub struct TagDeliveryStream { _private: (), } impl TagDeliveryStream { /// Creates a new builder-style object to manufacture [`TagDeliveryStreamInput`](crate::input::TagDeliveryStreamInput) pub fn builder() -> crate::input::tag_delivery_stream_input::Builder { crate::input::tag_delivery_stream_input::Builder::default() } /// Creates a new `TagDeliveryStream` operation. 
pub fn new() -> Self { Self { _private: () } } } impl aws_smithy_http::response::ParseStrictResponse for TagDeliveryStream { type Output = std::result::Result< crate::output::TagDeliveryStreamOutput, crate::error::TagDeliveryStreamError, >; fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output { if !response.status().is_success() && response.status().as_u16() != 200 { crate::operation_deser::parse_tag_delivery_stream_error(response) } else { crate::operation_deser::parse_tag_delivery_stream_response(response) } } } /// Operation shape for `UntagDeliveryStream`. /// /// This is usually constructed for you using the the fluent builder returned by /// [`untag_delivery_stream`](crate::client::Client::untag_delivery_stream). /// /// See [`crate::client::fluent_builders::UntagDeliveryStream`] for more details about the operation. #[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)] pub struct UntagDeliveryStream { _private: (), } impl UntagDeliveryStream { /// Creates a new builder-style object to manufacture [`UntagDeliveryStreamInput`](crate::input::UntagDeliveryStreamInput) pub fn builder() -> crate::input::untag_delivery_stream_input::Builder { crate::input::untag_delivery_stream_input::Builder::default() } /// Creates a new `UntagDeliveryStream` operation. pub fn new() -> Self { Self { _private: () } } } impl aws_smithy_http::response::ParseStrictResponse for UntagDeliveryStream { type Output = std::result::Result< crate::output::UntagDeliveryStreamOutput, crate::error::UntagDeliveryStreamError, >; fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output { if !response.status().is_success() && response.status().as_u16() != 200 { crate::operation_deser::parse_untag_delivery_stream_error(response) } else { crate::operation_deser::parse_untag_delivery_stream_response(response) } } } /// Operation shape for `UpdateDestination`. /// /// This is usually constructed for you using the the fluent builder returned by /// [`update_destination`](crate::client::Client::update_destination). /// /// See [`crate::client::fluent_builders::UpdateDestination`] for more details about the operation. #[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)] pub struct UpdateDestination { _private: (), } impl UpdateDestination { /// Creates a new builder-style object to manufacture [`UpdateDestinationInput`](crate::input::UpdateDestinationInput) pub fn builder() -> crate::input::update_destination_input::Builder { crate::input::update_destination_input::Builder::default() } /// Creates a new `UpdateDestination` operation. pub fn new() -> Self { Self { _private: () } } } impl aws_smithy_http::response::ParseStrictResponse for UpdateDestination { type Output = std::result::Result< crate::output::UpdateDestinationOutput, crate::error::UpdateDestinationError, >; fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output { if !response.status().is_success() && response.status().as_u16() != 200 { crate::operation_deser::parse_update_destination_error(response) } else { crate::operation_deser::parse_update_destination_response(response) } } }
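The doc comments above repeatedly note that these operation shapes are normally driven through the client's fluent builders rather than constructed by hand. As a minimal sketch of what that looks like for PutRecord (assuming the generated aws-sdk-firehose client plus aws-config and tokio as dependencies; the stream name is a placeholder, and the exact re-export paths for Record and Blob vary between SDK versions):

// Hedged usage sketch: drive the `PutRecord` operation shape through the
// fluent client. "my-delivery-stream" is a placeholder name.
use aws_sdk_firehose::{model::Record, Blob, Client, Error};

#[tokio::main]
async fn main() -> Result<(), Error> {
    // Resolve region and credentials from the environment.
    let config = aws_config::load_from_env().await;
    let client = Client::new(&config);

    // The fluent builder assembles a `PutRecordInput`, dispatches the
    // `PutRecord` operation defined above, and parses the response.
    let resp = client
        .put_record()
        .delivery_stream_name("my-delivery-stream")
        .record(Record::builder().data(Blob::new("hello\n")).build())
        .send()
        .await?;

    println!("record id: {:?}", resp.record_id());
    Ok(())
}

On a non-success status, the ParseStrictResponse implementation shown above routes the body through the corresponding parse_*_error deserializer, so the Err branch carries the modeled operation error.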
Would you like a garden that produces a bountiful harvest year after year without back-breaking effort? Of course! Who wouldn't? Last fall, I sat in on a lecture by Rick Austin, author of Secret Garden of Survival. Rick showed the class one picture after another of a forest-like garden filled with every fruit and vegetable imaginable and said that once planted, the garden pretty much took care of itself. Amazing, I thought. I have to do this at our mountain house!

A month or so later, I thought I'd figured out the best spot for a food forest and called a tree removal company. The week the bulldozers were scheduled to raze a half acre of our wooded land, I met Joshua Deel of Barefoot Farms & Permaculture. Whether you consider it a coincidence or an act of God, it was perfect timing. Turns out Josh is a certified permaculture designer and the owner of Sustainable Design Solutions, and do you know what he said when I told him of my landscaping plans? "Wait!! Don't do it! Permaculture is so much more than a food forest." I immediately cancelled the demolition crew, and over the last month, Josh has been busy creating a design to turn the mountain house into a permaculture homestead. The more I learn about permaculture, the more excited I get. My goal today is to share some of that excitement and show you why I think permaculture is the way of the future. Josh has graciously agreed to help by answering some of the most common questions about permaculture.

What is permaculture?

As the founder of permaculture, Bill Mollison, says, "Permaculture is about design, values, ethics and a personal responsibility for earth care." Permaculture is the intentional design and maintenance of agriculturally productive ecosystems, which have the diversity, stability, and resilience of natural ecosystems. It is the harmonious integration of landscape and people—whole human habitats—providing their food, energy, shelter, and other material and non-material needs in a sustainable way. The philosophy behind permaculture is one of working with, rather than against, nature; of looking at systems in all their functions rather than asking only one yield of them and thereby allowing systems to demonstrate their own complexities and maturation. This requires a multi-disciplinary approach integrating ecology, agriculture, architecture, and construction.

Why should I care about permaculture?

Without permanent agriculture there is no possibility of a stable social order. Period. Soil degradation is at the core of the environmental, even social, problems we face. Our present system of agriculture and land clearing for agriculture either removes soil or depletes it. The only way to change what we as a culture and society have created in history and change the problems that persist in the present is to partner with ecosystems and design for ecosystem interactions that will build soil, and ultimately a permanent culture as well! We can design for the futures we want to have and want our posterity to enjoy by changing the present paradigm, designing ecology-based societies instead of consumer-based ones.

What are the benefits of permaculture?

Permaculture offers an alternative paradigm to the current system of pending crises in food, fuel and energy, economics, politics, etc. It changes the social system in that it makes people define where their resources (what they need for food and support systems) come from within their region or biosphere.
Permaculture design can establish secure whole human habitats that are both resilient and regenerative, supplying all our needs! This kind of human habitat offers numerous health and lifestyle benefits, including the expansion of time density; that is, meaningful life experiences increase exponentially and overall quality of life improves!

What are some examples of permaculture in practice?

Permaculture is being practiced and demonstrated in virtually every climate around the world, no matter how extreme. One amazing demonstration of this is the "Greening the Desert" project in the Middle East. Permaculture is also practiced on any scale, from the very small urban or city space to the larger rural agricultural scale and even broader macrospaces such as entire landscape rehabilitation projects like the Loess Plateau in China (nearly 9 million acres!). Permaculture is also employed in holistic aid relief projects globally.

Is this just some New Age method of gardening with fancy terms like food forests?

Again, permaculture is an ethics-based design science that supplies all our needs and benefits the environment. Permaculture isn't a metaphysical practice, although it learns from the lessons offered by things like local traditions and techniques as well as cultures and customs. It is primarily concerned with assembling conceptual, material, and strategic components in a pattern which functions to benefit life in all its forms. This can certainly be achieved by learning from traditions and techniques that work in one landscape and climate as well as those that worked closely with nature. The ethics that give permaculture its footing are: Earth Care—this includes all living and non-living things, land, water, animals, air, etc. People Care—to promote self-reliance and community responsibility. Return of Surplus—this is to pass on anything surplus to our needs for the aims above (this is not to be achieved through government coercion, but voluntarily).

What's the difference between permaculture and sustainability?

Sustainability needs to be demystified. Simply, sustainability is an energy audit of a system. A sustainable system produces more energy than it consumes, so that there is enough in surplus to maintain and replace that system over the lifetime of its components. In other words, the system is just replacing itself; there's no real increase. Sustainability is the minimum requirement of permaculture, and in order to gain an increased surplus you have to link to living systems, because living systems get their energy from the sun, a free resource! Permaculture looks to gain a surplus in order to increase the diversity and fertility of the system while providing for our needs. We look beyond sustainability towards abundance!

Do I need a lot of land to get started in permaculture?

Not at all! You can practice permaculture right where you are—even if it's at your place of employment (even in a cubicle!). In fact, we know that the smaller the space, the higher the production per square foot will be! This is because efforts will be more intensive or concentrated.

Do I need animals to practice permaculture?

It depends. In the context of the previous questions, one begins to see the scope of permaculture, its holistic nature and application. An example here would be a garden. There are different approaches to garden pests. One pest-negative approach applies chemicals and uses other such applications to eradicate them.
A permaculture practitioner would simply design for an integrated pest management system that offers beneficial predator habitat (such as frogs, various birds, and other beneficial creatures), is plant positive, and offers chaos in cooperative confusion to eliminate unwanted pests. Once the pests are removed in this approach (usually eaten by a beneficial predator), they are re-deposited to the site in the form of fertilizing excrement. The first approach requires costly off-site inputs and harsh additives; the latter approach simply works with nature in an intentional design that is truly sustainable (a closed loop!). Besides, we don't have "pest problems"; we have "site deficiencies." That is the answer to the question: what is my site lacking? Perhaps it's animals.

What does permaculture have to do with being prepared?

Permaculture-designed human habitats are adaptive, resilient, abundant, and secure places in a future of peak oil, climate instability, and deepening economic insolvency. This is at the heart of preparedness: to be adaptive, resilient, and secure while enjoying true abundance.

How can I start incorporating permaculture into my life—or home, garden, community—right now?

This is a great question! There are many ways to start doing this, today even. The first step is becoming educated about the practice, and the best way to do this is taking a 72-hour Permaculture Design Certification course (commonly referred to as the PDC). A great book has also been written on how to transition to a permaculture lifestyle.

I'm not great at gardening. How can permaculture help me?

That's okay, you don't have to be a great gardener, or architect, or builder, or landscaper, or highly educated, or a person of great financial means to practice permaculture. Again, permaculture is concerned with so much more than gardening. It is a holistic, whole-system approach to designing our human habitats. It gives us a mainframe way of thinking and observing. Within permaculture are many disparate disciplines and sciences, and this doesn't mean you have to be an expert in all of these areas; in fact, it'll take a lifetime (and then some) to master all of them. Simply, you need to gain the mainframe understanding of how to design ecologically sound systems and how the various disciplines (gardening, earthworks, landscaping, etc.) are arranged within permaculture design, and then enjoy the learning process of applying the vast disciplines necessary to implement your plan.

Money's tight. Does that mean permaculture isn't for me?

Not at all! There are so many free resources online, DIY sites, and work-trade programs where you exchange labor for permaculture training that will give you the skills and know-how necessary to get started in permaculture.

Where can I go to find more information?

There are numerous websites, but some of the best include: permies.com is great for DIY resources, videos, tutorials, etc. permaculturenews.com is excellent for daily articles and the news feed. permacultureglobal.com is a global forum for people practicing permaculture, demonstration sites, permaculture aid work, and training courses. geofflawton.com: Geoff Lawton is one of the premier permaculture designers today and offers top quality courses and videos. thebarefootfarm.com is our farm website, which has all of our contact information and tabs for the services we offer in permaculture consultation, design, and implementation as well as educational tours, workshops, and PDC courses.
Don’t hesitate to touch base with us. We’d love to hear from you!
def run(self): print('uniqueId: {}'.format(self.signed_se['header']['uniqueId'])) self.identifier = self.signed_se['header']['uniqueId'] protected = json.loads( base64url_decode(self.signed_se['protected'].encode('ascii'))) if protected['kid'] != self.verification_cert_kid_b64: raise ValueError('kid does not match certificate value') if protected['x5t#S256'] != self.verification_cert_x5t_s256_b64: raise ValueError('x5t#S256 does not match certificate value') jws_compact = '.'.join([ self.signed_se['protected'], self.signed_se['payload'], self.signed_se['signature'] ]) secure_element = json.loads( jose.jws.verify(token=jws_compact, key=self.verification_public_key_pem, algorithms=verification_algorithms)) try: public_keys = secure_element['publicKeySet']['keys'] except KeyError: public_keys = [] for jwk in public_keys: cert = '' for cert_b64 in jwk.get('x5c', []): cert = x509.load_der_x509_certificate( data=b64decode(cert_b64), backend=default_backend()) self.certificate_chain = self.certificate_chain + cert.public_bytes( encoding=serialization.Encoding.PEM).decode('ascii')
Republican megadonor Sheldon Adelson has privately told one of his allies that he supports a campaign that depicts H.R. McMaster as anti-Israel and seeks to remove him from his post as national security adviser. In an email to Mort Klein, the president of the Zionist Organization of America, Adelson writes: "Now that I have talked to somebody with personal experience with McMaster, I support your efforts." Why this matters: Adelson is arguably the most influential donor in Republican politics, spending tens of millions of dollars each election season. He also funds the ZOA, which has been relentlessly attacking McMaster and leading an outside campaign to remove him from his post. Adelson's spokesman Andy Abboud told Axios recently that Adelson had nothing to do with ZOA's campaign against McMaster and was "perfectly comfortable" with the job McMaster was doing. Abboud was correct about Adelson being unaware of the campaign, though Adelson makes clear in his email to Klein that he was never "comfortable" with McMaster because he knew nothing about him. What changed: Adelson tells Klein he spoke with Safra Catz, the Israeli-born CEO of Oracle. Adelson says Catz told him about a dinner she had recently with McMaster and "it certainly enlightened me quite a bit." But Adelson also makes clear in his email that he doesn't want to be publicly associated with the campaign against McMaster. (Klein never claimed Adelson was supporting it, and while he accepts funding from Adelson he is known as an independent Israel hawk who cannot be corralled, even by his major donor.) The pushback: A White House source points out that the Israel team at the White House, including noted right winger Ambassador Friedman, "feel McMaster is remarkably pro-Israel and he just had a meeting with senior Israeli officials where he won plaudits from them for understanding their unique security needs."
/*
 * Because my data structure is a circularVector and I'm adding the priority
 * vehicles at the beginning and the others at the end, when we want to pass
 * the vehicles we start with the priority vehicles (they pass the traffic
 * first). That is why I used removeFirst: the removed Vehicle is returned
 * (like popping a vehicle from the structure).
 */
public Vehicle removeVehicle() {
    Vehicle temp = mVehicles.getFirst();
    mVehicles.removeFirst();
    return temp;
}
// ServeHTTP dispatches the request to the handler whose method matches the // request method. If handler is not found it adds the Allow header to the // response based on the configured handlers. If the request is not an OPTIONS // request, it also sets the response status code to 405 (Method Not Allowed). func (route Method) ServeHTTP(rw http.ResponseWriter, req *http.Request) { method := strings.ToUpper(req.Method) if h, ok := route[method]; ok { h.ServeHTTP(rw, req) return } allow := []string{http.MethodOptions} for k := range route { k = strings.ToUpper(k) if k != http.MethodOptions { allow = append(allow, k) } } sort.Strings(allow) rw.Header().Set("Allow", strings.Join(allow, ", ")) if method != http.MethodOptions { http.Error(rw, "405 method not allowed", http.StatusMethodNotAllowed) } }
import React from 'react'; type Props = { Component: React.FunctionComponent; }; export function FixtureElement({ Component }: Props) { return <Component />; } FixtureElement.cosmosCapture = false;
[Do you have an idea for a future Mind Meld? Let us know!]

The Bram Stoker Award final ballot was recently announced, reminding me why horror as a genre is so much fun, so in that spirit, I asked our panel these questions:

Q: What first piqued your interest in horror, and why do you enjoy writing in the genre? What direction do you see the genre taking in the future, and who are a few of your favorite horror writers, books, or stories?

Here's what they had to say…

Ania Ahlborn

Born in Ciechanow, Poland, Ania Ahlborn has always been drawn to the darker, mysterious, and sometimes morbid sides of life. Her earliest childhood memory is of crawling through a hole in the chain link fence that separated her family home from the large wooded cemetery next door. She'd spend hours among the headstones, breaking up bouquets of silk flowers so that everyone had their equal share. Beyond writing, Ania enjoys reading, cooking, baking, watching movies, drawing, and traveling. She currently resides in Albuquerque, New Mexico with her husband and Scottie, Beau. Learn more about Ania on her site, www.AniaAhlborn.com

My first few childhood memories are all pretty dark. There was the time when I haplessly stood barefoot and barelegged atop a fire ant hill only to look down and see myself covered in them from ankle to waist. Then there was me sneaking into a neighboring cemetery through a hole I found in a chain link fence. I made it my duty to collect all the dusty plastic flowers off the graves and dole them out equally among those who had none. Sometimes, I'd surprise my mother with a bouquet of deadman's blooms. It's not easy for me to pinpoint what piqued my interest in horror. I sometimes think I was born beneath a looming shadow that tends to darken all my thoughts.

Horror is a genre that bucks the status quo. That being said, it's difficult to say what direction it'll take. Some authors say that horror will become more "legitimate" as time goes on, but that's a pretty silly notion. Horror has always been legitimate. It's loved (and sometimes obsessed over) by millions. Yes, some of it is terrible, but without the bad there wouldn't be the good. We wouldn't know subpar from genius without the help of my favorite author, Stephen King, and amazing books like Full Dark No Stars. We wouldn't know a good ghost story from a bad one if Joe Hill hadn't written Heart Shaped Box, or understand that, sometimes, horror can be nothing more than following someone through their twisted everyday lives if Bret Easton Ellis hadn't published American Psycho. Horror is fully dependent on the fears of the one who consumes it. If you really want to know what direction the genre will take, just ask yourself: what am I afraid of?

John Mantooth

John Mantooth is an award-winning author whose short stories have been recognized in numerous year's best anthologies. His short fiction has been published in Fantasy Magazine, Crime Factory, Thuglit, and the Stoker-winning anthology Haunted Legends (Tor, 2010), among others. His collection, Shoebox Train Wreck, was published in 2012 (ChiZine), and his first novel, The Year of the Storm (Berkley, 2013), has recently been nominated for a Stoker.

When I was about eleven years old, I walked by my father's room and heard him laughing very loudly. I poked my head inside and asked him what was so funny.
He told me to sit down because he wanted to read something to me. He held in his hand a paperback book by a man named Stephen King. The book wasn't a typical King book; it was made up of four different novellas. It was called Different Seasons. My father read me the pie-eating contest scene from "The Body." By the time he was finished, I was laughing too. I went straight to his bookshelf and started reading all of King's books. I suppose that was how I got interested. Weird, because it wasn't really the horror element that grabbed my attention, but the humor.

As for the future of horror, I see the genre spreading out, becoming more inclusive. If you think about it, there are a lot of books being published with elements of horror that aren't being classified as such. More and more, I believe people will begin to recognize this and horror won't be seen as a dirty word anymore. Eventually, I believe horror will be mainstream again. But two things have to happen. Horror writers and hardcore fans have to be more accepting of books outside of their tight-knit community, and people that like good books have to embrace the truth that many of those good books are really horror novels (The Lovely Bones, The Road, etc.). As far as modern horror writers go, I think the best in the business are John Langan, Nathan Ballingrud, Paul Tremblay, and Laird Barron. All four are incredible writers first and foremost, but also innovative storytellers who are helping to move horror in new and exciting directions.

Rena Mason

Rena Mason is a Sin City resident and longtime fan of dark fantasy, murder mysteries, horror, sci-fi, and history. She loves mashing these genres with stories of everyday life for unexpected twists. She's the author of The Evolutionist, East End Girls, and a handful of short stories.

The adrenaline rush and intensity of fear are what first piqued my interest in horror. I enjoy writing it hoping I can relay those feelings to others. I'd like to see horror as a genre become more mainstream. For classics, I've always enjoyed works by Edgar Allan Poe and Shirley Jackson; my favorite short story of Poe's is "Hop Frog", and my favorite novel is We Have Always Lived in the Castle by Jackson. Several other authors I enjoy reading are Lisa Morton, Lucy Snyder, Lisa Mannetti (loved her novella duo Deathwatch), Michael Rowe, Gene O'Neill, and so many more. Being more involved in the horror community through the Horror Writers Association has given me the opportunity to read my favorites and discover new ones.

Steve Rasnic Tem

Steve Rasnic Tem's latest novel is Blood Kin (Solaris), southern gothic with a sharp left turn into horror. His most recent story collections are Here with the Shadows (Swan River Press) and Celestial Inventories (ChiZine). He is a past winner of the World Fantasy, British Fantasy, and Bram Stoker awards.

It was a number of impulses and interests, really, which came together for me between 11th grade and the end of college. I had enormous fears and anxieties as a kid, not just of things I could see or imagine—I also thought the world was full of secrets that were impossible to prepare for.
For me there had always this invisible world of dreams and nightmares which I was convinced were just as real as the everyday world, just as essential, but simply not literal. And it seemed to me that if you wanted to write about the full scope of human experience you had to include those things. I started reading ghost stories early on, which I didn’t necessarily connect to horror writing. To me ghost stories were about this spiritual life—a way to talk about these invisible things which terrified me, to turn them into a subject matter for writing, and in that way control their influence on my everyday life. Almost immediately this had a calming effect, and to this day I’m still a rather calm and peaceful person. Anxiety has become a staple of my fiction, but it’s no longer part of how I approach things in my everyday life. It also seemed to me that when done well, horror fiction was highly emotional fiction, with the emotions spilling over into the landscape itself, into every detail used to build mood and atmosphere. This has always appealed to me. In fact, the older I get the more reluctant I am to involve myself with anything that doesn’t stir my emotions. What direction do you see, or would you like to see, the genre taking in the future? I think the key aesthetic question for the horror genre has always been just how close do you get to the real anxieties and fears of the readership before that readership turns away. Part of the attraction of such traditional figures as the vampire, zombie, werewolf etc. is that we know they’re not real—they’re simply comfortable stand-ins for the issues which actually trouble us. And in some cases these analogues stray so far from the actual source of our fears (contemporary YA vampire fiction, for example) that they lose a great deal of their effectiveness. Some writers in the field may react to this trend by writing more extremely. I expect this dynamic to continue, because at its heart, this genre is rather conservative. We talk at times about the “transgressive” nature of horror, and brave attempts are made from time to time to write transgressively, but it’s been my experience that these gestures rarely stick. The majority of readers appear to have a low threshold of discomfort in terms of their reading material. But I’m not like most readers, I suppose. I like a wide variety of fiction, but generally speaking I want to be moved and shaken, and brought to tears at times by the things I read. And sometimes I want to be made acutely uncomfortable. I trust there will always be writers capable of doing that. And if there is any kind of trend, I do see a progressively higher level of writing skill in some of the newer writers who decide to take on this kind of literature. Who are a few of your favorite horror writers, books, or stories? Cormac McCarthy, Ramsey Campbell, Dennis Etchison, M.R. James, Caitlin Kiernan, Peter Straub, Kafka, are all favorites—but there are really too many to mention. Among the newer writers I particularly like Lynda Rucker, Nathan Ballingrud, Kelly Link, Steve Duffy, Simon Stranzas, Simon Unsworth, Gary McMahon, Richard Gavin, Laird Barron, and John Langan. There are also those writers who still produce ghostly fiction in the old tradition—writers like Mark Valentine, Helen Grant, John Howard, Peter Bell, Reggie Oliver—who I enjoy very much. But again, there are SO many. In fact I think we’re having a golden age in short horror fiction at the moment. 
Lisa Morton

Lisa Morton is a six-time Bram Stoker Award winning author and screenwriter. "Her work is consistently dark, unsettling, and frightening" says American Library Association's READERS' ADVISORY GUIDE TO HORROR. Her books include NETHERWORLD (JournalStone, January 2014), MALEDICTION (Evil Jester Press, October 2013), TRICK OR TREAT: A HISTORY OF HALLOWEEN (Reaktion Books), and ZOMBIE APOCALYPSE!: WASHINGTON DECEASED, coming June 2014 from Constable & Robinson.

I always loved horror as a kid (so did my parents, fortunately), but it wasn't until I was fifteen and saw The Exorcist in a packed theater during its initial release that I realized I had to write this stuff for my living. It's hard to explain to younger people who weren't there, because no movie since has had that impact, but The Exorcist tore through audiences with an insane amount of impact. People screamed and fainted and fled the theater in droves, and it was a revelation to teenaged me that a mere work of art could do that. I went into that theater thinking I wanted to be an anthropologist; I came out knowing I had to be a writer. I remain committed to the genre all these years later because I do want to give people that emotional roller coaster experience. When done well, I truly believe it's a transcendent experience. I'd like to see the genre move out of its ghetto (which I think it is doing) and be accepted as a field that can not only supply the thrills and chills but the art and craft as well. And I'd like to see a broader range of voices writing in the genre, especially more women and more people of color.

Jeff Strand

Jeff Strand is the author of a bunch of demented books, including PRESSURE, DWELLER, A BAD DAY FOR VOODOO, WOLF HUNT, SINGLE WHITE PSYCHOPATH SEEKS SAME, BENJAMIN'S PARASITE, FANGBOY, THE SINISTER MR. CORPSE, and lots of others. Three-time Bram Stoker Award finalist. Three-time Bram Stoker Award loser. Four-time Bram Stoker Award Master of Ceremonies.

My interest in horror came from the 1977 book THINGS YOU'VE ALWAYS WANTED TO KNOW ABOUT MONSTERS BUT WERE AFRAID TO ASK, by Tony Tallarico. I was more interested in that book than the movies themselves! (Without Internet, cable, VCRs, or even a local late night Creature Feature, the movies were pretty much inaccessible to me.) I enjoy writing in the genre because I have a very dark sense of humor, and writing horror lets me take it as dark as I can go! I'm terrible at "Where is the genre headed?" projections, but since it would be rude not to answer this question, I'll say that novellas will become more and more prevalent as the popularity of e-books increases. It's a length that's extremely well suited to horror but not well suited to print publication. A few of my favorite horror novels are SWAN SONG by Robert McCammon, IT by Stephen King, NIGHTWORLD by F. Paul Wilson, SAVAGE by Richard Laymon, THIS BOOK IS FULL OF SPIDERS by David Wong, OFF SEASON by Jack Ketchum, and WATCHERS by Dean Koontz.

Helen Marshall
Helen Marshall is an award-winning Canadian author, editor, and doctor of medieval studies. Her poetry and fiction have been published in The Chiaroscuro, Abyss & Apex, Lady Churchill's Rosebud Wristlet, Tor.com and have been reprinted in several Year's Best anthologies. Her debut collection of short stories, Hair Side, Flesh Side (ChiZine Publications, 2012), was named one of the top ten books of 2012 by January Magazine. It won the 2013 British Fantasy Award for Best Newcomer and was short-listed for a 2013 Aurora Award by the Canadian Society of Science Fiction and Fantasy.

I came late to the love of horror, having been quite the terrified child growing up. Horror writing scared me silly, and I found the experience of being scared like that so unpleasant, so overpowering, that I didn't go near the genre until very recently, that is, the last five years or so. Now, I find what draws me to horror writing is exactly what repulsed me as a child: I love the capacity of the genre to provoke such a strong visceral response. My early perception of horror was that it tended to hit the same note over and over again, that writers were only concerned with that visceral response. I'm sure that's the sense that many readers not familiar with the genre have, but in fact it's very wrong. Horror writing is remarkably capacious. It's a big genre, and it does many, many different things. But mostly I find it is a genre that is free from limits; its writers are willing to follow the narrative without flinching. That makes it a potentially confrontational genre, and I like that.

My approach to writing in the early days was heavily influenced by Keith Johnstone's theory of improvisational narrative, designed primarily for improv actors but still very adaptable to fiction. One of the interesting things an experienced improv actor once told me was that if you spend enough time on stage with your fellow actors, you will soon get a sense of all of the areas of narrative where they do not want to go, the narratives they automatically deflect away from. This is interesting to me, because improv creates a safe environment. Nothing is real. Nothing in the narrative has real consequences for the actor. But there are still stories we would rather not tell ourselves, lines of inquiry we would rather not confront, situations of powerlessness that we would rather not place ourselves in—even if it isn't real. And the few times I went on stage, I discovered he was absolutely correct: there were places I didn't want to go. I could sense those areas as "off limits." But the remarkable thing I found was how tremendously exhilarating it could be to explore those places! Because you could see those were the moments the audience responded most strongly. Those were the things the audience wanted to see on stage, because they could sense it too. Those were the moments of most power—not because the audience or the actors were necessarily scared, but because they knew something was happening that was driven by real emotion and real risk-taking. That's what attracts me to horror writing. It legitimizes the impulse to explore fully and deeply, to delve into genuinely powerful stuff even if it is uncomfortable.

It is hard to say where I imagine the genre going next because horror is so responsive to its particular cultural milieu. The things we are afraid of change, and so the genre doesn't stay still.
I have noticed that some writers in the field are gaining more mainstream or literary recognition; that's a good thing in my mind because it will attract new readers and new sensibilities. I think the greatest challenge any genre faces is the "photocopy effect"—when new writers simply mimic the material that came before and produce stories that seem like bad photocopies of things the reader has already seen. That is the potential danger of massively successful and influential writers like, for instance, Stephen King or Stephenie Meyer—publishers will want to capitalize on the success of those writers by asking for more of the same. Genres are enriched, I think, when they begin to look outside themselves, and I think we are seeing the effect of this in the New Weird movement, which has offered a name for dark stories that cross genre boundaries or experiment with new techniques. That's all to the good. And it is particularly necessary for horror, which depends upon the ability of a story to surprise—and by this I don't mean jump scares. Horror fails when it becomes predictable, and as a genre it constantly faces the threat of falling into parody. But when horror writing works, when it feels fresh and new, it's like grabbing onto a live wire!

There are so many really interesting writers at work at the moment that it is hard to pick ones to mention. Right now, I am finding great delight in some of the classic writers—Robert Aickman, Shirley Jackson, Roald Dahl—to name a few. Aickman, in particular, is a real gem. His stories are all about the slow burn, with this incredible wry sense of humour underneath it all. I've also really enjoyed Nathan Ballingrud's North American Lake Monsters, which has some of the best final lines I've ever read. Gemma Files's short fiction continues to impress with its keen attention to language and style. Silvia Moreno-Garcia brought out a brilliant collection of fiction entitled This Strange Way of Dying, which I highly recommend, and Sarah Pinborough's The Language of Dying is one of the most beautiful and heart-wrenching works I've encountered. And I'd offer a blanket recommendation for the books put out by ChiZine Publications, which hit my sweet spot for weird fiction. All this makes me feel very encouraged by the quality of work produced in the genre, and excited to see what happens next.

Elizabeth Massie

Elizabeth Massie is a two-time Bram Stoker Award- and Scribe Award-winning author. Her most recent novels include Hell Gate and Desper Hollow. She can be contacted through her website at http://www.elizabethmassie.com/ and through Facebook.

As a kid, I was incredibly sensitive to the world around me (and still am); I was also very imaginative (and still am). I was sure there was a witch down our block who wanted to cook and eat me, a crushed and disembodied doll head in my closet waiting to attack me, and volcanoes and tsunamis nearby ready to melt me down or wash me away. Creating scary stories was a way of dealing with things that frightened me. Writing allowed me an imagined control over bad circumstances. As an adult, I still write horror because I want to explore the darkness, and in doing so better appreciate the light. I'm no longer terrified of the witch down the block or the doll head in the closet. I'm fairly certain that living in western Virginia will keep me safe from most volcanoes and tsunamis. But there are terrorists and power-crazed leaders in the world.
There is greed and hatred and rage and ignorance and poverty, all of which contribute to some of the worst horrors imaginable. I write about what scares me because I want to examine it and know it. Knowing is always better than not. I use both psychological and supernatural horror to accomplish this. As to where the horror genre might be heading in the future, I really have no clue. I would guess there will be a popular (as in promoted) swing back toward more quiet, introspective horror to counter the extreme horror that is now popular, but both will remain favorites for certain readers. However it goes, horror will remain at its core what it has always been – a delving into what frightens us to see what is there and how we – vicariously through characters – might deal with it all. I have quite a few favorites in the horror genre: Stephen King's The Stand and The Shining, Thomas Tryon's Harvest Home and The Other, Dalton Trumbo's Johnny Got His Gun (not considered horror but it is terrifying), Cormac McCarthy's The Road and Child of God, Shirley Jackson's The Haunting of Hill House, and many, many more. Each packs an emotional wallop and tells a grand story.

Kaaron Warren

Bram Stoker Nominee and Shirley Jackson Award winner Kaaron Warren has lived in Melbourne, Sydney, Canberra and Fiji. She's sold many short stories, three novels (the multi-award-winning Slights, Walking the Tree and Mistification) and four short story collections. Her most recent collection, Through Splintered Walls, won a Canberra Critic's Circle Award for Fiction, two Ditmar Awards, two Australian Shadows Awards and a Shirley Jackson Award. Her story "Air, Water and the Grove" will appear in Paula Guran's Year's Best Dark Fantasy and Horror. Her latest collection is The Gate Theory. You can find her at http://kaaronwarren.wordpress.com/ and she Tweets @KaaronWarren.

My interest in horror was more like a slow realisation. I adored the nastier fairy stories from a young age, especially Bluebeard, which gave me chills and nightmares. I loved it. I read Tales from the Crypt horror comics early on, hiding the covers from my parents. I devoured the news, fascinated like many others by the stories of loss and destruction and cruelty. I always wondered about the back story. How did we get there? The song Horror Movie, from Australian 70s glam band Skyhooks, resonated with me a lot. I'm not sure I enjoy writing horror, because I go to places most people wouldn't want to go. But I do love the freedom of exploring the depths, of trying to understand nightmares and of answering some of my own questions about the nature of evil. I sometimes feel wrung out, as if I've been crying, when I finish a story. It's cathartic, though, and I feel the better for it afterwards.

The best elements of the genre will move more into the mainstream in the future. We're writing accessible fiction, using similar themes to those explored in the mainstream, but we're making it hurt. I love that, and I think readers outside the genre are looking for that pain. Horror writers provide a visceral reaction, and they don't weaken at the end and make everything all right. The good stuff is heart and guts, and I don't mean exposed organs.
Writers I'm loving now, or have loved for a long time: Nathan Ballingrud (his collection "North American Lake Monsters" is very good), Laird Barron, Lisa Tuttle, Lynda Rucker, Kirstyn McDermott (her short story collection "Caution: Small Parts" is chilling), Joe Lansdale, Steve Rasnic Tem, Shirley Jackson, Michael Marshall Smith, Terry Dowling, Gemma Files, Norman Prentiss, and Jeffrey Ford. Favourite novels include The Shining and The Stand, The Haunting of Hill House, Russell Banks' "Lost Memory of Skin", Sheri Holmes' "Witches on the Road", and Pete Dexter's "Paris Trout". Anthologies: anything Ellen Datlow has done, and the two amazing works of fantasy, "Black Water" and "White Fire", edited by Alberto Manguel.

Sandy DeLuca

At present Sandy DeLuca is a full-time writer and painter. She's written and published numerous novels, two poetry collections and several novellas, including the critically acclaimed MESSAGES FROM THE DEAD and DESCENT. She was a finalist for the Bram Stoker Award for poetry in 2001, and again in 2014. She lives with three faithful felines in an old Cape Cod house in Rhode Island.

My father loved horror and suspense movies. He'd take me to see movies every Sunday afternoon, and among them were some of Alfred Hitchcock's early films. In addition, both my parents were Italian, and their parents arrived from the old country with a rich tradition of superstition and supernatural tales. Hopefully the genre will begin to listen to the female voice a bit more closely in the future, and I see that coming, because more of us are publishing our books and we're being taken seriously. I believe that we have a long road ahead of us, but I'm hopeful. Shirley Jackson, Kathe Koja, Robert Dunbar and Greg F. Gifune are great voices in horror. Ira Levin's Rosemary's Baby and The Exorcist, by William Peter Blatty, have always resonated with me. I love good, deep and dark fiction, and I often read outside the genre for inspiration as well. Books like Midnight Cowboy, Deliverance and Suicide Blonde are quite disturbing, and display a horrific side of life.

Lucy A. Snyder

Lucy A. Snyder is the Bram Stoker Award-winning author of the novels Spellbent, Shotgun Sorceress, and Switchblade Goddess, and the collections Sparks and Shadows, Chimeric Machines, and Installing Linux on a Dead Badger. Her writing has appeared in Strange Horizons, Weird Tales, Hellbound Hearts, Dark Faith, Chiaroscuro, GUD, and Lady Churchill's Rosebud Wristlet. You can learn more about her at www.lucysnyder.com

The first books that really enthralled me as a kid were science fiction and fantasy, and so when I was a teenager that's what I wanted to write. However, thanks to my tendency to have vivid nightmares, much of what I began writing was horror, despite my attempts to turn it into something different. That hasn't changed; my imagination is a fairly dark place. I finally quit fighting my horror instincts after I met Gary A. Braunbeck. He gave me a proper introduction to the genre and helped me realize that many of the books I loved contained strong horror elements.
For instance, there’s a lot of dark, disturbing stuff in Neil Gaiman’s Sandman series, but reading it in college I never thought “Hey, this is horror.” And much of Ray Bradbury’s excellent work is extremely dark, but his books had been presented to me as fantasy and science fiction. My genre disconnect was largely due to marketing. I was a teenager in the 80s during the horror boom, and all I saw were the gory, garish covers publishers put on horror novels. I found those really off-putting. The blood and evil clowns and keytar-brandishing skeletons were terrible, and I assumed that the contents of said books would surely be just as dumb as their covers. While I blissfully avoided a lot of crap, I also missed out on some good novels by writers such as JN Williamson. My initial experiences reading slush for magazines didn’t improve my opinion of horror much. Most of the really badly-written, poorly-plotted submissions I got were about psycho killers etc. Sure, I got bad SF submissions, too, but they were forgettably bad. As an editor you will never forget your first batch of necrophiliac poetry, no matter how hard you try. Conversely, the dark fiction I received that was good always seemed like it could be something else: fantasy, usually, or SF. If a story has a lot going on — and good stories always do — chances are the reader will manage to see it in light of his or her favorite genre. Anyhow, Gary gave me a copy of his collection Things Left Behind, and it blew me away. It was emotional, and beautifully-written, and most of all, it was smart. I asked for more, and he obliged. He started showing me the good stuff I’d missed, and he introduced me to the horror stories and novels of Joyce Carol Oates, Shirley Jackson, Clive Barker, Dan Simmons, Joe R. Lansdale, and Jonathan Carroll, just to name a few. I’m not sure where horror is headed. I suspect it’ll go wherever our imaginations can take it! Tananarive Due Tananarive Due is the Cosby Chair in the Humanities at Spelman College. She also teaches in the creative writing MFA program at Antioch University Los Angeles. The American Book Award winner and NAACP Image Award recipient is the author of twelve novels and a civil rights memoir. She recently received a Lifetime Achievement Award in the Fine Arts from the Congressional Black Caucus Foundation. In 2010, she was inducted into the Medill School of Journalism’s Hall of Achievement at Northwestern University. Due and her husband/collaborator, Steven Barnes, wrote and co-produced a short film, Danger Word, based on their novel Devil’s Wake—starring Frankie Faison (“The Wire,” The Silence of the Lambs), nominated for Best Short Narrative Film at the Pan African Film Festival and BronzeLens Film Festival. Due also collaborates on the Tennyson Hardwick mystery series with Barnes, in partnership with actor Blair Underwood. Due’s novella “Ghost Summer,” published in the 2008 anthology The Ancestors, received the 2008 Kindred Award from the Carl Brandon Society, and her short fiction has appeared in best-of-the-year anthologies of science fiction and fantasy. Due is a leading voice in black speculative fiction. Due wrote The Black Rose, a historical novel about the life of Madam C.J. Walker, based on the research of Alex Haley – and Freedom in the Family: A Mother-Daughter Memoir of the Fight for Civil Rights, which she co-authored with her mother, the late civil rights activist Patricia Stephens Due. Freedom in the Family was named 2003’s Best Civil Rights Memoir by Black Issues Book Review. 
As a screenwriter, she is a member of the Writers' Guild of America (WGA). Due lives in Southern California with Steven Barnes and their son, Jason. Her writing blog is at www.tananarivedue.wordpress.com. Her website is at www.tananarivedue.com.

My first exposure to horror was through the classic horror movies my late mother loved so much: The Fly, The Wolf Man, Dracula. At 16, I fell in love with the work of Stephen King. I also loved Shirley Jackson's "The Lottery." But I was also steeped in the history and literature of the black experience, so I found myself drawn to literary writers who wrote about the supernatural: Gloria Naylor's MAMA DAY, Toni Morrison's BELOVED. As a civil rights activist, I think my mother sought out horror as a relief from the true terrors in daily life. Over time, I have come to realize that horror fiction served as the perfect counterbalance to the uncertainties of integrating new neighborhoods as a child, as well as my ongoing fear of mortality. I create characters who must rise up and be as strong as I would like to be when my own monsters come. Favorite horror: THE STAND and PET SEMATARY by Stephen King; SONG OF KALI by Dan Simmons; BELOVED by Toni Morrison; a new short story collection, SALSA NOCTURNA, by Daniel José Older.

Joe McKinney

Joe McKinney has been a patrol officer for the San Antonio Police Department, a homicide detective, a disaster mitigation specialist, a patrol commander, and a successful novelist. His books include the four-part Dead World series, Quarantined, Inheritance, Lost Girl of the Lake, The Savage Dead, Crooked House and Dodging Bullets. His short fiction has been collected in The Red Empire and Other Stories and Dating in Dead World. His latest novel is the werewolf thriller Dog Days, set in the summer of 1983 in the little Texas town of Clear Lake, where the author grew up. In 2011, McKinney received the Horror Writers Association's Bram Stoker Award for Best Novel. For more information go to http://joemckinney.wordpress.com

When did I first catch the horror bug? Looking back, it seems like it should be an easy question to answer. After all, how hard can it be to remember that moment that a younger version of yourself first said, "Wow, this is cool!" It should be like losing one's virginity, shouldn't it? Like a line drawn in the sand, a moment that defines before and after? And yet I find it hard to pin that moment down in my memory. I like to think it was that summer I first watched George Romero's Night of the Living Dead. I was thirteen, I think. I watched the movie on cable, and got so thoroughly freaked out that I went to bed clutching a baseball bat, convinced the zombies were going to be coming through the walls the moment I closed my eyes. In fact, the film scared me so badly I went to bed every night for the next month clutching that same baseball bat. But was that the beginning? I find it hard to pin an origin on my love affair with the genre, for there were precursors. I remember the house in which I grew up. We had these saloon-style doors that separated the living room from the kitchen.
During the summer – I guess I would have been about six or seven at the time – I'd stay home with my mother. Come mid afternoon, she'd turn the TV to reruns of Dark Shadows, and I would go behind those saloon doors and quake in fear as the shows would play out, trying to pretend that I was going for a bag of chips rather than hiding from the vampire on the TV. And then there was the time I was seven, and I found Lon Chaney Jr's The Wolfman on cable. I watched that transformation scene, where he's walking behind the pillars, and slipping further and further into his animal self, and I was terrified. Later that night, I went into my parents' room and told them I'd had a bad dream. My dad, a bit frustrated and irritable – as dads who work too hard and sleep too little so often are – sat up in bed and ran a hand over his face and said, "What's bothering you?" I picked up on his tone and realized that telling them a dream about werewolves would be dismissed as nonsense and get me sent back to my bed. So I picked on a non-supernatural entity as my object of fear. "I had a dream about a bear in my room," I said. "A bear," my dad said, and rolled over and went to sleep. My mom slapped the bed between herself and my dad and that was all I needed. I jumped into their bed and slept soundly the rest of the night, convinced that no werewolf would ever dare fuck with me while my parents were so close.

So, yeah, horror was there all along. I suspect some sort of predisposition to it. I was the kid who felt bored on Space Mountain, but kept pulling my parents toward the Haunted Mansion every visit to Disney World. The heart knows what it wants. But I didn't believe it, really honestly believe it, and understand it, until I became a father. I remember, right after my oldest daughter was born, looking in through the glass at the nursery, and thinking to myself that the world had suddenly gotten so much more complex. I'd gone from being a young police officer with nothing but fun on his mind, to a DAD, who suddenly had to think about house payments and which schools his kids would attend and health insurance and saving for college and all the rest of it, and I freaked out a little. I suddenly had all these responsibilities rushing in at me from every side, and I hardly knew what to do with myself.

Luckily there was writing, something I'd been doing for fun since I was a kid – starting right about the same time that I first watched Night of the Living Dead, in fact. I thought maybe I could write a book about the things I was feeling. Maybe that would help. I started writing an SF novel called The Edge of the Map, and it totally sucked ass. I got about eighty pages into it and started pulling my hair out. I wondered why I was even bothering with it. Nothing of what I'd written felt genuine. Not a word of it. And then I thought about why I was even bothering in the first place. I eventually realized that I was trying to explain why love had me so turned around. I loved that little girl I saw in her cradle in the nursery, loved her more than my own life, and since love was the root cause for me trying to write, I should write what I love. I may seem a little dense for failing to recognize it sooner, but I eventually came to understand that what I loved to write was horror. I thought back on how great it felt to be scared – because the real me, the grown up me, the father me, was scared for the future – and I thought of going to bed clutching a baseball bat.
I decided I wanted to put my present fears of being a parent into my past fears of zombies. I was a young cop with more responsibilities than he could count weighing in on him from every side, so I decided to write about a young cop with zombies weighing in on him from every side. Once I did that, the book practically jumped onto the page. The zombies in that first book – Dead City – became a metaphor for the fear of being a parent. I managed to get it published, and today, nearly ten years later, the book is in its sixteenth printing and I've gone from a cop who sometimes scribbles down stories to an author who sometimes still has to put on a uniform and go into the day job. Horror is putting my kids through college, but it took me forever and a day to understand how that alchemy came to be. Now I do. And that was my – extended – first horror moment.

John Hornor Jacobs

John Hornor Jacobs has worked in advertising for the last fifteen years, played in bands, and pursued art in various forms. He is also, in his copious spare time, a novelist, represented by Stacia Decker of the Donald Maass Literary Agency. His first novel, Southern Gods, was published by Night Shade Books and shortlisted for the Bram Stoker Award. His second novel, This Dark Earth, was published in July, 2012, by Gallery/Pocket Books, an imprint of Simon & Schuster. His young adult series, The Incarcerado Trilogy, comprised of The Twelve Fingered Boy, Incarcerado, and The End of All Things, will be published by Carolrhoda Labs, an imprint of Lerner Publishing.

My father is an interesting dude. Typical southern male, a lawyer, he wore a suit every weekday when I was growing up. He hunted, fished, played golf. Had a closet full of camouflage clothing, gun cabinets, a pool table in the basement. Drove big cars with knobby tires. But underneath that southern male façade was a man who loved fantastic literature and dark stories. When I was very young, and we'd be on car trips, he'd retell me the tales of the Iliad and Odyssey – drawling out the story in his southern accent. On weekends we'd watch Ray Harryhausen movies, Bela Lugosi's Dracula, monster films Tarantula and Frankenstein. That's the fun father-son stuff. He'd also terrorize me with stories of nuclear annihilation. Anyway, I think my love of story – horror and fantasy – comes from my father. Pretty easy to draw the lines of influence there. I've talked more about that here – and recently discussed how he terrorized me with threats of nuclear Armageddon at LitReactor.

As for the future of horror, I see much more of the same that's being currently released. The perennial favorites – monsters, psychos, ghost stories – will be produced to varying quality. The awareness of cosmic horror is on the rise, thanks to folks like Laird Barron, John Langan, Molly Tanzer, and maybe even little ole me. I've seen more mash-ups of horror in recent years and I think that will continue: gumshoe zombies, spy vampires, lothario werewolves. What's interesting to me is how horror and horror tropes, monsters, and devices are permeating and informing fantasy now. Peter Brett recently mentioned on Twitter that his The Warded Man was his attempt to tell a horror-based fantasy, and you can see the presence of horror in "grimdark" works, at the least of the psychological bent, at the most, full of frightening and dark creatures and the characters hazarded by them. Folks who're writing currently that are my favorites and doing interesting things in these fields?
Chuck Wendig’s Miriam Black series, which is fast-paced and profane and hilarious and dark – a perfect mixture of the horrific and the fantastic. Mark Lawrence’s Broken Empire fantasy, beginning with Prince of Thorns, is incredible and satisfying, full of broken people who may be overcompensating a tad for the bad things that’ve happened to them. Teresa Frohock’s wonderful Miserere is a fantasy that folds some horror elements into the plot. I think with the success of shows like True Detective and Hannibal, we’ll start seeing a stripe of more cerebral horror, as opposed to the schlocky True Blood and Supernatural, though those are both fun shows. Sleepy Hollow exceeded my expectations simply because it was bonkers (though the writers held it together, and it doesn’t hurt to have John Noble on board). Rachel Aukes Rachel Aukes is the bestselling author of 100 Days in Deadland, which was named one of the best books of 2013 by Suspense Magazine and one of the best zombie books by the Huffington Post. When not writing, she can be found flying old airplanes and trying (not so successfully) to prepare for the zombie apocalypse. Learn more at www.RachelAukes.com As a kid, I was absolutely obsessed with comic books (okay, I still am). Through comics such as Weird Tales, I was introduced to fantastical, terrifying stories of horror. After those early comics, I ventured into Poe’s tales, and many of them remain my favorites. It wasn’t long before I read I Am Legend by Richard Matheson, the story that shaped me most as a writer. In that story, just like in Frankenstein by Mary Shelley, the lines between good and evil were blurred: with a slight shift in perception, the monsters perhaps aren’t the worst thing to fear; sometimes the worst thing to fear is ourselves. Good horror will always be character-driven, delving into the deepest, darkest parts within us. That’s why I love writing it—that exposure of the human element. Regardless of fads and socio-economic impacts on the genre, horror will always remain focused on the darkness within, and good stories are timeless. Stephen King, Shirley Jackson, F. Paul Wilson, Joe McKinney, William Massa, DJ Molles, Rhiannon Frater, and DA Wearmouth are some of my auto-buy favorites. And I’m sure I’ll find another favorite author as I work through my current to-be-read list, because there’s so much great horror out there! Robert Pobi Robert Pobi is an international bestselling novelist. His debut novel, Bloodman, was compared to “Thomas Harris in his prime” by Sarah Weinman in her National Post review, and O – The Oprah Magazine chose Bloodman as one of its Must-Reads for 2012. Pobi does most of his writing at an isolated cabin in the mountains. The rest of the time he is busy getting speeding tickets. Visit him at www.robertpobi.com When I was a kid, there was an episode of Kolchak: The Night Stalker that changed the way I saw stories. In case no one remembers, that lost nugget of television gold starred Darren McGavin as a newspaper reporter who covered mysterious events, basically a sweatier, drunker version of Agent Mulder. In that particular episode – The Sentry – an actor in a lizard suit killed off a bunch of guys foolish enough to drive their golf cart close to the lizard/actor’s lair.
This seemed like pretty brilliant stuff at the time. I was seven. Besides my love of actors in lizard outfits (oh, there have been a few) I have a strong suspicion I write this kind of stuff for the same reason anyone does – it’s what comes out when I sit down to work. I’m not going to say that it’s an unconscious act, but I certainly have less control over it than I’d like – no matter where I start out with a story, things very quickly turn bad for my characters. Sure, I can do all the stuff the romance book tutorials tell me to: I picture a happy couple with nice hair living in a nice house with their children (who also have nice hair). They have a nice dog and their label-appropriate clothing hangs in the nice walk-in closet. I can picture their trip to the nice lake where they have a nice cabin. The boat is named Daddy’s L’il Girl. And by the time I get to page three, someone’s fingers have been lost to the propeller. From there it usually goes downhill for all concerned. It’s just the way I was built. Some guys are good at computer programming. Others are good with a chef’s knife and dead prawns. Me? Keeping people up at night seems to be where it’s at. The future of horror? It might be a cheap shot (I know it certainly is an easy one), but scientific hubris seems like a limitless supply of oops moments. If it was good enough for Mary Shelley, it’s most certainly good enough for a world hopped up on GMO foods, nanotechnology, malicious computer code that corrects itself, and Botox. Favorites in the genre? Mr. King of course. When people start using your name as an adjective, you’ve covered some important mileage. Thomas Harris is another poet laureate in the creepies – Red Dragon is a remarkable book. Jack Ketchum really has his chops down. I still like old Clive Barker. Bram Stoker’s Dracula. The 1933 film, King Kong. I re-watched The Lords of Salem and Dead Snow up at my cabin a few weeks back – both of them were perfect films for a Sunday afternoon. One more? John Carpenter’s The Thing.
# -*- coding: utf-8 -*-
from __future__ import print_function
import requests
import bs4
import json
import re
import sys
import os


class Paper():
    """ A class that holds the information for an Arxiv paper. """

    def __init__(self, number=None, title=None, auths=None, abstract=None, fromfile=None):
        """ Initialize a paper with the arxiv number, title, authors, and abstract. """
        if fromfile is not None:
            self.load(fromfile)
        else:
            self.number = number
            self.title = title
            if auths is not None:
                self.authors = list(auths.values())
                self.author_ids = list(auths.keys())
                self.author_dict = auths.copy()
            else:
                self.authors = None
                self.author_ids = None
                self.author_dict = None
            self.abstract = abstract
            self.link = u'http://arxiv.org/abs/' + number

    def format_line(self, strval, maxlength, pad_left, pad_right):
        """ Function to format a line of a given length. Used by the __str__ routine."""
        # Break the string into maxlength-sized chunks, marking continuations with '-'.
        temp = re.sub("(.{" + "{:d}".format(maxlength) + "})", u"\\1-\n", strval.replace('\n', ''), 0, re.DOTALL).strip()
        temp = temp.split('\n')
        # Pad the final chunk with spaces so every chunk has the same width.
        temp[-1] = temp[-1] + ''.join([u'\u0020'] * (maxlength - len(temp[-1])))
        if len(temp) > 1:
            temp[0] = temp[0][:-1] + temp[0][-1]
        return pad_left + (pad_right + '\n' + pad_left).join(temp) + pad_right

    def get_search_string(self):
        """ Concatenate the searchable fields (abstract, title, number, authors) in lower case. """
        return ' '.join([self.abstract.lower(), self.title.lower(), self.number]
                        + [a.lower() for a in self.author_ids]
                        + [a.lower() for a in self.authors])

    def save(self, filename):
        with open(filename, "a") as f:
            json.dump(vars(self), f)

    def load(self, filename):
        # Accept either a filename pointing at a JSON dump or an already-parsed dict.
        try:
            if os.path.exists(filename):
                with open(filename, 'r') as f:
                    dat = json.load(f)
            else:
                dat = filename
        except TypeError:
            dat = filename
        for key, val in dat.items():
            setattr(self, key, val)

    def __eq__(self, paper):
        return (self.number == paper.number)

    def __ne__(self, paper):
        return not self.__eq__(paper)

    def __le__(self, paper):
        return float(self.number) <= float(paper.number)

    def __ge__(self, paper):
        return float(self.number) >= float(paper.number)

    def __lt__(self, paper):
        return float(self.number) < float(paper.number)

    def __gt__(self, paper):
        return float(self.number) > float(paper.number)

    def __str__(self):
        """ Display the paper in a somewhat nice looking way.
""" maxlen = 80 pad_char = u"\u0025" newline_char = u"\u000A" space_char = u"\u0020" tab_char = space_char + space_char + space_char + space_char comma_char = u"\u002C" and_char = u"\u0026" pad_left = pad_char + pad_char + pad_char + tab_char pad_right = tab_char + pad_char + pad_char + pad_char if len(self.authors) == 1: authstr = self.authors[0] else: authstr = (comma_char + space_char).join(self.authors[:-1]) authstr += comma_char + space_char + and_char + space_char + self.authors[-1] authstr = self.format_line(authstr, maxlen, pad_left, pad_right) titlestr = self.format_line(self.title, maxlen, pad_left, pad_right) linkstr = self.format_line(self.link, maxlen, pad_left, pad_right) border = ''.join([pad_char]*(maxlen + len(pad_left) + len(pad_right))) blank_line = pad_left + ''.join([space_char] * maxlen) + pad_right strbody = newline_char + \ border + newline_char + \ blank_line + newline_char + \ titlestr + newline_char + \ blank_line + newline_char + \ linkstr + newline_char + \ blank_line + newline_char + \ authstr + newline_char + \ blank_line + newline_char + \ border + newline_char + \ newline_char # Check for python 2 to convert from unicode if sys.version_info < (3,): strbody = strbody.encode("utf8","ignore") return strbody def save_many(papers,filename): try: papers = list(papers.values()) except AttributeError: try: papers = list(papers) except TypeError: papers = [papers] dat = [vars(paper) for paper in papers] with open(filename,'w') as f: json.dump(dat,f) def load_many(filename): with open(filename,'r') as f: dat = json.load(f) papers =[Paper(fromfile=d) for d in dat] return {paper.number: paper for paper in papers} def authors_list_to_dict(author_list): authors_dict = {} for a in author_list: if '(' in a: # We have an affiliation a = a.split('(')[0] #a = ' ' .join(a.split('(')[0]) temp = a.split() if len(temp) > 2: # More than two names, take first and last name = (temp[0],temp[-1]) elif len(temp) == 1: # Just one name, probably a spacing error temp = temp[0].split('.') name = (temp[0],temp[-1]) else: # Two names name = (temp[0],temp[1]) authors_dict[name[1]+'_'+name[0][0].upper()] = ' '.join(temp) return authors_dict def read_paper_from_url(number): bowl = requests.get('http://arxiv.org/abs/'+ str(number)) soup = bs4.BeautifulSoup(bowl.text, 'html.parser') title = soup.find_all('h1',attrs={'class':'title mathjax'})[0].text.split('Title:')[-1].strip() authors = [x.strip() for x in soup.find_all('div',attrs={'class': 'authors'})[0].text.split('Authors:')[-1].split(',')] abstract = soup.find_all('blockquote',attrs={'class':'abstract mathjax'})[0].text.split('Abstract:')[-1].strip() return Paper(number,title,authors_list_to_dict(authors),abstract) def scrape_arxiv(arxiv_names,new=True,recent=False,month=None,year=None,number=200,skip=0,silent=False,mute=False): """ Scrape the given arxiv pages. By default we grab all of the papers in the latest listing. You can also specify a certain year and month using the month and year arguments. Setting month = 'all' will grab all of the papers for the year. Use the number argument to only select a certain number of papers. Note that it takes roughly 30-40 seconds to grab ~1,500 papers. 
""" if month is None and year is None: if recent: new = False else: new = False recent = False month_dict = {'jan':1, 'january':1, 'feb':2, 'feburary':2, 'mar':3, 'march':3, 'apr':4, 'april':4, 'may':5, 'jun':6, 'june':6, 'jul':7, 'july':7, 'aug':8, 'august':8, 'sep':9, 'september':9, 'oct':10, 'october':10, 'nov':11, 'november':11, 'dec':12, 'december':12, 'all':'all'} try: month = month_dict[month.lower()] except AttributeError: pass try: year = int(str(year)[-2:]) # Last 2 digits of the year except ValueError: pass if hasattr(arxiv_names, 'lower') and hasattr(arxiv_names, 'upper'): # We have just a single arxiv arxiv_names = [arxiv_names] res = {} for arxiv_name in arxiv_names: url_str = u'http://arxiv.org/list/' if new: url_str = url_str + arxiv_name + u'/new' elif recent: url_str = url_str + arxiv_name + u'/pastweek?skip={:d}&show={:d}'.format(skip,number) else: try: if month.lower() == 'all': url_str = url_str + '?year={:02d}&month=all&archive={}&show={:d}'.format(year,arxiv_name,number) except AttributeError: url_str = url_str + '?year={:02d}&month={:02d}&archive={}&show={:d}'.format(year,month,arxiv_name,number) if not mute: print(u'\tChecking ' + url_str) bowl = requests.get(url_str) if not silent: print(u'\tParsing data...') soup = bs4.BeautifulSoup(bowl.text, 'html.parser') # Every new paper is enclosed in <dd> </dd> tags entries = soup.find_all('dd') cutoff = 0 for list_item in soup.find_all('li'): temp = list_item.find_next('a') if temp.text == 'Replacements': cutoff = int(re.findall(r'\d+',temp['href'])[0]) numbers = [re.findall('\d*\.\d+|\d+',num.text)[0] for num in soup.find_all('span', {'class': 'list-identifier'})][:cutoff-1] for i, (num, entry) in enumerate(zip(numbers, entries[:cutoff-1]),start=1): authors = entry.find_next('div', {'class': 'list-authors'}).text.split('Authors:')[-1].strip().split(', \n') authors = authors_list_to_dict(authors) title = entry.find_next('div', {'class': 'list-title'}).text.split('Title:')[-1].strip() abstract = entry.find_next('p', {'class': 'mathjax'}) if abstract is None: abstract = '' else: abstract = abstract.text res[num] = Paper(num,title,authors,abstract) return res def check_keywords(arxiv_names, keywords,new=True,recent=False,month=None,year=None,number=200, skip=0, silent=True, mute=False): """ Check the given arxivs against a list of keywords. The keywords can either be in a text file or in a list. Returns a list of papers that contain the keywords in either their title, abstract, or author list.""" papers = scrape_arxiv(arxiv_names,new=new,recent=recent,month=month,year=year,number=number,skip=skip,silent=silent,mute=mute) return check_keywords_from_papers(papers,keywords,silent=silent,mute=mute) def load_keywords(keywords): try: if os.path.exists(keywords): with open(keywords, 'r') as f: keyword_list = f.readlines() keyword_list = [line.strip().lower() for line in keyword_list] else: keyword_list = [keywords.strip().lower()] except TypeError: keyword_list = [line.strip().lower() for line in keywords] res_list = [] for key in keyword_list: res_list.append(key) if ',' in key: key = key.split(',') last_name = key[0].strip().title() first_name = key[1].strip().title() res_list.append((first_name +' ' + last_name).lower()) res_list.append((last_name + '_' + first_name[0]).lower()) return res_list def check_authors_from_papers(papers,authors,silent=False,mute=False): """ Check the given papers against a list of authors. The authors can either be in a text file or in a list. 
Returns a list of papers that contain the authors in either their title, abstract, or author list.""" return check_keywords_from_papers(papers,authors,silent=silent,mute=mute) def check_keywords_from_papers(papers,keywords,silent=False,mute=False): """ Check the given papers against a list of keywords. The keywords can either be in a text file or in a list. Returns a list of papers that contain the keywords in either their title, abstract, or author list.""" keyword_list = load_keywords(keywords) record_list = [] try: paper_list = list(papers.values()) except AttributeError: paper_list = list(papers) for paper in paper_list: hits = [paper.get_search_string().count(key) for key in keyword_list] res_count = sum(hits) if res_count > 0: found_keys = [key for hit,key in zip(hits,keyword_list) if hit >0] record_list.append((res_count,found_keys,paper)) if len(record_list) > 0: record_list = sorted(record_list,reverse=True) if not mute: print("Found {:d} {}".format(len(record_list),'papers' if len(record_list)>1 else 'paper')) for record in record_list: print('{:d} {}'.format(record[0], 'hits' if record[0]>1 else 'hit')) print(record[1]) print(record[-1]) return {temp[-1].number:temp[-1] for temp in record_list} else: if not mute: print('No results.') return None def check_authors(arxiv_names, authors, new=True,recent=False, month=None, year=None, number=200,skip=0,silent=True,mute=False): """ Check the given arxivs against a list of authors given in the form Last, First. The authors can either be in a text file or in a list. Returns a list of papers that contain the authors.""" papers = scrape_arxiv(arxiv_names,new=new,recent=recent,month=month,year=year,number=number,skip=skip,silent=silent,mute=mute) return check_keywords_from_papers(papers,authors,silent=silent,mute=mute)
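Since the module above defines the scraping and filtering pieces but never shows how they fit together, here is a minimal usage sketch. It assumes the code is saved as arxiv_scraper.py and that arxiv.org is reachable; the arxiv section, keyword list, and output filename are invented for illustration.

from arxiv_scraper import scrape_arxiv, check_keywords_from_papers, save_many

# Grab today's "new" listing for one arxiv section (a list of sections also works).
papers = scrape_arxiv('astro-ph.EP', new=True)

# Keep only papers whose title, abstract, or author list matches a keyword;
# a path to a text file with one keyword per line works here too.
hits = check_keywords_from_papers(papers, ['protoplanetary disk', 'migration'])

# check_keywords_from_papers returns None when nothing matched.
if hits is not None:
    save_many(hits, 'matched_papers.json')   # restore later with load_many()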
#include "stdafx.h" #include "TreeViewLocals.h" #include "ide2.h" #include "MainFrame.h" #include "LuaDoc.h" #include "LuaView.h" IMPLEMENT_DYNCREATE(CTreeViewLocals, CTreeView) CTreeViewLocals::CTreeViewLocals() {} CTreeViewLocals::~CTreeViewLocals() {} //ON_NOTIFY_REFLECT(NM_RCLICK, OnRclick) //ON_NOTIFY_REFLECT(NM_DBLCLK, OnDblclk) //ON_NOTIFY_REFLECT(TVN_KEYDOWN, OnKeydown) //ON_COMMAND(ID_PROJECT_ADD_FILES, OnProjectAddFiles) //ON_COMMAND(ID_PROJECT_PROPERTIES, OnProjectProperties) BEGIN_MESSAGE_MAP(CTreeViewLocals, CTreeView) //{{AFX_MSG_MAP(CTreeViewLocals) ON_WM_CREATE() ON_NOTIFY_REFLECT(TVN_ITEMEXPANDED, OnItemexpanded) ON_NOTIFY_REFLECT(NM_DBLCLK, OnDblclk) //}}AFX_MSG_MAP END_MESSAGE_MAP() int CTreeViewLocals::OnCreate(LPCREATESTRUCT lpCreateStruct) { if (CTreeView::OnCreate(lpCreateStruct) == -1) return -1; m_pTree = &GetTreeCtrl(); InsertRoot(); return 0; } void CTreeViewLocals::InsertRoot() { TV_INSERTSTRUCT var; var.hParent = NULL; var.hInsertAfter = TVI_LAST; var.item.pszText = "\\"; var.item.mask = TVIF_TEXT; m_pTree->InsertItem(&var); } void CTreeViewLocals::RemoveAll() { m_pTree->DeleteAllItems(); InsertRoot(); } BOOL CTreeViewLocals::PreCreateWindow(CREATESTRUCT& cs) { cs.style |= TVS_HASBUTTONS | TVS_HASLINES | TVS_LINESATROOT; return CTreeView::PreCreateWindow(cs); } void CTreeViewLocals::OnItemexpanded(NMHDR* pNMHDR, LRESULT* pResult) { NM_TREEVIEW* pNMTreeView = (NM_TREEVIEW*)pNMHDR; /* if ( pNMTreeView->itemNew.hItem == m_hFilesFolder ) { if ( pNMTreeView->action==TVE_EXPAND ) m_pTree->SetItemImage(pNMTreeView->itemNew.hItem, 3, 3); else if ( pNMTreeView->action==TVE_COLLAPSE ) m_pTree->SetItemImage(pNMTreeView->itemNew.hItem, 2, 2); } */ *pResult = 0; } CString CTreeViewLocals::GetItemName(HTREEITEM itm) { CString s =m_pTree->GetItemText(itm); return s.Left(s.Find(" ")); } CString CTreeViewLocals::GetItemFullName(HTREEITEM itm) { CArray<CString,CString> names; while( itm!=m_pTree->GetRootItem() ){ names.Add(GetItemName(itm)); itm = m_pTree->GetParentItem(itm); }; CString res; BOOL bFirst = TRUE; while( names.GetCount() ){ if(!bFirst){ res.Append("."); }else bFirst = FALSE; res.Append(names.GetAt(names.GetCount()-1)); names.RemoveAt(names.GetCount()-1); } return res; } HTREEITEM CTreeViewLocals::FindParentItem(HTREEITEM start_from, char* name) { HTREEITEM itm = m_pTree->GetChildItem(start_from); while ( itm != NULL ){ CString varName =GetItemName(itm); if( varName.Compare(name)==0 ) return itm; itm = m_pTree->GetNextSiblingItem(itm); } return NULL; } void CTreeViewLocals::AddVariable(Variable* var) { HTREEITEM itm_root = m_pTree->GetRootItem(); if(!itm_root){ AddVariable(itm_root,var); return; } char name[255]; strcpy(name,var->szName); char seps[] = "."; char *token; token = strtok( name, seps ); while( token != NULL ) { HTREEITEM itm = FindParentItem(itm_root, token); if(itm==NULL) break; itm_root = itm; token = strtok( NULL, seps ); } strcpy(var->szName,token); AddVariable(itm_root,var); m_pTree->Expand(m_pTree->GetRootItem(),TVE_EXPAND ); } void CTreeViewLocals::AddVariable(HTREEITEM parent, Variable* variable) { //var->szName, var->szType,var->szValue //char* szName, char* szType, char* szValue char sText[512]; sprintf(sText,"%s %s %s",variable->szName,variable->szType, variable->szValue); TV_INSERTSTRUCT var; var.hParent = parent; var.hInsertAfter = TVI_LAST; var.item.pszText = sText; var.item.mask = TVIF_TEXT; m_pTree->InsertItem(&var); } void CTreeViewLocals::OnDblclk(NMHDR* pNMHDR, LRESULT* pResult) { CPoint pt; GetCursorPos(&pt); 
	m_pTree->ScreenToClient(&pt);

	UINT nFlags;
	HTREEITEM hItem = m_pTree->HitTest(pt, &nFlags);
	*pResult = 0;

	// Ignore double-clicks that miss an item or land on a value that is not a table.
	if (hItem == NULL || m_pTree->GetItemText(hItem).Find(" table ") == -1)
		return;

	CString sVarName = GetItemFullName(hItem);
	g_mainFrame->OpenVarTable(sVarName.GetBuffer());
}
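For readers skimming the MFC plumbing, the interesting part of AddVariable above is how a dotted variable name such as player.pos.x is resolved against the items already in the tree: walk down one name segment at a time, stop at the deepest node that already exists, and insert the next segment there. The following Python sketch restates that walk with a plain nested dict standing in for the tree control; it illustrates the algorithm only and is not code from this project.

def add_variable(tree, dotted_name, value):
    """Insert the first unmatched segment of a dotted name under the deepest
    node whose path already exists (mirrors CTreeViewLocals::AddVariable)."""
    node = tree
    for segment in dotted_name.split("."):
        child = node.get(segment)
        if isinstance(child, dict):
            node = child                  # segment already shown; descend
        else:
            node[segment] = value         # first unmatched segment becomes a new item
            return
    # Every segment already existed; nothing new to insert.

tree = {}
add_variable(tree, "player", {})
add_variable(tree, "player.pos", {})
add_variable(tree, "player.pos.x", "x number 12.5")
print(tree)   # {'player': {'pos': {'x': 'x number 12.5'}}}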
Correlation of Blood Pressure Variability as Measured By Clinic, Self-measurement at Home, and Ambulatory Blood Pressure Monitoring BACKGROUND Blood pressure variability (BPV) has been postulated as a potential predictor of cardiovascular outcomes. No agreement exists as to which measurement method is best for BPV estimation. We attempt to assess the correlation between BPV obtained at the doctor's office, self-measurement at home (SMBP) and ambulatory BP monitoring (ABPM). METHODS Eight weekly clinic BP measurements, 2 SMBP series, and 1 24-hour ABPM recording were carried out in a sample of treated hypertensive patients. BPV was calculated using the SD, the "coefficient of variation" and the "average real variability." Determinants of short-, mid-, and long-term BPV (within each measurement method) were also calculated. The different BPV determinants were correlated "intramethod" and "intermethod" by linear regression test. RESULTS For the 104 patients (66.5 ± 7.7 years, 58.7% males), the ABPM BPV (SD, systolic/diastolic: 14.5 ± 3.1/9.8 ± 2.5 mm Hg) was higher than the SMBP (12.2 ± 9.8/7.4 ± 5.8 mm Hg; P < 0.001) and clinic BPV (10 ± 8.9/5.9 ± 4.9 mm Hg; P = 0.001). The main BPV correlation between methods was weak, with a maximum R2 = 0.17 (P < 0.001) between clinic and SMBP systolic BPV. The "intramethod" correlation of BPV yielded a maximum R2 = 0.21 (P < 0.001) between morning diastolic SMBP intershift/intermeans variability. The "intermethod" correlation of short-, mid-, and long-term BPV determinants was weak (maximum R2 = 0.22, P < 0.001, between clinic intraday variability/SMBP morning intershift variability). CONCLUSIONS The "intramethod" and "intermethod" correlation between BPV determinants was weak or nonexistent, even when comparing determinants reflecting the same type of temporal BPV. Our data suggest that BPV reflects a heterogeneous phenomenon that strongly depends on the estimation method and the time period evaluated.
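The abstract names three dispersion indices without spelling them out, so here is a short sketch of their usual definitions: the standard deviation (SD), the coefficient of variation (CV, the SD expressed as a percentage of the mean), and the average real variability (ARV, the mean absolute difference between consecutive readings). This is an illustration based on the standard formulas, not the authors' analysis code, and the sample readings are invented.

from statistics import mean, pstdev

def bpv_indices(readings):
    """Return (SD, CV, ARV) for a sequence of BP readings in mm Hg."""
    sd = pstdev(readings)             # SD of the series (population form; the paper does not specify)
    cv = 100.0 * sd / mean(readings)  # CV as a percentage of the mean
    arv = mean(abs(b - a) for a, b in zip(readings, readings[1:]))  # mean absolute change between consecutive readings
    return sd, cv, arv

# Example: eight weekly clinic systolic readings (made-up values).
print(bpv_indices([142, 138, 151, 146, 139, 144, 150, 137]))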
/** * Class to implement the asynchronous processing in the interface blockade * status modification. * * @author NTT * */ public class FcInterfaceMaintenanceUpdateRunnner extends FcAbstractInterfaceMaintenanceRunnerBase { private MsfLogger logger = MsfLogger.getInstance(FcInterfaceMaintenanceUpdateRunnner.class); private InterfaceChangeStateRequest request; private InterfaceChangeStateRequestBody requestBody; /** * Constructor. * * @param request * Request for the IF blockade status modification. * @param requestBody * Request body for the IF blockade status modification. */ public FcInterfaceMaintenanceUpdateRunnner(InterfaceChangeStateRequest request, InterfaceChangeStateRequestBody requestBody) { this.request = request; this.requestBody = requestBody; } @Override protected RestResponseBase executeImpl() throws MsfException { try { logger.methodStart(); SessionWrapper sessionWrapper = new SessionWrapper(); RestResponseBase responseBase = null; try { sessionWrapper.openSession(); FcNodeDao nodeDao = new FcNodeDao(); FcNode fcNode = getNode(sessionWrapper, nodeDao, request.getFabricTypeEnum(), Integer.valueOf(request.getNodeId())); List<FcNode> nodes = new ArrayList<>(); nodes.add(fcNode); sessionWrapper.beginTransaction(); switch (request.getFabricTypeEnum()) { case LEAF: logger.performance("start get leaf resources lock."); FcDbManager.getInstance().getLeafsLock(nodes, sessionWrapper); logger.performance("end get leaf resources lock."); break; case SPINE: logger.performance("start get spine resources lock."); FcDbManager.getInstance().getSpinesLock(nodes, sessionWrapper); logger.performance("end get spine resources lock"); break; default: throw new MsfException(ErrorCode.UNDEFINED_ERROR, "NodeType = " + NodeType.getEnumFromCode(fcNode.getNodeType()).getSingularMessage()); } logger.performance("start wait for IF state change process."); synchronized (FcNodeManager.getInstance().getFcIfStateChangeLockObject()) { logger.performance("end wait for IF state change process."); checkIf(sessionWrapper, request); sendRequest(request, requestBody, fcNode); responseBase = creatIfBlockageStatusUpdateAsyncData(); sessionWrapper.rollback(); return responseBase; } } catch (MsfException msfException) { logger.error(msfException.getMessage(), msfException); sessionWrapper.rollback(); throw msfException; } finally { sessionWrapper.closeSession(); } } finally { logger.methodEnd(); } } private FcNode getNode(SessionWrapper sessionWrapper, FcNodeDao fcNodeDao, NodeType nodeType, Integer nodeId) throws MsfException { try { logger.methodStart(new String[] { "fcNodeDao", "nodeType", "nodeId" }, new Object[] { fcNodeDao, nodeType, nodeId }); FcNode fcNode = fcNodeDao.read(sessionWrapper, nodeType.getCode(), nodeId); if (fcNode == null) { throw new MsfException(ErrorCode.TARGET_RESOURCE_NOT_FOUND, "target resource is not found. Node Id = " + nodeId); } return fcNode; } finally { logger.methodEnd(); } } private void checkIf(SessionWrapper sessionWrapper, InterfaceChangeStateRequest request) throws MsfException { try { logger.methodStart(new String[] { "request" }, new Object[] { request }); switch (request.getIfTypeEnum()) { case PHYSICAL_IF: FcPhysicalIfDao physicalIfDao = new FcPhysicalIfDao(); FcPhysicalIf physicalIf = physicalIfDao.read(sessionWrapper, request.getFabricTypeEnum().getCode(), Integer.valueOf(request.getNodeId()), request.getIfId()); if (physicalIf == null) { String errMsg = "The specified physical IF does not exist. 
Physical If Id = " + request.getIfId(); logger.error(errMsg); throw new MsfException(ErrorCode.TARGET_RESOURCE_NOT_FOUND, errMsg); } break; case LAG_IF: FcLagIfDao lagIfDao = new FcLagIfDao(); FcLagIf lagIf = lagIfDao.read(sessionWrapper, request.getFabricTypeEnum().getCode(), Integer.valueOf(request.getNodeId()), Integer.valueOf(request.getIfId())); if (lagIf == null) { String errMsg = "The specified Lag IF does not exist. Lag If Id = " + request.getIfId(); logger.error(errMsg); throw new MsfException(ErrorCode.TARGET_RESOURCE_NOT_FOUND, errMsg); } break; case BREAKOUT_IF: FcBreakoutIfDao breakoutIfDao = new FcBreakoutIfDao(); FcBreakoutIf breakoutIf = breakoutIfDao.read(sessionWrapper, request.getFabricTypeEnum().getCode(), Integer.valueOf(request.getNodeId()), request.getIfId()); if (breakoutIf == null) { String errMsg = "The specified Breakout IF does not exist. Breakout If Id = " + request.getIfId(); logger.error(errMsg); throw new MsfException(ErrorCode.TARGET_RESOURCE_NOT_FOUND, errMsg); } break; default: String errMsg = "The specified IF Type Error"; logger.error(errMsg); throw new MsfException(ErrorCode.TARGET_RESOURCE_NOT_FOUND, errMsg); } } finally { logger.methodEnd(); } } protected RestResponseBase sendRequest(InterfaceChangeStateRequest request, InterfaceChangeStateRequestBody requestBody, FcNode fcNode) throws MsfException { try { logger.methodStart(new String[] { "request", "requestBody" }, new Object[] { request, requestBody }); InterfaceChangeStateEcRequestBody ecRequestBody = new InterfaceChangeStateEcRequestBody(); ecRequestBody.setStatus(requestBody.getBlockadeStatusEnum().getMessage()); RestRequestBase requestBase = new RestRequestBase(); requestBase.setRequestBody(JsonUtil.toJson(ecRequestBody)); String ecControlIpAddress = FcConfigManager.getInstance().getSystemConfSwClusterData().getSwCluster() .getEcControlAddress(); int ecControlPort = FcConfigManager.getInstance().getSystemConfSwClusterData().getSwCluster().getEcControlPort(); RestResponseBase responseBase = RestClient.sendRequest( EcRequestUri.CHANGE_IF_STATUS.getHttpMethod(), EcRequestUri.CHANGE_IF_STATUS .getUri(String.valueOf(fcNode.getEcNodeId()), request.getIfType(), request.getIfId()), requestBase, ecControlIpAddress, ecControlPort); String errorCode = null; if (StringUtils.isNotEmpty(responseBase.getResponseBody())) { ErrorInternalResponseBody body = JsonUtil.fromJson(responseBase.getResponseBody(), ErrorInternalResponseBody.class, ErrorCode.EC_CONTROL_ERROR); errorCode = body.getErrorCode(); } checkRestResponseHttpStatusCode(responseBase.getHttpStatusCode(), HttpStatus.OK_200, errorCode, ErrorCode.EC_CONTROL_ERROR); return responseBase; } finally { logger.methodEnd(); } } private RestResponseBase creatIfBlockageStatusUpdateAsyncData() { try { logger.methodStart(); RestResponseBase responseBase = new RestResponseBase(); responseBase.setHttpStatusCode(HttpStatus.OK_200); return responseBase; } finally { logger.methodEnd(); } } }
/** * Unit test for {@link AdaptiveOperationTracker} that tests the adaptability based on request latencies. This class * only tests features not already tested in {@link OperationTrackerTest}. * * The status of an operation is represented as in the following format: * * [local unsent count] - [local inflight count] - [local succeeded count] - [local failed count]; * [remote unsent count] - [remote inflight count] - [remote succeeded count] - [remote failed count] * * For example: 3-0-0-0; 9-0-0-0 */ public class AdaptiveOperationTrackerTest { private static final int REPLICA_COUNT = 6; private static final int PORT = 6666; private static final double QUANTILE = 0.9; private static final Pair<Long, Long> LOCAL_COLO_LATENCY_RANGE = new Pair<>(0L, 58L); private static final Pair<Long, Long> CROSS_COLO_LATENCY_RANGE = new Pair<>(120L, 220L); private final List<MockDataNodeId> datanodes; private final MockPartitionId mockPartition; private final String localDcName; private final MockClusterMap mockClusterMap; private final Map<PartitionId, LinkedList<ReplicaId>> partitionAndInflightReplicas = new HashMap<>(); private final Set<ReplicaId> repetitionTracker = new HashSet<>(); private final Time time = new MockTime(); private final Histogram localColoTracker; private final Histogram crossColoTracker; private final Histogram putLocalColoTracker; private final Counter pastDueCounter; private final Counter putPastDueCounter; private final long MIN_DATA_POINTS_REQUIRED; private NonBlockingRouterMetrics routerMetrics; private RouterConfig defaultRouterConfig; private OperationTrackerScope trackerScope; /** * Constructor that sets up state. */ public AdaptiveOperationTrackerTest() { List<Port> portList = Collections.singletonList(new Port(PORT, PortType.PLAINTEXT)); List<String> mountPaths = Arrays.asList("mockMountPath0", "mockMountPath1", "mockMountPath2"); datanodes = new ArrayList<>(Arrays.asList(new MockDataNodeId(portList, mountPaths, "dc-0"), new MockDataNodeId(portList, mountPaths, "dc-1"))); localDcName = datanodes.get(0).getDatacenterName(); mockPartition = new MockPartitionId(); for (int i = 0; i < REPLICA_COUNT; i++) { mockPartition.replicaIds.add(new MockReplicaId(PORT, mockPartition, datanodes.get(i % datanodes.size()), 0)); } mockClusterMap = new MockClusterMap(false, datanodes, 1, Collections.singletonList(mockPartition), localDcName); Properties props = new Properties(); props.setProperty("router.hostname", "localhost"); props.setProperty("router.datacenter.name", localDcName); defaultRouterConfig = new RouterConfig(new VerifiableProperties(props)); routerMetrics = new NonBlockingRouterMetrics(mockClusterMap, defaultRouterConfig); localColoTracker = routerMetrics.getBlobLocalDcLatencyMs; crossColoTracker = routerMetrics.getBlobCrossDcLatencyMs; putLocalColoTracker = routerMetrics.putBlobLatencyMs; pastDueCounter = routerMetrics.getBlobPastDueCount; putPastDueCounter = routerMetrics.putBlobPastDueCount; MIN_DATA_POINTS_REQUIRED = defaultRouterConfig.routerOperationTrackerMinDataPointsRequired; trackerScope = OperationTrackerScope.Datacenter; } /** * Tests that requests are discounted from the parallelism count once they move beyond the tolerance quantile. 
* @throws InterruptedException */ @Test public void adaptationTest() throws InterruptedException { primeTracker(localColoTracker, MIN_DATA_POINTS_REQUIRED, LOCAL_COLO_LATENCY_RANGE); primeTracker(crossColoTracker, MIN_DATA_POINTS_REQUIRED, CROSS_COLO_LATENCY_RANGE); double localColoCutoff = localColoTracker.getSnapshot().getValue(QUANTILE); double crossColoCutoff = crossColoTracker.getSnapshot().getValue(QUANTILE); OperationTracker ot = getOperationTracker(createRouterConfig(true, REPLICA_COUNT, 2, 6, null, true), mockPartition); // 3-0-0-0; 3-0-0-0 sendRequests(ot, 2); // 1-2-0-0; 3-0-0-0 // sleep for less than the cutoff time.sleep((long) localColoCutoff - 2); sendRequests(ot, 0); // push it over the edge time.sleep(5); // should send two requests because both of the oldest requests are past their due times // the second of the two requests is a cross colo request sendRequests(ot, 2); // 0-3-0-0; 2-1-0-0 time.sleep((long) localColoCutoff + 2); // second cross colo request sent (local colo request is past due but the first cross colo request is not past due). sendRequests(ot, 1); // 0-3-0-0; 1-2-0-0 long sleepTime = (long) localColoCutoff + 2; time.sleep(sleepTime); // no requests should be sent. // for first cross colo request, 2 * (localColoCutoff + 2) <= 2 * (57 * 0.9 + 2) = 106.6 < 120 * 0.9 <= crossColoCutoff sendRequests(ot, 0); // 0-3-0-0; 1-2-0-0 sleepTime = (long) (crossColoCutoff - localColoCutoff) + 2; time.sleep(sleepTime); // third cross colo request sent (first cross colo request is past due) sendRequests(ot, 1); // 0-3-0-0; 0-3-0-0 time.sleep((long) crossColoCutoff + 2); // no more replicas left to send requests to sendRequests(ot, 0); // generate a response for every request and make sure there are no errors for (int i = 0; i < REPLICA_COUNT; i++) { assertFalse("Operation should not be done", ot.isDone()); ot.onResponse(partitionAndInflightReplicas.get(mockPartition).poll(), TrackedRequestFinalState.SUCCESS); } assertTrue("Operation should have succeeded", ot.hasSucceeded()); // past due counter should be REPLICA_COUNT - 2 (note that pastDueCounter is updated only when Iterator.remove() is called) assertEquals("Past due counter is inconsistent", REPLICA_COUNT - 2, pastDueCounter.getCount()); } /** * Test that PutOperation can use adaptive operation tracker. * @throws Exception */ @Test public void putOperationSuccessTest() throws Exception { primeTracker(putLocalColoTracker, MIN_DATA_POINTS_REQUIRED, LOCAL_COLO_LATENCY_RANGE); double localColoCutoff = putLocalColoTracker.getSnapshot().getValue(QUANTILE); OperationTracker ot = getOperationTracker(createRouterConfig(true, 2, 2, 6, null, true), mockPartition, RouterOperation.PutOperation); sendRequests(ot, 2); // sleep for less than the cutoff time.sleep((long) localColoCutoff - 2); sendRequests(ot, 0); // Acknowledging one success shouldn't change the state of the operation tracker ot.onResponse(partitionAndInflightReplicas.get(mockPartition).pollLast(), TrackedRequestFinalState.SUCCESS); sendRequests(ot, 0); // push it over the edge time.sleep(5); sendRequests(ot, 1); ot.onResponse(partitionAndInflightReplicas.get(mockPartition).pollLast(), TrackedRequestFinalState.SUCCESS); assertTrue("Operation should be done", ot.isDone()); assertTrue("Operation should have succeeded", ot.hasSucceeded()); assertEquals("Past due counter is inconsistent", 1, putPastDueCounter.getCount()); } /** * Test that PutOperation can use adaptive operation tracker when there is a failure response. 
* @throws Exception */ @Test public void putOperationFailureTest() throws Exception { primeTracker(putLocalColoTracker, MIN_DATA_POINTS_REQUIRED, LOCAL_COLO_LATENCY_RANGE); double localColoCutoff = putLocalColoTracker.getSnapshot().getValue(QUANTILE); OperationTracker ot = getOperationTracker(createRouterConfig(true, 2, 2, 6, null, true), mockPartition, RouterOperation.PutOperation); sendRequests(ot, 2); // sleep for less than the cutoff time.sleep((long) localColoCutoff - 2); sendRequests(ot, 0); // Acknowledging one failure would immediately trigger another send ot.onResponse(partitionAndInflightReplicas.get(mockPartition).pollLast(), TrackedRequestFinalState.FAILURE); sendRequests(ot, 1); // push it over the edge time.sleep(5); // Put only has 3 nodes sendRequests(ot, 0); ot.onResponse(partitionAndInflightReplicas.get(mockPartition).pollLast(), TrackedRequestFinalState.SUCCESS); ot.onResponse(partitionAndInflightReplicas.get(mockPartition).pollLast(), TrackedRequestFinalState.SUCCESS); // two success, one failure assertTrue("Operation should be done", ot.isDone()); assertTrue("Operation should have succeeded", ot.hasSucceeded()); } /** * Test that the max number of inflight requests should not exceed configured number. * @throws InterruptedException */ @Test public void clampMaxInflightRequestsTest() throws InterruptedException { primeTracker(localColoTracker, MIN_DATA_POINTS_REQUIRED, LOCAL_COLO_LATENCY_RANGE); primeTracker(crossColoTracker, MIN_DATA_POINTS_REQUIRED, CROSS_COLO_LATENCY_RANGE); double localColoCutoff = localColoTracker.getSnapshot().getValue(QUANTILE); double crossColoCutoff = crossColoTracker.getSnapshot().getValue(QUANTILE); // set max inflight number = 2 and excludeTimeout = false in this test OperationTracker ot = getOperationTracker(createRouterConfig(true, 3, 2, 3, null, false), mockPartition); // 3-0-0-0; 3-0-0-0 sendRequests(ot, 2); // 1-2-0-0; 3-0-0-0 // sleep for less than the cutoff time.sleep((long) localColoCutoff - 2); sendRequests(ot, 0); // push it over the edge time.sleep(5); // should only send one request because (inflight num + 1) == routerConfig.routerOperationTrackerMaxInflightRequests sendRequests(ot, 1); // 0-3-0-0; 3-0-0-0 // mark one request TIMED_OUT ot.onResponse(partitionAndInflightReplicas.get(mockPartition).poll(), TrackedRequestFinalState.TIMED_OUT); // should send out 1 request sendRequests(ot, 1); // 0-2-0-1; 2-1-0-0 // mark one request FAILURE ot.onResponse(partitionAndInflightReplicas.get(mockPartition).poll(), TrackedRequestFinalState.FAILURE); time.sleep((long) (crossColoCutoff - localColoCutoff) + 2); // should send out 1 request sendRequests(ot, 1); // 0-1-0-2; 1-2-0-0 // mark 3 inflight requests SUCCESS while (!partitionAndInflightReplicas.get(mockPartition).isEmpty()) { assertFalse("Operation should not be done", ot.isDone()); ot.onResponse(partitionAndInflightReplicas.get(mockPartition).poll(), TrackedRequestFinalState.SUCCESS); } assertTrue("Operation should have succeeded", ot.hasSucceeded()); // past due counter should be 3 (note that pastDueCounter is updated only when Iterator.remove() is called) assertEquals("Past due counter is not expected", 3, pastDueCounter.getCount()); } /** * Tests that adaptive tracker uses separate partition-level histogram to determine if inflight requests are past due. 
* @throws Exception */ @Test public void partitionLevelAdaptiveTrackerTest() throws Exception { MockPartitionId mockPartition1 = new MockPartitionId(0L, MockClusterMap.DEFAULT_PARTITION_CLASS); MockPartitionId mockPartition2 = new MockPartitionId(1L, MockClusterMap.DEFAULT_PARTITION_CLASS); for (int i = 0; i < REPLICA_COUNT; i++) { mockPartition1.replicaIds.add(new MockReplicaId(PORT, mockPartition1, datanodes.get(i % datanodes.size()), 1)); mockPartition2.replicaIds.add(new MockReplicaId(PORT, mockPartition2, datanodes.get(i % datanodes.size()), 2)); } MockClusterMap clusterMap = new MockClusterMap(false, datanodes, 3, Arrays.asList(mockPartition1, mockPartition2), localDcName); trackerScope = OperationTrackerScope.Partition; RouterConfig routerConfig = createRouterConfig(true, 2, 1, 6, null, true); NonBlockingRouterMetrics originalMetrics = routerMetrics; routerMetrics = new NonBlockingRouterMetrics(clusterMap, routerConfig); Counter pastDueCount = routerMetrics.getBlobPastDueCount; Map<Resource, CachedHistogram> localColoMap = routerMetrics.getBlobLocalDcResourceToLatency; Map<Resource, CachedHistogram> crossColoMap = routerMetrics.getBlobCrossDcResourceToLatency; // mock different distribution of Histogram for two partitions Histogram localHistogram1 = localColoMap.get(mockPartition1); Histogram localHistogram2 = localColoMap.get(mockPartition2); Histogram remoteHistogram1 = crossColoMap.get(mockPartition1); primeTracker(localHistogram1, routerConfig.routerOperationTrackerMinDataPointsRequired, new Pair<>(0L, 50L)); primeTracker(localHistogram2, routerConfig.routerOperationTrackerMinDataPointsRequired, new Pair<>(100L, 120L)); primeTracker(remoteHistogram1, routerConfig.routerOperationTrackerMinDataPointsRequired, new Pair<>(150L, 180L)); OperationTracker tracker1 = getOperationTracker(routerConfig, mockPartition1); OperationTracker tracker2 = getOperationTracker(routerConfig, mockPartition2); double localColoCutoff1 = localHistogram1.getSnapshot().getValue(QUANTILE); double localColoCutoff2 = localHistogram2.getSnapshot().getValue(QUANTILE); double crossColoCutoff1 = remoteHistogram1.getSnapshot().getValue(QUANTILE); sendRequests(tracker2, 1); sendRequests(tracker1, 1); // partition1: 2-1-0-0, partition2: 2-1-0-0 time.sleep((long) localColoCutoff1 + 1); // partition1 should send 2nd request, partition2 won't because its 1st request isn't past due. sendRequests(tracker1, 1); sendRequests(tracker2, 0); // partition1: 1-2-0-0, partition2: 2-1-0-0 time.sleep((long) (localColoCutoff2 - localColoCutoff1) + 2); // note that localColoCutoff2 > 2 * localColoCutoff1, then 2nd request of partition1 and 1st request of partition are both past due sendRequests(tracker1, 1); sendRequests(tracker2, 1); // partition1: 0-3-0-0, partition2: 1-2-0-0 time.sleep((long) localColoCutoff1 + 1); // 3rd local request of partition1 is past due and starts sending 1st cross-colo request sendRequests(tracker1, 1); sendRequests(tracker2, 0); // partition1: 0-3-0-0(local), 2-1-0-0(remote); partition2: 1-2-0-0(local) time.sleep((long) crossColoCutoff1 + 1); // 1st cross-colo request of partition1 is past due and 2nd local request of partition2 is past due. 
sendRequests(tracker1, 1); sendRequests(tracker2, 1); // partition1: 0-3-0-0(local), 1-2-0-0(remote); partition2: 0-3-0-0(local) // generate response for each request to make them successful for (int i = 0; i < 2; ++i) { assertFalse("Operation should not be done", tracker1.isDone() || tracker2.isDone()); tracker1.onResponse(partitionAndInflightReplicas.get(mockPartition1).poll(), TrackedRequestFinalState.SUCCESS); tracker2.onResponse(partitionAndInflightReplicas.get(mockPartition2).poll(), TrackedRequestFinalState.SUCCESS); } assertTrue("Operation should have succeeded", tracker1.hasSucceeded() && tracker2.hasSucceeded()); assertEquals("Past due counter is not expected", 4 + 2, pastDueCount.getCount()); // complete remaining inflight requests and test different final state of request LinkedList<ReplicaId> inflightRequests1 = partitionAndInflightReplicas.get(mockPartition1); LinkedList<ReplicaId> inflightRequests2 = partitionAndInflightReplicas.get(mockPartition2); while (!inflightRequests1.isEmpty()) { tracker1.onResponse(inflightRequests1.poll(), TrackedRequestFinalState.FAILURE); } while (!inflightRequests2.isEmpty()) { tracker2.onResponse(inflightRequests2.poll(), TrackedRequestFinalState.TIMED_OUT); } // The number of data points in local colo histogram should be 5 (3 from partition1, 2 from partition2). Note that, // 3rd request of partition2 timed out which shouldn't be added to histogram. assertEquals("Mismatch in number of data points in local colo histogram", 5, routerMetrics.getBlobLocalDcLatencyMs.getCount()); // The number of data points in cross colo histogram should be 2 (both of them come from partition1) assertEquals("Mismatch in number of data points in cross colo histogram", 2, routerMetrics.getBlobCrossDcLatencyMs.getCount()); // additional test: mock new partition is dynamically added and adaptive operation track should be able to create // histogram on demand. MockPartitionId mockPartition3 = (MockPartitionId) clusterMap.createNewPartition(datanodes); OperationTracker tracker3 = getOperationTracker(routerConfig, mockPartition3); // send 1st request sendRequests(tracker3, 1); // attempt to send 2nd request to make tracker check histogram and create a new one associated with this partition // the oldest one hasn't passed due (because there are not enough data points in histogram), so 2nd is not sent sendRequests(tracker3, 0); tracker3.onResponse(partitionAndInflightReplicas.get(mockPartition3).poll(), TrackedRequestFinalState.SUCCESS); // now it should be able to send 2nd request sendRequests(tracker3, 1); tracker3.onResponse(partitionAndInflightReplicas.get(mockPartition3).poll(), TrackedRequestFinalState.SUCCESS); assertTrue("Operation should have succeeded", tracker3.hasSucceeded()); // restore the tracer scope and routerMetrics trackerScope = OperationTrackerScope.Datacenter; routerMetrics = originalMetrics; } /** * Tests that adaptive tracker uses separate node-level histogram to determine if inflight requests are past due. * @throws Exception */ @Test public void nodeLevelAdaptiveTrackerTest() throws Exception { // Mock a simple partition layout for this test: Partition1 has two replicas, one on LocalHost1 and the other on RemoteHost1; // Similarly, Partition2 has two replicas, one on LocalHost2 and the other on RemoteHost1. 
MockPartitionId mockPartition1 = new MockPartitionId(1L, MockClusterMap.DEFAULT_PARTITION_CLASS); MockPartitionId mockPartition2 = new MockPartitionId(2L, MockClusterMap.DEFAULT_PARTITION_CLASS); // create a new list mock datanodes instead of using the default class member List<Port> portList = Collections.singletonList(new Port(PORT, PortType.PLAINTEXT)); List<String> mountPaths = Arrays.asList("mockMountPath0", "mockMountPath1", "mockMountPath2"); MockDataNodeId localHost1 = new MockDataNodeId("LocalHost1", portList, mountPaths, "dc-0"); MockDataNodeId localHost2 = new MockDataNodeId("LocalHost2", portList, mountPaths, "dc-0"); MockDataNodeId remoteHost1 = new MockDataNodeId("RemoteHost1", portList, mountPaths, "dc-1"); List<MockDataNodeId> datanodes = new ArrayList<>(Arrays.asList(localHost1, localHost2, remoteHost1)); // distribute replicas to nodes (Note that localDC name is still "dc-0" in current setup) mockPartition1.replicaIds.add(new MockReplicaId(PORT, mockPartition1, localHost1, 1)); mockPartition2.replicaIds.add(new MockReplicaId(PORT, mockPartition2, localHost2, 2)); mockPartition1.replicaIds.add(new MockReplicaId(PORT, mockPartition1, remoteHost1, 1)); mockPartition2.replicaIds.add(new MockReplicaId(PORT, mockPartition2, remoteHost1, 2)); MockClusterMap clusterMap = new MockClusterMap(false, datanodes, 3, Arrays.asList(mockPartition1, mockPartition2), localDcName); trackerScope = OperationTrackerScope.DataNode; RouterConfig routerConfig = createRouterConfig(true, 1, 1, 6, null, true); NonBlockingRouterMetrics originalMetrics = routerMetrics; routerMetrics = new NonBlockingRouterMetrics(clusterMap, routerConfig); Counter pastDueCount = routerMetrics.getBlobPastDueCount; Map<Resource, CachedHistogram> localColoMap = routerMetrics.getBlobLocalDcResourceToLatency; Map<Resource, CachedHistogram> crossColoMap = routerMetrics.getBlobCrossDcResourceToLatency; // mock different latency distribution of local hosts and remote host Histogram localHistogram1 = localColoMap.get(localHost1); Histogram localHistogram2 = localColoMap.get(localHost2); primeTracker(localHistogram1, routerConfig.routerOperationTrackerMinDataPointsRequired, new Pair<>(0L, 50L)); primeTracker(localHistogram2, routerConfig.routerOperationTrackerMinDataPointsRequired, new Pair<>(100L, 120L)); double localHostCutoff1 = localHistogram1.getSnapshot().getValue(QUANTILE); double localHostCutoff2 = localHistogram2.getSnapshot().getValue(QUANTILE); OperationTracker tracker1 = getOperationTracker(routerConfig, mockPartition1); OperationTracker tracker2 = getOperationTracker(routerConfig, mockPartition2); // issue first request for both partitions in local DC sendRequests(tracker2, 1); sendRequests(tracker1, 1); // partition1: 0-1-0-0, partition2: 0-1-0-0 time.sleep((long) localHostCutoff1 + 1); // partition1 should send 2nd request to RemoteNode1, partition2 won't because its 1st request isn't past due. 
sendRequests(tracker1, 1); sendRequests(tracker2, 0); // partition1: 0-1-0-0(local), 0-1-0-0(remote); partition2: 0-1-0-0(local), 1-0-0-0(remote) time.sleep((long) (localHostCutoff2 - localHostCutoff1) + 2); sendRequests(tracker1, 0); sendRequests(tracker2, 1); // partition1: 0-1-0-0(local), 0-1-0-0(remote); partition2: 0-1-0-0(local), 0-1-0-0(remote) assertFalse("Operation should not be done", tracker1.isDone() || tracker2.isDone()); // make local requests failed tracker1.onResponse(partitionAndInflightReplicas.get(mockPartition1).poll(), TrackedRequestFinalState.TIMED_OUT); tracker2.onResponse(partitionAndInflightReplicas.get(mockPartition2).poll(), TrackedRequestFinalState.FAILURE); // make remote requests successful tracker1.onResponse(partitionAndInflightReplicas.get(mockPartition1).poll(), TrackedRequestFinalState.SUCCESS); tracker2.onResponse(partitionAndInflightReplicas.get(mockPartition2).poll(), TrackedRequestFinalState.SUCCESS); assertTrue("Operation should have succeeded", tracker1.hasSucceeded() && tracker2.hasSucceeded()); // past due count should be 2 because requests to two local nodes didn't get response within threshold assertEquals("Past due counter is not expected", 2, pastDueCount.getCount()); // number of data points in local colo histogram should be 1 because LocalHost2 finally responded FAILURE which would // update the histogram. Note that request to LocalHost1 became TIMED_OUT in the end which should not be counted. assertEquals("Mismatch in number of data points in local colo histogram", 1, routerMetrics.getBlobLocalDcLatencyMs.getCount()); // number of data points in cross colo histogram should be 2 because both requests to RemoteHost1 succeeded and histogram // should be updated twice in this case. assertEquals("Mismatch in number of data points in cross colo histogram", 2, routerMetrics.getBlobCrossDcLatencyMs.getCount()); // additional test: dynamically add 1 new partition and 2 new nodes. Each new node hosts a replica from new partition MockDataNodeId newNode1 = clusterMap.createNewDataNodes(1, "dc-0").get(0); MockDataNodeId newNode2 = clusterMap.createNewDataNodes(1, "dc-1").get(0); MockPartitionId mockPartition3 = new MockPartitionId(3L, MockClusterMap.DEFAULT_PARTITION_CLASS); mockPartition3.replicaIds.add(new MockReplicaId(PORT, mockPartition3, newNode1, 1)); mockPartition3.replicaIds.add(new MockReplicaId(PORT, mockPartition3, newNode2, 2)); OperationTracker tracker3 = getOperationTracker(routerConfig, mockPartition3); // send 1st request sendRequests(tracker3, 1); // attempt to send 2nd one. This will trigger router metrics to create a histogram that associated with new node // However, there is no 2nd request out because new created histogram doesn't of enough data points. sendRequests(tracker3, 0); // make the 1st request fail tracker3.onResponse(partitionAndInflightReplicas.get(mockPartition3).poll(), TrackedRequestFinalState.FAILURE); // 2nd request is sent sendRequests(tracker3, 1); // make the 2nd request succeed tracker3.onResponse(partitionAndInflightReplicas.get(mockPartition3).poll(), TrackedRequestFinalState.SUCCESS); assertTrue("Operation should have succeeded", tracker3.hasSucceeded()); // restore the tracer scope and routerMetrics trackerScope = OperationTrackerScope.Datacenter; routerMetrics = originalMetrics; } /** * Tests that adaptive tracker uses separate disk-level histogram to determine if inflight requests are past due. * Mock a partition layout as follows for this test. 
This test also tests the case where new nodes and new partition * are dynamically added. * | | Partition 1 | Partition 2 | Partition 3 (added at runtime) * ------------------------------------------------------------------------------------- * LocalHost1 | Disk0 | Replica_1 | | * | Disk1 | | Replica_1 | * ------------------------------------------------------------------------------------- * RemoteHost1 | Disk0 | Replica_2 | Replica_2 | * | Disk1 | Replica_3 | Replica_3 | * ------------------------------------------------------------------------------------- * NewNode1 | Disk0 | | | Replica_1 * | Disk1 | | | * ------------------------------------------------------------------------------------- * NewNod2 | Disk0 | | | * | Disk1 | | | Replica_2 * @throws Exception */ @Test public void diskLevelAdaptiveTrackerTest() throws Exception { MockPartitionId mockPartition1 = new MockPartitionId(1L, MockClusterMap.DEFAULT_PARTITION_CLASS); MockPartitionId mockPartition2 = new MockPartitionId(2L, MockClusterMap.DEFAULT_PARTITION_CLASS); // create a new list mock datanodes instead of using the default class member List<Port> portList = Collections.singletonList(new Port(PORT, PortType.PLAINTEXT)); List<String> mountPaths = Arrays.asList("mockMountPath0", "mockMountPath1"); MockDataNodeId localHost1 = new MockDataNodeId("LocalHost1", portList, mountPaths, "dc-0"); MockDataNodeId remoteHost1 = new MockDataNodeId("RemoteHost1", portList, mountPaths, "dc-1"); List<MockDataNodeId> datanodes = new ArrayList<>(Arrays.asList(localHost1, remoteHost1)); // distribute replicas to nodes (Note that localDC name is still "dc-0" in current setup) ReplicaId partition1Replica1 = new MockReplicaId(PORT, mockPartition1, localHost1, 0); ReplicaId partition1Replica2 = new MockReplicaId(PORT, mockPartition1, remoteHost1, 0); ReplicaId partition1Replica3 = new MockReplicaId(PORT, mockPartition1, remoteHost1, 1); ReplicaId partition2Replica1 = new MockReplicaId(PORT, mockPartition2, localHost1, 1); mockPartition1.replicaIds.add(partition1Replica1); mockPartition1.replicaIds.add(partition1Replica2); mockPartition1.replicaIds.add(partition1Replica3); mockPartition2.replicaIds.add(partition2Replica1); mockPartition2.replicaIds.add(new MockReplicaId(PORT, mockPartition2, remoteHost1, 0)); mockPartition2.replicaIds.add(new MockReplicaId(PORT, mockPartition2, remoteHost1, 1)); MockClusterMap clusterMap = new MockClusterMap(false, datanodes, 2, Arrays.asList(mockPartition1, mockPartition2), localDcName); trackerScope = OperationTrackerScope.Disk; RouterConfig routerConfig = createRouterConfig(true, 1, 1, 6, null, true); NonBlockingRouterMetrics originalMetrics = routerMetrics; routerMetrics = new NonBlockingRouterMetrics(clusterMap, routerConfig); Counter pastDueCount = routerMetrics.getBlobPastDueCount; Map<Resource, CachedHistogram> localColoMap = routerMetrics.getBlobLocalDcResourceToLatency; Map<Resource, CachedHistogram> crossColoMap = routerMetrics.getBlobCrossDcResourceToLatency; // mock different latency distribution of different disks Histogram localHostDisk0Histogram = localColoMap.get(partition1Replica1.getDiskId()); Histogram localHostDisk1Histogram = localColoMap.get(partition2Replica1.getDiskId()); Histogram remoteHostDisk0Histogram = crossColoMap.get(partition1Replica2.getDiskId()); Histogram remoteHostDisk1Histogram = crossColoMap.get(partition1Replica3.getDiskId()); primeTracker(localHostDisk0Histogram, routerConfig.routerOperationTrackerMinDataPointsRequired, new Pair<>(0L, 50L)); 
primeTracker(localHostDisk1Histogram, routerConfig.routerOperationTrackerMinDataPointsRequired, new Pair<>(100L, 120L)); primeTracker(remoteHostDisk0Histogram, routerConfig.routerOperationTrackerMinDataPointsRequired, new Pair<>(150L, 180L)); primeTracker(remoteHostDisk1Histogram, routerConfig.routerOperationTrackerMinDataPointsRequired, new Pair<>(150L, 180L)); double localHostDisk0Cutoff = localHostDisk0Histogram.getSnapshot().getValue(QUANTILE); double localHostDisk1Cutoff = localHostDisk1Histogram.getSnapshot().getValue(QUANTILE); double remoteHostDisk0Cutoff = remoteHostDisk0Histogram.getSnapshot().getValue(QUANTILE); OperationTracker tracker1 = getOperationTracker(routerConfig, mockPartition1); OperationTracker tracker2 = getOperationTracker(routerConfig, mockPartition2); // issue first request for both partitions in local DC sendRequests(tracker2, 1); sendRequests(tracker1, 1); // partition1: 0-1-0-0, partition2: 0-1-0-0 time.sleep((long) localHostDisk0Cutoff + 1); // partition1 should send 2nd request to RemoteNode1, partition2 won't because its 1st request isn't past due. sendRequests(tracker1, 1); sendRequests(tracker2, 0); // partition1: 0-1-0-0(local), 1-1-0-0(remote); partition2: 0-1-0-0(local), 2-0-0-0(remote) time.sleep((long) (localHostDisk1Cutoff - localHostDisk0Cutoff) + 2); // partition2 Replica1 on localhost Disk1 is past due, so the request should be sent to remote host sendRequests(tracker1, 0); sendRequests(tracker2, 1); // partition1: 0-1-0-0(local), 1-1-0-0(remote); partition2: 0-1-0-0(local), 2-0-0-0(remote) time.sleep((long) remoteHostDisk0Cutoff + 1); // both requests are past due (Note that they have same latency histogram) sendRequests(tracker1, 1); sendRequests(tracker2, 1); assertFalse("Operation should not be done", tracker1.isDone() || tracker2.isDone()); // make local requests successful tracker1.onResponse(partitionAndInflightReplicas.get(mockPartition1).poll(), TrackedRequestFinalState.SUCCESS); tracker2.onResponse(partitionAndInflightReplicas.get(mockPartition2).poll(), TrackedRequestFinalState.SUCCESS); // make remote requests failed tracker1.onResponse(partitionAndInflightReplicas.get(mockPartition1).poll(), TrackedRequestFinalState.FAILURE); tracker2.onResponse(partitionAndInflightReplicas.get(mockPartition2).poll(), TrackedRequestFinalState.TIMED_OUT); tracker1.onResponse(partitionAndInflightReplicas.get(mockPartition1).poll(), TrackedRequestFinalState.TIMED_OUT); tracker2.onResponse(partitionAndInflightReplicas.get(mockPartition2).poll(), TrackedRequestFinalState.FAILURE); assertTrue("Operation should have succeeded", tracker1.hasSucceeded() && tracker2.hasSucceeded()); // past due count should be 4 because for each partition there were one local and one remote request that didn't get // response within threshold. In total, it should be 2 * (1 + 1) = 4 assertEquals("Past due counter is not expected", 4, pastDueCount.getCount()); // number of data points in local colo histogram should be 2 because both requests finally succeeded assertEquals("Mismatch in number of data points in local colo histogram", 2, routerMetrics.getBlobLocalDcLatencyMs.getCount()); // number of data points in cross colo histogram should be 2 because two timed-out requests should be counted assertEquals("Mismatch in number of data points in cross colo histogram", 2, routerMetrics.getBlobCrossDcLatencyMs.getCount()); // additional test: dynamically add 1 new partition and 2 new nodes. 
Each new node hosts a replica from the new partition MockDataNodeId newNode1 = clusterMap.createNewDataNodes(1, "dc-0").get(0); MockDataNodeId newNode2 = clusterMap.createNewDataNodes(1, "dc-1").get(0); MockPartitionId mockPartition3 = new MockPartitionId(3L, MockClusterMap.DEFAULT_PARTITION_CLASS); mockPartition3.replicaIds.add(new MockReplicaId(PORT, mockPartition3, newNode1, 0)); mockPartition3.replicaIds.add(new MockReplicaId(PORT, mockPartition3, newNode2, 1)); OperationTracker tracker3 = getOperationTracker(routerConfig, mockPartition3); // send 1st request sendRequests(tracker3, 1); // attempt to send the 2nd one. This will trigger router metrics to create a histogram that is associated with the new disk. // However, there is no 2nd request sent out because the newly created histogram doesn't have enough data points. sendRequests(tracker3, 0); // make the 1st request fail tracker3.onResponse(partitionAndInflightReplicas.get(mockPartition3).poll(), TrackedRequestFinalState.FAILURE); // 2nd request is sent sendRequests(tracker3, 1); // make the 2nd request succeed tracker3.onResponse(partitionAndInflightReplicas.get(mockPartition3).poll(), TrackedRequestFinalState.SUCCESS); assertTrue("Operation should have succeeded", tracker3.hasSucceeded()); // restore the tracker scope and routerMetrics trackerScope = OperationTrackerScope.Datacenter; routerMetrics = originalMetrics; } /** * Tests that the {@link Histogram} instances used by {@link AdaptiveOperationTracker} are updated correctly on * successful requests. * @throws InterruptedException */ @Test public void trackerUpdateOnSuccessTest() throws InterruptedException { doTrackerUpdateTest(true); } /** * Tests that the {@link Histogram} instances used by {@link AdaptiveOperationTracker} are updated correctly on failed * requests. * @throws InterruptedException */ @Test public void trackerUpdateOnFailureTest() throws InterruptedException { doTrackerUpdateTest(false); } /** * Tests the case where there are no unexpired requests because the only unexpired request returned a failure. In * that case, the tracker must allow sending more requests. * @throws InterruptedException */ @Test public void noUnexpiredRequestsTest() throws InterruptedException { primeTracker(localColoTracker, MIN_DATA_POINTS_REQUIRED, LOCAL_COLO_LATENCY_RANGE); primeTracker(crossColoTracker, MIN_DATA_POINTS_REQUIRED, CROSS_COLO_LATENCY_RANGE); double localColoCutoff = localColoTracker.getSnapshot().getValue(QUANTILE); OperationTracker ot = getOperationTracker(createRouterConfig(false, 1, 1, 6, null, true), mockPartition); // 3-0-0-0 sendRequests(ot, 1); // 2-1-0-0 // sleep for a time greater than cutoff time.sleep((long) localColoCutoff + 2); sendRequests(ot, 1); // 1-2-0-0 // provide a response to the second request that is not a success ot.onResponse(partitionAndInflightReplicas.get(mockPartition).pollLast(), TrackedRequestFinalState.FAILURE); // 1-1-0-1 assertFalse("Operation should not be done", ot.isDone()); // should now be able to send one more request sendRequests(ot, 1); // 0-2-0-1 ot.onResponse(partitionAndInflightReplicas.get(mockPartition).pollLast(), TrackedRequestFinalState.SUCCESS); // 0-1-1-1 assertTrue("Operation should have succeeded", ot.hasSucceeded()); // past due counter should be 1 assertEquals("Past due counter is inconsistent", 1, pastDueCounter.getCount()); } /** * Tests the case where the tracker is updated between the {@link Iterator#hasNext()} and {@link Iterator#next()} calls.
* @throws InterruptedException */ @Test public void trackerUpdateBetweenHasNextAndNextTest() throws InterruptedException { primeTracker(localColoTracker, MIN_DATA_POINTS_REQUIRED, LOCAL_COLO_LATENCY_RANGE); primeTracker(crossColoTracker, MIN_DATA_POINTS_REQUIRED, CROSS_COLO_LATENCY_RANGE); double localColoCutoff = localColoTracker.getSnapshot().getValue(1); OperationTracker ot = getOperationTracker(createRouterConfig(false, 1, 1, 6, null, true), mockPartition); // 3-0-0-0 sendRequests(ot, 1); // 2-1-0-0 // sleep for a time greater than cutoff time.sleep((long) localColoCutoff + 2); // now get an iterator and call hasNext() on it Iterator<ReplicaId> replicaIterator = ot.getReplicaIterator(); assertTrue("There should be a replica to send to", replicaIterator.hasNext()); // now insert a value in the tracker such that it is the max value. However, the return value of hasNext() must // not change even though the tracker has changed its return value for getSnapshot().getValue(1). long valueToInsert = 2 * (long) localColoCutoff; localColoTracker.update(valueToInsert); assertEquals("Tracker's snapshot should return the max value", valueToInsert, (long) localColoTracker.getSnapshot().getValue(1)); // hasNext() should not change it's return value assertTrue("There should be a replica to send to", replicaIterator.hasNext()); sendRequests(ot, 1); // 1-2-0-0 ot.onResponse(partitionAndInflightReplicas.get(mockPartition).pollLast(), TrackedRequestFinalState.SUCCESS); // 1-1-1-0 assertTrue("Operation should have succeeded", ot.hasSucceeded()); // past due counter should be 1 assertEquals("Past due counter is inconsistent", 1, pastDueCounter.getCount()); } /** * Test that {@link NonBlockingRouterMetrics} can correctly register custom percentiles. An example of metric name is: * "com.github.ambry.router.GetOperation.LocalColoLatencyMs.91.0.thPercentile" */ @Test public void customPercentilesMetricsRegistryTest() { // test that if custom percentile is not set, no corresponding metrics would be generated. MetricRegistry metricRegistry = routerMetrics.getMetricRegistry(); MetricFilter filter = new MetricFilter() { @Override public boolean matches(String name, Metric metric) { return name.endsWith("thPercentile"); } }; SortedMap<String, Gauge> gauges = metricRegistry.getGauges(filter); assertTrue("No gauges should be created because custom percentile is not set", gauges.isEmpty()); // test that dedicated gauges are correctly created for custom percentiles. String customPercentiles = "0.91,0.97"; RouterConfig routerConfig = createRouterConfig(false, 1, 1, 6, customPercentiles, true); String[] percentileArray = customPercentiles.split(","); Arrays.sort(percentileArray); List<String> sortedPercentiles = Arrays.stream(percentileArray) .map(p -> String.valueOf(Double.parseDouble(p) * 100)) .collect(Collectors.toList()); routerMetrics = new NonBlockingRouterMetrics(mockClusterMap, routerConfig); gauges = routerMetrics.getMetricRegistry().getGauges(filter); // Note that each percentile creates 4 metrics (GetBlobInfo/GetBlob joins LocalColo/CrossColo). 
So, the total number of // metrics should equal to 4 * (# of given custom percentiles) assertEquals("The number of custom percentile gauge doesn't match", sortedPercentiles.size() * 4, gauges.size()); Iterator mapItor = gauges.keySet().iterator(); Iterator<String> listItor = sortedPercentiles.iterator(); while (listItor.hasNext()) { String gaugeName = (String) mapItor.next(); String percentileStr = listItor.next(); assertTrue("The gauge name doesn't match", gaugeName.endsWith(percentileStr + ".thPercentile")); } // reset router metrics to clean up registered custom percentile metrics routerMetrics = new NonBlockingRouterMetrics(mockClusterMap, defaultRouterConfig); } /** * Test that if metric scope in router config is invalid, the {@link IllegalArgumentException} is thrown explicitly. */ @Test public void invalidOperationTrackerScopeTest() { Properties props = new Properties(); props.setProperty("router.hostname", "localhost"); props.setProperty("router.datacenter.name", localDcName); props.setProperty("router.operation.tracker.metric.scope", "Invalid Scope"); props.setProperty("router.get.success.target", Integer.toString(1)); props.setProperty("router.get.request.parallelism", Integer.toString(1)); RouterConfig routerConfig = null; try { routerConfig = new RouterConfig(new VerifiableProperties(props)); } catch (IllegalArgumentException e) { //exception is expected and set valid metric scope to instantiate routerConfig for subsequent test. props.setProperty("router.operation.tracker.metric.scope", "Datacenter"); routerConfig = new RouterConfig(new VerifiableProperties(props)); } NonBlockingRouterMetrics routerMetrics = new NonBlockingRouterMetrics(mockClusterMap, routerConfig); AdaptiveOperationTracker tracker = new AdaptiveOperationTracker(routerConfig, routerMetrics, RouterOperation.GetBlobInfoOperation, mockPartition, null, time); // test that operation tracker works correctly with default Datacenter scope sendRequests(tracker, 1); assertFalse("Operation should not be done", tracker.isDone()); tracker.onResponse(partitionAndInflightReplicas.get(mockPartition).poll(), TrackedRequestFinalState.SUCCESS); assertTrue("Operation should have succeeded", tracker.hasSucceeded()); // test that no other resource-level metrics have been instantiated. assertTrue("Partition histogram in RouterMetrics should be empty", routerMetrics.getBlobInfoLocalDcResourceToLatency.isEmpty()); assertTrue("Partition histogram in RouterMetrics should be empty", routerMetrics.getBlobInfoCrossDcResourceToLatency.isEmpty()); assertTrue("Partition histogram in OperationTracker should be empty", tracker.getResourceToLatencyMap(RouterOperation.GetBlobInfoOperation, true).isEmpty()); assertTrue("Partition histogram in OperationTracker should be empty", tracker.getResourceToLatencyMap(RouterOperation.GetBlobInfoOperation, false).isEmpty()); // extra test: invalid router operation try { tracker.getResourceToLatencyMap(RouterOperation.TtlUpdateOperation, true); fail("should fail due to invalid router operation"); } catch (IllegalArgumentException e) { //expected } } // helpers // general /** * Instantiate an adaptive operation tracker. * @param routerConfig the {@link RouterConfig} to use in adaptive tracker. * @param partitionId the {@link PartitionId} to use in adaptive tracker. * @return an instance of {@link AdaptiveOperationTracker} with the given parameters. 
*/ private OperationTracker getOperationTracker(RouterConfig routerConfig, PartitionId partitionId) { return getOperationTracker(routerConfig, partitionId, RouterOperation.GetBlobOperation); } /** * Instantiate an adaptive operation tracker. * @param routerConfig the {@link RouterConfig} to use in adaptive tracker. * @param partitionId the {@link PartitionId} to use in adaptive tracker. * @param operation the {@link RouterOperation} to use in the adaptive tracker. * @return an instance of {@link AdaptiveOperationTracker} with the given parameters. */ private OperationTracker getOperationTracker(RouterConfig routerConfig, PartitionId partitionId, RouterOperation operation) { return new AdaptiveOperationTracker(routerConfig, routerMetrics, operation, partitionId, null, time); } /** * Generate an instance of {@link RouterConfig} based on input parameters. * @param crossColoEnabled {@code true} if cross colo needs to be enabled. {@code false} otherwise. * @param successTarget the number of successful responses required for the operation to succeed. * @param parallelism the number of parallel requests that can be in flight. * @param maxInflightNum the maximum number of inflight requests for adaptive tracker. * @param customPercentiles the custom percentiles to be reported. Percentiles are specified in a comma-separated * string, i.e "0.94,0.96,0.97". * @param excludeTimeout whether to exclude timed out requests in Histogram. * @return an instance of {@link RouterConfig} */ private RouterConfig createRouterConfig(boolean crossColoEnabled, int successTarget, int parallelism, int maxInflightNum, String customPercentiles, boolean excludeTimeout) { Properties props = new Properties(); props.setProperty("router.hostname", "localhost"); props.setProperty("router.datacenter.name", localDcName); props.setProperty("router.get.cross.dc.enabled", Boolean.toString(crossColoEnabled)); props.setProperty("router.get.success.target", Integer.toString(successTarget)); props.setProperty("router.get.request.parallelism", Integer.toString(parallelism)); props.setProperty("router.get.include.non.originating.dc.replicas", "true"); props.setProperty("router.get.replicas.required", Integer.toString(Integer.MAX_VALUE)); props.setProperty("router.put.request.parallelism", Integer.toString(parallelism)); props.setProperty("router.put.success.target", Integer.toString(successTarget)); props.setProperty("router.latency.tolerance.quantile", Double.toString(QUANTILE)); props.setProperty("router.operation.tracker.metric.scope", trackerScope.toString()); props.setProperty("router.operation.tracker.max.inflight.requests", Integer.toString(maxInflightNum)); props.setProperty("router.operation.tracker.exclude.timeout.enabled", Boolean.toString(excludeTimeout)); props.setProperty(RouterConfig.ROUTER_ADAPTIVE_OPERATION_TRACKER_WAITING_FOR_RESPONSE, "true"); if (customPercentiles != null) { props.setProperty("router.operation.tracker.custom.percentiles", customPercentiles); } return new RouterConfig(new VerifiableProperties(props)); } /** * Updates the {@code tracker} to mimic {@code numRequests} each taking {@code latency} ms. * @param tracker the {@link Histogram} to update * @param numRequests the number of requests (data points) * @param latencyRange the range of latencies (in ms) to generate and record. 
*/ private void primeTracker(Histogram tracker, long numRequests, Pair<Long, Long> latencyRange) { for (long i = 0; i < numRequests; i++) { // Given latencyRange specifies boundaries of latency: low = latencyRange.getFirst(), high = latencyRange.getSecond(). // Any randomly generated latency should fall in the range [low, high). long latency = Utils.getRandomLong(TestUtils.RANDOM, latencyRange.getSecond() - latencyRange.getFirst()) + latencyRange.getFirst(); tracker.update(latency); } } /** * Send requests to all replicas provided by the {@link OperationTracker#getReplicaIterator()} * @param operationTracker the {@link OperationTracker} that provides replicas. * @param numRequestsExpected the number of requests expected to be sent out. */ private void sendRequests(OperationTracker operationTracker, int numRequestsExpected) { int sent = 0; Iterator<ReplicaId> replicaIdIterator = operationTracker.getReplicaIterator(); while (replicaIdIterator.hasNext()) { ReplicaId nextReplica = replicaIdIterator.next(); assertNotNull("There should be a replica to send a request to", nextReplica); assertFalse("Replica that was used for a request returned by iterator again", repetitionTracker.contains(nextReplica)); LinkedList<ReplicaId> infightReplicas = partitionAndInflightReplicas.getOrDefault(nextReplica.getPartitionId(), new LinkedList<>()); infightReplicas.offer(nextReplica); partitionAndInflightReplicas.put(nextReplica.getPartitionId(), infightReplicas); repetitionTracker.add(nextReplica); replicaIdIterator.remove(); sent++; } assertEquals("Did not send expected number of requests", numRequestsExpected, sent); } // trackerUpdateTest() helpers /** * Tests that the {@link Histogram} instances used by {@link AdaptiveOperationTracker} are updated correctly. * @param succeedRequests {@code true} if the requests should receive successful responses. {@code false} otherwise. * @throws InterruptedException */ private void doTrackerUpdateTest(boolean succeedRequests) throws InterruptedException { long timeIncrement = 10; OperationTracker ot = getOperationTracker(createRouterConfig(true, REPLICA_COUNT, REPLICA_COUNT, 6, null, true), mockPartition); // 3-0-0-0; 3-0-0-0 sendRequests(ot, REPLICA_COUNT); // 0-3-0-0; 0-3-0-0 Double[] localColoExpectedAverages = {(double) timeIncrement, (double) timeIncrement + timeIncrement / 2, 2 * (double) timeIncrement}; verifyHistogramRecording(ot, succeedRequests, timeIncrement, localColoExpectedAverages, localColoTracker); Double[] crossColoExpectedAverages = {4 * (double) timeIncrement, 4 * (double) timeIncrement + timeIncrement / 2, 5 * (double) timeIncrement}; verifyHistogramRecording(ot, succeedRequests, timeIncrement, crossColoExpectedAverages, crossColoTracker); assertEquals("Operation success state is unexpected", succeedRequests, ot.hasSucceeded()); assertTrue("Operation should be done", ot.isDone()); } /** * Verifies that the {@code tracker} is updated correctly when responses are received. * @param ot the {@link OperationTracker} to use. * @param succeedRequests {@code true} if the requests should receive successful responses. {@code false} otherwise. * @param timeIncrement increment of time (in ms) before responses are recorded for each request. * @param expectedAverages the expected averages after every response that is recorded. * @param tracker the {@link Histogram} that should be updated. 
* @throws InterruptedException */ private void verifyHistogramRecording(OperationTracker ot, boolean succeedRequests, long timeIncrement, Double[] expectedAverages, Histogram tracker) throws InterruptedException { for (double expectedAverage : expectedAverages) { time.sleep(timeIncrement); ot.onResponse(partitionAndInflightReplicas.get(mockPartition).poll(), succeedRequests ? TrackedRequestFinalState.SUCCESS : TrackedRequestFinalState.FAILURE); assertEquals("Average does not match. Histogram recording may be incorrect", expectedAverage, tracker.getSnapshot().getMean(), 0.001); } } }
#include <stdio.h>

int main() {
    int n, k, m, i, count = 0, flag = 0, d = 0;

    /* read n values after the three header numbers n, m, k */
    scanf("%d%d%d", &n, &m, &k);
    int a[n];
    for (i = 0; i < n; i++) {
        scanf("%d", &a[i]);
        if (a[i] == 1) {
            count++;            /* entries equal to 1 */
        } else {
            flag++;             /* all other entries */
        }
    }

    if (m >= count && k >= flag) {
        printf("%d", d);        /* both groups fit within m and k: nothing left over */
    } else if (m >= count && k < flag) {
        if (k + m - count > flag) {
            printf("%d", d);    /* spare capacity of m covers the overflow beyond k */
        } else {
            printf("%d", flag + count - m - k);
        }
    } else if (m < count && k >= flag) {
        printf("%d", count - m);
    } else {
        printf("%d", count - m + flag - k);
    }
    return 0;
}
//! Safe wrapper around externalities invokes. use wasm_std::{ self, types::{H256, U256, Address} }; /// Generic wasm error #[derive(Debug)] pub struct Error; mod external { extern "C" { // Various call variants /// Direct/classic call. /// Corresponds to "CALL" opcode in EVM pub fn ccall( gas: i64, address: *const u8, val_ptr: *const u8, input_ptr: *const u8, input_len: u32, result_ptr: *mut u8, result_len: u32, ) -> i32; /// Delegate call. /// Corresponds to "CALLCODE" opcode in EVM pub fn dcall( gas: i64, address: *const u8, input_ptr: *const u8, input_len: u32, result_ptr: *mut u8, result_len: u32, ) -> i32; /// Static call. /// Corresponds to "STACICCALL" opcode in EVM pub fn scall( gas: i64, address: *const u8, input_ptr: *const u8, input_len: u32, result_ptr: *mut u8, result_len: u32, ) -> i32; // environmental blockchain functions (runtime might not provide all of these!) pub fn blockhash(number: i64, dest: *mut u8); pub fn balance(address: *const u8, dest: *mut u8); pub fn coinbase(dest: *mut u8); pub fn timestamp() -> i64; pub fn blocknumber() -> i64; pub fn difficulty(dest: *mut u8); pub fn gaslimit(dest: *mut u8); #[cfg(feature = "kip6")] pub fn gasleft() -> i64; pub fn sender(dest: *mut u8); pub fn address(dest: *mut u8); pub fn value(dest: *mut u8); pub fn origin(dest: *mut u8); pub fn elog( topic_ptr: *const u8, topic_count: u32, data_ptr: *const u8, data_len: u32 ); pub fn create( endowment: *const u8, code_ptr: *const u8, code_len: u32, result_ptr: *mut u8 ) -> i32; #[cfg(feature = "kip4")] pub fn create2( endowment: *const u8, salt: *const u8, code_ptr: *const u8, code_len: u32, result_ptr: *mut u8 ) -> i32; pub fn suicide(refund: *const u8) -> !; pub fn ret(ptr: *const u8, len: u32) -> !; pub fn input_length() -> u32; pub fn fetch_input(dst: *mut u8); } } /// Halt execution and register account for deletion. /// /// Value of the current account will be tranfered to `refund` address. pub fn suicide(refund: &Address) -> ! { unsafe { external::suicide(refund.as_ptr()); } } /// Get balance of the given account. /// /// If an account is not registered in the chain yet, /// it is considered as an account with `balance = 0`. pub fn balance(address: &Address) -> U256 { unsafe { fetch_u256(|x| external::balance(address.as_ptr(), x) ) } } /// Create a new account with the given code /// /// # Errors /// /// Returns [`Error`] in case contract constructor failed. /// /// [`Error`]: struct.Error.html pub fn create(endowment: U256, code: &[u8]) -> Result<Address, Error> { let mut endowment_arr = [0u8; 32]; endowment.to_big_endian(&mut endowment_arr); let mut result = Address::zero(); unsafe { if external::create( endowment_arr.as_ptr(), code.as_ptr(), code.len() as u32, (&mut result).as_mut_ptr() ) == 0 { Ok(result) } else { Err(Error) } } } #[cfg(feature = "kip4")] /// Create a new account with the given code and salt, requires KIP-4. /// /// # Errors /// /// Returns [`Error`] in case contract constructor failed. /// /// [`Error`]: struct.Error.html pub fn create2(endowment: U256, salt: H256, code: &[u8]) -> Result<Address, Error> { let mut endowment_arr = [0u8; 32]; endowment.to_big_endian(&mut endowment_arr); let mut result = Address::new(); unsafe { if external::create2( endowment_arr.as_ptr(), salt.as_ptr(), code.as_ptr(), code.len() as u32, (&mut result).as_mut_ptr() ) == 0 { Ok(result) } else { Err(Error) } } } /// Message-call into an account /// /// # Arguments: /// * `gas`- a gas limit for a call. 
A call execution will halt if call exceed this amount /// * `address` - an address of contract to send a call /// * `value` - a value in Wei to send with a call /// * `input` - a data to send with a call /// * `result` - a mutable reference to be filled with a result data /// /// # Returns: /// /// Call is succeed if it returns `Result::Ok(())` /// If call returns `Result::Err(Error)` it means tha call was failed due to execution halting pub fn call(gas: u64, address: &Address, value: U256, input: &[u8], result: &mut [u8]) -> Result<(), Error> { let mut value_arr = [0u8; 32]; value.to_big_endian(&mut value_arr); unsafe { if external::ccall( gas as i64, address.as_ptr(), value_arr.as_ptr(), input.as_ptr(), input.len() as u32, result.as_mut_ptr(), result.len() as u32 ) == 0 { Ok(()) } else { Err(Error) } } } /// Like [`call`], but with code at the given `address` /// /// Effectively this function is like calling current account but with /// different code (i.e. like `DELEGATECALL` EVM instruction). /// /// [`call`]: fn.call.html pub fn call_code(gas: u64, address: &Address, input: &[u8], result: &mut [u8]) -> Result<(), Error> { unsafe { if external::dcall( gas as i64, address.as_ptr(), input.as_ptr(), input.len() as u32, result.as_mut_ptr(), result.len() as u32 ) == 0 { Ok(()) } else { Err(Error) } } } /// Like [`call`], but this call and any of it's subcalls are disallowed to modify any storage. /// /// It will return an error in this case. /// /// [`call`]: fn.call.html pub fn static_call(gas: u64, address: &Address, input: &[u8], result: &mut [u8]) -> Result<(), Error> { unsafe { if external::scall( gas as i64, address.as_ptr(), input.as_ptr(), input.len() as u32, result.as_mut_ptr(), result.len() as u32 ) == 0 { Ok(()) } else { Err(Error) } } } /// Returns hash of the given block or H256::zero() /// /// Only works for 256 most recent blocks excluding current /// Returns H256::zero() in case of failure pub fn block_hash(block_number: u64) -> H256 { let mut res = H256::zero(); unsafe { external::blockhash(block_number as i64, res.as_mut_ptr()) } res } /// Get the current block’s beneficiary address (the current miner account address) pub fn coinbase() -> Address { unsafe { fetch_address(|x| external::coinbase(x) ) } } /// Get the block's timestamp /// /// It can be viewed as an output of Unix's `time()` function at /// current block's inception. pub fn timestamp() -> u64 { unsafe { external::timestamp() as u64 } } /// Get the block's number /// /// This value represents number of ancestor blocks. /// The genesis block has a number of zero. pub fn block_number() -> u64 { unsafe { external::blocknumber() as u64 } } /// Get the block's difficulty. pub fn difficulty() -> U256 { unsafe { fetch_u256(|x| external::difficulty(x) ) } } /// Get the block's gas limit. pub fn gas_limit() -> U256 { unsafe { fetch_u256(|x| external::gaslimit(x) ) } } #[cfg(feature = "kip6")] /// Get amount of gas left. pub fn gas_left() -> u64 { unsafe { external::gasleft() as u64 } } /// Get caller address /// /// This is the address of the account that is directly responsible for this execution. /// Use [`origin`] to get an address of external account - an original initiator of a transaction pub fn sender() -> Address { unsafe { fetch_address(|x| external::sender(x) ) } } /// Get execution origination address /// /// This is the sender of original transaction. 
/// It could be only external account, not a contract pub fn origin() -> Address { unsafe { fetch_address(|x| external::origin(x) ) } } /// Get deposited value by the instruction/transaction responsible for this execution. pub fn value() -> U256 { unsafe { fetch_u256(|x| external::value(x) ) } } /// Get address of currently executing account pub fn address() -> Address { unsafe { fetch_address(|x| external::address(x) ) } } /// Creates log entry with given topics and data. /// /// There could be only up to 4 topics. /// /// # Panics /// /// If `topics` contains more than 4 elements then this function will trap. pub fn log(topics: &[H256], data: &[u8]) { unsafe { external::elog(topics.as_ptr() as *const u8, topics.len() as u32, data.as_ptr(), data.len() as u32); } } /// Allocates and requests [`call`] arguments (input) /// /// Input data comes either with external transaction or from [`call`] input value. pub fn input() -> wasm_std::Vec<u8> { let len = unsafe { external::input_length() }; match len { 0 => wasm_std::Vec::new(), non_zero => { let mut data = wasm_std::Vec::with_capacity(non_zero as usize); unsafe { data.set_len(non_zero as usize); external::fetch_input(data.as_mut_ptr()); } data } } } /// Sets a [`call`] return value /// /// Pass return data to the runtime. Runtime SHOULD trap the execution. /// pub fn ret(data: &[u8]) -> ! { unsafe { external::ret(data.as_ptr(), data.len() as u32); } } unsafe fn fetch_address<F>(f: F) -> Address where F: Fn(*mut u8) { let mut res = Address::zero(); f(res.as_mut_ptr()); res } unsafe fn fetch_u256<F>(f: F) -> U256 where F: Fn(*mut u8) { let mut res = [0u8; 32]; f(res.as_mut_ptr()); U256::from_big_endian(&res) }
<filename>tests/lib/gui/lvgl/src/img.c<gh_stars>1000+ /* * Copyright (c) 2019 <NAME> <<EMAIL>> * * SPDX-License-Identifier: Apache-2.0 */ #include <zephyr.h> #include <lvgl.h> #include "img.h" static const uint8_t img_data[] = { 0x00, 0x00, 0x00, 0x00, /* Index color 0 */ 0x00, 0xff, 0x00, 0x00, /* Index color 1 */ /* 0 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, /* 10 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, /* 20 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, /* 30 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, /* 40 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, /* 50 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, /* 60 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, /* 70 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, /* 80 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, /* 90 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 96 */ }; static lv_img_dsc_t img_dsc = { .header.always_zero = 0, .header.w = 96, .header.h = 96, .data_size = sizeof(img_data), .header.cf = LV_IMG_CF_INDEXED_1BIT, .data = img_data, }; const lv_img_dsc_t *get_lvgl_img(void) { return &img_dsc; }
from sys import stdin

# The first input line (a length) is not needed; skip it.
_ = stdin.readline()

# Count positions where a character equals its immediate predecessor.
r = 0
p = ""
for l in stdin.readline()[:-1]:
    if p == l:
        r += 1
    p = l
print(r)
/**
 * Reverse transform op.
 * Reverses the order of the elements in the input array.
 *
 * @author Adam Gibson
 */
public class Reverse extends BaseTransformOp {

    public Reverse(SameDiff sameDiff, DifferentialFunction i_v, boolean inPlace) {
        super(sameDiff, i_v, inPlace);
    }

    public Reverse(SameDiff sameDiff, DifferentialFunction i_v, int[] shape, boolean inPlace, Object[] extraArgs) {
        super(sameDiff, i_v, shape, inPlace, extraArgs);
    }

    public Reverse(SameDiff sameDiff, DifferentialFunction i_v, Object[] extraArgs) {
        super(sameDiff, i_v, extraArgs);
    }

    public Reverse() {}

    public Reverse(INDArray x, INDArray z) {
        super(x, z);
    }

    public Reverse(INDArray x, INDArray z, long n) {
        super(x, z, n);
    }

    public Reverse(INDArray x, INDArray y, INDArray z, long n) {
        super(x, y, z, n);
    }

    public Reverse(INDArray x, INDArray y, INDArray z) {
        super(x, y, z, x.lengthLong());
    }

    public Reverse(INDArray x) {
        super(x);
    }

    @Override
    public int opNum() {
        return 70;
    }

    @Override
    public boolean isExecSpecial() {
        return true;
    }

    @Override
    public String opName() {
        return "reverse";
    }

    @Override
    public String onnxName() {
        throw new NoOpNameFoundException("No onnx op opName found for " + opName());
    }

    @Override
    public String tensorflowName() {
        throw new NoOpNameFoundException("No tensorflow op opName found for " + opName());
    }

    @Override
    public void init(INDArray x, INDArray y, INDArray z, long n) {
        super.init(x, y, z, n);
        passThrough = true;
    }

    @Override
    public List<DifferentialFunction> doDiff(List<DifferentialFunction> f1) {
        return null;
    }
}
def packbits(myarray):
    """Pack the elements of a binary-valued array into bits of a uint8 array.

    The input is flattened and packed eight elements per output byte; the
    final byte is zero-padded when the element count is not a multiple of 8.
    """
    if myarray.dtype.kind not in 'biu':
        raise TypeError(
            'Expected an input array of integer or boolean data type')

    myarray = myarray.ravel()
    packed_size = (myarray.size + 7) // 8
    packed = cupy.zeros((packed_size,), dtype=cupy.uint8)
    return _packbits_kernel(myarray, myarray.size, packed)
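A small usage sketch of the bit-packing behavior described above, shown with numpy.packbits as a stand-in (assuming the function mirrors the NumPy API: most-significant bit first, eight bits per byte, last byte zero-padded):

import numpy as np

bits = np.array([1, 0, 1, 1, 0, 0, 1, 0, 1], dtype=np.uint8)
print(np.packbits(bits))  # [178 128]: 0b10110010 and 0b10000000 (zero-padded)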
<reponame>marinelli/malodivo {-# language ExistentialQuantification, Rank2Types, DeriveFunctor #-} module Column where import Redistribute (capped) import Control.Arrow ((&&&), second) import Data.Either (rights, partitionEithers) import Data.List (transpose, sortOn) import Data.List.OnPartition -- a capping structure for a column data Column a = Bottom [a] -- final caps | Layer [(a, Column a)] -- group caps deriving Show -- 2 groups capped at 1500 and 2500 each with its own subcaps per bill c0 :: Column Int c0 = Layer [(1500, Bottom [400,600,800]), (2500, Bottom [700,1200,500])] -- the tax redistributor for a set of caps type Capper a v = v -> [a] -> [v] {- -- unuseful flatting a Column to a set of capped values flattenStupid :: Capper a v -> v -> Column a -> [v] flattenStupid f v (Bottom xs) = f v xs flattenStupid f v (Layer ls ) = let (cs, bs) = unzip ls new_caps = f v cs new_ls = zip new_caps bs in concat $ zipWith (uncurry $ flattenStupid f) (f v cs) bs -} -- an active column as a list of -- either a fixed value or -- a machine able to recreate the column on cell deletion type ColumnP v a = [Either v (Cell v a)] -- a cell has a value and a function from a value to the new column -- the function is meant to implement row deletion -- it accept the cell assigned value to recompute the column -- it's eventually a form of Moore machine data Cell v a = Cell { proposal :: v , acceptance :: v -> ColumnP v a } unzipCells :: [Cell v a] -> ([v],[v -> ColumnP v a]) unzipCells = unzip . map (proposal &&& acceptance) data RColumn v a = RBottom [Either v a] | RLayer [Either (v, RColumn v v) (a, RColumn v a)] deriving Show makeRColumn :: Column a -> RColumn v a makeRColumn (Bottom ls) = RBottom $ map Right ls makeRColumn (Layer ls) = RLayer $ map Right . map (fmap makeRColumn) $ ls tackle :: Capper a v -> v -> RColumn v a -> RColumn v v tackle f v (RBottom ls) = RBottom $ onRights (f v) ls tackle f v (RLayer ls) = RLayer $ onRights g ls where g rs = let (cs, bs) = unzip rs in zipWith (\v b -> (v, tackle f v b)) (f v cs) bs -- the idea here is to support for deletion in the result of flattening -- we have to close on column values and capping structure, taking care of -- lefts flattenSmart :: (v -> [a] -> [v]) -> v -> Column a -> ColumnP v a flattenSmart = undefined {- flattenSmart f v c = let actuals v c undefined flatten v c' (Bottom ls) = let ls' = rights ls in flatten v (Right <$> c) ( -} -- a solution is just the list of columns type Solution v a = [ColumnP v a] -- to be modeled, the idea is that we need something to decide if there is one (?) promotable row -- based on the row proposals. -- When there is such a row, the final values should be enough to -- produce any result needed by the algo type Discriminant v = forall r -- any future . [([v],[v] -> r)] -- rows and relative future -> Maybe r -- Nothing when there is no future -- produce all the steps until no promotion is possible driver :: Discriminant v -> Solution v a -> [Solution v a] driver f xs = (xs:) -- produce solution . maybe [] (driver f) -- end production on Nothing . f -- let the discriminant produce the future -- for every row! keep rights, unzip proposal and acceptance -- and use acceptance on the incoming final values . map (second (zipWith ($)) . unzipCells . rights) . transpose $ xs -- transpose to rows
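A minimal Python sketch of the Capper idea defined above (hypothetical, for illustration only): a capper maps an available amount and a list of caps to one allotment per cap, here proportional to the caps and never exceeding any single cap.

def proportional_capper(amount, caps):
    """Split `amount` over `caps` proportionally, never exceeding a cap."""
    total = sum(caps)
    if total == 0:
        return [0 for _ in caps]
    return [min(c, amount * c // total) for c in caps]

# Mirrors the c0 example above: 1500 available over the first group's caps.
print(proportional_capper(1500, [400, 600, 800]))  # [333, 500, 666]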
/******************************************************************************* * Copyright 2018 572682 * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy * of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. ******************************************************************************/ package us.dot.its.jpo.ode; import java.io.IOException; import java.io.InputStream; import java.net.InetAddress; import java.net.UnknownHostException; import java.nio.file.Path; import java.nio.file.Paths; import java.util.ArrayList; import java.util.Arrays; import java.util.HashSet; import java.util.List; import java.util.Properties; import java.util.Set; import java.util.UUID; import javax.annotation.PostConstruct; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.beans.factory.annotation.Value; import org.springframework.boot.context.properties.ConfigurationProperties; import org.springframework.context.EnvironmentAware; import org.springframework.context.annotation.PropertySource; import org.springframework.core.env.Environment; import org.thymeleaf.util.StringUtils; import us.dot.its.jpo.ode.context.AppContext; import us.dot.its.jpo.ode.eventlog.EventLogger; import us.dot.its.jpo.ode.model.OdeMsgMetadata; import us.dot.its.jpo.ode.plugin.OdePlugin; import us.dot.its.jpo.ode.util.CommonUtils; @ConfigurationProperties("ode") @PropertySource("classpath:application.properties") public class OdeProperties implements EnvironmentAware { private static final Logger logger = LoggerFactory.getLogger(OdeProperties.class); @Autowired private Environment env; /* * General Properties */ private String version; public static final int OUTPUT_SCHEMA_VERSION = 6; private String pluginsLocations = "plugins"; private String kafkaBrokers = null; private static final String DEFAULT_KAFKA_PORT = "9092"; private String kafkaProducerType = AppContext.DEFAULT_KAFKA_PRODUCER_TYPE; private Boolean verboseJson = false; private int importProcessorBufferSize = OdePlugin.INPUT_STREAM_BUFFER_SIZE; private String hostId; private List<Path> uploadLocations = new ArrayList<>(); /* * RSU Properties */ private int rsuSrmSlots = 100; // number of "store and repeat message" indicies for RSU TIMs private String rsuUsername = ""; private String rsuPassword = ""; /* * Security Services Module Properties */ private String securitySvcsSignatureUri; private int securitySvcsPort = 8090; private String securitySvcsSignatureEndpoint = "sign"; // File import properties private String uploadLocationRoot = "uploads"; private String uploadLocationObuLogLog = "bsmlog"; private Integer fileWatcherPeriod = 5; // time to wait between processing inbox directory for new files /* * USDOT Situation Data Clearinghouse (SDC)/ Situation Data Warehouse (SDW), * a.k.a Data Distribution System (DDS) Properties */ // DDS WebSocket Properties private String ddsCasUrl = "https://cas.cvmvp.com/accounts/v1/tickets"; private String ddsCasUsername = ""; private String ddsCasPass = ""; private String ddsWebsocketUrl = 
"wss://webapp.cvmvp.com/whtools/websocket"; // Enable/disable depositing SDW messages over Websocket(true) or REST(false) @Value("${ode.depositSdwMessagesOverWebsocket:false}") private boolean depositSdwMessagesOverWebsocket = false; /* * UDP Properties */ private int trustRetries = 2; // if trust handshake fails, how many times to retry private int messagesUntilTrustReestablished = 10; // renew trust session every x messages /* * Kafka Topics * */ private String[] kafkaTopicsDisabled = { // disable all POJO topics by default except "topic.OdeBsmPojo". Never // "topic.OdeBsmPojo because that's the only way to get data into // "topic.OdeBsmJson "topic.OdeBsmRxPojo", "topic.OdeBsmTxPojo", "topic.OdeBsmDuringEventPojo", "topic.OdeTimBroadcastPojo" }; private Set<String> kafkaTopicsDisabledSet = new HashSet<>(); // BSM private String kafkaTopicOdeBsmPojo = "topic.OdeBsmPojo"; private String kafkaTopicOdeBsmJson = "topic.OdeBsmJson"; private String kafkaTopicOdeBsmRxPojo = "topic.OdeBsmRxPojo"; private String kafkaTopicOdeBsmTxPojo = "topic.OdeBsmTxPojo"; private String kafkaTopicOdeBsmDuringEventPojo = "topic.OdeBsmDuringEventPojo"; private String kafkaTopicFilteredOdeBsmJson = "topic.FilteredOdeBsmJson"; private String kafkaTopicOdeRawEncodedMessageJson = "topic.OdeRawEncodedMessageJson"; private int bsmReceiverPort = 46800; private int bsmBufferSize = 500; // TIM private String kafkaTopicOdeTimJson = "topic.OdeTimJson"; private String kafkaTopicOdeDNMsgJson = "topic.OdeDNMsgJson"; private String kafkaTopicOdeTimRxJson = "topic.OdeTimRxJson"; private String kafkaTopicOdeTimBroadcastPojo = "topic.OdeTimBroadcastPojo"; private String kafkaTopicOdeTimBroadcastJson = "topic.OdeTimBroadcastJson"; private String kafkaTopicJ2735TimBroadcastJson = "topic.J2735TimBroadcastJson"; private String kafkaTopicFilteredOdeTimJson = "topic.FilteredOdeTimJson"; // DriverAlerts private String kafkaTopicDriverAlertJson = "topic.OdeDriverAlertJson"; // ASN.1 CODEC private String kafkaTopicAsn1DecoderInput = "topic.Asn1DecoderInput"; private String kafkaTopicAsn1DecoderOutput = "topic.Asn1DecoderOutput"; private String kafkaTopicAsn1EncoderInput = "topic.Asn1EncoderInput"; private String kafkaTopicAsn1EncoderOutput = "topic.Asn1EncoderOutput"; // SDW Depositor Module private String kafkaTopicSdwDepositorInput = "topic.SDWDepositorInput"; //Signed Tim with expiration private String kafkaTopicSignedOdeTimJsonExpiration = "topic.OdeTIMCertExpirationTimeJson"; /* * Security Properties */ private String caCertPath; private String selfCertPath; private String selfPrivateKeyReconstructionFilePath; private String selfSigningPrivateKeyFilePath; private static final byte[] JPO_ODE_GROUP_ID = "jode".getBytes(); @PostConstruct void initialize() { String pomPropsFile = "/META-INF/maven/usdot.jpo.ode/jpo-ode-svcs/pom.properties"; try { InputStream resourceAsStream = this.getClass().getResourceAsStream(pomPropsFile); Properties properties = new Properties(); properties.load(resourceAsStream); setVersion(properties.getProperty("version")); logger.info("groupId: {}", properties.getProperty("groupId")); logger.info("artifactId: {}", properties.getProperty("artifactId")); logger.info("version: {}", version); } catch (IOException e) { logger.error("Error loading properties file " + pomPropsFile, e); } OdeMsgMetadata.setStaticSchemaVersion(OUTPUT_SCHEMA_VERSION); uploadLocations.add(Paths.get(uploadLocationRoot)); String hostname; try { hostname = InetAddress.getLocalHost().getHostName(); } catch (UnknownHostException e) { // Let's 
just use a random hostname hostname = UUID.randomUUID().toString(); logger.info("Unknown host error: {}, using random", e); } hostId = hostname; logger.info("Host ID: {}", hostId); EventLogger.logger.info("Initializing services on host {}", hostId); if (kafkaBrokers == null) { logger.info("ode.kafkaBrokers property not defined. Will try DOCKER_HOST_IP => {}", kafkaBrokers); String dockerIp = CommonUtils.getEnvironmentVariable("DOCKER_HOST_IP"); if (dockerIp == null) { logger.warn( "Neither ode.kafkaBrokers ode property nor DOCKER_HOST_IP environment variable are defined. Defaulting to localhost."); dockerIp = "localhost"; } kafkaBrokers = dockerIp + ":" + DEFAULT_KAFKA_PORT; // URI for the security services /sign endpoint if (securitySvcsSignatureUri == null) { securitySvcsSignatureUri = "http://" + dockerIp + ":" + securitySvcsPort + "/" + securitySvcsSignatureEndpoint; } } List<String> asList = Arrays.asList(this.getKafkaTopicsDisabled()); logger.info("Disabled Topics: {}", asList); kafkaTopicsDisabledSet.addAll(asList); } public String getVersion() { return version; } public void setVersion(String version) { this.version = version; } public boolean dataSigningEnabled() { return getSecuritySvcsSignatureUri() != null && !StringUtils.isEmptyOrWhitespace(getSecuritySvcsSignatureUri()) && !getSecuritySvcsSignatureUri().startsWith("UNSECURE"); } public List<Path> getUploadLocations() { return this.uploadLocations; } public String getProperty(String key) { return env.getProperty(key); } public String getProperty(String key, String defaultValue) { return env.getProperty(key, defaultValue); } public Object getProperty(String key, int i) { return env.getProperty(key, Integer.class, i); } public String getHostId() { return hostId; } public String getPluginsLocations() { return pluginsLocations; } public void setPluginsLocations(String pluginsLocations) { this.pluginsLocations = pluginsLocations; } public String getKafkaBrokers() { return kafkaBrokers; } public void setKafkaBrokers(String kafkaBrokers) { this.kafkaBrokers = kafkaBrokers; } public String getKafkaProducerType() { return kafkaProducerType; } public void setKafkaProducerType(String kafkaProducerType) { this.kafkaProducerType = kafkaProducerType; } public Environment getEnv() { return env; } public void setEnv(Environment env) { this.env = env; } @Override public void setEnvironment(Environment environment) { env = environment; } public String getUploadLocationRoot() { return uploadLocationRoot; } public String getDdsCasPassword() { return ddsCasPass; } public void setDdsCasPassword(String ddsCasPass) { this.ddsCasPass = ddsCasPass; } public int getMessagesUntilTrustReestablished() { return messagesUntilTrustReestablished; } public void setMessagesUntilTrustReestablished(int messagesUntilTrustReestablished) { this.messagesUntilTrustReestablished = messagesUntilTrustReestablished; } public String getCaCertPath() { return caCertPath; } public void setCaCertPath(String caCertPath) { this.caCertPath = caCertPath; } public String getSelfCertPath() { return selfCertPath; } public void setSelfCertPath(String selfCertPath) { this.selfCertPath = selfCertPath; } public String getSelfPrivateKeyReconstructionFilePath() { return selfPrivateKeyReconstructionFilePath; } public void setSelfPrivateKeyReconstructionFilePath(String selfPrivateKeyReconstructionFilePath) { this.selfPrivateKeyReconstructionFilePath = selfPrivateKeyReconstructionFilePath; } public String getSelfSigningPrivateKeyFilePath() { return selfSigningPrivateKeyFilePath; } public void 
setSelfSigningPrivateKeyFilePath(String selfSigningPrivateKeyFilePath) { this.selfSigningPrivateKeyFilePath = selfSigningPrivateKeyFilePath; } public Boolean getVerboseJson() { return verboseJson; } public void setVerboseJson(Boolean verboseJson) { this.verboseJson = verboseJson; } public int getBsmReceiverPort() { return bsmReceiverPort; } public void setBsmReceiverPort(int bsmReceiverPort) { this.bsmReceiverPort = bsmReceiverPort; } public int getBsmBufferSize() { return bsmBufferSize; } public void setBsmBufferSize(int bsmBufferSize) { this.bsmBufferSize = bsmBufferSize; } public String getDdsCasUrl() { return ddsCasUrl; } public void setDdsCasUrl(String ddsCasUrl) { this.ddsCasUrl = ddsCasUrl; } public String getDdsCasUsername() { return ddsCasUsername; } public void setDdsCasUsername(String ddsCasUsername) { this.ddsCasUsername = ddsCasUsername; } public String getDdsWebsocketUrl() { return ddsWebsocketUrl; } public void setDdsWebsocketUrl(String ddsWebsocketUrl) { this.ddsWebsocketUrl = ddsWebsocketUrl; } public void setUploadLocationRoot(String uploadLocationRoot) { this.uploadLocationRoot = uploadLocationRoot; } public int getRsuSrmSlots() { return rsuSrmSlots; } public void setRsuSrmSlots(int rsuSrmSlots) { this.rsuSrmSlots = rsuSrmSlots; } public int getTrustRetries() { return trustRetries; } public void setTrustRetries(int trustRetries) { this.trustRetries = trustRetries; } public static byte[] getJpoOdeGroupId() { return JPO_ODE_GROUP_ID; } public int getImportProcessorBufferSize() { return importProcessorBufferSize; } public void setImportProcessorBufferSize(int importProcessorBufferSize) { this.importProcessorBufferSize = importProcessorBufferSize; } public String[] getKafkaTopicsDisabled() { return kafkaTopicsDisabled; } public void setKafkaTopicsDisabled(String[] kafkaTopicsDisabled) { this.kafkaTopicsDisabled = kafkaTopicsDisabled; } public Set<String> getKafkaTopicsDisabledSet() { return kafkaTopicsDisabledSet; } public void setKafkaTopicsDisabledSet(Set<String> kafkaTopicsDisabledSet) { this.kafkaTopicsDisabledSet = kafkaTopicsDisabledSet; } public String getKafkaTopicFilteredOdeBsmJson() { return kafkaTopicFilteredOdeBsmJson; } public void setKafkaTopicFilteredOdeBsmJson(String kafkaTopicFilteredOdeBsmJson) { this.kafkaTopicFilteredOdeBsmJson = kafkaTopicFilteredOdeBsmJson; } public String getKafkaTopicOdeBsmPojo() { return kafkaTopicOdeBsmPojo; } public void setKafkaTopicOdeBsmPojo(String kafkaTopicOdeBsmPojo) { this.kafkaTopicOdeBsmPojo = kafkaTopicOdeBsmPojo; } public String getKafkaTopicOdeBsmJson() { return kafkaTopicOdeBsmJson; } public void setKafkaTopicOdeBsmJson(String kafkaTopicOdeBsmJson) { this.kafkaTopicOdeBsmJson = kafkaTopicOdeBsmJson; } public String getKafkaTopicAsn1DecoderInput() { return kafkaTopicAsn1DecoderInput; } public void setKafkaTopicAsn1DecoderInput(String kafkaTopicAsn1DecoderInput) { this.kafkaTopicAsn1DecoderInput = kafkaTopicAsn1DecoderInput; } public String getKafkaTopicAsn1DecoderOutput() { return kafkaTopicAsn1DecoderOutput; } public void setKafkaTopicAsn1DecoderOutput(String kafkaTopicAsn1DecoderOutput) { this.kafkaTopicAsn1DecoderOutput = kafkaTopicAsn1DecoderOutput; } public String getKafkaTopicAsn1EncoderInput() { return kafkaTopicAsn1EncoderInput; } public void setKafkaTopicAsn1EncoderInput(String kafkaTopicAsn1EncoderInput) { this.kafkaTopicAsn1EncoderInput = kafkaTopicAsn1EncoderInput; } public String getKafkaTopicAsn1EncoderOutput() { return kafkaTopicAsn1EncoderOutput; } public void setKafkaTopicAsn1EncoderOutput(String 
kafkaTopicAsn1EncoderOutput) { this.kafkaTopicAsn1EncoderOutput = kafkaTopicAsn1EncoderOutput; } public String getKafkaTopicOdeDNMsgJson() { return kafkaTopicOdeDNMsgJson; } public void setKafkaTopicOdeDNMsgJson(String kafkaTopicOdeDNMsgJson) { this.kafkaTopicOdeDNMsgJson = kafkaTopicOdeDNMsgJson; } public String getKafkaTopicOdeTimJson() { return kafkaTopicOdeTimJson; } public void setKafkaTopicOdeTimJson(String kafkaTopicOdeTimJson) { this.kafkaTopicOdeTimJson = kafkaTopicOdeTimJson; } public String getUploadLocationObuLog() { return uploadLocationObuLogLog; } public void setUploadLocationObuLog(String uploadLocationObuLog) { this.uploadLocationObuLogLog = uploadLocationObuLog; } public String getKafkaTopicOdeBsmDuringEventPojo() { return kafkaTopicOdeBsmDuringEventPojo; } public void setKafkaTopicOdeBsmDuringEventPojo(String kafkaTopicOdeBsmDuringEventPojo) { this.kafkaTopicOdeBsmDuringEventPojo = kafkaTopicOdeBsmDuringEventPojo; } public String getKafkaTopicOdeBsmRxPojo() { return kafkaTopicOdeBsmRxPojo; } public void setKafkaTopicOdeBsmRxPojo(String kafkaTopicOdeBsmRxPojo) { this.kafkaTopicOdeBsmRxPojo = kafkaTopicOdeBsmRxPojo; } public String getKafkaTopicOdeBsmTxPojo() { return kafkaTopicOdeBsmTxPojo; } public void setKafkaTopicOdeBsmTxPojo(String kafkaTopicOdeBsmTxPojo) { this.kafkaTopicOdeBsmTxPojo = kafkaTopicOdeBsmTxPojo; } public String getKafkaTopicOdeTimRxJson() { return kafkaTopicOdeTimRxJson; } public void setKafkaTopicOdeTimRxJson(String kafkaTopicOdeTimRxJson) { this.kafkaTopicOdeTimRxJson = kafkaTopicOdeTimRxJson; } public String getKafkaTopicOdeTimBroadcastPojo() { return kafkaTopicOdeTimBroadcastPojo; } public void setKafkaTopicOdeTimBroadcastPojo(String kafkaTopicOdeTimBroadcastPojo) { this.kafkaTopicOdeTimBroadcastPojo = kafkaTopicOdeTimBroadcastPojo; } public String getKafkaTopicOdeTimBroadcastJson() { return kafkaTopicOdeTimBroadcastJson; } public void setKafkaTopicOdeTimBroadcastJson(String kafkaTopicOdeTimBroadcastJson) { this.kafkaTopicOdeTimBroadcastJson = kafkaTopicOdeTimBroadcastJson; } public String getKafkaTopicJ2735TimBroadcastJson() { return kafkaTopicJ2735TimBroadcastJson; } public void setKafkaTopicJ2735TimBroadcastJson(String kafkaTopicJ2735TimBroadcastJson) { this.kafkaTopicJ2735TimBroadcastJson = kafkaTopicJ2735TimBroadcastJson; } public String getKafkaTopicFilteredOdeTimJson() { return kafkaTopicFilteredOdeTimJson; } public void setKafkaTopicFilteredOdeTimJson(String kafkaTopicFilteredOdeTimJson) { this.kafkaTopicFilteredOdeTimJson = kafkaTopicFilteredOdeTimJson; } public String getKafkaTopicDriverAlertJson() { return kafkaTopicDriverAlertJson; } public void setKafkaTopicDriverAlertJson(String kafkaTopicDriverAlertJson) { this.kafkaTopicDriverAlertJson = kafkaTopicDriverAlertJson; } public Integer getFileWatcherPeriod() { return fileWatcherPeriod; } public void setFileWatcherPeriod(Integer fileWatcherPeriod) { this.fileWatcherPeriod = fileWatcherPeriod; } public String getSecuritySvcsSignatureUri() { return securitySvcsSignatureUri; } public void setSecuritySvcsSignatureUri(String securitySvcsSignatureUri) { this.securitySvcsSignatureUri = securitySvcsSignatureUri; } public String getRsuUsername() { return rsuUsername; } public void setRsuUsername(String rsuUsername) { this.rsuUsername = rsuUsername; } public String getRsuPassword() { return <PASSWORD>; } public void setRsuPassword(String rsuPassword) { this.rsuPassword = rsuPassword; } public String getKafkaTopicSdwDepositorInput() { return kafkaTopicSdwDepositorInput; } public void 
setKafkaTopicSdwDepositorInput(String kafkaTopicSdwDepositorInput) { this.kafkaTopicSdwDepositorInput = kafkaTopicSdwDepositorInput; } public boolean shouldDepositSdwMessagesOverWebsocket() { return depositSdwMessagesOverWebsocket; } public void setDepositSdwMessagesOverWebsocket(boolean depositSdwMessagesOverWebsocket) { this.depositSdwMessagesOverWebsocket = depositSdwMessagesOverWebsocket; } public String getKafkaTopicSignedOdeTimJsonExpiration() { return kafkaTopicSignedOdeTimJsonExpiration; } public void setKafkaTopicSignedOdeTimJsonExpiration(String kafkaTopicSignedOdeTimJsonExpiration) { this.kafkaTopicSignedOdeTimJsonExpiration = kafkaTopicSignedOdeTimJsonExpiration; } public String getKafkaTopicOdeRawEncodedMessageJson() { return kafkaTopicOdeRawEncodedMessageJson; } public void setKafkaTopicOdeRawEncodedMessageJson(String kafkaTopicOdeRawEncodedMessageJson) { this.kafkaTopicOdeRawEncodedMessageJson = kafkaTopicOdeRawEncodedMessageJson; } }
Monoclonal antibodies for the histopathological diagnosis of cervical neoplasia Dear Sir, Recent correspondents
Optimal designs for some bivariate cokriging models

This article focuses on the estimation and design aspects of a bivariate collocated cokriging experiment. For a large class of covariance matrices, a linear dependency criterion is identified, which allows the best linear unbiased estimator of the primary variable in a bivariate collocated cokriging setup to reduce to a univariate kriging estimator. Exact optimal designs for efficient prediction for such simple and ordinary reduced cokriging models with one-dimensional inputs are determined. Designs are found by minimizing the maximum and the integrated prediction variance, where the primary variable is an Ornstein-Uhlenbeck process. For simple and ordinary cokriging models with known covariance parameters, the equispaced design is shown to be optimal for both criterion functions. The more realistic scenario of unknown covariance parameters is addressed by assuming prior distributions on the parameter vector, thus adopting a Bayesian approach to the design problem. The equispaced design is proved to be the Bayesian optimal design for both criteria. The work is motivated by designing an optimal water monitoring system for an Indian river.

Introduction

Kriging is a method for estimating a variable of interest, known as the primary variable, at unknown input sites. When multiple responses are collected, multivariate kriging, also known as cokriging, is a related method for estimating the variable of interest at a specific location using measurements of this variable at other input sites along with the measurements of auxiliary/secondary variables, which may provide useful information about the primary variable (Myers, 1983, 1991; Wackernagel, 2003; Chiles and Delfiner, 2009). For example, consider a water quality study in which a geologist is interested in estimating pH levels (primary response) at several unsampled locations along a river, but auxiliary information such as phosphate concentration or amount of dissolved oxygen may facilitate more accurate estimates of pH levels. We may also consider a computer experiment, where the engineering code produces the primary response and its partial derivatives. The partial derivatives (secondary variables) provide valuable information about the response (Santner et al., 2010). This scenario is typical when the responses measured are correlated, both non-spatially (at the same input sites) and spatially (over different sites, particularly those close to each other).

Very little is known about designs for such cokriging models. Li and Zimmerman (2015), Madani and Emery (2019), Bueso et al. (1999), Le and Zidek (1994), and Caselton and Zidek (1984) developed optimal designs for multivariate kriging models or multivariate spatial processes; however, the designs were all based on numerical simulations. The key difficulty in using such multivariate models is specifying the cross-covariance between the different random processes. Unlike direct covariance matrices, cross-covariance matrices need not be symmetric; indeed, these matrices must be chosen in such a way that the second-order structure always yields a non-negative definite covariance matrix (Genton and Kleiber, 2015; Subramanyam and Pandalai, 2004). A broad list of valid covariance structures for multivariate kriging models has been proposed by Li and Zimmerman (2015).
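The non-negative definiteness requirement mentioned above can be checked numerically for any candidate bivariate structure. The short sketch below is illustrative only and is not taken from the paper: it assembles the joint covariance matrix of a bivariate process at a handful of one-dimensional sites under a simple proportional-type structure, with made-up parameter values, and inspects the smallest eigenvalue of the resulting matrix.

import numpy as np

def exp_corr(sites, theta):
    # Isotropic exponential correlogram exp(-theta * |h|) on one-dimensional sites.
    h = np.abs(sites[:, None] - sites[None, :])
    return np.exp(-theta * h)

def bivariate_cov(sites, sigma11, sigma22, rho, theta):
    # Joint 2n x 2n covariance of (Z1, Z2) at collocated sites under a proportional
    # structure: C11 = sigma11 * R, C22 = sigma22 * R, C12 = rho * sqrt(sigma11*sigma22) * R.
    R = exp_corr(sites, theta)
    C11, C22 = sigma11 * R, sigma22 * R
    C12 = rho * np.sqrt(sigma11 * sigma22) * R
    return np.block([[C11, C12], [C12, C22]])

sites = np.linspace(0.0, 1.0, 5)                    # hypothetical design points
Sigma = bivariate_cov(sites, sigma11=1.0, sigma22=2.0, rho=0.6, theta=3.0)
print("smallest eigenvalue:", np.linalg.eigvalsh(Sigma).min())   # non-negative => valid structure

For |rho| <= 1 the smallest eigenvalue stays non-negative, since this matrix is the Kronecker product of a 2 x 2 positive semi-definite matrix and a valid correlogram matrix, which is what makes proportional-type families convenient building blocks.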
In this article, we address two issues for bivariate cokriging experiments, (i) estimation of the primary variable and (ii) determining optimal designs by minimizing the mean squared error of the estimation. In the first couple of sections, we discuss simple and ordinary bivariate collocated cokriging models, the various covariance functions available in the literature for such models, and their estimation aspects. Specifically, we consider two stationary and isotropic random functions, Z 1 and Z 2 over D ⊆ R, where Z 1 is the primary variable and Z 2 is the secondary/auxiliary variable. Our main interest is in the prediction of Z 1 , at a single location, say x 0 , in the region of interest. For defining covariance matrices for the bivariate responses, we mainly utilize two families of stationary covariances, namely the generalized Markov-type and the proportional covariance functions. The generalized Markov-type covariance, an extended version of Markov-type covariance, is a new function proposed in this article. Along with the generalized Markov-type and proportional covariances, the other covariance types mentioned by Li and Zimmerman (2015) are also studied. We prove a linear dependency condition under which the best linear unbiased predictor (BLUP) of Z 1 (x 0 ) in a bivariate cokriging model is shown to be equivalent to the BLUP in a univariate kriging setup. A wide class of covariance functions is identified which allows this reduction. In the later part of the article, we determine optimal designs for some cokriging models, particularly those for which the reduction holds true. We consider the maximum and the integrated cokriging variance of Z 1 (x 0 ) as the two design criterion functions. The pri-mary variable is assumed to have an isotropic exponential covariance, that is, it satisfies Cov = σ 11 e −θ|x−x | with marginal variance σ 11 > 0 and the exponential parameter θ > 0. Note, Z 1 (x 0 ) is also called an Ornstein-Uhlenbeck process (Antognini and Zagoraiou, 2010). For known covariance parameters in simple and ordinary cokriging models, we prove that the equispaced design minimizes the maximum and integrated prediction variance, that is, it is both G-optimal and I-optimal. In real life, however, covariance parameters are most likely unknown. To address the dependency of the design selection criterion on the unknown covariance parameters, we assume prior distributions on the parameter vector and instead determine pseudo-Bayesian optimal designs. The equispaced design is also proved to be the Bayesian I-and G-optimal design. The original contributions of this article include (i) a linear dependency condition for reduction of collocated bivariate kriging estimators to a kriging estimator, (ii) the generalized Markov-type covariance, (iii) G-optimal designs for known covariance parameters and Goptimal Bayesian designs, for such simple and ordinary reduced bivariate cokriging models and (iv) I-optimal Bayesian designs. We stress that our sole objective is to find theoretical, exact optimal designs, not numerical designs, for bivariate cokriging models. For this reason, we consider only the exponential covariance structure for the primary variable Z 1 . Note no theoretical exact optimal designs for covariance structures other than the exponential covariance are currently available in the statistical literature. Many researchers have studied D-and I-optimal designs for univariate kriging experiments with an exponential covariance structure. 
For single responses and one-dimensional inputs, Kisel'ák and Stehlík (2008), Zagoraiou and Antognini (2009), Antognini and Zagoraiou (2010) proved that equispaced designs are optimal for trend parameter estimation with respect to average prediction error minimization and the D-optimality criterion. For the information gain (entropy criterion) also, the equispaced design was proved to be optimal by Antognini and Zagoraiou (2010). Zimmerman (2006) studied designs for universal kriging models and showed how the optimal design differs depending on whether covariance parameters are known or estimated using numerical simulations on a two-dimensional grid. Diggle and Lophaven (2006) proposed Bayesian geostatistical designs focusing on efficient spatial prediction while allowing the parameters to be unknown. Exact optimal designs for linear and quadratic regression models with one-dimensional inputs and error structure of the autoregressive of order one form were determined by Dette et al. (2008). This work was further extended by Dette et al. (2013) to a broader class of covariance kernels, where they also showed that the arcsine distribution is universally optimal for the polynomial regression model with correlation structure defined by the logarithmic potential. Baran et al. (2013) and Baran and Stehlík (2015) investigated optimal designs for parameters of shifted Ornstein-Uhlenbeck sheets for two input variables. More recently, Sikolya and Baran (2020) worked with the prediction of a complex Ornstein-Uhlenbeck process and derived the optimal design with respect to the entropy maximization criterion. In Sections 2 and 3 we introduce bivariate cokriging models and the related functions, respectively. The linear dependency condition which allows the BLUP of a cokriging model to reduce to the BLUP of a kriging model is discussed in Section 4. In Section 5, we discuss optimal designs for some cokriging models with known and unknown parameters. An illustration using a water quality data set is provided in Section 6. Concluding remarks are given in Section 7. Cokriging models and their estimation In this section, multivariate kriging models along with their direct covariance and crosscovariance structures are defined. Our focus is on bivariate processes with one-dimensional inputs. Consider two simultaneous random functions Z 1 (·) and Z 2 (·), where Z 1 (·) is the primary response and Z 2 (·) the secondary response. We assume both responses are observed over the region D ⊆ R. In multivariate studies, usually the sets of points at which different random functions are observed might not coincide, but in the case that it does, the design is said to be completely collocated or simply collocated (Li and Zimmerman, 2015). In this article, we work with a completely collocated design and consider that Z 1 (·) and Z 2 (·) are both sampled at the same set of points S = {x 1 , x 2 , . . . , x n }, where S ⊆ D ⊆ R. We consider Z i to be the n × 1 vector of all observations for the random function Z i (·) for i = 1, 2. These random functions are characterized by their mean and covariance structures, with E = m i (x) and Cov(Z i (x), Z j (x )) = C ij (x, x ), for x, x ∈ D and i, j = 1, 2. The underlying linear model is given by: where F F F i is the n × p i matrix, with its k th row given by is the p i × 1 vector of known basis drift functions f l i (.) for l = 0, . . . , p i and π π π i is the p i × 1 vector of parameters. From equation (1) we see m i (x) = f f f T i (x)π π π i for i = 1, 2 and x ∈ D. 
We assume i to be a zero mean column vector of length n corresponding to the random variation of Z i . The error covariance is Cov( i (x), j (x )) = Cov(Z i (x), Z j (x )) = C ij (x, x ), for x, x ∈ D and i, j = 1, 2. Using matrix notation, the model in equation (1) can be rewritten as: where Z = (Z T 1 , Z T 2 ) T is a 2n×1 vector, = ( T 1 , T 2 ) T , π π π = (π π π T 1 , π π π T 2 ) T , and F F F = F F F 1 0 0 0 0 0 0 F F F 2 . We are interested in predicting the value of the primary random function Z 1 (·) at x 0 ∈ D, using the best linear unbiased predictor (BLUP). The true value of Z 1 (x 0 ) is denoted by Z 0 , that is, A cokriging estimator of Z 0 , as given by Chiles and Delfiner (2009, Chapter 5), is an affine function of all available information on Z 1 (·) and Z 2 (·) at the n sample points, given . . , λ in ) T is an n × 1 vector of weights for i = 1, 2. The cokriging estimators can be shown to be the BLUP of Z 0 (see Ver Hoef and Cressie, 1993, for more details). Some notations we use throughout the paper are: for i, j = 1, 2, and the covariance of the entire vector Z is denoted Σ Σ Σ = C C C 11 C C C 12 C C C 21 C C C 22 . Note, Σ Σ Σ is a 2n × 2n matrix. Estimation in simple cokriging models In a simple cokriging model, the means m i (x) are taken to be constant and known. Thus, without loss of generality, we may assume in such cases that the Z i 's are zero mean processes for i = 1, 2 and therefore in this case π π π = (0, 0) T . For known covariance parameters (Chiles and Delfiner, 2009, Chapter 5) the cokriging estimator of Z 0 , denoted by Z * * sck , and the cokriging variance, denoted by σ 2 sck (x 0 ), which is also the mean squared prediction error (M SP E) at x 0 , are given by: Bivariate covariance functions In Section 2, we noted the dependency of the cokriging estimators and their variances on the covariance functions. In this article, we consider only isotropic covariance functions, that is, where · is some norm function over D. We focus on two families of bivariate covariance functions, namely, i) the generalized Markov-type covariance and ii) the proportional covariance (see Journel (1999), Chiles and Delfiner (2009, Chapter 5), Banerjee et al. (2014, Chapter 9)). Note, that both of these families allow the primary variable to assume any valid covariance. Therefore we can generate a large number of covariance functions from these two families. Also, we will see that the most popularly used covariances belong to either one of these families. Optimal designs based on some of these covariance functions are discussed later. The first family of bivariate covariance functions that we discuss is, the newly proposed generalized Markov-type covariance function. This is an extended form of the Markov-type covariance function mentioned in Chiles and Delfiner (2009, Chapter 5) and Journel (1999). Suppose the two random functions Z 1 (·) and Z 2 (·) have respective variances σ 11 and σ 22 , where σ 11 , σ 22 > 0 and correlation coefficient ρ, |ρ| < 1. If (σ 22 − ρ 2 σ 11 ) > 0, then the generalized Markov-type function is given as follows: the cross-covariance function C 12 (·) is considered to be proportional to C 11 (·) that is, C 12 (h) = ρC 11 (h), and the direct covariance for the secondary variable is given by C 22 (h) = ρ 2 C 11 (h) + (σ 22 − ρ 2 σ 11 )C R (h) for some valid correlogram C R (.) and for h ∈ R. Thus, the covariance matrix for the bivariate vector Z under the generalized Markov-type structure has the form: where . . , n. 
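To make the generalized Markov-type construction above concrete, the sketch below (not from the paper; all numerical values are hypothetical) assembles the joint covariance matrix of the collocated data under C12(h) = rho * C11(h) and C22(h) = rho^2 * C11(h) + (sigma22 - rho^2 * sigma11) * C_R(h), and then computes the simple cokriging prediction at a new site x0 using the standard zero-mean BLUP formulas: weights Sigma^{-1} sigma_0 and variance C11(0) - sigma_0' Sigma^{-1} sigma_0.

import numpy as np

def exp_cov(x, y, sigma, theta):
    # Isotropic exponential covariance sigma * exp(-theta * |x - y|).
    return sigma * np.exp(-theta * np.abs(np.subtract.outer(x, y)))

# Hypothetical collocated design, prediction site and parameters (illustration only).
x = np.linspace(0.0, 1.0, 6)
x0 = np.array([0.37])
sigma11, sigma22, rho, theta, phi_r = 1.0, 1.5, 0.5, 4.0, 9.0

# Generalized Markov-type structure for the 2n x 2n covariance of (Z1, Z2).
C11 = exp_cov(x, x, sigma11, theta)
C_R = exp_cov(x, x, 1.0, phi_r)              # a valid correlogram for the residual part
C12 = rho * C11
C22 = rho**2 * C11 + (sigma22 - rho**2 * sigma11) * C_R
Sigma = np.block([[C11, C12], [C12.T, C22]])

# Covariances between the observed vector (Z1, Z2) and Z1(x0).
s10 = exp_cov(x, x0, sigma11, theta).ravel()
s0 = np.concatenate([s10, rho * s10])        # C21(., x0) = rho * C11(., x0) here

# Simple cokriging BLUP with known zero means (standard formulas, used as a sketch).
weights = np.linalg.solve(Sigma, s0)
var_sck = sigma11 - s0 @ weights
print("cokriging weights:", np.round(weights, 3))
print("simple cokriging variance at x0:", round(float(var_sck), 4))

Because C12 is proportional to C11 in this structure, the weights on the Z2 block come out as zero and the printed variance matches the univariate simple kriging variance based on Z1 alone, in line with the reduction result formalized next.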
The validity of the proposed generalized Markov-type covariance function is discussed in details in A.1. Bivariate covariance function Specifications A. Reduction of cokriging estimators to kriging In this section, we discuss conditions under which the cokriging BLUP for the primary variable is reduced to a kriging BLUP. From Sections 2.1 and 2.2, it is not apparent that the cokriging and kriging estimators may be similar, particularly given the potentially non-zero correlation suggesting dependency between Z 1 (·) and Z 2 (·). However, in Lemma 4.1, we show that a linear dependency condition allows this reduction. Some covariance functions for which the reduction does not hold are also discussed. We know that kriging is the univariate version of cokriging. Denoting the simple and ordinary kriging estimator of Z 0 by Z * sk and Z * ok , respectively, and the respective variances (M SP E) at x 0 by σ 2 sk (x 0 ) and σ 2 ok (x 0 ), from Chiles and Delfiner (2009) we have, Lemma 4.1. For a collocated bivariate cokriging problem with isotropic covariance structures, if the covariance functions C 11 (.) and C 12 (.) are linearly dependent; Proof. Consider Σ Σ Σ −1 , which can be written as: From the isotropy assumption we have C 12 (·) = C 21 (·), and from the assumption of linear dependence of C 12 (·) and C 11 (·), we have C 12 (·) = c C 11 (·) for some c ∈ R. Since our designs are collocated, we may write C C C 12 = C C C 21 and C C C 12 = c C C C 11 , which implies C C C 12 C C C −1 11 = cI I I n Also, note that σ σ σ 20 = c σ σ σ 10 . Hence, and σ σ σ T 0 = (σ σ σ T 10 , cσ σ σ T 10 ). For simple cokriging models, substituting (13) and (14) in (3) and (4), and after some simple matrix calculations, we note that the expressions for the estimator Z * * sck and variance σ 2 sck (x 0 ) are the same as that of a simple kriging estimator Z * sk and its variance σ 2 sk (x 0 ), respectively. Following similar steps for the ordinary cokriging model case, we substitute (13) and (14) in (5) and (6). The ordinary cokriging estimator and variance can similarly be shown to be the same as that of the ordinary kriging estimator and its variance, respectively. So, in particular, we can say that the equivalency of the kriging and cokriging estimation also holds good for Mat(0.5), Mat(1.5), and Mat(∞) (as they belong to the proportional covariance family) and NS1 (as it belongs to the generalized Markov type covariance family). However, this reduction does not always hold true for a collocated experiment. Case 3. NS2 covariance function: In this case, we see that the cokriging estimation is not the same as the kriging estimation. Similarly, in the case of an NS3 covariance function, it can be shown that the cokriging estimation differs from the kriging estimation. Optimal designs In this section and the following ones, we prove various results for optimally designing collocated bivariate cokriging experiments. The set on which the random functions Z 1 (·) and Z 2 (·) are observed is a connected subset of R, denoted by D, while the set on which they are sampled is denoted by In the context of finding a design, we are essentially interested in choosing a set of distinct points {x 1 , . . . , x n } which maximizes the prediction accuracy of the primary response Z 1 (·). To choose such a design, the supremum of M SP E, denoted as SM SP E, where or alternatively, an integrated version of M SP E denoted by IM SP E, where are minimized. 
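As a numerical illustration of these two criteria (and of the reduction just discussed, which lets the cokriging variance be computed from the primary variable alone when C12 is proportional to C11), the sketch below evaluates SMSPE and IMSPE over a fine grid of prediction sites for two candidate designs on [0, 1]. The covariance parameters and the competing design are made up; the variance formula used is the standard zero-mean simple kriging prediction variance.

import numpy as np

def sk_var(x0, design, sigma11, theta):
    # Simple kriging (known zero mean) prediction variance of Z1(x0) for an
    # exponential covariance sigma11 * exp(-theta * |h|); by the reduction above,
    # this is also the collocated cokriging variance when C12 = c * C11.
    P = np.exp(-theta * np.abs(design[:, None] - design[None, :]))
    p0 = np.exp(-theta * np.abs(design - x0))
    return sigma11 * (1.0 - p0 @ np.linalg.solve(P, p0))

def smspe_imspe(design, sigma11, theta, ngrid=2001):
    grid = np.linspace(0.0, 1.0, ngrid)
    v = np.array([sk_var(t, design, sigma11, theta) for t in grid])
    return v.max(), v.mean()       # max over the grid; the mean approximates the integral on [0, 1]

sigma11, theta, n = 1.0, 5.0, 6    # hypothetical parameter values
equispaced = np.linspace(0.0, 1.0, n)
uneven = np.array([0.0, 0.05, 0.10, 0.20, 0.50, 1.00])   # same end points, uneven spacing

for name, d in [("equispaced", equispaced), ("uneven", uneven)]:
    s, i = smspe_imspe(d, sigma11, theta)
    print(name, " SMSPE:", round(s, 4), " IMSPE:", round(i, 4))

The equispaced design should come out ahead on both criteria, which is what the optimality results in the following sections establish formally.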
Since replications are not allowed, the points are assumed to be ordered, that is, x i < x j for i < j, and the distance between two consecutive points is denoted by For kriging models, since extrapolation should be treated with caution (Sikolya and Baran, 2020), we take an approach similar to Sikolya and Baran (2020) and Antognini and Zagoraiou (2010). The starting and end points of the design, x 1 and x n are considered to be known and given by the extreme ends of the area under observation. This approach in fact reduces the number of variables in the design problem and makes it more simplified. Hence, We equivalently denote the design by the vector ξ ξ ξ = (x 1 , d 1 , d 2 , . . . , d n−1 , x n ) in terms of the starting point, consecutive distances between the points, and the end point. In this article, for the purpose of finding optimal designs we consider simple and ordinary bivariate collocated cokriging models, with isotropic random functions. The covariance functions belongs to generalized Markov-type or proportional covariance family. For these families of covariance functions, we have seen in the earlier sections that the cokriging to kriging reduction holds true. We also consider that the primary variable Z 1 (·) is an Ornstein-Uhlenbeck process with exponential parameter θ > 0 and variance σ 11 > 0. Hence, C P (|h|) = e −θ|h| would mean C 11 (|h|) = σ 11 C P (|h|) and the matrix P P P and vector σ σ σ p0 are given by (P P P ) ij = e −θ|x i −x j | and (σ σ σ p0 ) i = e −θ|x i −x 0 | for all i, j = 1, . . . , n and x 0 ∈ D. Note, the optimal designs found in this paper are applicable in particular, to collocated cokriging experiments with Mat(0.5) or NS1 covariance function as well (as they belong to proportional type and generalized Markov-type family, respectively and for both of these functions, the primary variable has an exponential covariance with exponential parameter θ = −log(λ) as per Table 1). Optimal design results We will show that optimal designs obtained for either criterion (SMSPE/IMSPE ), for both known and unknown covariance parameters, are equispaced. The following lemma gives the mathematical forms of M SP E sck (.) and M SP E ock (.), and are used in all the results in this article. Proof. Note that from Lemma 4.1, for the above two families of covariance function (the generalized Markov-type covariance and the proportional covariance) the cokriging estimation reduces to a kriging estimation. Using equation (46) from D, in equation (17) and doing simple algebraic computations gives the above expression of M SP E sck (x 0 ) (same as M SP E sk (x 0 ) in this case). Similarly, using equations (46) and (47) from D, in equation (18) gives the above expression of M SP E ock (x 0 ) (same as M SP E ok (x 0 ) in this case). Note: The M SP E expressions are the same as in Lemma 5.1 when the covariance functions are Mat(0.5) or NS1 (in that case θ = −log(λ)). To reduce the computational complexity, we further claim that a random process over could be viewed as a process over . Hence, if we need to find an n point optimal design with fixed end points for an exponential process with parameter θ defined over , we can equivalently find the n point optimal design with fixed end points for the exponential process with parameter θ(x n − x 1 ) and defined over . Conversely, if an (optimal) design over is given by {y 1 , . . . 
, y n }, where y 1 = 0 and y n = 1, we can get the equivalent (optimal) design over by taking the transformation So, from now onwards since D ⊆ R is connected, without loss of generality we assume Optimal designs for reduced bivariate simple cokriging model with known parameters In this section, we determine optimal designs for a simple cokriging model in Theorems 5.1 and 5.2. Theorem 5.1. Consider the reduced bivariate simple cokriging models as in Lemma 5.1, with the covariance parameters of the primary response, θ and σ 11 , being known. An equispaced design minimizes the SM SP E sck . Thus, the equispaced design is the G-optimal design. Proof. Consider a point x 0 ∈ D, such that x 0 ∈ for some i = 1, . . . , n − 1, then from Lemma 5.1, Consider, W sup (·) to be a function defined on , such that W sup (d) = 1 − e −θd 1 + e −θd . Then (51) From equation (22), for known θ and σ 11 , the SM SP E sck is a function of max i d i . Since W sup (d) is an increasing function, therefore SM SP E sck is minimized when max i d i is minimized, which occurs for an equispaced partition. Theorem 5.2. Consider the reduced bivariate simple cokriging models as in Lemma 5.1, with known covariance parameters θ and σ 11 . An equispaced design minimizes the IM SP E sck . Thus, the equispaced design is the I-optimal design. Proof. From Lemma 5.1 we can write, where Using F, we can say that IM SP E sck is a Schur-convex function and hence it is minimized for an equispaced design, that is, d i = 1 n−1 for all i = 1, . . . , n − 1. Optimal designs for reduced bivariate simple cokriging models with unknown parameters In real life, while designing an experiment, the exponential covariance parameters θ and σ 11 are usually unknown with very little prior information. In this section, we discuss optimal designs for simple cokriging models with the primary response having an exponential covariance structure but with unknown parameters. To address the dependency of the design selection criterion on the unknown covariance parameters, we assume prior distributions on the parameter vector and instead propose pseudo-Bayesian optimal designs. The prior distributions on the covariance parameters are incorporated into the optimization criteria by integrating over these distributions. This approach is known as the pseudo-Bayesian approach to optimal designs and has been used previously by Chaloner and Larntz (1989), Dette and Sperlich (1996) (2019). The Bayesian approach has been seen to yield more robust optimal designs which are less sensitive to fluctuations of the unknown parameters than locally optimal designs. We start by assuming θ and σ 11 are independent and their respective distributions are r(·) and t(·). A very high value of θ would mean that the covariance matrix for Z 1 (·) is approximately an identity matrix, implying zero dependence among neighboring points. Since this is not reasonable for such correlated data, we assume 0 < θ 1 < θ < θ 2 < ∞. Using a pseudo-Bayesian approach as in Chaloner and Larntz (1989) we define risk functions corresponding to each design criterion as: Our objective is to select the designs that minimize these risks. Theorem 5.3. Consider the reduced bivariate simple cokriging models as in Lemma 5.1. The parameters θ and σ 11 are assumed to be unknown and independent with prior probability density functions r(·) and t(·), respectively. The support of r(·) is of the form (θ 1 , θ 2 ), where 0 < θ 1 < θ 2 < ∞. 
Then, an equispaced design is optimal with respect to the risk function R 1 (ξ ξ ξ). Proof. From (22) we can write, Thus, As W sup (θ, d) is an increasing function of d, equation (26) shows R 1 is minimized for an equispaced design, since max i d i is minimized for an equispaced design. Theorem 5.4. Consider the reduced bivariate simple cokriging models as in Lemma 5.1. The parameters θ and σ 11 are assumed to be unknown and independent with prior probability density functions r(·) and t(·), respectively. The support of r(·) is of the form (θ 1 , θ 2 ), where 0 < θ 1 < θ 2 < ∞. Then, an equispaced design is optimal with respect to the risk function R 2 (ξ ξ ξ). Proof. Consider R 2 : I n−1 −→ R, where I = . R 2 (·) is symmetric on I n−1 as IM SP E sck is symmetric on I n−1 , that is R 2 is permutation invariant in d i . If we can show ∂R 2 (ξ ξ ξ) ∂d l − ∂R 2 (ξ ξ ξ) ∂d k ≥ 0, for any d l ≥ d k , where k, l = 1, . . . , n − 1, then as before in Theorem 5.2 using the Schur-convexity of R 2 we will prove the equispaced design is optimal. Consider, ( Using Leibniz's Rule as in Protter et al. (2012, chapter 8), which allows changing the order of differentiation and integration) For d l ≥ d k , the quantity ∆ in (27) is positive, since from (52) we have ∂φ(d l ) ∂d l − ∂φ(d k ) ∂d k > 0 for any d l > d k . Thus, R 2 (ξ ξ ξ) is Schur-convex and is minimized for an equispaced design. Thus, we have proved the equispaced design is both locally and Bayesian optimal with respect to the SM SP E and IM SP E criteria for simple cokriging models. Note, for the Bayesian designs we have assumed prior distribution of covariance parameter θ with bounded support not containing zero. So, our results are true for any prior of θ with support as mentioned before. Optimal designs for reduced bivariate ordinary cokriging models In this section, we discuss optimal designs for ordinary cokriging models with exponential covariance structures. The mean of the random function Z 1 (·) is assumed to be unknown and constant (for details see Section 2.2). Taking a similar approach as before, in this section, we prove in Theorem 5.5 that the equispaced design is the G-optimal design. Though it has already been shown by Antognini and Zagoraiou (2010) that for kriging models with unknown trend and known covariance parameter an equispaced design is I-optimal, we state the same result in Theorem 5.6, since we provide an alternative way of calculating M SP E ock (x 0 ) with simpler matrix calculations, which could be useful in the future. Also, in Theorems 5.7 and 5.8 we again are able to show that the equispaced design is both locally and Bayesian I-and G-optimal. Theorem 5.5. Consider the reduced bivariate ordinary cokriging models as in Lemma 5.1, where the covariance parameters, θ and σ 11 , are known. An equispaced design minimizes the SM SP E ock . Thus, the equispaced design is the G-optimal design. Proof. We calculate sup M SP E ock (x 0 ) and minimize it with respect to ξ ξ ξ. From Lemma 5.1 we have, From F and G, we can say that sup 1 − σ σ σ T p0 P P P −1 σ σ σ p0 and sup 1 − 1 1 1 T n P P P −1 σ σ σ p0 2 are attained at Usually, suprema are not additive. However, if two functions f 1 , f 2 : D 1 → D 2 , where D 1 , D 2 ⊆ R, both attain their suprema at the same point x 1 ∈ D 1 , then we will have sup x∈D 1 f 1 (x) + f 2 (x) = sup x∈D 1 f 1 (x) + sup x∈D 1 f 2 (x). Thus, we write, Hence, as W sup (·) and U sup (·) are increasing functions and Ω(ξ ξ ξ) is permutation invariant. Since, max for an equispaced partition. 
Also, 1 Ω(ξ ξ ξ) is minimized for an equispaced partition (C). So, we have proved that the equispaced design for known θ and σ 11 , minimizes SM SP E ock and therefore is G-optimal. Theorem 5.6. Consider the reduced bivariate ordinary cokriging models as in Lemma 5.1, with covariance parameters of the primary response, θ and σ 11 , being known. An equispaced design minimizes the IM SP E ock . Thus, the equispaced design is the I-optimal design. Proof. This result has been derived and proved in Theorem 4.2 by Antognini and Zagoraiou (2010). However, we still derive IM SP E ock in this paper, as we have used a different matrix approach for calculating IM SP E ock . The approach used here is much simpler. Consider a point x 0 ∈ D and x 0 ∈ , for some i = 1, ..., n − 1, then from Lemma 5.1, Using, After doing some careful calculations, we obtain the expression for IM SP E ock . where Now using similar steps as in Theorem 4.2 of Antognini and Zagoraiou (2010), it can be shown that IM SP E ock is I-optimal. Theorems 5.5 and 5.6 both deal with the scenario in which the covariance parameters are known. To address the situation of unknown covariance parameters, we take a similar approach as in Section 5.3. The prior distributions of θ and σ 11 are assumed to be known. We minimize the expected value of SM SP E ock and IM SP E ock of ordinary cokriging denoted by: Theorem 5.7. Consider the reduced bivariate ordinary cokriging model as in Lemma 5.1. The parameters θ and σ 11 are assumed to be unknown and independent with prior probability density functions r(·) and t(·), respectively. The support of r(·) is of the form (θ 1 , θ 2 ), where 0 < θ 1 < θ 2 < ∞. Then, an equispaced design is optimal with respect to the risk function R 3 (ξ ξ ξ). Proof. Denoting max i d i = d max we have: . Then, ( Using Leibniz's Rule as in Protter et al. (2012, chapter 8)) Note, Thus, Note, that for d l > d k , from (41) So, from equation (34) is Schur-convex and is minimized for an equispaced design. Theorem 5.8. Consider the reduced bivariate ordinary cokriging model as in Lemma 5.1. The parameters θ and σ 11 are assumed to be unknown and independent with prior probability density functions r(·) and t(·), respectively. The support of r(·) is of the form (θ 1 , θ 2 ), where 0 < θ 1 < θ 2 < ∞. Then, an equispaced design is optimal with respect to the risk function R 4 (ξ ξ ξ). Proof. Using the same line of proof as in Theorem (5.4) we can show that the equispaced design is I-optimal for an unknown parameter case as well. Case study In this section, we are interested in using the proposed optimality results in the earlier section to design a river monitoring network for the efficient prediction of water quality. A pilot data set of water quality data from river Neyyar in southern India is used to obtain preliminary information about parameters. We will illustrate how the theory that we developed in Sections 4 and 5 is applied to this problem. The image of the river is shown in Figure 1, where the monitoring stations on the river basin are marked with squares. We will compare the performance of the equispaced design with the given choice of stations for designing a cokriging experiment on this river. The location of each monitoring station is specified by its geographical coordinates, that is, latitude and longitude. At each of these stations, measurements are taken for two variables: pH and phosphate which are used to measure the quality of water. 
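A sketch of how such a design comparison can be carried out is given below. The station positions and parameter values here are entirely hypothetical (they are not the Neyyar coordinates), the mean of the primary variable is treated as an unknown constant, and the prediction variance used is the standard ordinary kriging variance for an exponential covariance, which is what the ordinary cokriging variance reduces to under the covariance families considered above.

import numpy as np

def ok_var(x0, design, sigma11, theta):
    # Standard ordinary kriging prediction variance (unknown constant mean) for
    # the exponential covariance sigma11 * exp(-theta * |h|).
    P = np.exp(-theta * np.abs(design[:, None] - design[None, :]))
    p0 = np.exp(-theta * np.abs(design - x0))
    ones = np.ones_like(design)
    a = np.linalg.solve(P, p0)
    b = np.linalg.solve(P, ones)
    return sigma11 * (1.0 - p0 @ a + (1.0 - ones @ a) ** 2 / (ones @ b))

def criteria(design, sigma11, theta, ngrid=2001):
    grid = np.linspace(0.0, 1.0, ngrid)
    v = np.array([ok_var(t, design, sigma11, theta) for t in grid])
    return v.max(), v.mean()        # SMSPE and (approximate) IMSPE on [0, 1]

sigma11, theta = 1.0, 17.0          # theta of the same order as the estimate reported later
given = np.array([0.0, 0.03, 0.07, 0.15, 0.26, 0.40, 0.55, 0.72, 0.88, 1.0])  # hypothetical stations
equispaced = np.linspace(0.0, 1.0, len(given))

s_eq, i_eq = criteria(equispaced, sigma11, theta)
s_gv, i_gv = criteria(given, sigma11, theta)
print("relative efficiency, IMSPE:", round(i_eq / i_gv, 3))   # IMSPE(optimal) / IMSPE(given)
print("relative efficiency, SMSPE:", round(s_eq / s_gv, 3))

Values below one quantify how much prediction accuracy a monitoring network gives up relative to an equispaced layout.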
For carrying out the analysis, that is, gathering information on the direct covariance and cross-covariance functions and parameters of the two responses, we use data from a single branch of the river with 17 stations (see the encircled region in Figure 1). We denote this branch of the river by D 2 (⊆ R 2 ) and in this case we have n = 17. We denote the set of sampling points on this river branch by S 2 = {w 1 , . . . , w 17 } (⊆ D 2 ), where each w i = (latitude i , longitude i ), i = 1, . . . , 17. Let w 1 and w 17 respectively be the starting (station 6) and the end point (station 26) of the river branch, and suppose we assume w i is upstream of w j if i < j for all i, j = 1, . . . , 17. The results that we obtained for determining optimal designs in earlier sections were based on one-dimensional inputs, that is, where the region of interest was denoted by D ⊂ R. In fact, without loss of generality we had assumed D = . So, we first use a transformation on our two-dimensional input sets S 2 and D 2 given by: where || u − v || is the geodesic stream distance between the two points u and v along the river and u, v ∈ D 2 . The geodesic distance is used to calculate distance on the earth's surface and is discussed in Banerjee et al. (2014) in detail. The stream distance is the shortest distance between two locations on a stream, where the distance is computed along the stream (Ver Hoef et al., 2006). In this case it was not possible to calculate the exact stream distance using solely the coordinates of monitoring points. So, the stream distance between two adjacent points was approximated by the geodesic distance between the two points. The transformed region of interest ϕ(D 2 ) = D 1 = and the set of sampling points ϕ(S 2 ) = S 1 are one-dimensional. We had to constrain ourselves to a single branch of river as a single branch of river is connected and hence can be considered to be a one-dimensional object. For example, consider stations 10, 18 and 23 which are very close to the main branch, but if these points were included, then the transformation to a one-dimensional set would not work. The transformed set of observation points is given by S 1 = {x 1 , x 2 , . . . , x 17 } where ϕ(w i ) = x i for all i = 1, . . . , 17. Also, by definition of the function ϕ(·) x 1 = 0, x 17 = 1 and x i < x j for i < j, and d i = x i+1 − x i for i = 1, . . . , 16. We took the pH level (a scalar with no units) as the primary variable Z 1 (·), and phosphate concentration (measured in mg/l) as the secondary variable Z 2 (·), with both the variables centered and scaled. To investigate the covariance function and corresponding parameters we fitted a model by likelihood maximization, separately for each variable. Below we see Table 2, which was computed using the likf it function with a constant mean (that is, corresponding to unknown mean) from the geoR package (R-3.6.0 software). The likelihood values in Table 2 suggest that taking the random processes as a zero-mean process with an exponential variance structure and zero nugget effect is a reasonable choice for both variables. Using the information from the univariate analysis of pH and phosphate we next try to set up the appropriate bivariate simple cokriging model. Note that for both variables, we tried to fit a Gaussian covariance structure, however, the algorithm did not converge. We consider Z 1 (·) and Z 2 (·) to have the exponential parameters θ and φ, respectively. 
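The transformation ϕ(·) described above can be sketched as follows. The coordinates below are made up (they are not the Neyyar monitoring stations), the geodesic distance between consecutive stations is approximated by the haversine formula, and the cumulative distances are rescaled so that the first station maps to 0 and the last to 1, as in the construction of S1.

import numpy as np

def haversine_km(p, q):
    # Approximate geodesic distance in km between two (latitude, longitude) points in degrees.
    R = 6371.0
    lat1, lon1, lat2, lon2 = map(np.radians, (p[0], p[1], q[0], q[1]))
    a = np.sin((lat2 - lat1) / 2) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin((lon2 - lon1) / 2) ** 2
    return 2.0 * R * np.arcsin(np.sqrt(a))

def stream_transform(stations):
    # Map stations w_1, ..., w_n (ordered from upstream to downstream) to [0, 1] by
    # cumulative stream distance, with each segment approximated by the geodesic
    # distance between consecutive stations.
    segments = [haversine_km(stations[i], stations[i + 1]) for i in range(len(stations) - 1)]
    cumulative = np.concatenate([[0.0], np.cumsum(segments)])
    return cumulative / cumulative[-1]          # x_1 = 0 and x_n = 1 by construction

# Hypothetical coordinates for a short river branch.
stations = [(8.530, 77.120), (8.545, 77.138), (8.560, 77.150), (8.578, 77.161), (8.601, 77.170)]
print(np.round(stream_transform(stations), 3))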
The results from Table 2 for pH and phosphate indicate a large difference betweenθ and φ. Thus, it seems more appropriate to assume a generalized Markov-type bivariate covariance rather than proportional covariances in the bivariate cokriging model. Based on the assumption of normal errors, the log-likelihood function is: where Z = (Z 1 , Z 2 ) T , Σ Σ Σ = C C C 11 ρC C C 11 ρC C C 11 ρ 2 C C C 11 + (σ 22 − ρ 2 σ 11 )C C C R , and C C C R is chosen to be the identity matrix. Relative efficiency based on IMSPE of design ξ ξ ξ 0 with respect to the optimal design ξ ξ ξ * is defined as the ratio, IMSPE(ξ ξ ξ * ) IMSPE(ξ ξ ξ 0 ) . For known parameters, using the expression of IMSPE in Theorem 5.2, the relative efficiency of the river network (or design) ξ ξ ξ 0 is found to be 0.797. Similarly, for the SMSPE criterion we define the ratio as SMSPE(ξ ξ ξ * ) SMSPE(ξ ξ ξ 0 ) . For the SMSPE criterion, using Theorem 5.1 the relative efficiency of the river network ξ ξ ξ 0 is 0.524. Note that relative efficiency values in both cases indicate a sizable increase in prediction accuracy if equispaced designs were used instead. Illustration 6.2. Relative efficiency for unknown parameters Consider, θ ∼ U nif (θ 1 , θ 2 ) for 0 < θ 1 < θ 2 < ∞, a common choice of prior for θ (see Stehlík et al. (2015)) and σ 11 ∼ t(·) for some density function t(·). Note we could have chosen any prior function for θ other than the uniform distribution as long as it had a finite support. The risks for the uniform prior are, and, where max The relative efficiency is then Note, these risks in (36) and (37) would differ if we change the prior. However ξ ξ ξ * would remain same. Usingθ = 17.12, we choose θ 1 and θ 2 such that the mean of the interval isθ. Varying the range of values for θ 1 and θ 2 , the relative risks are shown in the following Table 3. From Table 3: Relative risk of given design -IMSPE and SMSPE criterion Table 3, we note small changes in the relative efficiency for changes in θ 1 and θ 2 , suggesting that the criterion is robust to changes in the prior information regarding θ. This robustness persists when we change the values ofθ. We also checked relative efficiencies forθ = 7.12, 27.12 and 47.12, however the results are not shown here. Concluding remarks Multivariate kriging models are of particular practical interest in computer experiments, spatial and spatio-temporal applications. Very often, two or more correlated responses may be observed, and prediction from cokriging may improve prediction quality over kriging for each variable separately. In this article, we identify a class of cross-covariance functions, which in fact includes many popularly used bivariate covariance functions, for which the cokriging estimator reduces to a kriging estimator. Thereafter, we address the problem of determining designs for some of these cokriging models. Since the designs are dependent on the covariance parameters, Bayesian designs are proposed. We prove that the locally and Bayesian optimal designs are both equispaced. Intuitively, this could be explained due to the fact that the locally optimal designs are equispaced for all the values of covariance parameters. So, when we mathematically find the Bayesian optimal designs, both are equispaced. As a future extension, we are interested in determining optimal designs for universal cokriging models. However, as illustrated in Dette et al. (2008) and Dette et al. (2013), obtaining theoretical designs for such models is difficult. 
We have also come across situations in cokriging experiments where time and space (or multiple inputs) both may affect the responses. Thus, there is a need to extend optimal designs to cover such scenarios where the input space is a multidimensional grid of points. As a ∈ , defining the function U i (·) such that,
<reponame>threefoldtech/sandbox_osx # -*- coding: utf-8 -*- # copyright (c) 2018 gevent. See LICENSE. # cython: auto_pickle=False,embedsignature=True,always_allow_keywords=False """ A collection of primitives used by the hub, and suitable for compilation with Cython because of their frequency of use. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from weakref import ref as wref from greenlet import greenlet from gevent.exceptions import BlockingSwitchOutError # In Cython, we define these as 'cdef inline' functions. The # compilation unit cannot have a direct assignment to them (import # is assignment) without generating a 'lvalue is not valid target' # error. locals()['getcurrent'] = __import__('greenlet').getcurrent locals()['greenlet_init'] = lambda: None locals()['_greenlet_switch'] = greenlet.switch __all__ = [ 'TrackedRawGreenlet', 'SwitchOutGreenletWithLoop', ] class TrackedRawGreenlet(greenlet): def __init__(self, function, parent): greenlet.__init__(self, function, parent) # See greenlet.py's Greenlet class. We capture the cheap # parts to maintain the tree structure, but we do not capture # the stack because that's too expensive for 'spawn_raw'. current = getcurrent() # pylint:disable=undefined-variable self.spawning_greenlet = wref(current) # See Greenlet for how trees are maintained. try: self.spawn_tree_locals = current.spawn_tree_locals except AttributeError: self.spawn_tree_locals = {} if current.parent: current.spawn_tree_locals = self.spawn_tree_locals class SwitchOutGreenletWithLoop(TrackedRawGreenlet): # Subclasses must define: # - self.loop # This class defines loop in its .pxd for Cython. This lets us avoid # circular dependencies with the hub. def switch(self): switch_out = getattr(getcurrent(), 'switch_out', None) # pylint:disable=undefined-variable if switch_out is not None: switch_out() return _greenlet_switch(self) # pylint:disable=undefined-variable def switch_out(self): raise BlockingSwitchOutError('Impossible to call blocking function in the event loop callback') def _init(): greenlet_init() # pylint:disable=undefined-variable _init() from gevent._util import import_c_accel import_c_accel(globals(), 'gevent.__greenlet_primitives')
/** * convert int to byte array * @param value int value 32 bits * @param buffer array of byte to write to * @param offset position to write to */ public static void writeIntToBytes(int value, byte[] buffer, int offset){ if(buffer.length - offset < 4){ return; } buffer[offset] = (byte)((value >> 24) & 0x000000FF); buffer[offset + 1] = (byte)((value >> 16)&0x000000FF); buffer[offset + 2] = (byte)((value >> 8)&0x000000FF); buffer[offset + 3] = (byte)(value & 0x000000FF); }
n = input() A = map(int, raw_input().split()) s = sum(A) if s*10.0/n >= 45: print 0 else: ans = 0 A.sort() for a in A: ans += 1 s = s-a+5 if s*10.0/n >= 45: break print ans
/** * StorageHelper class to access local storage. */ public class StorageHelper { /** * Name of preferences. */ private static final String PREFERENCES_NAME = "AppCenter"; /** * Application context instance. */ @SuppressLint("StaticFieldLeak") private static Context sContext; /** * Android SharedPreferences instance. */ private static SharedPreferences sSharedPreferences; /** * Initializes StorageHelper class. * * @param context The context of the application. */ public static synchronized void initialize(Context context) { if (sContext == null) { sContext = context; sSharedPreferences = sContext.getSharedPreferences(PREFERENCES_NAME, Context.MODE_PRIVATE); } } /** * PreferencesStorage Helper class */ public static class PreferencesStorage { /** * Retrieve a boolean value. * * @param key The key for which the value is to be retrieved. * @return The value of {@code key} or false if key is not set. */ @SuppressWarnings("unused") public static boolean getBoolean(@NonNull String key) { return getBoolean(key, false); } /** * Retrieve a boolean value and provide a default value. * * @param key The key for which the value is to be retrieved. * @param defValue The default value to return if no value is set for {@code key}. * @return The value of {@code key} or the default value if key is not set. */ public static boolean getBoolean(@NonNull String key, boolean defValue) { return sSharedPreferences.getBoolean(key, defValue); } /** * Store a boolean value. * * @param key The key to store the value for. * @param value The value to store for the key. */ public static void putBoolean(@NonNull String key, boolean value) { SharedPreferences.Editor editor = sSharedPreferences.edit(); editor.putBoolean(key, value); editor.apply(); } /** * Retrieve a float value. * * @param key The key for which the value is to be retrieved. * @return The value of {@code key} or 0f if key is not set. */ @SuppressWarnings({"WeakerAccess", "unused"}) public static float getFloat(@NonNull String key) { return getFloat(key, 0f); } /** * Retrieve a float value and provide a default value. * * @param key The key for which the value is to be retrieved. * @param defValue The default value to return if no value is set for {@code key}. * @return The value of {@code key} or the default value if key is not set. */ @SuppressWarnings({"SameParameterValue", "WeakerAccess"}) public static float getFloat(@NonNull String key, float defValue) { return sSharedPreferences.getFloat(key, defValue); } /** * Store a float value. * * @param key The key to store the value for. * @param value The value to store for the key. */ @SuppressWarnings({"WeakerAccess", "unused"}) public static void putFloat(@NonNull String key, float value) { SharedPreferences.Editor editor = sSharedPreferences.edit(); editor.putFloat(key, value); editor.apply(); } /** * Retrieve an int value. * * @param key The key for which the value is to be retrieved. * @return The value of {@code key} or 0 if key is not set. */ @SuppressWarnings({"SameParameterValue", "WeakerAccess"}) public static int getInt(@NonNull String key) { return getInt(key, 0); } /** * Retrieve an int value and provide a default value. * * @param key The key for which the value is to be retrieved. * @param defValue The default value to return if no value is set for {@code key}. * @return The value of {@code key} or the default value if key is not set. 
*/ @SuppressWarnings({"SameParameterValue", "WeakerAccess"}) public static int getInt(@NonNull String key, int defValue) { return sSharedPreferences.getInt(key, defValue); } /** * Store an int value. * * @param key The key to store the value for. * @param value The value to store for the key. */ @SuppressWarnings({"SameParameterValue", "WeakerAccess"}) public static void putInt(@NonNull String key, int value) { SharedPreferences.Editor editor = sSharedPreferences.edit(); editor.putInt(key, value); editor.apply(); } /** * Retrieve a long value. * * @param key The key for which the value is to be retrieved. * @return The value of {@code key} or 0L if key is not set. */ @SuppressWarnings({"WeakerAccess", "unused", "SameParameterValue"}) public static long getLong(@NonNull String key) { return getLong(key, 0L); } /** * Retrieve a long value and provide a default value. * * @param key The key for which the value is to be retrieved. * @param defValue The default value to return if no value is set for {@code key}. * @return The value of {@code key} or the default value if key is not set. */ @SuppressWarnings({"SameParameterValue", "WeakerAccess"}) public static long getLong(@NonNull String key, long defValue) { return sSharedPreferences.getLong(key, defValue); } /** * Store a long value. * * @param key The key to store the value for. * @param value The value to store for the key. */ @SuppressWarnings({"WeakerAccess", "unused"}) public static void putLong(@NonNull String key, long value) { SharedPreferences.Editor editor = sSharedPreferences.edit(); editor.putLong(key, value); editor.apply(); } /** * Retrieve a string value. * * @param key The key for which the value is to be retrieved. * @return The value of {@code key} or {@code null} if key is not set. */ @SuppressWarnings("unused") public static String getString(@NonNull String key) { return getString(key, null); } /** * Retrieve a string value and provide a default value. * * @param key The key for which the value is to be retrieved. * @param defValue The default value to return if no value is set for {@code key}. * @return The value of {@code key} or the default value if key is not set. */ public static String getString(@NonNull String key, String defValue) { return sSharedPreferences.getString(key, defValue); } /** * Store a string value. * * @param key The key to store the value for. * @param value The value to store for the key. */ @SuppressWarnings({"SameParameterValue", "WeakerAccess"}) public static void putString(@NonNull String key, String value) { SharedPreferences.Editor editor = sSharedPreferences.edit(); editor.putString(key, value); editor.apply(); } /** * Retrieve a string set. * * @param key The key for which the value is to be retrieved. * @return The value of {@code key} or {@code null} if key is not set. */ @SuppressWarnings("unused") public static Set<String> getStringSet(@NonNull String key) { return getStringSet(key, null); } /** * Retrieve a string set and provide a default value. * * @param key The key for which the value is to be retrieved. * @param defValue The default value to return if no value is set for {@code key}. * @return The value of {@code key} or the default value if key is not set. */ @SuppressWarnings({"SameParameterValue", "WeakerAccess"}) public static Set<String> getStringSet(@NonNull String key, Set<String> defValue) { return sSharedPreferences.getStringSet(key, defValue); } /** * Store a string set. * * @param key The key to store the value for. * @param value The value to store for the key. 
*/ @SuppressWarnings("unused") public static void putStringSet(@NonNull String key, Set<String> value) { SharedPreferences.Editor editor = sSharedPreferences.edit(); editor.putStringSet(key, value); editor.apply(); } /** * Removes a value with the given key. * * @param key Key of the value to be removed. */ public static void remove(@NonNull String key) { SharedPreferences.Editor editor = sSharedPreferences.edit(); editor.remove(key); editor.apply(); } /** * Removes all keys and values. */ public static void clear() { SharedPreferences.Editor editor = sSharedPreferences.edit(); editor.clear(); editor.apply(); } } /** * InternalStorage Helper class */ public static class InternalStorage { /** * Read contents from a file. * * @param path The path of the file. * @return The contents of the file. */ @SuppressWarnings("SameParameterValue") public static String read(@NonNull String path) { return read(new File(path)); } /** * Read contents from a file. * * @param file The file to read from. * @return The contents of the file. */ public static String read(@NonNull File file) { try { BufferedReader reader = new BufferedReader(new FileReader(file)); StringBuilder contents; //noinspection TryFinallyCanBeTryWithResources (requires min API level 19) try { String line; String lineSeparator = System.getProperty("line.separator"); contents = new StringBuilder(); while ((line = reader.readLine()) != null) { contents.append(line).append(lineSeparator); } } finally { //noinspection ThrowFromFinallyBlock reader.close(); } return contents.toString(); } catch (IOException e) { AppCenterLog.error(AppCenter.LOG_TAG, "Could not read file " + file.getAbsolutePath(), e); } return null; } /** * Write contents to a file. * * @param path The path of the file. * @param contents The contents to be written to the file. * @throws IOException If an I/O error occurs */ public static void write(@NonNull String path, @NonNull String contents) throws IOException { write(new File(path), contents); } /** * Write contents to a file. * * @param file The file instance. * @param contents The content to be written to the file. Must not be empty or whitespace only. * @throws IOException If an I/O error occurs */ public static void write(@NonNull File file, @NonNull String contents) throws IOException { if (TextUtils.isEmpty(contents) || TextUtils.getTrimmedLength(contents) <= 0) { return; } BufferedWriter writer = new BufferedWriter(new FileWriter(file)); //noinspection TryFinallyCanBeTryWithResources try { writer.write(contents); } finally { //noinspection ThrowFromFinallyBlock writer.close(); } } /** * Read an object from a file (deserialization). * * @param file The file to read from. * @param <T> A type for the deserialized instance. * @return The deserialized instance. * @throws IOException If an I/O error occurs * @throws ClassNotFoundException If no class definition found for serialized instance. */ @SuppressWarnings("unchecked") public static <T extends Serializable> T readObject(@NonNull File file) throws IOException, ClassNotFoundException { ObjectInputStream inputStream = new ObjectInputStream(new FileInputStream(file)); //noinspection TryFinallyCanBeTryWithResources try { return (T) inputStream.readObject(); } finally { //noinspection ThrowFromFinallyBlock inputStream.close(); } } /** * Write an object to a file (serialization). * * @param file The file to write to. * @param object The object to be written to the file. * @param <T> A type for the object. 
* @throws IOException If an I/O error occurs */ public static <T extends Serializable> void writeObject(@NonNull File file, @NonNull T object) throws IOException { ObjectOutputStream outputStream = new ObjectOutputStream(new FileOutputStream(file)); //noinspection TryFinallyCanBeTryWithResources try { outputStream.writeObject(object); } finally { //noinspection ThrowFromFinallyBlock outputStream.close(); } } /** * Get an array of filenames in the path. * * @param path The directory path. * @param filter The filter to match file names against, may be {@code null}. * @return An array of filename that doesn't include paths. */ @SuppressWarnings("WeakerAccess") @NonNull public static String[] getFilenames(@NonNull String path, @Nullable FilenameFilter filter) { File dir = new File(path); if (dir.exists()) { return dir.list(filter); } return new String[0]; } /** * Get the most recently modified file in the directory specified. * * @param path The directory path. * @param filter The filter to match file names against, may be {@code null}. * @return The last modified file in the directory matching the specified filter, if any matches. {@code null} otherwise. */ @SuppressWarnings("WeakerAccess") @Nullable public static File lastModifiedFile(@NonNull String path, @Nullable FilenameFilter filter) { return lastModifiedFile(new File(path), filter); } /** * Get the most recently modified file in the directory specified. * * @param dir The directory. * @param filter The filter to match file names against, may be {@code null}. * @return The last modified file in the directory matching the specified filter, if any matches. {@code null} otherwise. */ @Nullable public static File lastModifiedFile(@NonNull File dir, @Nullable FilenameFilter filter) { if (dir.exists()) { File[] files = dir.listFiles(filter); long lastModification = 0; File lastModifiedFile = null; if (files != null) { for (File file : files) { if (file.lastModified() > lastModification) { lastModification = file.lastModified(); lastModifiedFile = file; } } return lastModifiedFile; } } return null; } /** * Delete a file or directory with the given path. * * @param path The path of the file or directory. * @return {@code true} if it was deleted, {@code false} otherwise. */ public static boolean delete(@NonNull String path) { return delete(new File(path)); } /** * Delete a file or directory. * * @param file The file or directory to delete. * @return {@code true} if it was deleted, {@code false} otherwise. */ public static boolean delete(@NonNull File file) { return file.delete(); } /** * Create a directory if it does not already exist. * Will create the whole directory tree if necessary. * * @param path An absolute path for the directory to be created. */ @SuppressWarnings({"ResultOfMethodCallIgnored", "SpellCheckingInspection"}) public static void mkdir(@NonNull String path) { new File(path).mkdirs(); } } /** * DatabaseStorage Helper class */ public static class DatabaseStorage implements Closeable { /** * DatabaseManager instance. */ private final DatabaseManager mDatabaseManager; /** * Private constructor. * * @param databaseManager An instance of {@code DatabaseManager}. */ private DatabaseStorage(@NonNull DatabaseManager databaseManager) { mDatabaseManager = databaseManager; } /** * Get a new instance of {@code DatabaseManager}. * * @param database The database name. * @param table The table name. * @param version The version. * @param schema The schema of the database. 
If the database has more than one table, * it should contain schemas for all the tables. * @param listener The error listener. * @return database storage. */ @SuppressWarnings("WeakerAccess") public static DatabaseStorage getDatabaseStorage(@NonNull String database, @NonNull String table, @IntRange(from = 1) int version, @NonNull ContentValues schema, @NonNull final DatabaseErrorListener listener) { return getDatabaseStorage(database, table, version, schema, 0, listener); } /** * Get a new instance of {@code DatabaseManager}. * * @param database The database name. * @param table The table name. * @param version The version. * @param schema The schema of the database. If the database has more than one table, * it should contain schemas for all tables. * @param maxRecords The maximum number of records allowed in the table. * @param listener The error listener. * @return database storage. */ public static DatabaseStorage getDatabaseStorage(@NonNull String database, @NonNull String table, @IntRange(from = 1) int version, @NonNull ContentValues schema, @IntRange(from = 0) int maxRecords, final DatabaseErrorListener listener) { return new DatabaseStorage(new DatabaseManager(sContext, database, table, version, schema, maxRecords, new DatabaseManager.ErrorListener() { @Override public void onError(String operation, RuntimeException e) { listener.onError(operation, e); } })); } /** * Store an entry in a table. * * @param values The entry to be stored. * @return The identifier of the created database entry. */ public long put(@NonNull ContentValues values) { return mDatabaseManager.put(values); } /** * Update an entry in a table. * * @param id The existing database identifier. * @param values The value to update. * @return {@code true} if the values were updated successfully, {@code false} otherwise. */ public boolean update(@IntRange(from = 0) long id, @NonNull ContentValues values) { return mDatabaseManager.update(id, values); } /** * Delete an entry in a table. * * @param id The identifier for the entry to be deleted. */ public void delete(@IntRange(from = 0) long id) { mDatabaseManager.delete(id); } /** * Deletes the entries by the identifier from the database. * * @param idList The list of database identifiers. */ public void delete(@NonNull List<Long> idList) { mDatabaseManager.delete(idList); } /** * Deletes the entries that matches key == value. * * @param key The optional key for query. * @param value The optional value for query. */ public void delete(@Nullable String key, @Nullable Object value) { mDatabaseManager.delete(key, value); } /** * Gets the entry by the identifier. * * @param id The database identifier. * @return An entry for the identifier or null if not found. */ public ContentValues get(@IntRange(from = 0) long id) { return mDatabaseManager.get(id); } /** * Gets the entry that matches key == value. * * @param key The optional key for query. * @param value The optional value for query. * @return A matching entry. */ public ContentValues get(@Nullable String key, @Nullable Object value) { return mDatabaseManager.get(key, value); } /** * Gets a scanner to iterate all values. * * @return A scanner to iterate all values. */ @SuppressWarnings("WeakerAccess") public DatabaseScanner getScanner() { return getScanner(null, null); } /** * Gets a scanner to iterate all values those match key == value. * * @param key The optional key for query. * @param value The optional value for query. * @return A scanner to iterate all values. 
*/ public DatabaseScanner getScanner(@Nullable String key, @Nullable Object value) { return new DatabaseScanner(mDatabaseManager.getScanner(key, value)); } /** * Clears the table in the database. */ public void clear() { mDatabaseManager.clear(); } /** * Closes the database and cleans up the in-memory database. * * @throws IOException If an I/O error occurs */ @Override public void close() throws IOException { mDatabaseManager.close(); } /** * Gets the count of records in the table. * * @return The number of records in the table. */ public long size() { return mDatabaseManager.getRowCount(); } /** * Gets an array of column names in the table. * * @return An array of column names. */ @VisibleForTesting String[] getColumnNames() { return mDatabaseManager.getCursor(null, null).getColumnNames(); } /** * Listener specification for database errors; the callback is invoked whenever a database operation fails. */ public interface DatabaseErrorListener { /** * Notifies of an exception. * * @param operation The name of the operation that caused the error. * @param e A runtime exception for the error. */ void onError(String operation, RuntimeException e); } /** * Database scanner to iterate over values. */ public static class DatabaseScanner implements Iterable<ContentValues>, Closeable { private final DatabaseManager.Scanner mScanner; private DatabaseScanner(DatabaseManager.Scanner scanner) { mScanner = scanner; } @Override public void close() { mScanner.close(); } @Override public Iterator<ContentValues> iterator() { return mScanner.iterator(); } public int getCount() { return mScanner.getCount(); } } } }
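The javadoc above documents the DatabaseStorage API (put, update, delete, get and the scanner), but the file ends without a usage illustration. The sketch below shows one way the wrapper might be exercised; the database name, table name, column names and the wrapping example class are invented for illustration, and it assumes DatabaseStorage is visible to the calling code and that the SDK has already initialised the application context consumed by getDatabaseStorage.

// Hypothetical usage sketch for DatabaseStorage; every name below is illustrative only.
import android.content.ContentValues;

import java.io.IOException;

class DatabaseStorageExample {

    void example() throws IOException {
        // Schema: a template row listing every column with a default value.
        ContentValues schema = new ContentValues();
        schema.put("name", "");
        schema.put("count", 0);

        // Open (or create) the table with an error listener for failed operations.
        DatabaseStorage storage = DatabaseStorage.getDatabaseStorage(
                "example.db",   // database name (assumed)
                "items",        // table name (assumed)
                1,              // schema version
                schema,
                new DatabaseStorage.DatabaseErrorListener() {
                    @Override
                    public void onError(String operation, RuntimeException e) {
                        // React to the failed operation, e.g. log it.
                    }
                });

        // Insert a row and read it back by its generated identifier.
        ContentValues entry = new ContentValues();
        entry.put("name", "first");
        entry.put("count", 42);
        long id = storage.put(entry);
        ContentValues stored = storage.get(id);

        // Iterate every stored row through the scanner, then release resources.
        try (DatabaseStorage.DatabaseScanner scanner = storage.getScanner()) {
            for (ContentValues row : scanner) {
                // process row
            }
        }
        storage.close();
    }
}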
// NewAdminTermsAndConditionsPayload builds a user service admin terms and // conditions endpoint payload. func NewAdminTermsAndConditionsPayload(body *AdminTermsAndConditionsRequestBody, auth string) *user.AdminTermsAndConditionsPayload { v := &user.AdminTermsAndConditionsFields{ Email: *body.Email, } res := &user.AdminTermsAndConditionsPayload{ Update: v, } res.Auth = auth return res }
// DownloadMedia downloads a media file from DingTalk. func (c *DingTalkClient) DownloadMedia(mediaID string, write io.Writer) error { var data MediaResponse data.Writer = write params := url.Values{} params.Add("media_id", mediaID) return c.HttpRPC("media/get", params, nil, &data) }
<filename>rlf-module/src/main/java/com/rlf/module/config/MyBatisConfig.java package com.rlf.module.config; import com.github.pagehelper.PageInterceptor; import org.apache.ibatis.type.JdbcType; import org.mybatis.spring.annotation.MapperScan; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Configuration; import org.springframework.core.io.support.PathMatchingResourcePatternResolver; import org.springframework.transaction.annotation.EnableTransactionManagement; /** * MyBatis configuration class * Created by macro on 2019/4/8. */ @Configuration @MapperScan({"com.rlf.module.mapper"}) public class MyBatisConfig { }
<reponame>netrikare/substrate-api-client /* Copyright 2021 Integritee AG and Supercomputing Systems AG Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ //! For querying runtime storage. //! //! This file is **not** from subxt. use crate::metadata::MetadataError; use codec::{Decode, Encode}; use frame_metadata::{StorageEntryMetadata, StorageEntryType, StorageHasher}; use log::debug; use scale_info::form::PortableForm; use sp_core::storage::StorageKey; use std::marker::PhantomData; #[derive(Clone, Debug)] pub struct StorageValue { module_prefix: Vec<u8>, storage_prefix: Vec<u8>, } impl StorageValue { pub fn key(&self) -> StorageKey { let mut bytes = sp_core::twox_128(&self.module_prefix).to_vec(); bytes.extend(&sp_core::twox_128(&self.storage_prefix)[..]); StorageKey(bytes) } } #[derive(Clone, Debug)] pub struct StorageMap<K, V> { _marker: PhantomData<K>, module_prefix: Vec<u8>, storage_prefix: Vec<u8>, hasher: StorageHasher, default: V, } impl<K: Encode, V: Decode + Clone> StorageMap<K, V> { pub fn key(&self, key: K) -> StorageKey { let mut bytes = sp_core::twox_128(&self.module_prefix).to_vec(); bytes.extend(&sp_core::twox_128(&self.storage_prefix)[..]); bytes.extend(key_hash(&key, &self.hasher)); StorageKey(bytes) } pub fn default(&self) -> V { self.default.clone() } } #[derive(Clone, Debug)] pub struct StorageDoubleMap<K, Q, V> { _marker: PhantomData<K>, _marker2: PhantomData<Q>, module_prefix: Vec<u8>, storage_prefix: Vec<u8>, hasher: StorageHasher, key2_hasher: StorageHasher, default: V, } impl<K: Encode, Q: Encode, V: Decode + Clone> StorageDoubleMap<K, Q, V> { pub fn key(&self, key1: K, key2: Q) -> StorageKey { let mut bytes = sp_core::twox_128(&self.module_prefix).to_vec(); bytes.extend(&sp_core::twox_128(&self.storage_prefix)[..]); bytes.extend(key_hash(&key1, &self.hasher)); bytes.extend(key_hash(&key2, &self.key2_hasher)); StorageKey(bytes) } pub fn default(&self) -> V { self.default.clone() } } /// trait to extract the storage based on the [`StorageEntryMetadata`]. pub trait GetStorage { fn get_double_map<K: Encode, Q: Encode, V: Decode + Clone>( &self, pallet_prefix: &str, ) -> Result<StorageDoubleMap<K, Q, V>, MetadataError>; fn get_map<K: Encode, V: Decode + Clone>( &self, pallet_prefix: &str, ) -> Result<StorageMap<K, V>, MetadataError>; fn get_map_prefix(&self, pallet_prefix: &str) -> Result<StorageKey, MetadataError>; fn get_value(&self, pallet_prefix: &str) -> Result<StorageValue, MetadataError>; } impl GetStorage for StorageEntryMetadata<PortableForm> { fn get_double_map<K: Encode, Q: Encode, V: Decode + Clone>( &self, pallet_prefix: &str, ) -> Result<StorageDoubleMap<K, Q, V>, MetadataError> { match &self.ty { StorageEntryType::Map { hashers, .. 
} => { let module_prefix = pallet_prefix.as_bytes().to_vec(); let storage_prefix = self.name.as_bytes().to_vec(); let hasher1 = hashers.get(0).ok_or(MetadataError::StorageTypeError)?; let hasher2 = hashers.get(1).ok_or(MetadataError::StorageTypeError)?; let default = Decode::decode(&mut &self.default[..]) .map_err(|_| MetadataError::MapValueTypeError)?; debug!( "map for '{}' '{}' has hasher1 {:?} hasher2 {:?}", pallet_prefix, self.name, hasher1, hasher2 ); Ok(StorageDoubleMap { _marker: PhantomData, _marker2: PhantomData, module_prefix, storage_prefix, hasher: hasher1.to_owned(), key2_hasher: hasher2.to_owned(), default, }) } _ => Err(MetadataError::StorageTypeError), } } fn get_map<K: Encode, V: Decode + Clone>( &self, pallet_prefix: &str, ) -> Result<StorageMap<K, V>, MetadataError> { match &self.ty { StorageEntryType::Map { hashers, .. } => { let hasher = hashers .get(0) .ok_or(MetadataError::StorageTypeError)? .to_owned(); let module_prefix = pallet_prefix.as_bytes().to_vec(); let storage_prefix = self.name.as_bytes().to_vec(); let default = Decode::decode(&mut &self.default[..]) .map_err(|_| MetadataError::MapValueTypeError)?; debug!( "map for '{}' '{}' has hasher {:?}", pallet_prefix, self.name, hasher ); Ok(StorageMap { _marker: PhantomData, module_prefix, storage_prefix, hasher, default, }) } _ => Err(MetadataError::StorageTypeError), } } fn get_map_prefix(&self, pallet_prefix: &str) -> Result<StorageKey, MetadataError> { match &self.ty { StorageEntryType::Map { .. } => { let mut bytes = sp_core::twox_128(pallet_prefix.as_bytes()).to_vec(); bytes.extend(&sp_core::twox_128(self.name.as_bytes())[..]); Ok(StorageKey(bytes)) } _ => Err(MetadataError::StorageTypeError), } } fn get_value(&self, pallet_prefix: &str) -> Result<StorageValue, MetadataError> { match &self.ty { StorageEntryType::Plain { .. } => { let module_prefix = pallet_prefix.as_bytes().to_vec(); let storage_prefix = self.name.as_bytes().to_vec(); Ok(StorageValue { module_prefix, storage_prefix, }) } _ => Err(MetadataError::StorageTypeError), } } } /// generates the key's hash depending on the StorageHasher selected fn key_hash<K: Encode>(key: &K, hasher: &StorageHasher) -> Vec<u8> { let encoded_key = key.encode(); match hasher { StorageHasher::Identity => encoded_key.to_vec(), StorageHasher::Blake2_128 => sp_core::blake2_128(&encoded_key).to_vec(), StorageHasher::Blake2_128Concat => { // copied from substrate Blake2_128Concat::hash since StorageHasher is not public let x: &[u8] = encoded_key.as_slice(); sp_core::blake2_128(x) .iter() .chain(x.iter()) .cloned() .collect::<Vec<_>>() } StorageHasher::Blake2_256 => sp_core::blake2_256(&encoded_key).to_vec(), StorageHasher::Twox128 => sp_core::twox_128(&encoded_key).to_vec(), StorageHasher::Twox256 => sp_core::twox_256(&encoded_key).to_vec(), StorageHasher::Twox64Concat => sp_core::twox_64(&encoded_key) .iter() .chain(&encoded_key) .cloned() .collect(), } }
def name(self): core_name = self.tt_cores[0].name idx = core_name.rfind('/') return core_name[:idx]
<reponame>openharmony-gitee-mirror/communication_wifi /* * Copyright (C) 2021 Huawei Device Co., Ltd. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef OHOS_IDL_IWIFI_EVENT_P2P_CALLBACK_H #define OHOS_IDL_IWIFI_EVENT_P2P_CALLBACK_H #include "i_wifi_struct.h" #include <stddef.h> #ifdef __cplusplus extern "C" { #endif typedef struct IWifiEventP2pCallback { void (*onP2pSupplicantConnect)(int status); void (*onDeviceFound)(const HidlP2pDeviceInfo *device); void (*onDeviceLost)(const char *p2pDeviceAddress); void (*onGoNegotiationRequest)(const char *srcAddress, short passwordId); void (*onGoNegotiationSuccess)(void); void (*onGoNegotiationFailure)(int status); void (*onInvitationReceived)(const HidlP2pInvitationInfo *info); void (*onInvitationResult)(const char *bssid, int status); void (*onGroupFormationSuccess)(void); void (*onGroupFormationFailure)(const char *failureReason); void (*onGroupStarted)(const HidlP2pGroupInfo *group); void (*onGroupRemoved)(const char *groupIfName, int isGo); void (*onProvisionDiscoveryPbcRequest)(const char *p2pDeviceAddress); void (*onProvisionDiscoveryPbcResponse)(const char *p2pDeviceAddress); void (*onProvisionDiscoveryEnterPin)(const char *p2pDeviceAddress); void (*onProvisionDiscoveryShowPin)(const char *p2pDeviceAddress, const char *generatedPin); void (*onProvisionDiscoveryFailure)(void); void (*onFindStopped)(void); void (*onServiceDiscoveryResponse)( const char *srcAddress, short updateIndicator, const unsigned char *tlvs, size_t tlvsLength); void (*onStaDeauthorized)(const char *p2pDeviceAddress); void (*onStaAuthorized)(const char *p2pDeviceAddress); void (*connectSupplicantFailed)(void); void (*onP2pServDiscReq)(const HidlP2pServDiscReqInfo *info); } IWifiEventP2pCallback; #ifdef __cplusplus } #endif #endif
import { Directive, ElementRef, Input, OnInit } from '@angular/core'; @Directive({ selector: '[sprkTabbedNavigationTab]', }) /** * @deprecated This directive will be removed in * a future release in favor of the `sprk-tabs-button` directive. * Please use the `sprk-tabs-button` directive. * TODO: Remove this directive as part of Issue 1378. */ export class SprkTabbedNavigationTabDirective implements OnInit { /** * Expects a space separated string * of classes to be added to the * element. */ @Input() additionalClasses: string; /** * If `true`, the Tab will have * active styles and aria attributes on render. */ @Input() defaultActive: boolean; /** * The value supplied will be assigned to the * `data-analytics` attribute on the component. * Intended for an outside * library to capture data. */ @Input() analyticsString: string; /** * @ignore */ getClasses(): string[] { const classArray: string[] = ['sprk-c-Tabs__button']; if (this.defaultActive) { classArray.push('sprk-c-Tabs__button--active'); } if (this.additionalClasses) { this.additionalClasses.split(' ').forEach((className) => { classArray.push(className); }); } return classArray; } ngOnInit(): void { this.getClasses().forEach((item) => { this.ref.nativeElement.classList.add(item); }); this.ref.nativeElement.setAttribute('role', 'tab'); this.ref.nativeElement.setAttribute( 'aria-selected', this.defaultActive ? 'true' : 'false', ); if (this.analyticsString) { this.ref.nativeElement.setAttribute( 'data-analytics', this.analyticsString, ); } } /** * @ignore */ constructor(public ref: ElementRef) {} }
The Islamic State in Iraq and Syria (ISIS) is upping its destruction of historic monuments in the ancient city of Palmyra, Syria. The group this week destroyed monuments including part of a preserved Roman theatre and a tetrapylon, a series of pillars near the entrance to the city. “This is a horror film and we will see more of it, as long as the city is under their control it will remain a hostage,” said Maamoun Abdulkarim, Syrian director of antiquities, according to the Guardian. Russian and Syrian forces have been unsuccessful in their attempts to hold the city after pushing ISIS militants out in March. ISIS regained control of the region in December, while destroying numerous historical sites. The United Nations Educational Scientific and Cultural Organization (UNESCO) called the latest destruction of artifacts a "blow against cultural heritage." “This new blow against cultural heritage … shows that cultural cleansing led by violent extremists is seeking to destroy both human lives and historical monuments in order to deprive the Syrian people of its past and its future,” said UNESCO’s director-general, Irina Bokova. “This is a scandal. Palmyra is occupied and there is no outrage from the international community. We are trying to protect a civilization. It’s beyond political considerations. There needs to be international solidarity,” Abdulkarim said.
from pathlib import Path from macpie.pandas import file_to_dataframe from macpie.testing import assert_dfs_equal current_dir = Path(__file__).parent.absolute() # output_dir = current_dir output_dir = None primary = file_to_dataframe(current_dir / "primary.xlsx") secondary = file_to_dataframe(current_dir / "secondary.xlsx") cols_ignore = [] def test_merge_partial(): # partial merge merge_partial_result = primary.mac.date_proximity( secondary, id_on='pidn', date_on='dcdate', get='closest', when='earlier_or_later', days=90, left_link_id='instrid', merge='partial' ) # merge_partial_result.to_excel(current_dir / "merge_partial_result.xlsx", index=False) merge_partial_expected_result = file_to_dataframe(current_dir / "merge_partial_expected_result.xlsx") assert_dfs_equal(merge_partial_result, merge_partial_expected_result, cols_ignore=cols_ignore, output_dir=output_dir) # test that results are same when using equivalent id and date params test_id_on_params = primary.mac.date_proximity( secondary, id_left_on='pidn', id_right_on='pidn', date_on='dcdate', get='closest', when='earlier_or_later', days=90, left_link_id='instrid', merge='partial' ) test_date_on_params = primary.mac.date_proximity( secondary, id_on='pidn', date_left_on='dcdate', date_right_on='dcdate', get='closest', when='earlier_or_later', days=90, left_link_id='instrid', merge='partial' ) # test using id_left_on and id_right_on params assert merge_partial_result.equals(test_id_on_params) # test using date_left_on and date_right_on params assert merge_partial_result.equals(test_date_on_params) def test_merge_full(): # full merge merge_full_result = primary.mac.date_proximity( secondary, id_on='pidn', date_on='dcdate', get='closest', when='earlier_or_later', days=90, left_link_id='instrid', merge='full' ) # merge_full_result.to_excel(current_dir / "merge_full_result.xlsx", index=False) merge_full_expected_result = file_to_dataframe(current_dir / "merge_full_expected_result.xlsx") assert_dfs_equal(merge_full_result, merge_full_expected_result, cols_ignore=cols_ignore, output_dir=output_dir)
Fantasy Premier League 2017/18 - Top picks for Gameweek 12 Vishnu Rajesh 15 Nov 2017, 16:12 IST FPL is back! The wait is almost over. The premier league can’t return soon enough, and it is finally time to start fretting over our fantasy teams again! With the teams having played their first 11 games in a span of 87 days, it’s now going to be a congested sprint all the way to the new year, with the next 11 being crammed in under 50 days! Though this may be a source of immense joy for us premier league enthusiasts, it isn’t so for the FPL addicts within us. With team rotation bound to be more frequent than ever now, a lot of us are going to face disappointment as some of the regular starters in our team may not be as regular anymore. It’s time to remove bench fodder, and start scouting for cheap regulars so that we have a well-balanced team of 15. Chelsea's deadly duo - Alvaro Morata (left) and Eden Hazard An interesting, proven FPL strategy is to attack fixtures, i.e. target players based on fixtures. This is because there are more than 15 excellent players over the season, and it’s all about having the right ones at the right time. Based on this strategy, here are our top scout picks for the upcoming weeks, segregated by price range. NOTE: Stats updated up to 15 November 2017
//! //! \brief OptionsDialog::getStrokeLengthProportion //! \return integer percentage - 0 is full stroke length (and try to increase speed to maintain this), //! 100 is constant speed, reducing stroke length as needed //! int OptionsDialog::getStrokeLengthProportion() { QSettings settings; return settings.value("Handy Scaling Proportion", 0).toInt(); }
<gh_stars>0 package main import "fmt" var ns []int var ans [][]int func dfs(i, n int, a []int) { if i == n { ans = append(ans, a) return } acopy := make([]int, len(a)) copy(acopy, a) dfs(i+1, n, a) dfs(i+1, n, append(acopy, ns[i])) } func subsets(nums []int) [][]int { ns = nums ans = make([][]int, 0) dfs(0, len(nums), []int{}) return ans } func main() { fmt.Println(subsets([]int{1, 2, 3})) }
<reponame>paterWang/EasyReader package com.laotan.easyreader.ui.fragment.home.child.zhihu; import android.support.v7.widget.LinearLayoutManager; import android.support.v7.widget.RecyclerView; import com.laotan.easyreader.R; import com.laotan.easyreader.bean.zhihu.CommentBean; import com.laotan.easyreader.injector.component.fragment.DaggerZhihuCommentComponent; import com.laotan.easyreader.injector.module.fragment.ZhihuCommentModule; import com.laotan.easyreader.injector.module.http.ZhihuHttpModule; import com.laotan.easyreader.presenter.ZhihuCommentPresenter; import com.laotan.easyreader.presenter.impl.ZhihuCommentPresenterImpl; import com.laotan.easyreader.ui.activity.zhihu.ZhiHuCommentActivity; import com.laotan.easyreader.ui.fragment.BaseFragment; import java.util.List; import butterknife.BindView; /** * Created by quantan.liu on 2017/3/25. */ public class ZhiHuCommentFragment extends BaseFragment<ZhihuCommentPresenterImpl> implements ZhihuCommentPresenter.View { @BindView(R.id.rv_zhihu_comment) RecyclerView rvZhihuComment; private boolean isShort; public static ZhiHuCommentFragment getInstance(boolean isShort) { ZhiHuCommentFragment instance = new ZhiHuCommentFragment(); instance.isShort = isShort; return instance; } @Override protected void initView() { rvZhihuComment.setLayoutManager(new LinearLayoutManager(getActivity())); } @Override protected void loadData() { ZhiHuCommentActivity mZhiHuCommentActivity = (ZhiHuCommentActivity) getActivity(); int id = mZhiHuCommentActivity.getId(); if (isShort) {// Lazy loading only loads once the fragment is visible; non-static fields would all end up with the same value, so only a static variable can be used here. mPresenter.fetchShortCommentInfo(id); } else { mPresenter.fetchLongCommentInfo(id); } } @Override protected int getLayoutId() { return R.layout.fragment_zhihu_comment; } @Override protected void initInject() { DaggerZhihuCommentComponent.builder() .zhihuHttpModule(new ZhihuHttpModule()) .zhihuCommentModule(new ZhihuCommentModule()) .build().injectZhihuComment(this); } @Override public void refreshView(List<CommentBean.CommentsBean> list) { mAdapter.setNewData(list); rvZhihuComment.setAdapter(mAdapter); } }
/** * @author Josh Cummings */ @ExtendWith({ SpringExtension.class, SpringTestContextExtension.class }) @SecurityTestExecutionListeners public class MethodSecurityBeanDefinitionParserTests { private static final String CONFIG_LOCATION_PREFIX = "classpath:org/springframework/security/config/method/MethodSecurityBeanDefinitionParserTests"; private final UsernamePasswordAuthenticationToken bob = new UsernamePasswordAuthenticationToken("bob", "bobspassword"); @Autowired(required = false) MethodSecurityService methodSecurityService; @Autowired(required = false) BusinessService businessService; public final SpringTestContext spring = new SpringTestContext(this); @WithMockUser(roles = "ADMIN") @Test public void preAuthorizeWhenRoleAdminThenAccessDeniedException() { this.spring.configLocations(xml("MethodSecurityService")).autowire(); assertThatExceptionOfType(AccessDeniedException.class).isThrownBy(this.methodSecurityService::preAuthorize) .withMessage("Access Denied"); } @WithAnonymousUser @Test public void preAuthorizePermitAllWhenRoleAnonymousThenPasses() { this.spring.configLocations(xml("MethodSecurityService")).autowire(); String result = this.methodSecurityService.preAuthorizePermitAll(); assertThat(result).isNull(); } @WithAnonymousUser @Test public void preAuthorizeNotAnonymousWhenRoleAnonymousThenAccessDeniedException() { this.spring.configLocations(xml("MethodSecurityService")).autowire(); assertThatExceptionOfType(AccessDeniedException.class) .isThrownBy(this.methodSecurityService::preAuthorizeNotAnonymous).withMessage("Access Denied"); } @WithMockUser @Test public void preAuthorizeNotAnonymousWhenRoleUserThenPasses() { this.spring.configLocations(xml("MethodSecurityService")).autowire(); this.methodSecurityService.preAuthorizeNotAnonymous(); } @WithMockUser @Test public void securedWhenRoleUserThenAccessDeniedException() { this.spring.configLocations(xml("MethodSecurityServiceEnabled")).autowire(); assertThatExceptionOfType(AccessDeniedException.class).isThrownBy(this.methodSecurityService::secured) .withMessage("Access Denied"); } @WithMockUser(roles = "ADMIN") @Test public void securedWhenRoleAdminThenPasses() { this.spring.configLocations(xml("MethodSecurityService")).autowire(); String result = this.methodSecurityService.secured(); assertThat(result).isNull(); } @WithMockUser(roles = "ADMIN") @Test public void securedUserWhenRoleAdminThenAccessDeniedException() { this.spring.configLocations(xml("MethodSecurityServiceEnabled")).autowire(); assertThatExceptionOfType(AccessDeniedException.class).isThrownBy(this.methodSecurityService::securedUser) .withMessage("Access Denied"); } @WithMockUser @Test public void securedUserWhenRoleUserThenPasses() { this.spring.configLocations(xml("MethodSecurityService")).autowire(); String result = this.methodSecurityService.securedUser(); assertThat(result).isNull(); } @WithMockUser @Test public void preAuthorizeAdminWhenRoleUserThenAccessDeniedException() { this.spring.configLocations(xml("MethodSecurityService")).autowire(); assertThatExceptionOfType(AccessDeniedException.class).isThrownBy(this.methodSecurityService::preAuthorizeAdmin) .withMessage("Access Denied"); } @WithMockUser(roles = "ADMIN") @Test public void preAuthorizeAdminWhenRoleAdminThenPasses() { this.spring.configLocations(xml("MethodSecurityService")).autowire(); this.methodSecurityService.preAuthorizeAdmin(); } @WithMockUser(authorities = "PREFIX_ADMIN") @Test public void preAuthorizeAdminWhenRoleAdminAndCustomPrefixThenPasses() { 
this.spring.configLocations(xml("CustomGrantedAuthorityDefaults")).autowire(); this.methodSecurityService.preAuthorizeAdmin(); } @WithMockUser @Test public void postHasPermissionWhenParameterIsNotGrantThenAccessDeniedException() { this.spring.configLocations(xml("CustomPermissionEvaluator")).autowire(); assertThatExceptionOfType(AccessDeniedException.class) .isThrownBy(() -> this.methodSecurityService.postHasPermission("deny")).withMessage("Access Denied"); } @WithMockUser @Test public void postHasPermissionWhenParameterIsGrantThenPasses() { this.spring.configLocations(xml("CustomPermissionEvaluator")).autowire(); String result = this.methodSecurityService.postHasPermission("grant"); assertThat(result).isNull(); } @WithMockUser @Test public void postAnnotationWhenParameterIsNotGrantThenAccessDeniedException() { this.spring.configLocations(xml("MethodSecurityService")).autowire(); assertThatExceptionOfType(AccessDeniedException.class) .isThrownBy(() -> this.methodSecurityService.postAnnotation("deny")).withMessage("Access Denied"); } @WithMockUser @Test public void postAnnotationWhenParameterIsGrantThenPasses() { this.spring.configLocations(xml("MethodSecurityService")).autowire(); String result = this.methodSecurityService.postAnnotation("grant"); assertThat(result).isNull(); } @WithMockUser("bob") @Test public void methodReturningAListWhenPrePostFiltersConfiguredThenFiltersList() { this.spring.configLocations(xml("BusinessService")).autowire(); List<String> names = new ArrayList<>(); names.add("bob"); names.add("joe"); names.add("sam"); List<?> result = this.businessService.methodReturningAList(names); assertThat(result).hasSize(1); assertThat(result.get(0)).isEqualTo("bob"); } @WithMockUser("bob") @Test public void methodReturningAnArrayWhenPostFilterConfiguredThenFiltersArray() { this.spring.configLocations(xml("BusinessService")).autowire(); List<String> names = new ArrayList<>(); names.add("bob"); names.add("joe"); names.add("sam"); Object[] result = this.businessService.methodReturningAnArray(names.toArray()); assertThat(result).hasSize(1); assertThat(result[0]).isEqualTo("bob"); } @WithMockUser("bob") @Test public void securedUserWhenCustomBeforeAdviceConfiguredAndNameBobThenPasses() { this.spring.configLocations(xml("CustomAuthorizationManagerBeforeAdvice")).autowire(); String result = this.methodSecurityService.securedUser(); assertThat(result).isNull(); } @WithMockUser("joe") @Test public void securedUserWhenCustomBeforeAdviceConfiguredAndNameNotBobThenAccessDeniedException() { this.spring.configLocations(xml("CustomAuthorizationManagerBeforeAdvice")).autowire(); assertThatExceptionOfType(AccessDeniedException.class).isThrownBy(this.methodSecurityService::securedUser) .withMessage("Access Denied"); } @WithMockUser("bob") @Test public void securedUserWhenCustomAfterAdviceConfiguredAndNameBobThenGranted() { this.spring.configLocations(xml("CustomAuthorizationManagerAfterAdvice")).autowire(); String result = this.methodSecurityService.securedUser(); assertThat(result).isEqualTo("granted"); } @WithMockUser("joe") @Test public void securedUserWhenCustomAfterAdviceConfiguredAndNameNotBobThenAccessDeniedException() { this.spring.configLocations(xml("CustomAuthorizationManagerAfterAdvice")).autowire(); assertThatExceptionOfType(AccessDeniedException.class).isThrownBy(this.methodSecurityService::securedUser) .withMessage("Access Denied for User 'joe'"); } @WithMockUser(roles = "ADMIN") @Test public void jsr250WhenRoleAdminThenAccessDeniedException() { 
this.spring.configLocations(xml("MethodSecurityServiceEnabled")).autowire(); assertThatExceptionOfType(AccessDeniedException.class).isThrownBy(this.methodSecurityService::jsr250) .withMessage("Access Denied"); } @WithAnonymousUser @Test public void jsr250PermitAllWhenRoleAnonymousThenPasses() { this.spring.configLocations(xml("MethodSecurityServiceEnabled")).autowire(); String result = this.methodSecurityService.jsr250PermitAll(); assertThat(result).isNull(); } @WithMockUser(roles = "ADMIN") @Test public void rolesAllowedUserWhenRoleAdminThenAccessDeniedException() { this.spring.configLocations(xml("BusinessService")).autowire(); assertThatExceptionOfType(AccessDeniedException.class).isThrownBy(this.businessService::rolesAllowedUser) .withMessage("Access Denied"); } @WithMockUser @Test public void rolesAllowedUserWhenRoleUserThenPasses() { this.spring.configLocations(xml("BusinessService")).autowire(); this.businessService.rolesAllowedUser(); } @WithMockUser(roles = { "ADMIN", "USER" }) @Test public void manyAnnotationsWhenMeetsConditionsThenReturnsFilteredList() throws Exception { List<String> names = Arrays.asList("harold", "jonathan", "pete", "bo"); this.spring.configLocations(xml("MethodSecurityServiceEnabled")).autowire(); List<String> filtered = this.methodSecurityService.manyAnnotations(new ArrayList<>(names)); assertThat(filtered).hasSize(2); assertThat(filtered).containsExactly("harold", "jonathan"); } // gh-4003 // gh-4103 @WithMockUser @Test public void manyAnnotationsWhenUserThenFails() { List<String> names = Arrays.asList("harold", "jonathan", "pete", "bo"); this.spring.configLocations(xml("MethodSecurityServiceEnabled")).autowire(); assertThatExceptionOfType(AccessDeniedException.class) .isThrownBy(() -> this.methodSecurityService.manyAnnotations(new ArrayList<>(names))); } @WithMockUser @Test public void manyAnnotationsWhenShortListThenFails() { List<String> names = Arrays.asList("harold", "jonathan", "pete"); this.spring.configLocations(xml("MethodSecurityServiceEnabled")).autowire(); assertThatExceptionOfType(AccessDeniedException.class) .isThrownBy(() -> this.methodSecurityService.manyAnnotations(new ArrayList<>(names))); } @WithMockUser(roles = "ADMIN") @Test public void manyAnnotationsWhenAdminThenFails() { List<String> names = Arrays.asList("harold", "jonathan", "pete", "bo"); this.spring.configLocations(xml("MethodSecurityServiceEnabled")).autowire(); assertThatExceptionOfType(AccessDeniedException.class) .isThrownBy(() -> this.methodSecurityService.manyAnnotations(new ArrayList<>(names))); } // gh-3183 @Test public void repeatedAnnotationsWhenPresentThenFails() { this.spring.configLocations(xml("MethodSecurityService")).autowire(); assertThatExceptionOfType(AnnotationConfigurationException.class) .isThrownBy(() -> this.methodSecurityService.repeatedAnnotations()); } // gh-3183 @Test public void repeatedJsr250AnnotationsWhenPresentThenFails() { this.spring.configLocations(xml("Jsr250")).autowire(); assertThatExceptionOfType(AnnotationConfigurationException.class) .isThrownBy(() -> this.businessService.repeatedAnnotations()); } // gh-3183 @Test public void repeatedSecuredAnnotationsWhenPresentThenFails() { this.spring.configLocations(xml("Secured")).autowire(); assertThatExceptionOfType(AnnotationConfigurationException.class) .isThrownBy(() -> this.businessService.repeatedAnnotations()); } private static String xml(String configName) { return CONFIG_LOCATION_PREFIX + "-" + configName + ".xml"; } static class MyPermissionEvaluator implements PermissionEvaluator { 
@Override public boolean hasPermission(Authentication authentication, Object targetDomainObject, Object permission) { return "grant".equals(targetDomainObject); } @Override public boolean hasPermission(Authentication authentication, Serializable targetId, String targetType, Object permission) { throw new UnsupportedOperationException(); } } static class MyAuthorizationManager implements AuthorizationManager<MethodInvocation> { @Override public AuthorizationDecision check(Supplier<Authentication> authentication, MethodInvocation object) { return new AuthorizationDecision("bob".equals(authentication.get().getName())); } } static class MyAdvice implements MethodInterceptor { @Nullable @Override public Object invoke(@NotNull MethodInvocation invocation) { Authentication auth = SecurityContextHolder.getContext().getAuthentication(); if ("bob".equals(auth.getName())) { return "granted"; } throw new AccessDeniedException("Access Denied for User '" + auth.getName() + "'"); } } }
<reponame>Zazsona/DecorHeads package com.zazsona.decorheads.headdata; import com.mojang.authlib.GameProfile; import com.mojang.authlib.properties.Property; import com.zazsona.decorheads.Core; import org.bukkit.Bukkit; import org.bukkit.ChatColor; import org.bukkit.Material; import org.bukkit.inventory.ItemStack; import org.bukkit.inventory.meta.ItemMeta; import org.bukkit.inventory.meta.SkullMeta; import java.lang.reflect.Field; import java.util.ArrayList; import java.util.UUID; public class TextureHead extends Head { private String name; private String texture; public TextureHead(String key, String name, String texture) { super(key); this.name = name; this.texture = texture; } public String getName() { return name; } public String getTextureEncoding() { return texture; } @Override public ItemStack createItem() { return createSkull(name, texture); } }
Social and educational environment modeling in future vision: infosphere tools The article is devoted to the study of information and communication environment tools for constructing a vision of the future. The research methodology is determined by the information paradigm providing a general theoretical basis for rational explanation of orienting interactions in socio-technical environment / system. The information model of interactions management in a social environment is noted to be of code nature, which is typical of an indirect influence on contemporaries' consciousness. The infosphere tools for the future modeling are presented by their structural, functional and motivational aspects. In the context of social and educational environment modeling, one should point out the material needs driven by the market code and background knowledge transmitting the stereotypes of thinking and behavior. Further, the authors specify the role of the worldview and educational paradigm in the future modeling. The results of mass expectations monitoring are presented, such as dynamic analysis of social sentiment indicators in Russia, youth assessment of the university’s image and prospects for online learning. The study materials include data of sociological surveys among Peter the Great St.Petersburg Polytechnic University students. In conclusion, the authors emphasize the need for mental protection of a person in the digital hyper-net, which transforms the infosphere of communication thus creating potential threat of community and mental structures disorganization. Introduction In the modern worldview, the information environment correlates with the space of potential action choice that determines the life horizon of a modern engineer for his successful social adaptation and professional orientation. New trends in shaping the global vision of the future, characteristic of the fourth industrial revolution are associated with the * Correspondent author: [email protected] key role of digital culture in social and professional activities organization . The megatrends of the fourth industrial revolution are determined by the development of convergent technologies connecting physical, digital and biological blocks. And all the innovations that appear within these trends are provided by the computing power of digital technologies. The tasks of education in the new vision of the future are dictated by the expansion of information technology environments, which form a special kind of virtual reality and a special mentality, which substantially corrects the development of personality and the dynamics of relations in society . The analysis of the infosphere tools for modeling the vision of the future is the current interdisciplinary task of modern science. In the logic of the fourth industrial revolution, the factors of production include not only natural and other material resources, but also the market of information and knowledge. In modeling the future, attention is focused, in addition to science and education commercialization, on the mental and ethical aspects of innovation activity. The specific objectives of our study are related to monitoring of potential expectations, which addresses mass sentiment indicators in terms of the global city strategy, as well as students' assessment of the negative effects of digital technologies on learning process. Literature review. 
In the scientific literature, the term infosphere appeared during the period of intensive development of electronic communications and programming languages. In a broad context, this term came into use in recent decades towards the whole symbolic environment of society and human life world . In the modern sense, this concept covers a multi-level system of virtual representations. The symbolic form of mediated representation of an object allows multiple variations of semantic meanings, as well as their visualization via an apparent illustration, thus provoking an involuntary emotional response. Certain symbology, realizable within the discourse or mental image, allows you to save meanings and translate them into the future . At the same time, the role of background knowledge ("pre-knowledge") is actualized, empowering an individual to directly perceive information in accordance with the common notions . In cognitive sciences, the studies of semantic orientation of a person in the information field develop an idea about mental spaces and representations . The mental representation of the present and the future has its own special structure and does not always coincide with the visual image, especially when it comes to representing an object through another one. In this case, mental activity of the subject is considered in terms of Model-Based Reasoning . The relevance of the present is determined by future prospects, but the vision of the future is rooted in the past. Nervous activity cyclising in the space of orienting interactions familiar to the system, is described by U. Maturana . In digital culture, the infosphere is associated with the global semantic network. In hierarchically organized semantic environments, orientation at various levels requires a certain specificity of information perception and processing. In particular, there are three qualitatively different ways of the infosphere's exposure upon a person: perceptual (similar to physical contact, through sensations); cognitive (influences through semantic structures, frames, sets of knowledge); reflexive (information perception along with value attitudes and limits awareness) . The heuristic capabilities of the information paradigm established in the modern system of knowledge are associated with the formation of rational models for explaining and prediction of orienting interactions that are of clearly non-physical nature. In a complex system, information characterizes its functional abilities in accordance with external influence factors and internal state parameters. Information models of interaction explaining emphasize the code character of the causal relationship, which is revealed through the launching of a specific sequence of actions and the system's self-determination markers. The informative code function is to rate some potential vital space of the system by indicating explicit or implicit boundaries for motives and actions. According to the normative principle, we can determine the matrix models represented by code dependence in predicting the future. Thus, the market code serves as a global modeling tool in the scenario of the future capitalization, which defines the global city strategy and the engineering education system as service trades . Theoretical research settings The study of the infosphere tools for the future modeling is conducted in two aspects; these are structural-functional and motivational ones. 
In terms of the structural-functional aspect, we focus on the technological information tools for the society life world formation, where one should distinguish the basic environments, namely the socio-economic environment and the socio-cultural space of life, which models are guided by market code. Forecast of the future state of these environments depends on their dynamics in the present. We use sociological survey methods and statistics that relate to the dynamics of socioeconomic indicators of mass expectations and environmental factors assessment. The infosphere tools in the motivational aspect of the future modeling are represented by mass expectations monitoring among the students of Peter the Great St. Petersburg Polytechnic University (SPbPU). Methods of mass expectations study within the market matrix For mass expectations monitoring in the market matrix, we use sociological survey methods that relate to socio-economic indicators dynamics (consumer sentiment index, social sentiment index, unemployment expectation index) and Levada-Center statistics (https://www.levada.ru/indikatory/sotsialno-ekonomicheskie-indikatory / appeal date 13.04. 2019). The consumer sentiment index is an important tool for predicting consumer behavior and the economic situation, which is largely determined by the population solvency. The method of consumer sentiment index constructing includes a survey on five questions. A representative sample reflects the views of an adult (over 16 years old), urban and rural population of the country (the number of respondents = 2000). For each question, an individual index is constructed as the difference between the parts of positive and negative answers and 100 is added to avoid negative index value. The social sentiment index allows tracking the mass consciousness reaction to the economic situation changes, prediction of such changes in the future. Levada-Center provides statistical data by year and quarter since 2008. Monitoring of public opinion on the public image of St. Petersburg Polytechnic University of Peter the Great (SPbPU) The purpose of the study is to obtain sociological information about the contours of the university's visual environment, its main characteristics, and to compare the real public image of the SPSPU with the desired one. Research method is online questioning in the social networking site "Vkontakte" (www.vk.com). The study is of reconnaissance character and utilizes random sampling. In total, 200 respondents were engaged in the survey, including students of both technical and humanitarian institutes of SPbPU. Respondents are aged from 17 to 35, with 57% men, 43% women among them. The research objectives: 1. To find out students' seeing of the university visual environment. 2. To learn students' opinion on the existing public image of the university. 3. To outline the public image of the head of university. 4. To refine the desired image of the head of university. 5. To compare the real public image with the desired one. 6. To identify the most preferred content of the university television to form a topical agenda. Monitoring of students' attitudes towards online learning The purpose of the study is optimization of the educational process forms and mode. Research method is sociological monitoring aimed at identifying adaptation problems of students who receive fully distance education. The methodology includes group discussions in study groups and written survey. 
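The construction of the consumer sentiment index described in the methods above - for each question, the share of positive answers minus the share of negative answers, plus 100 so the value stays non-negative - can be made concrete with a short sketch. The figures below are invented, and the aggregation of the five per-question indices into a single index is assumed here to be a simple arithmetic mean, which the text does not spell out.

// Sketch of the per-question sentiment index and an assumed mean aggregation.
// Shares are fractions of all respondents (0.45 = 45%); the sample figures are invented.
public final class SentimentIndexSketch {

    // index = (positive share - negative share) in percentage points, plus 100.
    static double questionIndex(double positiveShare, double negativeShare) {
        return (positiveShare - negativeShare) * 100.0 + 100.0;
    }

    public static void main(String[] args) {
        // {positive share, negative share} for each of the five survey questions.
        double[][] answers = {
                {0.45, 0.30}, {0.38, 0.41}, {0.52, 0.22}, {0.35, 0.35}, {0.28, 0.47}
        };
        double sum = 0.0;
        for (double[] q : answers) {
            double index = questionIndex(q[0], q[1]);
            System.out.printf("question index: %.1f%n", index);
            sum += index;
        }
        // Assumed aggregation: the arithmetic mean of the per-question indices.
        // Values below 100 indicate negative dynamics of sentiment.
        System.out.printf("aggregate index: %.1f%n", sum / answers.length);
    }
}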
The adaptation problems pointed out by the survey not only indicate a student's involvement into the educational process and the extent of educational modules mastering, but also reveal following important characteristics for obtaining an educational result: -Activity in educational technologies mastering; -Readiness for making choice and understanding of responsibility for it; -Ability to find and process the necessary information; -Skills of communication not only with the teacher, but also with other services included into the educational process; -The degree of awareness of the distance education opportunities; -Ability to resolve their problems immediately; -Student self-esteem for learning success. The results of mass expectations study within the market matrix The values of the mass sentiments indicators in relation to socio-economic environmental factors are presented in the general diagram (Fig. 1). The diagram data are taken from the statistics of the Levada-Center regular surveys (https://www.levada.ru/indikatory/sotsialnoekonomicheskie-indikatory/ as of 13.04.2019). Indicators are given by quarters of the current year. Level below 100 indicates negative dynamics of sentiments. The results of public opinion monitoring on the public image of St. Petersburg Polytechnic University of Peter the Great (SPbPU) The sociological questionnaire consists of four blocks of questions. The first block is on the symbols and image of the university. The second block addresses the public image of the university head. The third block of questions is devoted to the intra-university television: internal TV content, assessment of its necessity, and assessment of the TV influence on student life. The main characteristics of SPbPU. The first block is represented by 8 questions aimed at determining the image of the university, its characteristics, as well as the assessment of the visual environment and symbols of SPbPU. Respondents were offered both ready-made answers and the opportunity to give their own detailed answer. So, when asked about the reasons for entering the university, the majority of respondents pointed out the following qualities of the university from the students' point of view: the prestige of the university (50.5%), quality of education (43.5%), wide popularity (32%) and wide opportunities for career growth (26.5%). In addition, to a lesser extent, there are also noted convenience of location, opportunities for creative and social activities, competent academic staff, and suitable curriculum. Choosing among the positive qualities of SPbPU, expressed in one specific characteristic, the majority of respondents described the university as well-known (51.5%), interesting (40%), promising (34%), cozy (28%). Among the negative qualities of the university, respondents mainly note the low quality of dormitories (42.5%) and the inability of teachers to interest in their subject (43%) Value orientations in the formation of SPbPU public image. Characterizing the university and its public image through its intrinsic values, the respondents particularly highlight scientific discoveries and innovations (24%) and the mood of friendship and cooperation (19.5%). 
Other values were chosen by the respondents in approximately equal proportions, but the smallest vote is given for such values as openness and freedom (4.5%) and creativity (3%), see Fig. 2. Results of monitoring of students' attitudes towards online learning The study tested the following hypotheses: -Evaluation of digital culture introduction into the educational process as a factor of improvement of efficiency of obtaining knowledge. -Along with digitalization, expectations increase in relation to the university educational environment, especially in the field of distance learning. -The digital culture development at the university contributes to the communication development due to the fact that communication becomes easier and more accessible. -Digital culture facilitates formation of creative educational environment. A written survey according to the method of Yasvin was held among students of "Psychology", "Advertising and PR", "Jurisprudence", "Computer Science and Computing" specialties; in total, 114 respondents completed the survey. The educational environment assessment is based on two projections: activity - passivity; freedom - dependence. Each alternative is characterized by 10 parameters rated on a scale of 1 to 10. Survey data analysis shows that the university educational environment assessment differs for students of humanitarian areas (Table 1) and technical areas (Table 2). The tables contrast typical student comments on distance learning: on the positive side, the format gives access from anywhere in the world at any convenient time and makes it convenient to search for information; on the negative side, the introduction of remote subjects is seen as not productive, information is not remembered, it does not discipline, and the learning process is slower because no one explains and shows everything personally. Most humanities students advocate distance learning, but for further introduction of digital culture into the educational process it is necessary to consider the field of education, the proposed learning material, technical equipment and the level of students' adaptation to computer technology. During the group discussion, proponents of a prospective integration of classical and digital learning emerged, since the learning process not only involves obtaining specific knowledge, but also develops communication skills, and this is facilitated by traditional education. Distance education technologies widen the opportunities for high-quality education, but do not provide it by themselves. Here, the organization of the educational process and the issue of motivation play a valuable role. Active mastering and introduction of digital tools and means of communication into the educational process becomes an objective necessity. They increase the degree of students' responsibility for their education, make changes in socialization by reducing interpersonal communication, increase individual achievements and simplify access to knowledge. The main factor of digital competence of modern students should be the readiness of students not only to master new information technologies, assess their capabilities and risks, but also to accept the ever-increasing rate of change and constantly update their knowledge, as well as acquire new competencies.
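Since the assessment above rests on two bipolar projections (activity - passivity and freedom - dependence), each pole rated through ten parameters on a 1-10 scale, one plausible way to aggregate such ratings can be sketched as follows. The averaging-and-difference rule and the sample ratings are assumptions made for illustration; they are not the exact procedure of Yasvin's methodology.

// Sketch: place an educational environment on the two bipolar axes by averaging
// the ten 1-10 ratings of each pole and taking the difference between the poles.
// The aggregation rule and the sample ratings are illustrative assumptions.
public final class EnvironmentAssessmentSketch {

    static double mean(int[] ratings) {
        double sum = 0;
        for (int r : ratings) {
            sum += r;
        }
        return sum / ratings.length;
    }

    // A positive score leans towards the first pole (activity or freedom).
    static double axisScore(int[] firstPole, int[] secondPole) {
        return mean(firstPole) - mean(secondPole);
    }

    public static void main(String[] args) {
        int[] activity   = {7, 6, 8, 5, 7, 6, 7, 8, 6, 7};
        int[] passivity  = {4, 5, 3, 6, 4, 5, 4, 3, 5, 4};
        int[] freedom    = {5, 6, 5, 4, 6, 5, 5, 6, 4, 5};
        int[] dependence = {6, 5, 6, 7, 5, 6, 6, 5, 7, 6};

        System.out.printf("activity - passivity axis: %+.1f%n", axisScore(activity, passivity));
        System.out.printf("freedom - dependence axis: %+.1f%n", axisScore(freedom, dependence));
    }
}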
The role of the worldview for constructing the future living environment The subjective perception of time is determined by the present, its content is determined by the combination of the past and the desired future in the form of fantasy, dreams, utopia, ideals, which are represented in the mental image and carry a certain chronotope and context. The vision of the future is connected with education and understanding of the world which depends on the scientific or religious picture of the world order and the vector of the global development which is not disclosed since the subject of global governance is always hidden and available only in interpretations traditionally attributed to the super knowledge of the priests. At the same time, a person is directed to the future. The lack of life prospects is a mental trauma which can lead to suicide. Let's note that the general condition for future modeling is initial uncertainty of any scenario, since it deals with something that still does not exist. Rational or irrational basis for accepting this or that vision of the future consists of 1) the logic of circumstances, 2) the worldview paradigm, 3) intuition related to the set of knowledge and archetypes of the collective unconscious. The normative forecast is determined by the target vector which is not always clearly stated. There is a problem of the ideal social order which can be different, for example, in paradigmatic settings of cognitive capitalism and digital socialism, even though they utilize the same technologic base. The educational paradigm in the future modeling The problem of the ideal social order of the future is connected with the issue of ideal education which inevitably arises in every historical era and is associated with the fundamental model of socially adapted subject who has mastered cultural experience of his contemporaries. This issue is complicated in the temporal aspect of the generational bridge, since the process of education means formation of human consciousness of the future and thus violates the boundaries of historical and cultural experience and everyday life. The scenario of the future should be based on the generational bridge extended in time. It determines the content and role of the educational strategy in the future modeling. Events and persons are assessed from a certain ideological position which defines the boundaries of understanding of the past and forecast of the future. Any program or educational system that does not have an ideological base is a headless system that creates an unpredictable and uncontrollable subject, because the basic value orientation indicating the criterion of truth is not set in advance. Changing worldview reorients educational strategies. Thus, in the history of European culture, the ancient cosmism, the philosophical basis of which was used by the intuitive-discursive educational strategy, is replaced by medieval theocentrism and exegetically-apologetic educational strategy, and then by the anthropocentrism of the New Age. This worldview also defines educational models in the present . Trends of e-culture in modeling the educational environment of the future Prospects for an information technology culture formation in a global society are determined by network technologies. The vision of the future in the context of information age evolution is associated with formation of a knowledge society with a pronounced economic attitude to education which is included into the system of services and focused on profit. 
The expansion of the network technologies format in educational practices that create new informational interactive environments, virtual games and multimedia teaching tools instead of traditional discursive means, however, does not answer the question about the ideal educational model in its time perspective. Moreover, the value aspect of the educational strategy is virtually eliminated except for the principle of freedom of choice of behavior in the virtual space. The ideal education, according to Aristotle, is formation of the abilities of thinking and virtue. Since the transfer of knowledge and skills can be carried out at the imitative wordless level, discourse is of particular importance as a tool for the formation of the individual's mental experience and mental activity (reasoning, understanding and evaluation). Discursive technology in educational practice is a fundamental tool used to translate values and create semantic orientations, synthesizing cognitive, critical and value abilities in the process of emotional, intellectual and behavioral responses. The intuitive-discursive paradigm of education developed in antiquity is still deeply rooted in the modern educational process. Moreover, in the conditions of the network information society discourse acts as a weapon of total mass impact on the mind, actions and emotions of a person, turning him into a limited subject (consumer) or vice versa into a disoriented subject (virtual). The education system is included in the multilevel semantic network of socio-cultural community. The network is controlled by a program that is not visible from the inside, in the internal dynamics. The mental impact of discourse is associated with the subconscious algorithm: emotions - thoughts - actions. A language formula causes expected emotions as a reaction to the situation. Discourse provokes semantic dynamics, introduces phantoms into the mass consciousness and allows individual or group behavior modeling. There is a virtual communicative universe which exists as a fundamentally unstructured whole. The semantic connection in the virtual community becomes uncertain, because the Internet's own dynamics can create simulated events, which look like discourse but never actually occur. The criterion of truth is invalidated. A post is rated for representativeness in a particular community. Deliberately constructed and constantly reviewed fictions are based on the illusion which substitutes reality. Formation of the life world of the individual becomes dependent on information traffic. The network dependence creates potential conditions for mental and social transformations as a result of the collapse of illusions. Network creative technologies open the way back for socialization of phantoms and virtual Internet parties. It's hard to escape the discouraging conclusion that the increase of the technological format in education, even when aimed at forming consciousness within a certain cultural tradition, provokes and enhances the design of a socially dangerous subject opposing the real community in the form of culture, language, state. Axiological scenario in future modeling Value attitudes in constructing the life world correlate with the macro-system principle of the semantic network dynamics organization. System principles are undoubtedly significant for network organization and management. The system of values captures the semantic matrix in the infosphere of communication, forming a certain hyper net.
Predicting the non-reflective semantic coherence of the networked community is possible only on the basis of fundamental value orientations rooted in the subconscious and triggered subconsciously in the processes of perceiving, reasoning and understanding. Despite the virtualization of the communicative and social space, the transformation of meanings and the creation of fakes, culture configures the network both explicitly and implicitly. In this case, the axiological attitude acts as a core semantic orientation for network tuning. In terms of education, the value system of a culture should indicate an ideal of decent behavior, defining the program of social management with account of a time perspective. The prospect of the future is perceived emotionally by a person first of all in terms of life safety. Violation of this perspective is the most powerful channel of opposition to the destruction of cultural and ethnic communities, even in the context of active social networks. Marginal situations with an implicit future constitute certain "singularities" at whose boundaries a request for change emerges, stimulating transitions to a new state. The preservation of the community is the natural basis for its continued existence in time. The paradigm of education in the network society, with its polylogos, can be associated with the axiological paradigm, which makes it possible, when modeling a global society of the future, to combine value attitudes with the subconscious boundaries of mentality as an ethnically normalized perception of the world, rooted in the chronotope of the geographical environment. The ethnic stereotype forms a meta-discursive channel of communication between generations. It translates norms of relationships, the perception of space-time, duty, honor, dignity, trust, and a sense of community and security. Human memory, consolidating consciousness in time, connects it with the cultural forms existing in the community. The history of culture demonstrates the stability of traditions as a system of institutions and values. New ideological and axiological attitudes are rejected by the majority of people and treated as a revolt; their followers do not receive public recognition and approval. At the same time, discursive practices of meaning transformation constitute a hidden mechanism of conflict-environment production, insofar as they introduce new meanings and change traditional attitudes in a covert struggle for minds and group values. In the context of the generational bridge, communication contains objective conditions for a hidden conflict of generations. The educational paradigm is intended to remove this divergence and the breaks in the generational bridge through the formation of a common worldview and historical memory.

Conclusions

The scenario of the future should seamlessly combine socio-cultural and mental dynamics on the basis of a certain system of values in the project of the global society. The tools of the infosphere in modeling the vision of the future are adjusted according to the paradigm, which is either not proclaimed or, alternatively, clearly declared, introducing specific guidelines for understanding the life perspective. Depending on which vector prevails, it is possible to speak about scenarios of exploratory or normative forecasting. In both cases, however, forecasting is latently driven by the worldview paradigm. The exploratory forecast relies on the extrapolation of existing trends into the near future; it is supposed that objective conditions will not change.
The normative forecast, by contrast, when implemented unconsciously at the level of the instinctive program, appears to be a model of the required past. The paradigm of education in the project of the future implies a method of interpreting the world which includes, on the one hand, the historically determined cultural heritage and, on the other, the formation of practical consciousness as an individual way of perceiving the world. The values linking the semantic sphere of practical consciousness are constitutive signs of the community. Globalization gives rise to a particular socio-cultural environment and to new types of virtual communities, forcing us to rethink the typological characteristics of the cultural-historical process and the life-world of the individual. The hyperactive network in digital culture transforms the communication infosphere and creates potential conditions for the destruction of the community and of the mental structures of the collective unconscious, of practical and of discursive consciousness. The implementation of a strategy of mental protection of the person in developing technological environments, in order to preserve the human community, should determine the scenarios of the future in social forecasting and, first of all, in the strategy of education oriented toward the future.
// SPDX-License-Identifier: GPL-2.0+
/*
 * addi_apci_1500.c
 * Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
 *
 * ADDI-DATA GmbH
 * Dieselstrasse 3
 * D-77833 Ottersweier
 * Tel: +49(0)7223/9493-0
 * Fax: +49(0)7223/9493-92
 * http://www.addi-data.com
 * [email protected]
 */

#include <linux/module.h>
#include <linux/interrupt.h>

#include "../comedi_pci.h"
#include "amcc_s5933.h"
#include "z8536.h"

/*
 * PCI Bar 0 Register map (devpriv->amcc)
 * see amcc_s5933.h for register and bit defines
 */

/*
 * PCI Bar 1 Register map (dev->iobase)
 * see z8536.h for Z8536 internal registers and bit defines
 */
#define APCI1500_Z8536_PORTC_REG	0x00
#define APCI1500_Z8536_PORTB_REG	0x01
#define APCI1500_Z8536_PORTA_REG	0x02
#define APCI1500_Z8536_CTRL_REG		0x03

/*
 * PCI Bar 2 Register map (devpriv->addon)
 */
#define APCI1500_CLK_SEL_REG		0x00
#define APCI1500_DI_REG			0x00
#define APCI1500_DO_REG			0x02

struct apci1500_private {
	unsigned long amcc;
	unsigned long addon;

	unsigned int clk_src;

	/* Digital trigger configuration [0]=AND [1]=OR */
	unsigned int pm[2];	/* Pattern Mask */
	unsigned int pt[2];	/* Pattern Transition */
	unsigned int pp[2];	/* Pattern Polarity */
};

static unsigned int z8536_read(struct comedi_device *dev, unsigned int reg)
{
	unsigned long flags;
	unsigned int val;

	spin_lock_irqsave(&dev->spinlock, flags);
	outb(reg, dev->iobase + APCI1500_Z8536_CTRL_REG);
	val = inb(dev->iobase + APCI1500_Z8536_CTRL_REG);
	spin_unlock_irqrestore(&dev->spinlock, flags);

	return val;
}

static void z8536_write(struct comedi_device *dev,
			unsigned int val, unsigned int reg)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->spinlock, flags);
	outb(reg, dev->iobase + APCI1500_Z8536_CTRL_REG);
	outb(val, dev->iobase + APCI1500_Z8536_CTRL_REG);
	spin_unlock_irqrestore(&dev->spinlock, flags);
}

static void z8536_reset(struct comedi_device *dev)
{
	unsigned long flags;

	/*
	 * Even if the state of the Z8536 is not known, the following
	 * sequence will reset it and put it in State 0.
	 */
	spin_lock_irqsave(&dev->spinlock, flags);
	inb(dev->iobase + APCI1500_Z8536_CTRL_REG);
	outb(0, dev->iobase + APCI1500_Z8536_CTRL_REG);
	inb(dev->iobase + APCI1500_Z8536_CTRL_REG);
	outb(0, dev->iobase + APCI1500_Z8536_CTRL_REG);
	outb(1, dev->iobase + APCI1500_Z8536_CTRL_REG);
	outb(0, dev->iobase + APCI1500_Z8536_CTRL_REG);
	spin_unlock_irqrestore(&dev->spinlock, flags);

	/* Disable all Ports and Counter/Timers */
	z8536_write(dev, 0x00, Z8536_CFG_CTRL_REG);

	/*
	 * Port A is connected to Digital Input channels 0-7.
	 * Configure the port to allow interrupt detection.
	 */
	z8536_write(dev, Z8536_PAB_MODE_PTS_BIT |
			 Z8536_PAB_MODE_SB |
			 Z8536_PAB_MODE_PMS_DISABLE,
		    Z8536_PA_MODE_REG);
	z8536_write(dev, 0xff, Z8536_PB_DPP_REG);
	z8536_write(dev, 0xff, Z8536_PA_DD_REG);

	/*
	 * Port B is connected to Digital Input channels 8-13.
	 * Configure the port to allow interrupt detection.
	 *
	 * NOTE: Bits 7 and 6 of Port B are connected to internal
	 * diagnostic signals and bit 7 is inverted.
	 */
	z8536_write(dev, Z8536_PAB_MODE_PTS_BIT |
			 Z8536_PAB_MODE_SB |
			 Z8536_PAB_MODE_PMS_DISABLE,
		    Z8536_PB_MODE_REG);
	z8536_write(dev, 0x7f, Z8536_PB_DPP_REG);
	z8536_write(dev, 0xff, Z8536_PB_DD_REG);

	/*
	 * Not sure what Port C is connected to...
	 */
	z8536_write(dev, 0x09, Z8536_PC_DPP_REG);
	z8536_write(dev, 0x0e, Z8536_PC_DD_REG);

	/*
	 * Clear and disable all interrupt sources.
	 *
	 * Just in case, the reset of the Z8536 should have already
	 * done this.
*/ z8536_write(dev, Z8536_CMD_CLR_IP_IUS, Z8536_PA_CMDSTAT_REG); z8536_write(dev, Z8536_CMD_CLR_IE, Z8536_PA_CMDSTAT_REG); z8536_write(dev, Z8536_CMD_CLR_IP_IUS, Z8536_PB_CMDSTAT_REG); z8536_write(dev, Z8536_CMD_CLR_IE, Z8536_PB_CMDSTAT_REG); z8536_write(dev, Z8536_CMD_CLR_IP_IUS, Z8536_CT_CMDSTAT_REG(0)); z8536_write(dev, Z8536_CMD_CLR_IE, Z8536_CT_CMDSTAT_REG(0)); z8536_write(dev, Z8536_CMD_CLR_IP_IUS, Z8536_CT_CMDSTAT_REG(1)); z8536_write(dev, Z8536_CMD_CLR_IE, Z8536_CT_CMDSTAT_REG(1)); z8536_write(dev, Z8536_CMD_CLR_IP_IUS, Z8536_CT_CMDSTAT_REG(2)); z8536_write(dev, Z8536_CMD_CLR_IE, Z8536_CT_CMDSTAT_REG(2)); /* Disable all interrupts */ z8536_write(dev, 0x00, Z8536_INT_CTRL_REG); } static void apci1500_port_enable(struct comedi_device *dev, bool enable) { unsigned int cfg; cfg = z8536_read(dev, Z8536_CFG_CTRL_REG); if (enable) cfg |= (Z8536_CFG_CTRL_PAE | Z8536_CFG_CTRL_PBE); else cfg &= ~(Z8536_CFG_CTRL_PAE | Z8536_CFG_CTRL_PBE); z8536_write(dev, cfg, Z8536_CFG_CTRL_REG); } static void apci1500_timer_enable(struct comedi_device *dev, unsigned int chan, bool enable) { unsigned int bit; unsigned int cfg; if (chan == 0) bit = Z8536_CFG_CTRL_CT1E; else if (chan == 1) bit = Z8536_CFG_CTRL_CT2E; else bit = Z8536_CFG_CTRL_PCE_CT3E; cfg = z8536_read(dev, Z8536_CFG_CTRL_REG); if (enable) { cfg |= bit; } else { cfg &= ~bit; z8536_write(dev, 0x00, Z8536_CT_CMDSTAT_REG(chan)); } z8536_write(dev, cfg, Z8536_CFG_CTRL_REG); } static bool apci1500_ack_irq(struct comedi_device *dev, unsigned int reg) { unsigned int val; val = z8536_read(dev, reg); if ((val & Z8536_STAT_IE_IP) == Z8536_STAT_IE_IP) { val &= 0x0f; /* preserve any write bits */ val |= Z8536_CMD_CLR_IP_IUS; z8536_write(dev, val, reg); return true; } return false; } static irqreturn_t apci1500_interrupt(int irq, void *d) { struct comedi_device *dev = d; struct apci1500_private *devpriv = dev->private; struct comedi_subdevice *s = dev->read_subdev; unsigned short status = 0; unsigned int val; val = inl(devpriv->amcc + AMCC_OP_REG_INTCSR); if (!(val & INTCSR_INTR_ASSERTED)) return IRQ_NONE; if (apci1500_ack_irq(dev, Z8536_PA_CMDSTAT_REG)) status |= 0x01; /* port a event (inputs 0-7) */ if (apci1500_ack_irq(dev, Z8536_PB_CMDSTAT_REG)) { /* Tests if this is an external error */ val = inb(dev->iobase + APCI1500_Z8536_PORTB_REG); val &= 0xc0; if (val) { if (val & 0x80) /* voltage error */ status |= 0x40; if (val & 0x40) /* short circuit error */ status |= 0x80; } else { status |= 0x02; /* port b event (inputs 8-13) */ } } /* * NOTE: The 'status' returned by the sample matches the * interrupt mask information from the APCI-1500 Users Manual. 
* * Mask Meaning * ---------- ------------------------------------------ * 0b00000001 Event 1 has occurred * 0b00000010 Event 2 has occurred * 0b00000100 Counter/timer 1 has run down (not implemented) * 0b00001000 Counter/timer 2 has run down (not implemented) * 0b00010000 Counter 3 has run down (not implemented) * 0b00100000 Watchdog has run down (not implemented) * 0b01000000 Voltage error * 0b10000000 Short-circuit error */ comedi_buf_write_samples(s, &status, 1); comedi_handle_events(dev, s); return IRQ_HANDLED; } static int apci1500_di_cancel(struct comedi_device *dev, struct comedi_subdevice *s) { /* Disables the main interrupt on the board */ z8536_write(dev, 0x00, Z8536_INT_CTRL_REG); /* Disable Ports A & B */ apci1500_port_enable(dev, false); /* Ack any pending interrupts */ apci1500_ack_irq(dev, Z8536_PA_CMDSTAT_REG); apci1500_ack_irq(dev, Z8536_PB_CMDSTAT_REG); /* Disable pattern interrupts */ z8536_write(dev, Z8536_CMD_CLR_IE, Z8536_PA_CMDSTAT_REG); z8536_write(dev, Z8536_CMD_CLR_IE, Z8536_PB_CMDSTAT_REG); /* Enable Ports A & B */ apci1500_port_enable(dev, true); return 0; } static int apci1500_di_inttrig_start(struct comedi_device *dev, struct comedi_subdevice *s, unsigned int trig_num) { struct apci1500_private *devpriv = dev->private; struct comedi_cmd *cmd = &s->async->cmd; unsigned int pa_mode = Z8536_PAB_MODE_PMS_DISABLE; unsigned int pb_mode = Z8536_PAB_MODE_PMS_DISABLE; unsigned int pa_trig = trig_num & 0x01; unsigned int pb_trig = (trig_num >> 1) & 0x01; bool valid_trig = false; unsigned int val; if (trig_num != cmd->start_arg) return -EINVAL; /* Disable Ports A & B */ apci1500_port_enable(dev, false); /* Set Port A for selected trigger pattern */ z8536_write(dev, devpriv->pm[pa_trig] & 0xff, Z8536_PA_PM_REG); z8536_write(dev, devpriv->pt[pa_trig] & 0xff, Z8536_PA_PT_REG); z8536_write(dev, devpriv->pp[pa_trig] & 0xff, Z8536_PA_PP_REG); /* Set Port B for selected trigger pattern */ z8536_write(dev, (devpriv->pm[pb_trig] >> 8) & 0xff, Z8536_PB_PM_REG); z8536_write(dev, (devpriv->pt[pb_trig] >> 8) & 0xff, Z8536_PB_PT_REG); z8536_write(dev, (devpriv->pp[pb_trig] >> 8) & 0xff, Z8536_PB_PP_REG); /* Set Port A trigger mode (if enabled) and enable interrupt */ if (devpriv->pm[pa_trig] & 0xff) { pa_mode = pa_trig ? Z8536_PAB_MODE_PMS_AND : Z8536_PAB_MODE_PMS_OR; val = z8536_read(dev, Z8536_PA_MODE_REG); val &= ~Z8536_PAB_MODE_PMS_MASK; val |= (pa_mode | Z8536_PAB_MODE_IMO); z8536_write(dev, val, Z8536_PA_MODE_REG); z8536_write(dev, Z8536_CMD_SET_IE, Z8536_PA_CMDSTAT_REG); valid_trig = true; dev_dbg(dev->class_dev, "Port A configured for %s mode pattern detection\n", pa_trig ? "AND" : "OR"); } /* Set Port B trigger mode (if enabled) and enable interrupt */ if (devpriv->pm[pb_trig] & 0xff00) { pb_mode = pb_trig ? Z8536_PAB_MODE_PMS_AND : Z8536_PAB_MODE_PMS_OR; val = z8536_read(dev, Z8536_PB_MODE_REG); val &= ~Z8536_PAB_MODE_PMS_MASK; val |= (pb_mode | Z8536_PAB_MODE_IMO); z8536_write(dev, val, Z8536_PB_MODE_REG); z8536_write(dev, Z8536_CMD_SET_IE, Z8536_PB_CMDSTAT_REG); valid_trig = true; dev_dbg(dev->class_dev, "Port B configured for %s mode pattern detection\n", pb_trig ? 
"AND" : "OR"); } /* Enable Ports A & B */ apci1500_port_enable(dev, true); if (!valid_trig) { dev_dbg(dev->class_dev, "digital trigger %d is not configured\n", trig_num); return -EINVAL; } /* Authorizes the main interrupt on the board */ z8536_write(dev, Z8536_INT_CTRL_MIE | Z8536_INT_CTRL_DLC, Z8536_INT_CTRL_REG); return 0; } static int apci1500_di_cmd(struct comedi_device *dev, struct comedi_subdevice *s) { s->async->inttrig = apci1500_di_inttrig_start; return 0; } static int apci1500_di_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd) { int err = 0; /* Step 1 : check if triggers are trivially valid */ err |= comedi_check_trigger_src(&cmd->start_src, TRIG_INT); err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_EXT); err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_FOLLOW); err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT); err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_NONE); if (err) return 1; /* Step 2a : make sure trigger sources are unique */ /* Step 2b : and mutually compatible */ /* Step 3: check if arguments are trivially valid */ /* * Internal start source triggers: * * 0 AND mode for Port A (digital inputs 0-7) * AND mode for Port B (digital inputs 8-13 and internal signals) * * 1 OR mode for Port A (digital inputs 0-7) * AND mode for Port B (digital inputs 8-13 and internal signals) * * 2 AND mode for Port A (digital inputs 0-7) * OR mode for Port B (digital inputs 8-13 and internal signals) * * 3 OR mode for Port A (digital inputs 0-7) * OR mode for Port B (digital inputs 8-13 and internal signals) */ err |= comedi_check_trigger_arg_max(&cmd->start_arg, 3); err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0); err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0); err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg, cmd->chanlist_len); err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0); if (err) return 3; /* Step 4: fix up any arguments */ /* Step 5: check channel list if it exists */ return 0; } /* * The pattern-recognition logic must be configured before the digital * input async command is started. * * Digital input channels 0 to 13 can generate interrupts. Channels 14 * and 15 are connected to internal board status/diagnostic signals. 
* * Channel 14 - Voltage error (the external supply is < 5V) * Channel 15 - Short-circuit/overtemperature error * * data[0] : INSN_CONFIG_DIGITAL_TRIG * data[1] : trigger number * 0 = AND mode * 1 = OR mode * data[2] : configuration operation: * COMEDI_DIGITAL_TRIG_DISABLE = no interrupts * COMEDI_DIGITAL_TRIG_ENABLE_EDGES = edge interrupts * COMEDI_DIGITAL_TRIG_ENABLE_LEVELS = level interrupts * data[3] : left-shift for data[4] and data[5] * data[4] : rising-edge/high level channels * data[5] : falling-edge/low level channels */ static int apci1500_di_cfg_trig(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct apci1500_private *devpriv = dev->private; unsigned int trig = data[1]; unsigned int shift = data[3]; unsigned int hi_mask; unsigned int lo_mask; unsigned int chan_mask; unsigned int old_mask; unsigned int pm; unsigned int pt; unsigned int pp; unsigned int invalid_chan; if (trig > 1) { dev_dbg(dev->class_dev, "invalid digital trigger number (0=AND, 1=OR)\n"); return -EINVAL; } if (shift <= 16) { hi_mask = data[4] << shift; lo_mask = data[5] << shift; old_mask = (1U << shift) - 1; invalid_chan = (data[4] | data[5]) >> (16 - shift); } else { hi_mask = 0; lo_mask = 0; old_mask = 0xffff; invalid_chan = data[4] | data[5]; } chan_mask = hi_mask | lo_mask; if (invalid_chan) { dev_dbg(dev->class_dev, "invalid digital trigger channel\n"); return -EINVAL; } pm = devpriv->pm[trig] & old_mask; pt = devpriv->pt[trig] & old_mask; pp = devpriv->pp[trig] & old_mask; switch (data[2]) { case COMEDI_DIGITAL_TRIG_DISABLE: /* clear trigger configuration */ pm = 0; pt = 0; pp = 0; break; case COMEDI_DIGITAL_TRIG_ENABLE_EDGES: pm |= chan_mask; /* enable channels */ pt |= chan_mask; /* enable edge detection */ pp |= hi_mask; /* rising-edge channels */ pp &= ~lo_mask; /* falling-edge channels */ break; case COMEDI_DIGITAL_TRIG_ENABLE_LEVELS: pm |= chan_mask; /* enable channels */ pt &= ~chan_mask; /* enable level detection */ pp |= hi_mask; /* high level channels */ pp &= ~lo_mask; /* low level channels */ break; default: return -EINVAL; } /* * The AND mode trigger can only have one channel (max) enabled * for edge detection. 
*/ if (trig == 0) { int ret = 0; unsigned int src; src = pt & 0xff; if (src) ret |= comedi_check_trigger_is_unique(src); src = (pt >> 8) & 0xff; if (src) ret |= comedi_check_trigger_is_unique(src); if (ret) { dev_dbg(dev->class_dev, "invalid AND trigger configuration\n"); return ret; } } /* save the trigger configuration */ devpriv->pm[trig] = pm; devpriv->pt[trig] = pt; devpriv->pp[trig] = pp; return insn->n; } static int apci1500_di_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { switch (data[0]) { case INSN_CONFIG_DIGITAL_TRIG: return apci1500_di_cfg_trig(dev, s, insn, data); default: return -EINVAL; } } static int apci1500_di_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct apci1500_private *devpriv = dev->private; data[1] = inw(devpriv->addon + APCI1500_DI_REG); return insn->n; } static int apci1500_do_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct apci1500_private *devpriv = dev->private; if (comedi_dio_update_state(s, data)) outw(s->state, devpriv->addon + APCI1500_DO_REG); data[1] = s->state; return insn->n; } static int apci1500_timer_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct apci1500_private *devpriv = dev->private; unsigned int chan = CR_CHAN(insn->chanspec); unsigned int val; switch (data[0]) { case INSN_CONFIG_ARM: val = data[1] & s->maxdata; z8536_write(dev, val & 0xff, Z8536_CT_RELOAD_LSB_REG(chan)); z8536_write(dev, (val >> 8) & 0xff, Z8536_CT_RELOAD_MSB_REG(chan)); apci1500_timer_enable(dev, chan, true); z8536_write(dev, Z8536_CT_CMDSTAT_GCB, Z8536_CT_CMDSTAT_REG(chan)); break; case INSN_CONFIG_DISARM: apci1500_timer_enable(dev, chan, false); break; case INSN_CONFIG_GET_COUNTER_STATUS: data[1] = 0; val = z8536_read(dev, Z8536_CT_CMDSTAT_REG(chan)); if (val & Z8536_CT_STAT_CIP) data[1] |= COMEDI_COUNTER_COUNTING; if (val & Z8536_CT_CMDSTAT_GCB) data[1] |= COMEDI_COUNTER_ARMED; if (val & Z8536_STAT_IP) { data[1] |= COMEDI_COUNTER_TERMINAL_COUNT; apci1500_ack_irq(dev, Z8536_CT_CMDSTAT_REG(chan)); } data[2] = COMEDI_COUNTER_ARMED | COMEDI_COUNTER_COUNTING | COMEDI_COUNTER_TERMINAL_COUNT; break; case INSN_CONFIG_SET_COUNTER_MODE: /* Simulate the 8254 timer modes */ switch (data[1]) { case I8254_MODE0: /* Interrupt on Terminal Count */ val = Z8536_CT_MODE_ECE | Z8536_CT_MODE_DCS_ONESHOT; break; case I8254_MODE1: /* Hardware Retriggerable One-Shot */ val = Z8536_CT_MODE_ETE | Z8536_CT_MODE_DCS_ONESHOT; break; case I8254_MODE2: /* Rate Generator */ val = Z8536_CT_MODE_CSC | Z8536_CT_MODE_DCS_PULSE; break; case I8254_MODE3: /* Square Wave Mode */ val = Z8536_CT_MODE_CSC | Z8536_CT_MODE_DCS_SQRWAVE; break; case I8254_MODE4: /* Software Triggered Strobe */ val = Z8536_CT_MODE_REB | Z8536_CT_MODE_DCS_PULSE; break; case I8254_MODE5: /* Hardware Triggered Strobe (watchdog) */ val = Z8536_CT_MODE_EOE | Z8536_CT_MODE_ETE | Z8536_CT_MODE_REB | Z8536_CT_MODE_DCS_PULSE; break; default: return -EINVAL; } apci1500_timer_enable(dev, chan, false); z8536_write(dev, val, Z8536_CT_MODE_REG(chan)); break; case INSN_CONFIG_SET_CLOCK_SRC: if (data[1] > 2) return -EINVAL; devpriv->clk_src = data[1]; if (devpriv->clk_src == 2) devpriv->clk_src = 3; outw(devpriv->clk_src, devpriv->addon + APCI1500_CLK_SEL_REG); break; case INSN_CONFIG_GET_CLOCK_SRC: switch (devpriv->clk_src) { case 0: data[1] = 0; /* 111.86 kHz / 2 */ 
data[2] = 17879; /* 17879 ns (approx) */ break; case 1: data[1] = 1; /* 3.49 kHz / 2 */ data[2] = 573066; /* 573066 ns (approx) */ break; case 3: data[1] = 2; /* 1.747 kHz / 2 */ data[2] = 1164822; /* 1164822 ns (approx) */ break; default: return -EINVAL; } break; case INSN_CONFIG_SET_GATE_SRC: if (chan == 0) return -EINVAL; val = z8536_read(dev, Z8536_CT_MODE_REG(chan)); val &= Z8536_CT_MODE_EGE; if (data[1] == 1) val |= Z8536_CT_MODE_EGE; else if (data[1] > 1) return -EINVAL; z8536_write(dev, val, Z8536_CT_MODE_REG(chan)); break; case INSN_CONFIG_GET_GATE_SRC: if (chan == 0) return -EINVAL; break; default: return -EINVAL; } return insn->n; } static int apci1500_timer_insn_write(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned int chan = CR_CHAN(insn->chanspec); unsigned int cmd; cmd = z8536_read(dev, Z8536_CT_CMDSTAT_REG(chan)); cmd &= Z8536_CT_CMDSTAT_GCB; /* preserve gate */ cmd |= Z8536_CT_CMD_TCB; /* set trigger */ /* software trigger a timer, it only makes sense to do one write */ if (insn->n) z8536_write(dev, cmd, Z8536_CT_CMDSTAT_REG(chan)); return insn->n; } static int apci1500_timer_insn_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned int chan = CR_CHAN(insn->chanspec); unsigned int cmd; unsigned int val; int i; cmd = z8536_read(dev, Z8536_CT_CMDSTAT_REG(chan)); cmd &= Z8536_CT_CMDSTAT_GCB; /* preserve gate */ cmd |= Z8536_CT_CMD_RCC; /* set RCC */ for (i = 0; i < insn->n; i++) { z8536_write(dev, cmd, Z8536_CT_CMDSTAT_REG(chan)); val = z8536_read(dev, Z8536_CT_VAL_MSB_REG(chan)) << 8; val |= z8536_read(dev, Z8536_CT_VAL_LSB_REG(chan)); data[i] = val; } return insn->n; } static int apci1500_auto_attach(struct comedi_device *dev, unsigned long context) { struct pci_dev *pcidev = comedi_to_pci_dev(dev); struct apci1500_private *devpriv; struct comedi_subdevice *s; int ret; devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv)); if (!devpriv) return -ENOMEM; ret = comedi_pci_enable(dev); if (ret) return ret; dev->iobase = pci_resource_start(pcidev, 1); devpriv->amcc = pci_resource_start(pcidev, 0); devpriv->addon = pci_resource_start(pcidev, 2); z8536_reset(dev); if (pcidev->irq > 0) { ret = request_irq(pcidev->irq, apci1500_interrupt, IRQF_SHARED, dev->board_name, dev); if (ret == 0) dev->irq = pcidev->irq; } ret = comedi_alloc_subdevices(dev, 3); if (ret) return ret; /* Digital Input subdevice */ s = &dev->subdevices[0]; s->type = COMEDI_SUBD_DI; s->subdev_flags = SDF_READABLE; s->n_chan = 16; s->maxdata = 1; s->range_table = &range_digital; s->insn_bits = apci1500_di_insn_bits; if (dev->irq) { dev->read_subdev = s; s->subdev_flags |= SDF_CMD_READ; s->len_chanlist = 1; s->insn_config = apci1500_di_insn_config; s->do_cmdtest = apci1500_di_cmdtest; s->do_cmd = apci1500_di_cmd; s->cancel = apci1500_di_cancel; } /* Digital Output subdevice */ s = &dev->subdevices[1]; s->type = COMEDI_SUBD_DO; s->subdev_flags = SDF_WRITABLE; s->n_chan = 16; s->maxdata = 1; s->range_table = &range_digital; s->insn_bits = apci1500_do_insn_bits; /* reset all the digital outputs */ outw(0x0, devpriv->addon + APCI1500_DO_REG); /* Counter/Timer(Watchdog) subdevice */ s = &dev->subdevices[2]; s->type = COMEDI_SUBD_TIMER; s->subdev_flags = SDF_WRITABLE | SDF_READABLE; s->n_chan = 3; s->maxdata = 0xffff; s->range_table = &range_unknown; s->insn_config = apci1500_timer_insn_config; s->insn_write = apci1500_timer_insn_write; s->insn_read = apci1500_timer_insn_read; /* Enable the PCI 
interrupt */ if (dev->irq) { outl(0x2000 | INTCSR_INBOX_FULL_INT, devpriv->amcc + AMCC_OP_REG_INTCSR); inl(devpriv->amcc + AMCC_OP_REG_IMB1); inl(devpriv->amcc + AMCC_OP_REG_INTCSR); outl(INTCSR_INBOX_INTR_STATUS | 0x2000 | INTCSR_INBOX_FULL_INT, devpriv->amcc + AMCC_OP_REG_INTCSR); } return 0; } static void apci1500_detach(struct comedi_device *dev) { struct apci1500_private *devpriv = dev->private; if (devpriv->amcc) outl(0x0, devpriv->amcc + AMCC_OP_REG_INTCSR); comedi_pci_detach(dev); } static struct comedi_driver apci1500_driver = { .driver_name = "addi_apci_1500", .module = THIS_MODULE, .auto_attach = apci1500_auto_attach, .detach = apci1500_detach, }; static int apci1500_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) { return comedi_pci_auto_config(dev, &apci1500_driver, id->driver_data); } static const struct pci_device_id apci1500_pci_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMCC, 0x80fc) }, { 0 } }; MODULE_DEVICE_TABLE(pci, apci1500_pci_table); static struct pci_driver apci1500_pci_driver = { .name = "addi_apci_1500", .id_table = apci1500_pci_table, .probe = apci1500_pci_probe, .remove = comedi_pci_auto_unconfig, }; module_comedi_pci_driver(apci1500_driver, apci1500_pci_driver); MODULE_AUTHOR("Comedi https://www.comedi.org"); MODULE_DESCRIPTION("ADDI-DATA APCI-1500, 16 channel DI / 16 channel DO boards"); MODULE_LICENSE("GPL");
def convert_annotations(root_path, gt_name, lmdb_name): assert isinstance(root_path, str) assert isinstance(gt_name, str) assert isinstance(lmdb_name, str) start_time = time.time() gt = loadmat(gt_name) img_num = len(gt['imnames'][0]) env = lmdb.open(lmdb_name, map_size=int(1e9 * 40)) with env.begin(write=True) as txn: for img_id in range(img_num): if img_id % 1000 == 0 and img_id > 0: total_time_sec = time.time() - start_time avg_time_sec = total_time_sec / img_id eta_mins = (avg_time_sec * (img_num - img_id)) / 60 print(f'\ncurrent_img/total_imgs {img_id}/{img_num} | ' f'eta: {eta_mins:.3f} mins') img_file = osp.join(root_path, 'imgs', gt['imnames'][0][img_id][0]) img = mmcv.imread(img_file, 'unchanged') height, width = img.shape[0:2] img_json = {} img_json['file_name'] = gt['imnames'][0][img_id][0] img_json['height'] = height img_json['width'] = width img_json['annotations'] = [] wordBB = gt['wordBB'][0][img_id] charBB = gt['charBB'][0][img_id] txt = gt['txt'][0][img_id] poly_list, _, poly_boundary_list, _, _, _ = match_bbox_char_str( wordBB, charBB, txt) for poly_inx in range(len(poly_list)): polygon = poly_list[poly_inx] min_x, min_y, max_x, max_y = polygon.bounds bbox = [min_x, min_y, max_x - min_x, max_y - min_y] anno_info = dict() anno_info['iscrowd'] = 0 anno_info['category_id'] = 1 anno_info['bbox'] = bbox anno_info['segmentation'] = [ poly_boundary_list[poly_inx].flatten().tolist() ] img_json['annotations'].append(anno_info) string = json.dumps(img_json) txn.put(str(img_id).encode('utf8'), string.encode('utf8')) key = 'total_number'.encode('utf8') value = str(img_num).encode('utf8') txn.put(key, value)
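The function above only writes the LMDB. For completeness, here is a minimal read-back sketch; it assumes nothing beyond the key layout used above (per-image JSON stored under str(img_id) and the image count under 'total_number'), and the function name read_annotation is introduced here purely for illustration.

import json

import lmdb


def read_annotation(lmdb_name, img_id):
    """Return the annotation dict that convert_annotations wrote for one image."""
    env = lmdb.open(lmdb_name, readonly=True, lock=False)
    with env.begin(write=False) as txn:
        # The writer stores the number of images under this fixed key.
        total = int(txn.get('total_number'.encode('utf8')).decode('utf8'))
        assert 0 <= img_id < total, 'img_id out of range'
        # Per-image records are keyed by the stringified image index.
        raw = txn.get(str(img_id).encode('utf8'))
    env.close()
    return json.loads(raw.decode('utf8'))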
def minify_files(): current_dir = os.getcwd() top_dir = os.path.dirname( os.path.dirname( current_dir ) ) js_dir = os.path.join( top_dir, 'rawsalad', 'site_media', 'js' ) css_dir = os.path.join( top_dir, 'rawsalad', 'site_media', 'css' ) html_dir = os.path.join( top_dir, 'rawsalad', 'databrowser', 'templates' ) min_js_path = os.path.join( js_dir, 'min.js' ) min_css_path = os.path.join( css_dir, 'min.css' ) html_file = os.path.join( html_dir, 'app.html' ) js_files = get_js_minify_file_names( html_file ) css_files = os.listdir( css_dir ) js_paths = [ os.path.join( js_dir, name ) for name in js_files ] css_paths = [ os.path.join( css_dir, name ) for name in css_files ] minify( 'min.js', js_paths, '.js', min_js_path ) update_html( html_file, js_files )
use exitcode; use serde::{Deserialize, Serialize}; use std::collections::HashMap; use std::process; use structopt::StructOpt; #[cfg(test)] mod tests { use super::*; #[test] fn verify_args_length() { let search_terms = vec!["add".to_string(), "a".to_string(), "commit".to_string()]; let cfg = Config::new(Cli { verbose: false, search_terms, }) .unwrap(); assert_eq!(2, cfg.search.len()) } #[test] fn verify_lowercase() { let search_terms = vec!["AdD".to_string(), "ComMit".to_string()]; let cfg = Config::new(Cli { verbose: false, search_terms, }) .unwrap(); assert_eq!(vec!["add".to_string(), "commit".to_string(),], cfg.search) } #[test] fn verify_stripping_articles() { let search_terms = vec![ "add".to_string(), "a".to_string(), "commit".to_string(), "to".to_string(), "the".to_string(), "repo".to_string(), ]; let cfg = Config::new(Cli { verbose: false, search_terms, }) .unwrap(); assert_eq!( vec![ "add".to_string(), "commit".to_string(), "to".to_string(), "repo".to_string() ], cfg.search ) } #[test] fn first_pass_search_match() { let search_terms = vec!["add".to_string(), "a".to_string(), "commit".to_string()]; let cfg = Config::new(Cli { verbose: false, search_terms, }) .unwrap(); let result = first_pass(&cfg); assert_eq!(result.is_some(), true) } #[test] fn first_pass_search_delete() { let search_terms = vec!["delete".to_string(), "a".to_string(), "branch".to_string()]; let cfg = Config::new(Cli { verbose: false, search_terms, }) .unwrap(); let result = first_pass(&cfg); assert_eq!(result.is_some(), true) } #[test] fn first_pass_search_no_match() { let search_terms = vec!["weird".to_string(), "a".to_string(), "commit".to_string()]; let cfg = Config::new(Cli { verbose: false, search_terms, }) .unwrap(); let result = first_pass(&cfg); assert_eq!(result.is_some(), false) } #[test] fn second_pass_test() { let search_terms = vec![ "add".to_string(), "new".to_string(), "branch".to_string(), "remain".to_string(), "current".to_string(), ]; let cfg = Config::new(Cli { verbose: false, search_terms, }) .unwrap(); if let Some(fp_result) = first_pass(&cfg) { second_pass(&cfg, fp_result); } } #[test] fn second_pass_delete_test() { let search_terms = vec!["delete".to_string(), "a".to_string(), "branch".to_string()]; let cfg = Config::new(Cli { verbose: false, search_terms, }) .unwrap(); if let Some(fp_result) = first_pass(&cfg) { println!("{:?}", fp_result); second_pass(&cfg, fp_result); } } #[test] fn combine_test() { let search_terms = vec!["add".to_string(), "new".to_string(), "branch".to_string()]; let cfg = Config::new(Cli { verbose: false, search_terms, }) .unwrap(); println!("\nAdd Test"); let add_terms = combined_options(&cfg, &cfg.search[0]); println!("{:?}", add_terms); println!("\nShow Test"); let show_terms = combined_options(&cfg, &String::from("show")); println!("{:?}", show_terms); println!("\nDelete Test"); let show_terms = combined_options(&cfg, &String::from("delete")); println!("{:?}", show_terms); } } #[derive(Debug, StructOpt)] /// Welcome to the Git Explore CLI, /// where you can search for git commands with natural language /// /// EXAMPLE: /// /// $ gitexplore compare two commits /// /// The closest matching command that can compare two commits is /// /// "git diff <sha1> <sha2> | less" pub struct Cli { /// Activate verbose mode #[structopt(short, long)] pub verbose: bool, /// The action or command you're looking for pub search_terms: Vec<String>, } #[derive(Debug, Clone)] struct SearchData { score: i8, pattern: String, option: OptionValue, } #[derive(Serialize, Deserialize, Debug)] 
pub struct Data { pub primary: Vec<OptionValue>, pub secondary: HashMap<String, Vec<OptionValue>>, // dynamic keys pub tertiary: HashMap<String, Vec<OptionValue>>, // dynamic keys } #[derive(Serialize, Deserialize, Debug, Clone)] #[serde(untagged)] /// Use an enum to represent the kinds of option values since it's optional for usage and nb fields to be present in the data pub enum OptionValue { TierThree { label: String, value: String, usage: String, nb: String, }, TierTwo { label: String, value: String, usage: String, }, TierOne { label: String, value: String, }, } // Impl block for getter methods impl OptionValue { fn get_label(&self) -> &String { match self { OptionValue::TierOne { label, .. } | OptionValue::TierTwo { label, .. } | OptionValue::TierThree { label, .. } => &label, } } fn get_value(&self) -> &String { match self { OptionValue::TierOne { value, .. } | OptionValue::TierTwo { value, .. } | OptionValue::TierThree { value, .. } => &value, } } fn get_usage(&self) -> &String { match self { OptionValue::TierTwo { usage, .. } | OptionValue::TierThree { usage, .. } => &usage, OptionValue::TierOne { value, .. } => &value, } } } #[derive(Debug)] pub struct Config { pub search: Vec<String>, pub data: Data, } impl Config { pub fn new(cli: Cli) -> Result<Config, &'static str> { // Data Construction let options_str = include_str!("options.json"); let data: Data = serde_json::from_str(options_str).unwrap_or_else(|err| { // If internal data is corrupted there's no point trying to continue execution or passing the error up the chain. // Write the error msg to stderr eprintln!("Internal Data corrupted: {}\nExiting...", err); // Use the SOFTWARE exit code which indicates that an internal software error has been detected process::exit(exitcode::SOFTWARE); }); if cli.search_terms.len() < 1 { return Err("No search terms used"); } // Transform all search terms into lowercase // Strip search term vector of articles - a, an, the let new_search_terms: Vec<String> = cli .search_terms .iter() .clone() .map(|x| x.to_lowercase()) .filter(|x| not_article(x)) .collect(); // We don't worry about the verbose field in the Cli struct Ok(Config { search: new_search_terms, data, }) } } fn not_article(x: &String) -> bool { *x != "a" && *x != "an" && *x != "the" } pub fn run(cfg: Config) -> Result<String, &'static str> { let fp_res = first_pass(&cfg); match fp_res { Some(fp) => { let search_data = second_pass(&cfg, &fp); match search_data.first() { Some(top_search) => { // Check confidence in score let top_score = top_search.score; // if top_score < top_search.pattern.split(' ').count() as i8 { // println!("Low confidence in match"); // } // If there is some top value // Check if there are more values with the same score let num_top_values: Vec<_> = search_data .iter() .filter(|x| x.score == top_score) .map(|x| &x.option) .collect(); if num_top_values.len() > 1 { println!("\nLooks there is more than one command that matches what you searched for!"); println!("\nEnumerating partially matching commands"); for (i, top_val) in num_top_values.iter().enumerate() { println!("\n\t{}. {:?}", i + 1, *top_val.get_usage()); match &top_val { OptionValue::TierThree { nb, .. } => println!("\t{}\n", nb), _ => (), } } } else { println!( "\nMatching git cmd for \"{}\" found! 🎉 - \n\n\t{:?}", cfg.search.join(" "), top_search.option.get_usage() ); match &top_search.option { OptionValue::TierThree { nb, .. 
} => println!("\t{}\n", nb), _ => (), } } } None => return Err("No matching commands found"), } } None => return Err("Invalid search term"), } Ok(String::from("Hello")) } fn first_pass<'a>(cfg: &'a Config) -> Option<&'a OptionValue> { let term = &cfg.search[0]; let options = &cfg.data.primary; for option in options.iter() { let label = option.get_label(); if label.contains(term) { return Some(option); } } None } fn second_pass<'a>(cfg: &'a Config, fp_res: &'a OptionValue) -> Vec<SearchData> { // Use value since that is the key for secondary and tertiary options let fp_value = fp_res.get_value(); let possible_options = combined_options(&cfg, &fp_value); let cli_terms = &cfg.search; // This data structure will allow us to weight possible options by a score let mut search_data: Vec<SearchData> = Vec::new(); // Iterate through the possible combined options for (opt_str, opt_val) in possible_options.iter() { let opt_val_clone = (**opt_val).clone(); let mut current_search = SearchData { score: 0, pattern: (*opt_str).clone(), option: opt_val_clone, }; // For each search term, check if it's present in the current option // If it is, incrememnt the score for that option by 1 for term in cli_terms.iter() { if opt_str.contains(term) { current_search.score += 1; } } search_data.push(current_search); } // Sort the collated data in descending order of score search_data.sort_by(|a, b| b.score.cmp(&a.score)); // println!("{:?}", search_data); search_data } fn combined_options<'a>(cfg: &'a Config, term: &String) -> Vec<(String, &'a OptionValue)> { let mut combined_search_terms: Vec<(String, &OptionValue)> = Vec::new(); // The search term exists in the secondary options data if let Some(secondary) = &cfg.data.secondary.get(term) { for s in secondary.iter() { // Match on possible enum variants match s { // This means there is a tertiary option OptionValue::TierOne { label, value } => { match &cfg.data.tertiary.get(value) { Some(tertiary_data) => { // Loop through the tertiary items for the key // and append the label to the corresponding secondary item label // Add this concatenated label to the combined_search_terms vec for t in tertiary_data.iter() { let t_label = t.get_label(); let combined_label = [term.clone(), label.clone(), t_label.clone()].join(" "); combined_search_terms.push((combined_label, t)); } } None => (), } } _ => { let s_label = s.get_label(); combined_search_terms.push(([term.clone(), s_label.clone()].join(" "), s)); } } } } combined_search_terms }
/** * This UDF returns array of string that has been split from input string. * * @param input input string that want to be split * @param delimiter separator or delimiter to split the input string * @return array of string that has been split * @author jesry.pandawa * @team DE */ public String[] eval(String input, String delimiter) { if (input == null || delimiter == null) { LOGGER.info("Not able to split input string, either input string or delimiter is null"); return new String[0]; } if (delimiter.equals("")) { delimiter = " "; } return input.split(delimiter); }
package wannabit.io.cosmostaion.task.FetchTask; import java.util.ArrayList; import retrofit2.Response; import wannabit.io.cosmostaion.base.BaseApplication; import wannabit.io.cosmostaion.base.BaseChain; import wannabit.io.cosmostaion.base.BaseConstant; import wannabit.io.cosmostaion.model.type.Validator; import wannabit.io.cosmostaion.network.ApiClient; import wannabit.io.cosmostaion.network.res.ResLcdValidators; import wannabit.io.cosmostaion.task.CommonTask; import wannabit.io.cosmostaion.task.TaskListener; import wannabit.io.cosmostaion.task.TaskResult; import wannabit.io.cosmostaion.utils.WLog; import static wannabit.io.cosmostaion.base.BaseChain.AKASH_MAIN; import static wannabit.io.cosmostaion.base.BaseChain.BAND_MAIN; import static wannabit.io.cosmostaion.base.BaseChain.CERTIK_MAIN; import static wannabit.io.cosmostaion.base.BaseChain.CERTIK_TEST; import static wannabit.io.cosmostaion.base.BaseChain.COSMOS_MAIN; import static wannabit.io.cosmostaion.base.BaseChain.IOV_MAIN; import static wannabit.io.cosmostaion.base.BaseChain.IOV_TEST; import static wannabit.io.cosmostaion.base.BaseChain.IRIS_MAIN; import static wannabit.io.cosmostaion.base.BaseChain.KAVA_MAIN; import static wannabit.io.cosmostaion.base.BaseChain.KAVA_TEST; import static wannabit.io.cosmostaion.base.BaseChain.OK_TEST; import static wannabit.io.cosmostaion.base.BaseChain.SECRET_MAIN; public class AllValidatorInfoTask extends CommonTask { private BaseChain mChain; public AllValidatorInfoTask(BaseApplication app, TaskListener listener, BaseChain chain) { super(app, listener); this.mResult.taskType = BaseConstant.TASK_FETCH_ALL_VALIDATOR; this.mChain = chain; } @Override protected TaskResult doInBackground(String... strings) { try { if (mChain.equals(COSMOS_MAIN)) { Response<ResLcdValidators> response = ApiClient.getCosmosChain(mApp).getValidatorDetailList().execute(); if (!response.isSuccessful()) { mResult.isSuccess = false; mResult.errorCode = BaseConstant.ERROR_CODE_NETWORK; return mResult; } if (response.body() != null && response.body().result != null && response.body().result.size() > 0) { mResult.resultData = response.body().result; mResult.isSuccess = true; } } else if (mChain.equals(IRIS_MAIN)) { int page = 0; boolean needMore = true; ArrayList<Validator> allResult = new ArrayList<>(); do { page ++; Response<ArrayList<Validator>> response = ApiClient.getIrisChain(mApp).getValidatorList(""+page, "100").execute(); if (!response.isSuccessful()) { mResult.isSuccess = false; mResult.errorCode = BaseConstant.ERROR_CODE_NETWORK; needMore = false; } if (response.body() != null && response.body().size() > 0) { if(response.body().size() == 100) { allResult.addAll(response.body()); } else { allResult.addAll(response.body()); mResult.isSuccess = true; needMore = false; } } } while (needMore); mResult.resultData = allResult; } else if (mChain.equals(KAVA_MAIN)) { Response<ResLcdValidators> response = ApiClient.getKavaChain(mApp).getValidatorDetailList().execute(); if (!response.isSuccessful()) { mResult.isSuccess = false; mResult.errorCode = BaseConstant.ERROR_CODE_NETWORK; return mResult; } if (response.body() != null && response.body().result != null && response.body().result.size() > 0) { mResult.resultData = response.body().result; mResult.isSuccess = true; } } else if (mChain.equals(KAVA_TEST)) { Response<ResLcdValidators> response = ApiClient.getKavaTestChain(mApp).getValidatorDetailList().execute(); if (!response.isSuccessful()) { mResult.isSuccess = false; mResult.errorCode = BaseConstant.ERROR_CODE_NETWORK; return 
mResult; } if (response.body() != null && response.body().result != null && response.body().result.size() > 0) { mResult.resultData = response.body().result; mResult.isSuccess = true; } } else if (mChain.equals(BAND_MAIN)) { Response<ResLcdValidators> response = ApiClient.getBandChain(mApp).getValidatorDetailList().execute(); if (!response.isSuccessful()) { mResult.isSuccess = false; mResult.errorCode = BaseConstant.ERROR_CODE_NETWORK; return mResult; } if (response.body() != null && response.body().result != null && response.body().result.size() > 0) { mResult.resultData = response.body().result; mResult.isSuccess = true; } } else if (mChain.equals(IOV_MAIN)) { Response<ResLcdValidators> response = ApiClient.getIovChain(mApp).getValidatorDetailList().execute(); if (!response.isSuccessful()) { mResult.isSuccess = false; mResult.errorCode = BaseConstant.ERROR_CODE_NETWORK; return mResult; } if (response.body() != null && response.body().result != null && response.body().result.size() > 0) { mResult.resultData = response.body().result; mResult.isSuccess = true; } } else if (mChain.equals(IOV_TEST)) { Response<ResLcdValidators> response = ApiClient.getIovTestChain(mApp).getValidatorDetailList().execute(); if (!response.isSuccessful()) { mResult.isSuccess = false; mResult.errorCode = BaseConstant.ERROR_CODE_NETWORK; return mResult; } if (response.body() != null && response.body().result != null && response.body().result.size() > 0) { mResult.resultData = response.body().result; mResult.isSuccess = true; } } else if (mChain.equals(OK_TEST)) { Response<ArrayList<Validator>> response = ApiClient.getOkTestChain(mApp).getValidatorDetailList().execute(); if (!response.isSuccessful()) { mResult.isSuccess = false; mResult.errorCode = BaseConstant.ERROR_CODE_NETWORK; return mResult; } if (response.body() != null && response.body() != null && response.body().size() > 0) { mResult.resultData = response.body(); mResult.isSuccess = true; } } else if (mChain.equals(CERTIK_MAIN)) { Response<ResLcdValidators> response = ApiClient.getCertikChain(mApp).getValidatorDetailList().execute(); if (!response.isSuccessful()) { mResult.isSuccess = false; mResult.errorCode = BaseConstant.ERROR_CODE_NETWORK; return mResult; } if (response.body() != null && response.body().result != null && response.body().result.size() > 0) { mResult.resultData = response.body().result; mResult.isSuccess = true; } } else if (mChain.equals(CERTIK_TEST)) { Response<ResLcdValidators> response = ApiClient.getCertikTestChain(mApp).getValidatorDetailList().execute(); if (!response.isSuccessful()) { mResult.isSuccess = false; mResult.errorCode = BaseConstant.ERROR_CODE_NETWORK; return mResult; } if (response.body() != null && response.body().result != null && response.body().result.size() > 0) { mResult.resultData = response.body().result; mResult.isSuccess = true; } } else if (mChain.equals(SECRET_MAIN)) { Response<ResLcdValidators> response = ApiClient.getSecretChain(mApp).getValidatorDetailList().execute(); if (!response.isSuccessful()) { mResult.isSuccess = false; mResult.errorCode = BaseConstant.ERROR_CODE_NETWORK; return mResult; } if (response.body() != null && response.body().result != null && response.body().result.size() > 0) { mResult.resultData = response.body().result; mResult.isSuccess = true; } } else if (mChain.equals(AKASH_MAIN)) { Response<ResLcdValidators> response = ApiClient.getAkashChain(mApp).getValidatorDetailList().execute(); if (!response.isSuccessful()) { mResult.isSuccess = false; mResult.errorCode = 
BaseConstant.ERROR_CODE_NETWORK; return mResult; } if (response.body() != null && response.body().result != null && response.body().result.size() > 0) { mResult.resultData = response.body().result; mResult.isSuccess = true; } } } catch (Exception e) { WLog.w("AllValidatorInfo Error " + e.getMessage()); } return mResult; } }
<gh_stars>1-10 package grproxy import ( "context" "fmt" "net" "strings" "testing" "time" "github.com/yulrizka/grproxy/testserver" "google.golang.org/grpc" "google.golang.org/grpc/reflection" ) func TestCall(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) defer cancel() target, err := newServer() if err != nil { t.Fatal(err) } if err := target.Start(context.Background()); err != nil { t.Fatal(err) } // start the proxy l, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { if l, err = net.Listen("tcp6", "[::1]:0"); err != nil { t.Fatal(err) } } interceptor := NewInterceptor(target.listener.Addr().String()) go func() { if err := interceptor.Serve(ctx, l); err != nil { t.Logf("interceptor server: %v", err) } }() // test calling unary function addr := l.Addr().String() //addr = target.listener.Addr().String() conn, err := grpc.Dial(addr, grpc.WithInsecure(), grpc.WithBlock()) if err != nil { t.Fatal(err) } client := testserver.NewSampleClient(conn) t.Run("unary", func(t *testing.T) { req := testserver.SimpleRequest{Attr1: "attr1"} resp, err := client.Simple(ctx, &req) if err != nil { t.Fatalf("simple: %v", err) } if got, want := resp.Attr1, "response "+req.Attr1; got != want { t.Fatalf("attr1 got %q want %q", got, want) } }) t.Run("stream", func(t *testing.T) { stream, err := client.BidiStream(ctx) if err != nil { t.Fatal(err) } for i := 0; i < 2; i++ { req := testserver.SimpleRequest{Attr1: fmt.Sprintf("attr1 %d", i)} err := stream.Send(&req) if err != nil { t.Fatal(err) } resp, err := stream.Recv() if err != nil { t.Fatal(err) } if got, want := resp.Attr1, "response "+req.Attr1; got != want { t.Errorf("got %q want %q", got, want) } } if err := stream.CloseSend(); err != nil { t.Fatal(err) } }) t.Run("client stream", func(t *testing.T) { stream, err := client.ClientStream(ctx) if err != nil { t.Fatal(err) } for i := 0; i < 2; i++ { req := testserver.SimpleRequest{Attr1: fmt.Sprintf("attr1 %d", i)} err := stream.Send(&req) if err != nil { t.Fatal(err) } } if err := stream.CloseSend(); err != nil { t.Fatal(err) } var resp testserver.SimpleResponse if err = stream.RecvMsg(&resp); err != nil { t.Fatal(err) } got, want := resp.Attr1, "0 received: attr1 0;1 received: attr1 1;" if got != want { t.Fatalf("got %v want %v", got, want) } }) t.Run("server stream", func(t *testing.T) { stream, err := client.ServerStream(ctx, &testserver.SimpleRequest{Attr1: fmt.Sprintf("attr1")}) if err != nil { t.Fatal(err) } // receive multiple message var s strings.Builder for i := 0; i < 2; i++ { resp, err := stream.Recv() if err != nil { t.Fatal(err) } s.WriteString(resp.Attr1) s.WriteString(";") } if got, want := s.String(), "received attr1 0;received attr1 1;"; got != want { t.Fatalf("got %s want %s", got, want) } if err := stream.CloseSend(); err != nil { t.Fatal(err) } }) } type server struct { grpc *grpc.Server listener net.Listener } func newServer() (*server, error) { s := new(server) l, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { if l, err = net.Listen("tcp6", "[::1]:0"); err != nil { return nil, fmt.Errorf("httptest: failed to listen on a port: %v", err) } } s.listener = l s.grpc = grpc.NewServer() testserver.RegisterSampleService(s.grpc, testserver.NewHandler()) reflection.Register(s.grpc) return s, nil } func (s *server) Start(ctx context.Context) error { go func() { if err := s.grpc.Serve(s.listener); err != nil { panic(fmt.Sprintf("grpc server: %v", err)) } }() go func() { <-ctx.Done() s.grpc.Stop() }() return nil }
def _prep_newdata(self, newdata): newdata = newdata.reshape(-1, self.n_games, self.n_features) newtensor = tf.constant(newdata) return newtensor
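For clarity, a standalone sketch of the reshape that _prep_newdata performs, using made-up sizes (n_games=3, n_features=4) that are not taken from the surrounding class:

import numpy as np
import tensorflow as tf

# Hypothetical sizes, for illustration only.
n_games, n_features = 3, 4
flat = np.arange(2 * n_games * n_features, dtype=np.float32)

# Same operation as _prep_newdata: infer the leading (batch) dimension with -1,
# then wrap the reshaped array in a constant tensor.
tensor = tf.constant(flat.reshape(-1, n_games, n_features))
print(tensor.shape)  # (2, 3, 4)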
A Content Analysis of Scholarship on Consensual Nonmonogamies: Methodological Roadmaps, Current Themes, and Directions for Future Research This study provides a content analysis of peer-reviewed journal articles about consensual nonmonogamy (CNM) from a social scientific lens published from 1926 through 2016, excluding articles specific to polygamy or other faith-based relational practices. The content analysis yielded 116 articles, with most of the articles being nonempirical research (n = 74) rather than empirical studies (n = 42). Although the number of published articles about CNM has increased significantly in recent decades (n = 26 from 1926 to 2000 compared with n = 90 from 2001 to 2016), the topics discussed in CNM literature were narrow in scope and focused on (a) relationship styles, (b) CNM stigma, and/or (c) LGBTQ issues. Content analysis data showed that the vast majority of articles were published in journals about sexuality, suggesting that CNM remains an underexamined topic in psychological science. Additionally, only a handful of the total articles centered on topics related to family concerns (n = 5) or training and counseling (n = 2). Findings from this content analysis suggest that individuals and families who practice CNM are an underserved and understudied group that would benefit from advancements in psychological scholarship specific to their experiences.
/** * Contains integration tests (interaction with the Model) and unit tests for LessonAddCommand. */ public class LessonAddCommandTest { private Model model = new ModelManager(getTypicalAddressBook(), new UserPrefs()); @Test public void constructor_nullLesson_throwsNullPointerException() { assertThrows(NullPointerException.class, () -> prepareLessonAddCommand(INDEX_FIRST_PERSON, null)); } @Test public void execute_validPersonValidLesson_success() { Lesson sampleLesson = new LessonBuilder().build(); Person editedPerson = new PersonBuilder(model.getFilteredPersonList() .get(INDEX_FIRST_PERSON.getZeroBased())) .withLessons(sampleLesson).build(); LessonAddCommand lessonAddCommand = prepareLessonAddCommand(INDEX_FIRST_PERSON, sampleLesson); String expectedMessage = String.format(LessonAddCommand.MESSAGE_ADD_LESSON_SUCCESS, editedPerson.getName(), sampleLesson); Model expectedModel = new ModelManager(new AddressBook(model.getAddressBook()), new UserPrefs()); expectedModel.setPerson(model.getFilteredPersonList().get(INDEX_FIRST_PERSON.getZeroBased()), editedPerson); assertCommandSuccess(lessonAddCommand, model, expectedMessage, expectedModel); } @Test public void execute_clashingLessonUnfilteredList_failure() { Lesson lesson = new LessonBuilder().build(); Person firstPerson = model.getFilteredPersonList().get(INDEX_FIRST_PERSON.getZeroBased()); Person person = new PersonBuilder(firstPerson).withLessons(lesson).build(); model.setPerson(firstPerson, person); // Add a different lesson on the same time slot Lesson clashingLesson = new LessonBuilder().withHomeworkSet("Test").buildRecurring(); LessonAddCommand lessonAddCommand = prepareLessonAddCommand(INDEX_FIRST_PERSON, clashingLesson); assertCommandFailure(lessonAddCommand, model, LessonAddCommand.MESSAGE_CLASHING_LESSON); } @Test public void execute_clashingLessonFilteredList_failure() { Lesson lesson = new LessonBuilder().build(); Person secondPerson = model.getFilteredPersonList().get(INDEX_SECOND_PERSON.getZeroBased()); Person person = new PersonBuilder(secondPerson).withLessons(lesson).build(); model.setPerson(secondPerson, person); showPersonAtIndex(model, INDEX_FIRST_PERSON); /* Add a different lesson to the only person in the filtered list on a clashing time slot as the second person in the unfiltered list. 
*/ Lesson clashingLesson = new LessonBuilder().withHomeworkSet("Test").buildRecurring(); LessonAddCommand lessonAddCommand = prepareLessonAddCommand(INDEX_FIRST_PERSON, clashingLesson); assertCommandFailure(lessonAddCommand, model, LessonAddCommand.MESSAGE_CLASHING_LESSON); } @Test public void execute_invalidPersonIndexUnfilteredList_failure() { Index outOfBoundIndex = Index.fromOneBased(model.getFilteredPersonList().size() + 1); Lesson lesson = new LessonBuilder().buildRecurring(); LessonAddCommand lessonAddCommand = prepareLessonAddCommand(outOfBoundIndex, lesson); assertCommandFailure(lessonAddCommand, model, Messages.MESSAGE_INVALID_STUDENT_DISPLAYED_INDEX); } /** * Edit filtered list where index is larger than size of filtered list, * but smaller than size of address book */ @Test public void execute_invalidPersonIndexFilteredList_failure() { // filter list to show only the first person showPersonAtIndex(model, INDEX_FIRST_PERSON); Index outOfBoundIndex = INDEX_SECOND_PERSON; // ensures that outOfBoundIndex is still in bounds of address book list assertTrue(outOfBoundIndex.getZeroBased() < model.getAddressBook().getPersonList().size()); LessonAddCommand lessonAddCommand = prepareLessonAddCommand(outOfBoundIndex, new LessonBuilder().build()); assertCommandFailure(lessonAddCommand, model, Messages.MESSAGE_INVALID_STUDENT_DISPLAYED_INDEX); } @Test public void equals() { Lesson sampleLesson = new LessonBuilder().build(); LessonAddCommand addSampleLessonCommand = prepareLessonAddCommand(INDEX_FIRST_PERSON, sampleLesson); LessonAddCommand addSampleLessonCommand2 = prepareLessonAddCommand(INDEX_SECOND_PERSON, sampleLesson); // same object -> returns true assertTrue(addSampleLessonCommand.equals(addSampleLessonCommand)); // same values -> returns true LessonAddCommand addSampleLessonCommandCopy = prepareLessonAddCommand(INDEX_FIRST_PERSON, sampleLesson); assertTrue(addSampleLessonCommand.equals(addSampleLessonCommandCopy)); // different types -> returns false assertFalse(addSampleLessonCommand.equals(1)); // null -> returns false assertFalse(addSampleLessonCommand.equals(null)); // different person -> returns false assertFalse(addSampleLessonCommand.equals(addSampleLessonCommand2)); } /** * Generates a {@code LessonAddCommand} with parameters {@code index} and {@code lesson}. */ private LessonAddCommand prepareLessonAddCommand(Index index, Lesson lesson) { LessonAddCommand lessonAddCommand = new LessonAddCommand(index, lesson); lessonAddCommand.setDependencies(model, new UndoRedoStack()); return lessonAddCommand; } }
from fontbakery.utils import ( can_shape, text_flow, unindent_and_unwrap_rationale, ) from fontTools.ttLib import TTFont from fontbakery.codetesting import portable_path def test_text_flow(): assert text_flow("") == "" assert text_flow("Testing") == "Testing" assert text_flow("One Two Three") == "One Two Three" assert text_flow("One Two Three", width=5) == ("One\n" "Two\n" "Three") assert text_flow("One Two Three", width=6, space_padding=True) == ("One \n" "Two \n" "Three ") assert text_flow("One Two Three", width=7, space_padding=True) == ("One Two\n" "Three ") assert text_flow("One Two Three", width=9, left_margin=2, space_padding=True) == (" One Two\n" " Three ") assert text_flow("One Two Three", width=7, left_margin=1, space_padding=True) == (" One \n" " Two \n" " Three ") assert text_flow("One Two Three", width=9, left_margin=1, right_margin=1, space_padding=True) == (" One Two \n" " Three ") assert text_flow("One Two Three", width=8, left_margin=1, right_margin=1, space_padding=True) == (" One \n" " Two \n" " Three ") assert text_flow("One Two Three Four", width=7, left_margin=1, right_margin=1, space_padding=True) == (" One \n" " Two \n" " Three \n" " Four ") assert text_flow("One Two Three Four", width=6, left_margin=1, right_margin=1, space_padding=True) == (" One \n" " Two \n" " Thre \n" " e \n" " Four ") # FIXME! # assert text_flow("One Two Three", # width=12, # left_margin=6, # first_line_indent=-5, # space_padding=True) == ( " One \n" # " Two \n" # " Three ") def test_can_shape(): font = TTFont(portable_path( "data/test/source-sans-pro/OTF/SourceSansPro-Regular.otf" )) assert can_shape(font, "ABC") assert not can_shape(font, "こんにちは") def test_unindent_and_unwrap_rationale(): rationale = """ This is a line that is very long, so long in fact that it must be hard wrapped because it is longer than 88 lines, including the two 4-space indents. This is a new paragraph. This paragraph is also too long to fit within the maximum width, so it must be hard wrapped. This is a new line that was NOT soft-wrapped, so it will end up appendend to the previous line.
 This is yet another line, but this one was soft-wrapped (Shift+Return), which means that it will not be appendend to the end of the previous line. This is the last paragraph. """ expected_rationale = ( "\n" "This is a line that is very long, so long in fact that it must be hard wrapped" " because it is longer than 88 lines, including the two 4-space indents.\n" "\n" "This is a new paragraph. This paragraph is also too long to fit within the" " maximum width, so it must be hard wrapped." " This is a new line that was NOT soft-wrapped, so it will end up appendend to" " the previous line.\n" "This is yet another line, but this one was soft-wrapped (Shift+Return), which" " means that it will not be appendend to the end of the previous line.\n" "\n" "This is the last paragraph." "\n" ) assert unindent_and_unwrap_rationale(rationale) == expected_rationale
<filename>src/main/java/de/clashsoft/gentreesrc/codegen/Generator.java package de.clashsoft.gentreesrc.codegen; import de.clashsoft.gentreesrc.tool.Config; import de.clashsoft.gentreesrc.tree.DefinitionFile; import de.clashsoft.gentreesrc.tree.decl.TypeDecl; import org.stringtemplate.v4.AutoIndentWriter; import org.stringtemplate.v4.ST; import org.stringtemplate.v4.STGroup; import org.stringtemplate.v4.STGroupFile; import java.io.*; import java.util.HashMap; import java.util.Map; import java.util.Set; import java.util.TreeSet; public class Generator { // =============== Fields =============== private final Config config; private final Map<String, String> importMap; private final STGroup treeGroup; // =============== Constructors =============== private Generator(Config config, Map<String, String> importMap, STGroup treeGroup) { this.config = config; this.importMap = importMap; this.treeGroup = treeGroup; } // =============== Static Methods =============== public static void generate(Config config, DefinitionFile definitionFile, Set<File> generatedFiles) throws IOException { // tree group final STGroup treeGroup = new STGroupFile(Generator.class.getResource(config.getLanguage() + ".stg")); treeGroup.registerRenderer(String.class, new StringRenderer()); // import map final Map<String, String> importMap = new HashMap<>(); ImportHelper.collectImportMap(definitionFile, importMap); // generate final Generator generator = new Generator(config, importMap, treeGroup); for (TypeDecl decl : definitionFile.getDeclarations()) { generator.generate(decl, generatedFiles); } } // =============== Methods =============== private void generate(TypeDecl decl, Set<File> generatedFiles) throws IOException { for (TypeDecl subDecl : decl.getSubTypes()) { this.generate(subDecl, generatedFiles); } if (decl.getAttributes().isImport()) { // type declarations marked 'import' do not generate any code return; } // imports final Set<String> imports = new TreeSet<>(); ImportHelper.collectImports(this.importMap, decl, imports); // target file final String fileName = this.treeGroup.getInstanceOf("fileName").add("typeDecl", decl).render(); final File file = new File(this.config.getOutputDir(), fileName); //noinspection ResultOfMethodCallIgnored file.getParentFile().mkdirs(); // main class final ST treeClass = this.treeGroup.getInstanceOf("treeClass"); treeClass.add("config", this.config); treeClass.add("typeDecl", decl); treeClass.add("imports", imports); try (Writer writer = new BufferedWriter(new FileWriter(file))) { // using write with a FileWriter is better than rendering to a String and then writing that, // because it does not require materializing the entire text in memory treeClass.write(new AutoIndentWriter(writer)); } generatedFiles.add(file); } }
def update_cell(self, src_path, update: bool = False) -> None: pdk = get_active_pdk() filepath = pathlib.Path(src_path) cell_name = filepath.stem.split(".")[0] function = partial(from_yaml, filepath) try: pdk.register_cells_yaml(**{cell_name: function}, update=update) except ValueError as e: print(e)
def variables(self) -> Set[str]: root = self.root vars = set() if is_variable(root): vars = {root} elif is_function(root): for arg in self.arguments: vars.update(arg.variables()) return vars
// Finds the minimum integer in an array of ints and returns it. private int min(int[] arr) { int min = Integer.MAX_VALUE; for(int i = 0; i < arr.length; ++i) { if(arr[i] < min) min = arr[i]; } return min; }
High unemployment numbers may be good for Republicans in the next election, which makes it disturbing that Republican leaders have blocked any discussion of stimulus policies that might succeed in putting people back to work. In fact, all job-creating proposals that involve spending money are considered verboten among both parties, because Republicans have cowed Democrats with the argument that the 2009 stimulus bill was an irredeemable failure and the deficit is causing unemployment. If Republicans are as deeply concerned about the 13.9 million out-of-work people as they claim to be, they might have offered ideas of their own that have some possibility of creating jobs. Instead, they have been chanting the same tired and discredited mantras the party has offered since the 1980s: huge tax cuts, huge cuts in safety-net spending, the clear-cutting of regulations, and the inevitable balanced-budget amendment. The latest example is the chimerical economic plan put forward on Tuesday by Tim Pawlenty, the former Minnesota governor, who at least until this speech was considered one of the more reasonable of the suitors for the Republican presidential nomination. Mr. Pawlenty went much further right in proposing to slash government than even the House Republicans or most of the other candidates. The danger is that the race becomes a Bunyan-esque contest between tax cutters, with the public lulled by the false belief that the current tax rates (already low) are somehow inhibiting hiring.

Mr. Pawlenty proposed getting rid of the capital gains tax — not cutting it, like Republicans of yore. He would also eliminate the taxes on interest, dividends, and inheritance. Though businesses are flush with cash, he would more than halve their taxes, and cut the top individual rate to 25 percent. This would magically “unleash the creative energy of America’s businesses, families and individuals,” he said, promising a booming job market and a decade of 5 percent yearly growth.

That is a preposterous target number, beyond the imagination of economists who can barely envision 3 percent growth. It is particularly unimaginable because of the giant deficits the vanishing tax money would create, as the ripples from blown-up government programs washed through state and city economies. Mr. Pawlenty predicts that the growth from lower taxes would wipe out the deficit, an idea discredited since the Reagan years.
Consensus Molecular Subtypes of Colorectal Cancer and their Clinical Implications. The colorectal cancer (CRC) Subtyping Consortium has unified six independent molecular classification systems, based on gene expression data, into a single consensus system with four distinct groups, known as the Consensus Molecular Subtypes (CMS); clinical implications are discussed in this review. This article is based on a literature review relevant to the CMS of CRC indexed in PubMed (US National Library of Medicine) as well as the authors' own published data. The CMS were determined and correlated with epigenomic, transcriptomic, microenvironmental, genetic, prognostic and clinical characteristics. The CMS1 subtype is immunogenic and hypermutated. CMS2 tumors are activated by the WNT-β-catenin pathway and have the highest overall survival. CMS3 tumors feature a metabolic cancer phenotype, and CMS4 cancers have the worst survival and a strong stromal gene signature. The Consensus Molecular Subtypes of CRC may better inform clinicians of prognosis, therapeutic response, and potential novel therapeutic strategies.
<gh_stars>10-100 #include <Core/Geometry/IndexedGeometry.hpp> #include <iterator> namespace Ra { namespace Core { namespace Geometry { MultiIndexedGeometry::MultiIndexedGeometry( const MultiIndexedGeometry& other ) : AttribArrayGeometry( other ) { deepCopy( other ); } MultiIndexedGeometry::MultiIndexedGeometry( MultiIndexedGeometry&& other ) : AttribArrayGeometry( std::move( other ) ), m_indices( std::move( other.m_indices ) ) {} MultiIndexedGeometry::MultiIndexedGeometry( const AttribArrayGeometry& other ) : AttribArrayGeometry( other ) {} MultiIndexedGeometry::MultiIndexedGeometry( AttribArrayGeometry&& other ) : AttribArrayGeometry( std::move( other ) ) {} MultiIndexedGeometry& MultiIndexedGeometry::operator=( const MultiIndexedGeometry& other ) { invalidateAabb(); AttribArrayGeometry::operator=( other ); deepCopy( other ); notify(); return *this; } MultiIndexedGeometry& MultiIndexedGeometry::operator=( MultiIndexedGeometry&& other ) { invalidateAabb(); AttribArrayGeometry::operator=( std::move( other ) ); m_indices = std::move( other.m_indices ); notify(); return *this; } void MultiIndexedGeometry::clear() { invalidateAabb(); AttribArrayGeometry::clear(); deepClear(); notify(); } void MultiIndexedGeometry::copy( const MultiIndexedGeometry& other ) { invalidateAabb(); AttribArrayGeometry::copyBaseGeometry( other ); deepCopy( other ); notify(); } /// \todo Implement MultiIndexedGeometry::checkConsistency void MultiIndexedGeometry::checkConsistency() const { #ifdef CORE_DEBUG #endif } bool MultiIndexedGeometry::append( const MultiIndexedGeometry& other ) { bool dataHasBeenCopied = false; for ( const auto& [key, value] : other.m_indices ) { auto it = m_indices.find( key ); if ( it == m_indices.end() ) // copy entire layer { m_indices[key] = value; dataHasBeenCopied = true; } else { // try to append to an existing layer: should always work if ( it->second.second->append( *( value.second ) ) ) { dataHasBeenCopied = true; } else { CORE_ASSERT( false, "Inconsistency: layers with different semantics shares the same key" ); } } } if ( dataHasBeenCopied ) { invalidateAabb(); notify(); } return true; } ////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////// bool MultiIndexedGeometry::containsLayer( const LayerSemantic& semanticName ) const { for ( const auto& [key, value] : m_indices ) { if ( key.first.find( semanticName ) != key.first.end() ) return true; } return false; } bool MultiIndexedGeometry::containsLayer( const LayerSemanticCollection& semantics ) const { for ( const auto& [key, value] : m_indices ) { if ( key.first == semantics ) return true; } return false; } ////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////// size_t MultiIndexedGeometry::countLayers( const LayerSemantic& semanticName ) const { size_t c = 0; for ( const auto& [key, value] : m_indices ) { if ( key.first.find( semanticName ) != key.first.end() ) ++c; } return c; } size_t MultiIndexedGeometry::countLayers( const LayerSemanticCollection& semantics ) const { size_t c = 0; for ( const auto& [key, value] : m_indices ) { if ( key.first == semantics ) ++c; } return c; } ////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////// std::pair<MultiIndexedGeometry::LayerKeyType, const GeometryIndexLayerBase&> MultiIndexedGeometry::getFirstLayerOccurrence( const LayerSemantic& 
semanticName ) const { for ( const auto& [key, value] : m_indices ) { if ( key.first.find( semanticName ) != key.first.end() ) return {key, *( value.second )}; } throw std::out_of_range( "Layer entry not found" ); } std::pair<MultiIndexedGeometry::LayerKeyType, const GeometryIndexLayerBase&> MultiIndexedGeometry::getFirstLayerOccurrence( const LayerSemanticCollection& semantics ) const { for ( const auto& [key, value] : m_indices ) { if ( key.first == semantics ) return {key, *( value.second )}; } throw std::out_of_range( "Layer entry not found" ); } ////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////// std::pair<MultiIndexedGeometry::LayerKeyType, GeometryIndexLayerBase&> MultiIndexedGeometry::getFirstLayerOccurrenceWithLock( const LayerSemantic& semanticName ) { for ( auto& [key, value] : m_indices ) { if ( key.first.find( semanticName ) != key.first.end() ) { CORE_ASSERT( !value.first, "try to get already locked layer" ); value.first = true; return {key, *( value.second )}; } } throw std::out_of_range( "Layer entry not found" ); } std::pair<MultiIndexedGeometry::LayerKeyType, GeometryIndexLayerBase&> MultiIndexedGeometry::getFirstLayerOccurrenceWithLock( const LayerSemanticCollection& semantics ) { for ( auto& [key, value] : m_indices ) { if ( key.first == semantics ) { CORE_ASSERT( !value.first, "try to get already locked layer" ); value.first = true; return {key, *( value.second )}; } } throw std::out_of_range( "Layer entry not found" ); } GeometryIndexLayerBase& MultiIndexedGeometry::getLayerWithLock( const LayerKeyType& layerKey ) { auto& p = m_indices.at( layerKey ); CORE_ASSERT( !p.first, "try to get already locked layer" ); p.first = true; return *( p.second ); } ////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////// void MultiIndexedGeometry::unlockFirstLayerOccurrence( const LayerSemantic& semanticName ) { for ( auto& [key, value] : m_indices ) { if ( key.first.find( semanticName ) != key.first.end() ) { CORE_ASSERT( value.first, "try to release unlocked layer" ); value.first = false; notify(); return; } } throw std::out_of_range( "Layer entry not found" ); } void MultiIndexedGeometry::unlockFirstLayerOccurrence( const LayerSemanticCollection& semantics ) { for ( auto& [key, value] : m_indices ) { if ( key.first == semantics ) { CORE_ASSERT( value.first, "try to release unlocked layer" ); value.first = false; notify(); return; } } throw std::out_of_range( "Layer entry not found" ); } void MultiIndexedGeometry::unlockLayer( const LayerKeyType& layerKey ) { auto& p = m_indices.at( layerKey ); CORE_ASSERT( p.first, "try to release unlocked layer" ); p.first = true; notify(); } ////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////// bool MultiIndexedGeometry::addLayer( std::unique_ptr<GeometryIndexLayerBase>&& layer, const std::string& layerName ) { LayerKeyType key {layer->semantics(), layerName}; if ( m_indices.find( key ) != m_indices.end() ) return false; m_indices.insert( {key, std::make_pair( false, layer.release() )} ); notify(); return true; } ////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////// void MultiIndexedGeometry::deepCopy( const MultiIndexedGeometry& other ) { m_indices = other.m_indices; for ( auto& el : m_indices ) 
el.second.second = el.second.second->clone(); // replace copied entries by duplicates } void MultiIndexedGeometry::deepClear() { for ( auto& el : m_indices ) delete el.second.second; m_indices.clear(); } ////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////// std::size_t MultiIndexedGeometry::KeyHash::operator()( const LayerKeyType& k ) const { // Mix semantic collection into a single identifier string std::ostringstream stream; std::copy( k.first.begin(), k.first.end(), std::ostream_iterator<std::string>( stream, "" ) ); std::string result = stream.str(); std::sort( result.begin(), result.end() ); // Combine with layer name hash return std::hash<std::string> {}( result ) ^ ( std::hash<std::string> {}( k.second ) << 1 ); } ////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////// void PointCloudIndexLayer::linearIndices( const AttribArrayGeometry& attr ) { auto nbVert = attr.vertices().size(); collection().resize( nbVert ); collection().getMap() = IndexContainerType::Matrix::LinSpaced( nbVert, 0, nbVert - 1 ); } } // namespace Geometry } // namespace Core } // namespace Ra
<filename>src/cookie/protocol/network/types/HumanInformations.ts import ActorRestrictionsInformations from "./ActorRestrictionsInformations"; import HumanOption from "./HumanOption"; export default class HumanInformations { public options: HumanOption[]; public restrictions: ActorRestrictionsInformations; public sex: boolean; constructor(restrictions: ActorRestrictionsInformations = null, sex = false, options: HumanOption[] = null) { this.restrictions = restrictions; this.sex = sex; this.options = options; } }
/* * Sets scaleY and returns value indicating success. */ int CSGScale::setScaleY(double scaleY) { mScaleY = scaleY; mIsSetScaleY = true; return LIBSBML_OPERATION_SUCCESS; }
def __get_possible_route_segments(graph, path): edge_data = lambda a, b: graph.get_edge_data(a, b).values() routes_between = lambda a, b: map(lambda x: x['route'], edge_data(a, b)) make_journey_segment = lambda a, b: (a, set(routes_between(a, b))) return zip_with(make_journey_segment, path, tail(path))
/** * Simple ByteBuffer pool used by SSLHandler. * ByteBuffers are by default allocated as direct byte buffers. To use non-direct * ByteBuffers, set system property mina.sslfilter.directbuffer to false. * * @author The Apache Directory Project ([email protected]) * @version $Rev$, $Date$ */ class SSLByteBufferPool { private static final int PACKET_BUFFER_INDEX = 0; private static final int APPLICATION_BUFFER_INDEX = 1; private static boolean initiated = false; private static final String DIRECT_MEMORY_PROP = "mina.sslfilter.directbuffer"; private static boolean useDirectAllocatedBuffers = true; private static int packetBufferSize; private static int appBufferSize; private static int[] bufferStackSizes; private static final Stack[] bufferStacks = new Stack[] { new Stack(), new Stack(), }; /** * Initiate buffer pool and buffer sizes from SSLEngine session. * * @param sslEngine SSLEngine */ static synchronized void initiate(SSLEngine sslEngine) { if (!initiated) { // Use direct allocated memory or not? String prop = System.getProperty(DIRECT_MEMORY_PROP); if (prop != null) { useDirectAllocatedBuffers = Boolean .getBoolean(DIRECT_MEMORY_PROP); } // init buffer sizes from SSLEngine packetBufferSize = sslEngine.getSession().getPacketBufferSize(); // application buffer size has been doubled because SSLEngine // returns BUFFER_OVERFLOW even if there is enough room for the buffer. // So for now we use a size double the packet size as a workaround. appBufferSize = packetBufferSize * 2; initiateBufferStacks(); initiated = true; } } /** * Get bytebuffer with size the size of the largest SSL/TLS packet that may occur * (as defined by SSLSession). */ static ByteBuffer getPacketBuffer() { if (!initiated) { throw new IllegalStateException("Not initialized"); } return allocate(PACKET_BUFFER_INDEX); } /** * Get ByteBuffer with the size of the largest application buffer that may occur * (as defined by SSLSession). */ static ByteBuffer getApplicationBuffer() { if (!initiated) { throw new IllegalStateException("Not initialized"); } return allocate(APPLICATION_BUFFER_INDEX); } /** * Allocate or get the buffer which is capable of the specified size. */ private static ByteBuffer allocate(int idx) { Stack stack = bufferStacks[idx]; ByteBuffer buf; synchronized (stack) { buf = (ByteBuffer) stack.pop(); if (buf == null) { buf = createBuffer(bufferStackSizes[idx]); } } buf.clear(); return buf; } /** * Releases the specified buffer to buffer pool. */ public static void release(ByteBuffer buf) { // Sweep buffer for security. 
org.apache.mina.common.ByteBuffer.wrap(buf).sweep().release(); int stackIndex = getBufferStackIndex(buf.capacity()); if (stackIndex >= PACKET_BUFFER_INDEX) { Stack stack = bufferStacks[getBufferStackIndex(buf.capacity())]; synchronized (stack) { stack.push(buf); } } } /** * Expand size of provided buffer * @param buf buffer to be expande * @param newCapacity new capacity */ public static ByteBuffer expandBuffer(ByteBuffer buf, int newCapacity) { ByteBuffer newBuf = createBuffer(newCapacity); buf.flip(); newBuf.put(buf); release(buf); return newBuf; } private static void initiateBufferStacks() { bufferStackSizes = new int[2]; bufferStackSizes[PACKET_BUFFER_INDEX] = packetBufferSize; bufferStackSizes[APPLICATION_BUFFER_INDEX] = appBufferSize; } private static int getBufferStackIndex(int size) { if (size == packetBufferSize) return PACKET_BUFFER_INDEX; if (size == appBufferSize) return APPLICATION_BUFFER_INDEX; return -1; // not reused } private static ByteBuffer createBuffer(int capacity) { if (useDirectAllocatedBuffers) { try { return ByteBuffer.allocateDirect(capacity); } catch (OutOfMemoryError e) { useDirectAllocatedBuffers = false; System.err .println("OutOfMemoryError: No more direct buffers available; trying heap buffer instead"); } } return ByteBuffer.allocate(capacity); } }
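The javadoc above notes that direct ByteBuffer allocation can be disabled through the mina.sslfilter.directbuffer system property. The following is a minimal, hypothetical sketch of how that switch could be flipped; only the property name is taken from the class above, everything else (class name, main method) is illustrative.

// Hypothetical launcher snippet: force heap buffers instead of direct buffers.
public class SslBufferPoolConfigExample {
    public static void main(String[] args) {
        // Must run before SSLByteBufferPool.initiate(sslEngine) is called, because the
        // property is read only once during initialization. Equivalent to starting the
        // JVM with -Dmina.sslfilter.directbuffer=false.
        System.setProperty("mina.sslfilter.directbuffer", "false");

        // ... set up the SSLEngine and the SSL filter as usual; the pool will now
        // allocate heap ByteBuffers instead of direct ones ...
    }
}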
<reponame>desmond-wang/Dythm package troid.dythm.game.achievement; import troid.dythm.game.profile.Profile; public class OneHundredPlays implements Achievement { @Override public String getName() { return "100 Plays"; } @Override public String getDescription() { return "Complete 100 songs"; } @Override public boolean isAchieved(Profile profile) { return profile.getPlayCount() >= 100; } }
// // If the input file is in Utf8 format, then the first three // characters are a marker that indicates this. // int ByteOrderMarkSize(const char *cursor) { return (cursor[0] == (char) 0xEF && cursor[1] == (char) 0xBB && cursor[2] == (char) 0xBF ? 3 : 0); }
// A simple pair class that keeps track of both the head and the tail node of a list.
class Pair {
public:
    Node *head = NULL;
    Node *tail = NULL;
};
class SuperOpportunityFinder: """Super Opportunity Finder.""" def __init__( self, exchanges, collections, name=True, opportunity_id=0, get_usd_rates=False, opportunity_interval=0.05, ): """Initialize. SuperOpportunityFinder, given a dict of collections, yields opportunities in the order they come. There is not enough overlap between SuperOpportunityFinder and OpportunityFinder to warrant inheritance. The sometimes-odd structure of this class is to ensure that connections to exchanges' servers are closed. It is structured so because Python's pass-by-object reference can lead to new instances of exchanges (with unclosed connections). :param exchanges: A list of exchanges, either ccxt.Exchange objects or names of exchanges :param collections: A dict of collections, as returned by CollectionBuilder in async_build_markets.py. The self.collections field will be a Collections object. :param name: True if exchanges is a list of strings, False if it is a list of ccxt.Exchange objects """ self.adapter = FormatForLogAdapter( logging.getLogger( "peregrinearb.async_find_opportunities.SuperOpportunityFinder" ) ) self.adapter.debug("Initializing SuperOpportunityFinder") if name: self.exchanges = {e: getattr(ccxt, e)() for e in exchanges} else: self.exchanges = {e.id: e for e in exchanges} self.collections = Collections(collections) self.adapter.debug("Initialized SuperOpportunityFinder") self.rate_limited_exchanges = set() self._find_opportunity_calls = -1 # starting opportunity id for logging self.opportunity_id = opportunity_id self.usd_rates = {} self.get_usd_rates = get_usd_rates self.opportunity_interval = opportunity_interval async def get_opportunities( self, price_markets=None, close=True, ): """Get opportunities. :param price_markets: Optional. If you would like to first return the prices for the markets in price_markets and the corresponding opportunities before finding other opportunities. Example value is ['BTC/USD, BTC/USDT, ETH/USD, ETH/USDT] For markets in price_markets, return a 2-tuple of (opportunity, prices). Read docstring of _find_opportunity for more information. """ self.adapter.info("Finding inter-exchange opportunities.") # If you would like to first return the prices for the markets in price_markets and the corresponding # opportunities before finding other opportunities if price_markets is not None: collections = self.collections # First collects the prices for the markets in price_markets tasks = [] for market in price_markets: tasks.append( self._find_opportunity(market, self.collections[market], True) ) # todo: add if market in price_markets to _find_opportunity. deleting from collections is bad. del collections[market] for result in asyncio.as_completed(tasks): yield await result else: collections = self.collections tasks = [ self._find_opportunity(market_name, exchange_list) for market_name, exchange_list in collections.items() ] for result in asyncio.as_completed(tasks): yield await result if close: tasks = [e.close() for e in self.exchanges.values()] await asyncio.wait(tasks) self.adapter.info("Yielded all inter-exchange opportunities.") async def _find_opportunity(self, market_name, exchange_list, return_prices=False): """Find opportunity. :param return_prices: If True, returns a two-tuple where the first element is the opportunity dict and the second element is a dict keyed by exchange name in exchange_list and valued with the corresponding price of market_name. 
If False, returns the opportunity dict """ self._find_opportunity_calls += 1 self.opportunity_id += 1 current_opp_id = self.opportunity_id await asyncio.sleep(self.opportunity_interval * self._find_opportunity_calls) # Try again in 100 milliseconds if any of the exchanges in exchange_list are currently rate limited. for e in exchange_list: if e in self.rate_limited_exchanges: self.adapter.info( "Sleeping asynchronously because exchange was rate limited", exchange=e, sleeptime=0.1, ) await asyncio.sleep(0.1) return await self._find_opportunity(market_name, exchange_list) if return_prices: prices = {} self.adapter.info( "Finding opportunity", opportunity=current_opp_id, market=market_name, ) opportunity = { "highest_bid": {"price": -1, "exchange": None, "volume": 0}, "lowest_ask": {"price": float("Inf"), "exchange": None, "volume": 0}, "ticker": market_name, "datetime": datetime.datetime.now(tz=datetime.timezone.utc), "id": current_opp_id, } tasks = [ self._exchange_fetch_order_book(exchange_name, market_name, current_opp_id) for exchange_name in exchange_list ] for res in asyncio.as_completed(tasks): order_book, exchange_name = await res # If the order book's volume was too low or fetch_ticker raised ExchangeError or ExchangeNotAvailable if exchange_name is None: continue # If ticker is None, that means that there was either a RequestTimeout or DDosProtection error. if order_book is None: self.rate_limited_exchanges.add(exchange_name) await asyncio.sleep(0.2) # Because of asynchronicity, error.exchange_name may no longer be in self.rate_limited_exchanges if exchange_name in self.rate_limited_exchanges: self.rate_limited_exchanges.remove(exchange_name) if market_name in self.collections: # self.collections[market_name] instead of exchange_list because an exchange is removed if it # raised ExchangeError, which signals that the exchange no longer supports the specified market return await self._find_opportunity( market_name, self.collections[market_name] ) # edge case: if it was removed because there were only two exchanges for this market and one of them # was removed because it no longer supports this market. likely will happen only with very low-volume # and exotic markets else: return opportunity bid = order_book["bids"][0][0] ask = order_book["asks"][0][0] if return_prices: prices[exchange_name] = ask if bid > opportunity["highest_bid"]["price"]: opportunity["highest_bid"]["price"] = bid opportunity["highest_bid"]["exchange"] = exchange_name opportunity["highest_bid"]["volume"] = order_book["bids"][0][1] if ask < opportunity["lowest_ask"]["price"]: opportunity["lowest_ask"]["price"] = ask opportunity["lowest_ask"]["exchange"] = exchange_name opportunity["lowest_ask"]["volume"] = order_book["asks"][0][1] self.adapter.info( "Found opportunity", opportunity=current_opp_id, market=market_name ) if return_prices: return opportunity, prices return opportunity async def _exchange_fetch_order_book( self, exchange_name, market_name, current_opp_id ): """Exchange Fetch Order Book. 
Returns a two-tuple structured as (ticker, exchange_name) ticker is None if an order book with bids and asks was not returned by fetch_order_book exchange_name is None if an unavoidable error was raised or the order book has no bids or no asks """ self.adapter.debug( "Fetching ticker", opportunity=current_opp_id, exchange=exchange_name, market=market_name, ) try: order_book = await self.exchanges[exchange_name].fetch_order_book( market_name ) except ccxt.DDoSProtection: self.adapter.warning( "Rate limited for inter-exchange opportunity", opportunity=current_opp_id, exchange=exchange_name, market=market_name, ) return None, exchange_name except ccxt.RequestTimeout: self.adapter.warning( "Request timeout for inter-exchange opportunity.", opportunity=current_opp_id, exchange=exchange_name, market=market_name, ) return None, exchange_name # If the exchange no longer has the specified market except ccxt.ExchangeError: self.adapter.warning( "Fetching ticker raised an ExchangeError.", opportunity=current_opp_id, exchange=exchange_name, market=market_name, ) self.adapter.info( "Removing exchange from market", exchange=exchange_name, market=market_name, opportunity=current_opp_id, ) self.collections.remove_exchange_from_market(exchange_name, market_name) return None, None except ccxt.ExchangeNotAvailable: self.adapter.warning( "Fetching ticker raised an ExchangeNotAvailable error.", opportunity=current_opp_id, exchange=exchange_name, market=market_name, ) return None, None if order_book["bids"] == [] or order_book["asks"] == []: self.adapter.debug( "No asks or no bids", exchange=exchange_name, market=market_name ) return None, None if self.get_usd_rates: cap_currency_index = market_name.find("USD") # if self.cap_currency is the quote currency if cap_currency_index > 0: self._add_to_rates_dict( exchange_name, market_name, order_book["bids"][0][0] ) self.adapter.debug( "Fetched ticker", opportunity=current_opp_id, exchange=exchange_name, market=market_name, ) return order_book, exchange_name def _add_to_rates_dict(self, exchange_name, market_name, price): if exchange_name in self.usd_rates: self.usd_rates[exchange_name][market_name] = price else: self.usd_rates[exchange_name] = {market_name: price}
2015 Australian Silver and Gold Coins for October

October releases from the Perth Mint of Australia feature several new gold, silver and platinum coins that showcase a variety of themes. They include Year of the Monkey coins, a two-ounce gold high relief kangaroo coin, a star-shaped coin, a map-shaped coin, a five-ounce high relief koala silver coin and a platinum Australian Wedge-Tailed Eagle coin. Brief information on these and other new coins from the Perth Mint is offered below.

2016 Australian Lunar Year of the Monkey Gold and Silver Coins

The Perth Mint’s Australian Lunar gold and silver programs continue with 2016-dated Year of the Monkey coins. New series releases include one-ounce, one-quarter ounce and one-tenth ounce gold coins; one-ounce and one-half ounce silver coins; and a four-coin silver typeset collection. The typeset collection has proof, gilded, colored and bullion finish coins — all in one-ounce sizes. Each gold coin shows a single adult monkey sitting on the branch of a tree. The silver coins offer an adult monkey and baby monkey sitting in a peach tree. Mintages for the gold coins vary from 3,000 to 5,000 with 10,000 for each color silver coin. The limit for the typeset collection is capped at 1,500.

Gold Coins Silver Coins Silver Typeset Collection

2016 Five Blessings 1 oz Silver Coin

Chinese legend says that when five bats are pictured together, they are a visual representation of the Five Blessings which include longevity, wealth, health and composure, virtue, and the desire to die a natural death in old age. The 2016 Five Blessings 1 oz Silver Coin is struck as Australian legal tender with a face value of 1 dollar. Its mintage is capped at 10,000.

2015 Christmas Star Shaped 1 oz Silver Proof Coin

Innovative in shape, the Perth Mint released the 2015 Christmas 1 oz Star Shaped Silver Proof Coin. As indicated by the name and in keeping with its theme of Christmas, each is in the shape of a star. Appearing on the reverse is a colorful Christmas tree with a shining star at its top. The tree stands against a backdrop of a forest. No more than 5,000 of these coins will be struck. Each is housed in a special star-shaped capsule that can be hung as a Christmas tree decoration.

2015 Kangaroo High Relief 2 oz Gold Proof Coin

The two-ounce 2015 Australian Kangaroo High Relief Gold Proof Coin has a very limited mintage of 250. Its reverse offers a high relief image of a single adult kangaroo as it hops across the Australian landscape at dusk. This gold coin comes housed in a Perth Mint wooden display case. An illustrated shipper and a certificate of authenticity are also included.

2015 Wedge-Tailed Eagle 1 oz Platinum Proof Coin

Former United States Mint Chief Engraver John M. Mercanti designed the image of the Wedge-Tailed Eagle as seen on this new platinum proof coin. It depicts the eagle with its wings spread as it is about to land on a tree branch. Mintage for the coin is limited to 500, and each is composed of one ounce of 99.95% pure platinum. A gray display case and numbered certificate of authenticity are included with each purchase.

2015 Australian Koala 5 oz High Relief Silver Coin

The 2015 Australian Koala 5 oz Silver Proof High Relief Coin is composed of five ounces of 99.9% pure silver. Depicted on its reverse is a single adult koala perched on the branch of a eucalyptus tree. Behind the marsupial, a traditional rural landscape appears.
Mintage for the coin is set at 5,000.

2016 Southern Cassowary 1 oz Silver Proof Coin

The Perth Mint’s Endangered and Extinct series continues with the one-ounce 2016 Southern Cassowary Coin. Depicted on the reverse is the likeness of a Southern Cassowary, which is a large flightless bird indigenous to Indonesia, New Guinea and parts of Australia. Previous program issues depicting the Tasmanian Tiger, the Tasmanian Wedge-Tailed Eagle and the Tasmanian Devil all sold out.

2015 Australian Map Shaped Redback Spider 1 oz Silver Coin

Continuing the Perth Mint’s popular map-shaped series, the 2015 Redback Spider 1 oz Silver Coin offers a reverse design of a Redback spider as it crawls across the face of a rock. Each 99.9% pure coin is struck in the shape of the Australian continent. Mintage for this release is limited to 6,000. Each includes a Perth Mint display case, an illustrated shipper and a numbered certificate of authenticity.
<filename>customer/back-end server/accounts/api/serializers.py from rest_framework.serializers import ModelSerializer from accounts.models import Account class ProfileSerializer(ModelSerializer): class Meta: model = Account fields = [ 'id', 'first_name', 'last_name', 'username', 'email', 'country', 'image', 'region', 'address', 'phone_number', 'added', 'updated' ]
#include "default_logger.h"

void DefaultLogger::log(const char *level, const char *file, int line, const char *fmt, va_list args) {
  int n;
  char buf[kMaxLogBufSize] = {'\0'};

  // Prefix with "[LEVEL file:line]", then append the formatted message.
  n = snprintf(buf, kMaxLogBufSize, "[%s %s:%d]", level, file, line);
  n += vsnprintf(buf + n, kMaxLogBufSize - n, fmt, args);
  buf[n++] = '\n';
  buf[n++] = '\0';

  fprintf(stdout, "%s", buf);
  fflush(stdout);
}

DefaultLogger kDefaultLogger;
package mocks import "github.com/sclevine/agouti/api" type WebDriver struct { OpenCall struct { Desired map[string]interface{} ReturnSession *api.Session Err error } StartCall struct { Called bool Err error } StopCall struct { Called bool Err error } } func (w *WebDriver) Open(desired map[string]interface{}) (*api.Session, error) { return w.OpenCall.ReturnSession, w.OpenCall.Err } func (w *WebDriver) Start() error { w.StartCall.Called = true return w.StartCall.Err } func (w *WebDriver) Stop() error { w.StopCall.Called = true return w.StopCall.Err }
<gh_stars>0 import {faArrowRight} from '@fortawesome/free-solid-svg-icons'; import {FontAwesomeIcon as FAIcon} from '@fortawesome/react-fontawesome'; import {FC, ReactNode} from 'react'; import style from './Summary.module.scss'; import {ScrollLink} from './navbar'; interface Props { items: {label: string, value: ReactNode}[] actionLabel: string; actionTo: string; } export const Summary: FC<Props> = ({items, actionLabel, actionTo}) => ( <div className={style.main}> <dl className={style.list}> {items.map(({label, value}) => ( <div key={label} className={style.item}> <dt className={style.label}>{label}</dt> <dd className={style.value}>{value}</dd> </div> ))} </dl> <ScrollLink to={actionTo} className={style.action}> {actionLabel} <FAIcon icon={faArrowRight} className={style.arrow} /> </ScrollLink> </div> );
<reponame>balcieren/erenbalci.com import clsx from "clsx"; import { FC, ReactNode } from "react"; export type SectionLayoutProps = { title: string; id: string; children: ReactNode; titleClassName?: string; className?: string; }; export const SectionLayout: FC<SectionLayoutProps> = ({ id, title, children, titleClassName, className, }) => { return ( <div id={id} className={clsx("my-6 flex flex-col gap-8", className)}> <h4 className={clsx( "text-4xl font-extrabold underline decoration-blue-400 dark:text-white", titleClassName )} > {title} </h4> {children} </div> ); };
import os import argparse import pandas as pd import numpy as np import sys import json DEFAULT_PROJECT_REPO = os.path.sep.join(__file__.split(os.path.sep)[:-2]) PROJECT_REPO_DIR = os.path.abspath( os.environ.get('PROJECT_REPO_DIR', DEFAULT_PROJECT_REPO)) sys.path.append(os.path.join(PROJECT_REPO_DIR, 'src')) from feature_transformation import (parse_id_cols, remove_col_names_from_list_if_not_in_df, parse_time_col, parse_feature_cols) def merge_data_dicts(data_dicts_list): # get a single consolidated data dict for all features and another for outcomes # combine all the labs, demographics and vitals jsons into a single json features_data_dict = dict() features_data_dict['schema']= dict() features_dict_merged = [] for data_dict in data_dicts_list: features_dict_merged += data_dict['schema']['fields'] feat_names = list() features_data_dict['schema']['fields'] = [] for feat_dict in features_dict_merged: if feat_dict['name'] not in feat_names: features_data_dict['schema']['fields'].append(feat_dict) feat_names.append(feat_dict['name']) return features_data_dict def get_all_features_data(labs_df, labs_data_dict, vitals_df, vitals_data_dict, demographics_df, demographics_data_dict): '''Returns the merged labs, vitals and demographics features into a single table and the data dict''' time_col = parse_time_col(vitals_data_dict) id_cols = parse_id_cols(vitals_data_dict) # merge the labs and vitals highfreq_df = pd.merge(vitals_df, labs_df, on=id_cols +[time_col], how='outer') highfreq_data_dict = merge_data_dicts([labs_data_dict, vitals_data_dict]) highfreq_data_dict['fields'] = highfreq_data_dict['schema']['fields'] cols_to_keep = parse_id_cols(highfreq_data_dict) + [parse_time_col(highfreq_data_dict)] + parse_feature_cols(highfreq_data_dict) highfreq_df = highfreq_df[cols_to_keep].copy() # merge the highfrequency features with the static features features_df = pd.merge(highfreq_df, demographics_df, on=id_cols, how='inner') features_data_dict = merge_data_dicts([highfreq_data_dict, demographics_data_dict]) features_data_dict['fields'] = features_data_dict['schema']['fields'] return features_df, features_data_dict if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--collapsed_tslice_folder', type=str, help='folder where collapsed features from each tslice are stored') parser.add_argument('--tslice_folder', type=str, help='folder where raw features and static features from each tslice are stored') parser.add_argument('--tslice_list', type=str, help='list of all the tslices used for training the classifier') parser.add_argument('--static_data_dict_dir', type=str, help='directory where data dict for demographics and outcomes') parser.add_argument('--output_dir', type=str, help='folder to save merged features and outcomes from all tslices') args = parser.parse_args() # get all the collapsed labs, collapsed vitals, demographics and outcomes data dicts with open(os.path.join(args.static_data_dict_dir, 'Spec-Demographics.json'), 'r') as f1: demographics_data_dict = json.load(f1) demographics_data_dict['fields'] = demographics_data_dict['schema']['fields'] with open(os.path.join(args.static_data_dict_dir, 'Spec-Outcomes_TransferToICU.json'), 'r') as f2: outcomes_data_dict = json.load(f2) id_cols = parse_id_cols(demographics_data_dict) # get all the collapsed labs, collapsed vitals, demographics and outcomes in all the tslice folders print('Merging collapsed vitals, collapsed labs, demographics and outcomes in all the tslice folders = %s into a single features table and a 
single outcomes table...'%args.tslice_list) features_df_all_slices_list = list() outcomes_df_all_slices_list = list() mews_df_all_slices_list = list() for tslice in args.tslice_list.split(' '): curr_tslice_folder = args.tslice_folder+tslice curr_collapsed_tslice_folder = args.collapsed_tslice_folder+tslice collapsed_labs_df = pd.read_csv(os.path.join(curr_collapsed_tslice_folder, 'CollapsedLabsPerSequence.csv')) collapsed_vitals_df = pd.read_csv(os.path.join(curr_collapsed_tslice_folder, 'CollapsedVitalsPerSequence.csv')) demographics_df = pd.read_csv(os.path.join(curr_tslice_folder, 'demographics_before_icu_filtered_%s_hours.csv'%tslice)) mews_df = pd.read_csv(os.path.join(curr_collapsed_tslice_folder, 'MewsScoresPerSequence.csv')) collapsed_vitals_labs_df = pd.merge(collapsed_vitals_df, collapsed_labs_df, on=id_cols, how='inner') # merge the collapsed feaatures and static features in each tslice features_df = pd.merge(collapsed_vitals_labs_df, demographics_df, on=id_cols, how='inner') outcomes_df = pd.read_csv(os.path.join(curr_tslice_folder, 'clinical_deterioration_outcomes_filtered_%s_hours.csv'%tslice)) feature_cols = features_df.columns outcome_cols = outcomes_df.columns mews_cols = mews_df.columns # append fearures from all tslices features_df_all_slices_list.append(features_df.values) outcomes_df_all_slices_list.append(outcomes_df.values) mews_df_all_slices_list.append(mews_df.values) del collapsed_vitals_labs_df features_df_all_slices = pd.DataFrame(np.concatenate(features_df_all_slices_list), columns=feature_cols) outcomes_df_all_slices = pd.DataFrame(np.concatenate(outcomes_df_all_slices_list), columns=outcome_cols) mews_df_all_slices = pd.DataFrame(np.concatenate(mews_df_all_slices_list), columns=mews_cols) # get collapsed vitals and labs dict print('Merging collapsed vitals, collapsed labs, demographics and outcomes data dicts into a single features data dict and a single outcomes data dict...') with open(os.path.join(curr_collapsed_tslice_folder, 'Spec_CollapsedLabsPerSequence.json'), 'r') as f3: collapsed_labs_data_dict = json.load(f3) with open(os.path.join(curr_collapsed_tslice_folder, 'Spec_CollapsedVitalsPerSequence.json'), 'r') as f4: collapsed_vitals_data_dict = json.load(f4) with open(os.path.join(curr_collapsed_tslice_folder, 'Spec_MewsScoresPerSequence.json'), 'r') as f5: mews_data_dict = json.load(f5) # get a single consolidated data dict for all features and another for outcomes # combine all the labs, demographics and vitals jsons into a single json features_data_dict = dict() features_data_dict['schema']= dict() features_dict_merged = collapsed_labs_data_dict['schema']['fields'] + collapsed_vitals_data_dict['schema']['fields'] + demographics_data_dict['schema']['fields'] feat_names = list() features_data_dict['schema']['fields'] = [] for feat_dict in features_dict_merged: if feat_dict['name'] not in feat_names: features_data_dict['schema']['fields'].append(feat_dict) feat_names.append(feat_dict['name']) # convert the features to numpy float 32 to avoid memory issues feature_cols = parse_feature_cols(features_data_dict['schema']) feature_type_dict = dict.fromkeys(feature_cols) for k in feature_type_dict.keys(): feature_type_dict[k] = np.float32 features_df_all_slices = features_df_all_slices.astype(feature_type_dict) # save to disk features_csv = os.path.join(args.output_dir, 'features.csv') outcomes_csv = os.path.join(args.output_dir, 'outcomes.csv') mews_csv = os.path.join(args.output_dir, 'mews.csv') features_json = os.path.join(args.output_dir, 
'Spec_features.json') outcomes_json = os.path.join(args.output_dir, 'Spec_outcomes.json') mews_json = os.path.join(args.output_dir, 'Spec_mews.json') print('saving features and outcomes to :\n%s\n%s\n%s'%(features_csv, outcomes_csv, mews_csv)) features_df_all_slices.to_csv(features_csv, index=False) outcomes_df_all_slices.to_csv(outcomes_csv, index=False) mews_df_all_slices.to_csv(mews_csv, index=False) print('saving features and outcomes dict to :\n%s\n%s\n%s'%(features_json, outcomes_json, mews_json)) with open(features_json, "w") as outfile_feats: json.dump(features_data_dict, outfile_feats) with open(outcomes_json, "w") as outfile_outcomes: json.dump(outcomes_data_dict, outfile_outcomes) with open(mews_json, "w") as outfile_mews: json.dump(mews_data_dict, outfile_mews)
// Validate ensures that license has at least one of type or uri func (l License) Validate() error { if "" == l.Type && "" == l.URI { return fmt.Errorf("license must have at least one of type or uri") } return nil }
/** * @par Detailed Design: * This function is responsible for actually issuing the system call that * will create the slave simulations on the appropriate machines. */ void Trick::MonteCarlo::spawn_slaves() { for (std::vector<MonteSlave *>::size_type i = 0; i < slaves.size(); ++i) { if (slaves[i]->state == MonteSlave::MC_UNINITIALIZED) { initialize_slave(slaves[i]) ; } } }
<filename>src/etc/matrix_product/matrix.h #ifndef __Matrix_ #define __Matrix_ #include <iostream> class Matrix { friend std::istream& operator>>(std::istream& in, Matrix& m); friend std::ostream& operator<<(std::ostream& out, Matrix&& m); friend std::ostream& operator<<(std::ostream& out, Matrix& m); private: int row; int col; int* m; public: Matrix(); Matrix(int r, int c); Matrix(Matrix& ma); Matrix(Matrix&& ma); ~Matrix(); int get(int i, int j); void set(int i, int j, int v); int get_row() { return this->row; } int get_col() { return this->col; } int* operator[](int i) { return this->m + i * this->col; } Matrix& operator=(Matrix& ma); Matrix& operator=(Matrix&& ma); Matrix operator*(Matrix& ma); Matrix operator*(Matrix&& ma); }; #endif
def grp1(df):
    # Select valid 'offer completed' events and group them by person and offer_id.
    grp1 = df[['person', 'offer_id']][(df['valid_completed'] == 1) & (df['event'] == 'offer completed')].groupby(['person', 'offer_id']).count().reset_index()
    return grp1
<reponame>trf2-jus-br/eproc-api package br.jus.trf2.sistemaprocessual; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; import java.sql.Connection; import java.sql.DriverManager; import java.util.Date; import java.util.Scanner; import javax.naming.Context; import javax.naming.InitialContext; import javax.sql.DataSource; import org.joda.time.DateTime; import org.joda.time.format.DateTimeFormat; import org.joda.time.format.DateTimeFormatter; import com.crivano.swaggerservlet.SwaggerServlet; public class Utils { public static Connection getConnection() throws Exception { String dsName = EprocServlet.INSTANCE.getProperty("datasource.name"); if (dsName != null) { Context initContext = new InitialContext(); Context envContext = (Context) initContext.lookup("java:"); DataSource ds = (DataSource) envContext.lookup(dsName); Connection connection = ds.getConnection(); if (connection == null) throw new Exception("Can't open connection to database."); return connection; } else { Connection connection = null; Class.forName("com.mysql.jdbc.Driver"); String dbURL = EprocServlet.INSTANCE.getProperty("datasource.url"); String username = EprocServlet.INSTANCE.getProperty("datasource.username"); ; String password = EprocServlet.INSTANCE.getProperty("datasource.password"); ; connection = DriverManager.getConnection(dbURL, username, password); if (connection == null) throw new Exception("Can't open connection to database."); return connection; } } public static String getSQL(String filename) { try (Scanner scanner = new Scanner(EprocServlet.class.getResourceAsStream(filename + ".sql"), "UTF-8")) { String text = scanner.useDelimiter("\\A").next(); return text; } } public static byte[] calcMd5(byte[] content) throws NoSuchAlgorithmException { MessageDigest md = MessageDigest.getInstance("MD5"); md.reset(); md.update(content); byte[] output = md.digest(); return output; } public static byte[] calcSha1(byte[] content) throws NoSuchAlgorithmException { MessageDigest md = MessageDigest.getInstance("SHA-1"); md.reset(); md.update(content); byte[] output = md.digest(); return output; } public static byte[] calcSha256(byte[] content) throws NoSuchAlgorithmException { MessageDigest md = MessageDigest.getInstance("SHA-256"); md.reset(); md.update(content); byte[] output = md.digest(); return output; } /** * Transoforma array de bytes em String * * @param buf * @return */ public static String asHex(byte buf[]) { StringBuffer strbuf = new StringBuffer(buf.length * 2); int i; for (i = 0; i < buf.length; i++) { if (((int) buf[i] & 0xff) < 0x10) strbuf.append("0"); strbuf.append(Long.toString((int) buf[i] & 0xff, 16)); } return strbuf.toString(); } private static final DateTimeFormatter dtfBRHHMM = DateTimeFormat.forPattern("dd/MM/yyyy HH:mm"); public static String formatarDataHoraMinuto(Date d) { DateTime dt = new DateTime(d.getTime()); return dt.toString(dtfBRHHMM); } private static final DateTimeFormatter dtfBR = DateTimeFormat.forPattern("dd/MM/yyyy"); public static String formatarData(Date d) { DateTime dt = new DateTime(d.getTime()); return dt.toString(dtfBR); } public static String removeAcento(String acentuado) { if (acentuado == null) return null; String temp = new String(acentuado); temp = temp.replaceAll("[ÃÂÁÀ]", "A"); temp = temp.replaceAll("[ÉÈÊ]", "E"); temp = temp.replaceAll("[ÍÌÎ]", "I"); temp = temp.replaceAll("[ÕÔÓÒ]", "O"); temp = temp.replaceAll("[ÛÚÙÜ]", "U"); temp = temp.replaceAll("[Ç]", "C"); temp = temp.replaceAll("[ãâáà]", "a"); temp = temp.replaceAll("[éèê]", "e"); 
		temp = temp.replaceAll("[íìî]", "i");
		temp = temp.replaceAll("[õôóò]", "o");
		temp = temp.replaceAll("[ûúùü]", "u");
		temp = temp.replaceAll("[ç]", "c");
		return temp;
	}

	public static String slugify(String string, boolean lowercase, boolean underscore) {
		if (string == null)
			return null;
		string = string.trim();
		if (string.length() == 0)
			return null;
		string = removeAcento(string);
		// Apostrophes.
		string = string.replaceAll("([a-z])'s([^a-z])", "$1s$2");
		string = string.replaceAll("[^\\w]", "-").replaceAll("-{2,}", "-");
		// Get rid of any - at the start and end; the result must be reassigned because Java strings are immutable.
		string = string.replaceAll("-+$", "").replaceAll("^-+", "");
		if (underscore)
			string = string.replaceAll("-", "_");
		return (lowercase ? string.toLowerCase() : string);
	}
}
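A short usage sketch for the hashing helpers defined in the class above. The md5Hex wrapper is hypothetical; it only combines the existing calcMd5 and asHex methods.

// Hypothetical convenience wrapper: MD5-hash a UTF-8 string and return lowercase hex.
public static String md5Hex(String text) throws java.security.NoSuchAlgorithmException {
    byte[] digest = Utils.calcMd5(text.getBytes(java.nio.charset.StandardCharsets.UTF_8));
    return Utils.asHex(digest); // 32 hex characters
}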
def authenticate_and_init(cls, username, password): if is_ldap_authenticated_user(username, password) is True: return cls(username=username) else: return None
def office_to_rss(version, data): out = [] out.append('''<?xml version="1.0" encoding="UTF-8"?> <rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom"> <channel> <language>fr</language> <source>%s</source> <copyright>Copyright AELF - Tout droits réservés</copyright> ''' % data.get('source', 'unk')) for office_variant in data.get('variants', []): office = office_variant['name'] for lecture_variants in office_variant['lectures']: lecture = lecture_variants[0] key = lecture.get('key', '') text = lecture.get('text', '') title = lecture.get('title', '') reference = lecture.get('reference', '') if version < 47: if 'antienne' in lecture: antienne = "<blockquote class=\"antienne\"><b>Antienne&nbsp;:</b> %s</blockquote>" % (lecture['antienne']) text = "%s%s%s" % (antienne, text, antienne) if 'verset' in lecture: text = "%s<blockquote class=\"verset\"%s</blockquote>" % (text, lecture['verset']) if 'repons' in lecture: text = "%s<blockquote class=\"repons\">%s</blockquote>" % (text, lecture['repons']) else: if 'antienne' in lecture: antienne_1 = "<div class=\"antienne\"><span tabindex=\"0\" id=\"%s-antienne-1\" class=\"line\"><span class=\"antienne-title\">Antienne&nbsp;:</span> %s</span></div>" % (key, lecture['antienne']) antienne_2 = "<div class=\"antienne\"><span tabindex=\"0\" id=\"%s-antienne-2\" class=\"line\"><span class=\"antienne-title\">Antienne&nbsp;:</span> %s</span></div>" % (key, lecture['antienne']) gloria_patri = "" if reference != "Dn 3": gloria_patri = "<div class=\"gloria_patri\"><span tabindex=\"0\" id=\"%s-gloria_patri\" class=\"line\">Gloire au Père, ...</span></div>" % (key) text = "%s%s%s%s" % (antienne_1, text, gloria_patri, antienne_2) if 'verset' in lecture: text = "%s<blockquote class=\"verset\"%s</blockquote>" % (text, lecture['verset']) if 'repons' in lecture: text = "%s<blockquote class=\"repons\">%s</blockquote>" % (text, lecture['repons']) title = lecture.get('short_title', title) long_title = lecture.get('long_title', '') title_reference = "" if reference: title_reference = "<small><i>— %s</i></small>" % (reference) text = "<h3>%s%s</h3><div style=\"clear: both;\"></div>%s" % (long_title, title_reference, text) out.append(''' <item> <variant>{office}</variant> <title>{title}</title> <reference>{reference}</reference> <key>{key}</key> <description><![CDATA[{text}]]></description> </item>'''.format( office = office, title = escape(title), reference = escape(reference), key = escape(lecture.get('key', '')), text = text, )) out.append('''</channel></rss>''') return ''.join(out)
/** * POJO class for single data processing unit - string (line) * * @author Sergii Sinelnychenko * */ public class StringFingerprint { /** * Line captured, processed and analyzed */ private String line; /** * MD5 hash of the result of line processing. Used to be the unique * anagram-signature of this string, allows easily to find other strings with * the same set of characters */ private String md5; public StringFingerprint(String md5, String str) { this.md5 = md5; this.line = str; } public String getLine() { return this.line; } public String getMd5() { return this.md5; } @Override public String toString() { StringBuilder builder = new StringBuilder(); builder.append("StringFingerprint [line=").append(this.line).append(", md5=").append(this.md5).append("]"); return builder.toString(); } }
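The javadoc above describes the md5 field as an anagram-signature, so that lines built from the same set of characters share the same hash. The original computation is not shown, so the sketch below is only an assumption of how it might work: the class name, the sort-based normalization, and the hex encoding are all illustrative rather than taken from the source.

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Arrays;

// Hypothetical helper: two lines that are anagrams of each other produce the
// same MD5 value, so they can be grouped by this signature.
public final class AnagramSignature {
    public static String of(String line) throws NoSuchAlgorithmException {
        char[] chars = line.toCharArray();
        Arrays.sort(chars); // canonical order: anagrams become identical strings
        byte[] digest = MessageDigest.getInstance("MD5")
                .digest(new String(chars).getBytes(StandardCharsets.UTF_8));
        StringBuilder hex = new StringBuilder(digest.length * 2);
        for (byte b : digest) {
            hex.append(String.format("%02x", b & 0xff));
        }
        return hex.toString();
    }
}

Under that assumption, a fingerprint could then be built as new StringFingerprint(AnagramSignature.of(line), line).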
import java.io.Console;

public class App {
  public static void main(String[] args) {
    Console myConsole = System.console();
    System.out.println("Hi, welcome to the store!");
    System.out.println("Would you like to plan a new event? (type Y or N)");
    String startEventPlan = myConsole.readLine();

    if (startEventPlan.equalsIgnoreCase("y")) {
      System.out.println("Great, how many people are you expecting?");
      Integer guests = Integer.parseInt(myConsole.readLine());
      System.out.println("Alrighty, what kind of food would you like? (type choice) Pizza, BBQ, Hors d'oeuvres, Sandwiches, or Sushi?");
      String food = myConsole.readLine().toLowerCase();
      System.out.println("Okay, what kind of beverage would you like? (type choice) Water, Soft Drinks, Beer, Cocktails, or Wine?");
      String beverage = myConsole.readLine().toLowerCase();
      System.out.println("Alrighty, what kind of entertainment would you like? (type choice) Music, Band, Magician, Inflatables, or None?");
      String entertainment = myConsole.readLine().toLowerCase();
      EventPlan newParty = new EventPlan(guests, food, beverage, entertainment);
      System.out.println("Okay, here is what your party would cost: $" + newParty.eventCostTotal());
    } else {
      System.out.println("Okay, take your time and let us know when you need to throw a great party!");
    }

    // public static showTotalCost (EventPlan newParty) {
    //   System.out.println("Okay, here is what your party would cost:" + newParty.eventCostTotal());
    // }
  }
}