import 'reflect-metadata';

/**
 * Serialize decorator.
 * It applies to properties.
 * @param replaceWithId
 * @returns {(target: Object, propertyKey: string) => void}
 * @constructor
 */
export declare function Serialize(replaceWithId?: boolean): (target: Object, propertyKey: string) => void;

/**
 * Decorator to mark a class as Serializable.
 * @returns {(constructor: Function) => void}
 * @constructor
 */
export declare function Serializable(): (constructor: Function) => void;
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build ignore

// This file is run by the x509 tests to ensure that a program with minimal
// imports can sign certificates without errors resulting from missing hash
// functions.
package main

import (
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"math/big"
	"time"
)

func main() {
	block, _ := pem.Decode([]byte(pemPrivateKey))
	rsaPriv, err := x509.ParsePKCS1PrivateKey(block.Bytes)
	if err != nil {
		panic("Failed to parse private key: " + err.Error())
	}

	template := x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject: pkix.Name{
			CommonName:   "test",
			Organization: []string{"Σ Acme Co"},
		},
		NotBefore: time.Unix(1000, 0),
		NotAfter:  time.Unix(100000, 0),
		KeyUsage:  x509.KeyUsageCertSign,
	}

	if _, err = x509.CreateCertificate(rand.Reader, &template, &template, &rsaPriv.PublicKey, rsaPriv); err != nil {
		panic("failed to create certificate with basic imports: " + err.Error())
	}
}

var pemPrivateKey = `-----BEGIN RSA PRIVATE KEY-----
MIIBOgIBAAJBALKZD0nEffqM1ACuak0bijtqE2QrI/KLADv7l3kK3ppMyCuLKoF0
fd7Ai2KW5ToIwzFofvJcS/STa6HA5gQenRUCAwEAAQJBAIq9amn00aS0h/CrjXqu
/ThglAXJmZhOMPVn4eiu7/ROixi9sex436MaVeMqSNf7Ex9a8fRNfWss7Sqd9eWu
RTUCIQDasvGASLqmjeffBNLTXV2A5g4t+kLVCpsEIZAycV5GswIhANEPLmax0ME/
EO+ZJ79TJKN5yiGBRsv5yvx5UiHxajEXAiAhAol5N4EUyq6I9w1rYdhPMGpLfk7A
IU2snfRJ6Nq2CQIgFrPsWRCkV+gOYcajD17rEqmuLrdIRexpg8N1DOSXoJ8CIGlS
tAboUGBxTDq3ZroNism3DaMIbKPyYrAqhKov1h5V
-----END RSA PRIVATE KEY-----
`
Laboratory comparison of new high flow rate respirable size-selective sampler

Abstract

A newly developed high flow rate respirable size-selective cyclone sampler (GK4.162—also known as the Respirable Air Sampling Cyclone Aluminum Large (RASCAL)) was calibrated to determine its optimum operating flow rate. The Health and Safety Laboratory in the United Kingdom and two laboratories from the National Institute for Occupational Safety and Health in the United States conducted experiments using two different methods: (1) polydisperse aerosol with a time-of-flight direct-reading instrument (Aerodynamic Particle Sizer (APS)) and (2) monodisperse aerosol with an APS. The measured performance data for the cyclone were assessed against the international respirable convention using the bias-map approach. Although the GK4.162 cyclone was tested using different aerosols and detection methods, the results from the three laboratories were generally similar. The recommended flow rate, based on the agreement of results from the laboratories, was 9.0 L/min.
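As a rough numerical illustration of the bias-map idea, the sketch below (Python) compares a hypothetical cyclone penetration curve against the respirable convention for lognormal workplace aerosols. It is not the laboratories' actual procedure: the convention parameters (inhalable fraction 0.5*(1 + exp(-0.06*D)); respirable convention as its product with the tail of a cumulative lognormal of median 4.25 um and GSD 1.5, per ISO 7708/EN 481) are quoted from memory, and the sampler's d50 and slope are placeholders rather than fitted GK4.162 values.

import numpy as np
from scipy.stats import norm

D = np.linspace(0.1, 30.0, 3000)  # aerodynamic diameter grid, micrometres

def inhalable(d):
    # Inhalable convention (ISO 7708 form, valid below ~100 um).
    return 0.5 * (1.0 + np.exp(-0.06 * d))

def respirable_convention(d):
    # Respirable convention: inhalable fraction times the complement of a
    # cumulative lognormal (median 4.25 um, GSD 1.5).
    return inhalable(d) * (1.0 - norm.cdf(np.log(d / 4.25) / np.log(1.5)))

def sampler_efficiency(d, d50=4.1, slope=1.5):
    # Placeholder penetration curve for the cyclone at one flow rate;
    # changing the flow rate mainly shifts d50, which is what calibration tunes.
    return 1.0 - norm.cdf(np.log(d / d50) / np.log(slope))

def bias(mmad, gsd):
    # Relative bias of sampled mass versus the convention for one lognormal
    # aerosol (mass median aerodynamic diameter mmad, geometric std dev gsd).
    w = norm.pdf(np.log(D), loc=np.log(mmad), scale=np.log(gsd)) / D
    sampled = np.trapz(sampler_efficiency(D) * w, D)
    ideal = np.trapz(respirable_convention(D) * w, D)
    return (sampled - ideal) / ideal

# One cell of the bias map; the full map sweeps MMAD and GSD, and the
# recommended flow rate is the one whose map stays closest to zero bias.
print(f"bias at MMAD 10 um, GSD 2.5: {bias(10.0, 2.5):+.1%}")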
import { HttpHeaders, HttpParams } from '@angular/common/http';
import { InjectionToken } from '@angular/core';
import { Title } from '@angular/platform-browser';

export const APP_TITLE = new InjectionToken<string>('appTitle');

export const appTitleFactory = (titleService: Title) => {
  return titleService.getTitle();
};

export class HttpOptions {
  headers?: HttpHeaders | { [header: string]: string | string[] };
  // observe?: 'response';
  params?: HttpParams | { [param: string]: string | string[] };
  reportProgress?: boolean;
  // responseType?: 'json';
  withCredentials?: boolean;
}
Items possibly belonging to Steve Fossett, the US adventurer who vanished a year ago, have been found by a hiker in California.

ID cards, cash and a sweatshirt were among the items found in woods near the town of Mammoth Lakes, in a rugged eastern area of the state. Mr Fossett, 63, vanished in September last year while on a solo flight that took off from neighbouring Nevada. The millionaire businessman was officially declared dead in February. Wide-ranging searches had failed to find any trace of Mr Fossett or his plane following his disappearance.

The BBC's Rajesh Mirchandani, in Los Angeles, says the items were found outside the vast area searched after Mr Fossett went missing - and also in a different direction to that in which he was thought to have flown. Mammoth Lakes has an elevation of about 7,800ft (2,400m) and is on the eastern side of the Sierra Nevada range. It is about 100 miles (160km) from where Mr Fossett began his flight. Mammoth Lakes Police Chief Randy Schienle told CNN the items were "well-weathered".

'No wreckage'

Hiker Preston Morrow told US television he had stumbled upon the items while returning from a mountain walk on Monday. There were no signs of wreckage from the plane, said Mr Morrow, an employee at a Mammoth Lakes sporting goods store. "I was coming back down this really steep terrain and what caught my eye was these little (ID) cards in the dirt and the pine needles, and some $100 bills," he said. "I was wondering, why are there some ID cards and money when there was nothing else? No wallet, no bags, nothing, nothing, nothing," he added.

Mr Morrow said he returned the next day and found the sweatshirt in the same area but was not sure if it was related. The missing adventurer's ID cards were reportedly issued by the Federal Aviation Administration in Illinois. Michael LoVallo, a lawyer for Mr Fossett's wife, Peggy, said: "We are aware of the reports and are trying to verify the information."

Mr Fossett took off from the ranch of hotel magnate Barron Hilton, at Yerington, Nevada, on 3 September on a flight that was expected to last three hours. In 2002, he became the first person to circle the globe solo in a balloon and had about 100 other world records to his name.
#include <bits/stdc++.h>
#define REP(a,b) for(int a=0; a<(b); ++a)
#define FWD(a,b,c) for(int a=(b); a<(c); ++a)
#define FWDS(a,b,c,d) for(int a=(b); a<(c); a+=d)
#define BCK(a,b,c) for(int a=(b); a>(c); --a)
#define ALL(a) (a).begin(), (a).end()
#define SIZE(a) ((int)(a).size())
#define VAR(x) #x ": " << x << " "
#define popcount __builtin_popcount
#define popcountll __builtin_popcountll
#define gcd __gcd
#define x first
#define y second
#define st first
#define nd second
#define pb push_back
using namespace std;

template<typename T> ostream& operator<<(ostream &out, const vector<T> &v){ out << "{"; for(const T &a : v) out << a << ", "; out << "}"; return out; }
template<typename S, typename T> ostream& operator<<(ostream &out, const pair<S,T> &p){ out << "(" << p.st << ", " << p.nd << ")"; return out; }

typedef long long LL;
typedef pair<int, int> PII;
typedef long double K;
typedef vector<int> VI;

const int dx[] = {0,0,-1,1}; //1,1,-1,1};
const int dy[] = {-1,1,0,0}; //1,-1,1,-1};

int n, m, pos, neg;
char buff[10];
vector<char> e;

// The remaining sum n is attainable iff it lies between the extremes reached
// by setting every remaining positive term to 1 or m (negatives likewise).
bool possible(){ return pos - neg*m <= n && n <= pos*m - neg; }

int a, b;

// Commit a value to the next positive slot, growing the candidate greedily
// until the remainder stays feasible.
void find_pos(){
    --pos;
    n -= a;
    while(!possible()) --n, ++a;
    printf("%d ", a);
}

void find_neg(){
    --neg;
    n += b;
    while(!possible()) ++n, ++b;
    printf("%d ", b);
}

int main(){
    pos = 1;
    for(;;){
        scanf("%s", buff);
        scanf("%s", buff);
        if(buff[0] == '=') break;
        e.push_back(buff[0]);
        if(buff[0] == '+') ++pos;
        else ++neg;
    }
    scanf("%d", &n);
    m = n;
    a = b = 1;
    if(!possible()) printf("Impossible\n");
    else{
        printf("Possible\n");
        find_pos();
        for(char c : e){
            printf("%c ", c);
            if(c == '+') find_pos();
            else find_neg();
        }
        printf(" = %d\n", m);
    }
    return 0;
}
Unravelling the ‘Velcro Effect’: Is Deterring Assaults against the Police Indicative of a More Aggressive Style of Policing?

The ‘Velcro Effect’ was proposed to explain how the mere threat of using incapacitant sprays deterred assaults against the police (Kaminski, Edwards, & Johnson, 1998). The deterrent capabilities of CS spray were used to legitimise its deployment against a background of critical media coverage. However, what Kaminski et al. failed to consider is that batons — and the police uniform — also deter assaults against the police. This suggests an alternative explanation, one that centres on the actions of police officers rather than on the characteristics ascribed to any single weapon. Based on a study of officers in one of the 43 police forces in England and Wales, this article argues that it is the officer's ability to appear competent and ready to fight that deters would-be assailants. Furthermore, it is argued that in order to deter would-be assailants, police officers have to adopt an aggressive demeanour that is far removed from the traditional image of routine policing in England and Wales.
import java.awt.BorderLayout;
import java.awt.Color;
import java.awt.GridLayout;
import java.io.IOException;

import javax.swing.BoxLayout;
import javax.swing.JButton;
import javax.swing.JFrame;
import javax.swing.JLabel;
import javax.swing.JPanel;
import javax.swing.JProgressBar;

/**
 * @author Luiz Felipe De Amorim
 */
public class MovieDetails extends JFrame {

    private JPanel panel;
    private JLabel filmImg;
    private JLabel title;
    private JPanel info;
    private JProgressBar stars;
    private JLabel year;
    private JLabel classification;
    private JLabel duration;
    private JPanel description;
    private JLabel shortDescription;
    private JLabel cast;
    private JLabel director;
    private JPanel buttons;
    private JButton myList;
    private JButton downl;
    private JPanel relationated;
    private JLabel film0;
    private JLabel film01;

    public MovieDetails() throws IOException {
        super();

        // Initialize the frame
        this.setSize(800, 900);
        this.setResizable(false);
        this.setLocationRelativeTo(null);
        this.setVisible(true);

        // Main panel
        panel = new JPanel();
        panel.setLayout(new GridLayout(0, 1));

        // Movie image label
        filmImg = new JLabel("IMAGEM");
        filmImg.setForeground(Color.red);
        filmImg.setLayout(new BorderLayout());

        // Title label
        title = new JLabel("Título");

        // Description panel (movie/series description text)
        description = new JPanel();
        BoxLayout layout = new BoxLayout(description, BoxLayout.Y_AXIS);
        description.setLayout(layout);
        shortDescription = new JLabel("Descrição do filme/serie"); // short description of the movie/series
        cast = new JLabel("Elenco"); // cast of the movie/series
        director = new JLabel("Diretores"); // director(s) of the movie/series

        // Info panel
        info = new JPanel();
        info.setLayout(new GridLayout(1, 4)); // layout of the info panel
        stars = new JProgressBar(); // recommendation stars
        year = new JLabel("19XX"); // release year of the movie/series
        classification = new JLabel("TV-MA"); // content rating of the movie/series
        duration = new JLabel("1hr 28min"); // running time of the movie/series

        // Panel with the "my list" and download buttons
        buttons = new JPanel();
        myList = new JButton("My list"); // button to add to my list
        downl = new JButton("Download"); // download button

        /*
        // Panel of related movies/series
        relationated = new JPanel();
        BoxLayout layoutRelationed = new BoxLayout(relationated, BoxLayout.X_AXIS);
        description.setLayout(layoutRelationed);
        film0 = new JLabel("Filme0"); // related movie label 0
        film01 = new JLabel("Filme01"); // related movie label 01
        */

        // Wire the components together
        this.add(panel); // add panel to the frame
        panel.add(filmImg);
        panel.add(title);
        panel.add(info);
        info.add(stars);
        info.add(year);
        info.add(classification);
        info.add(duration);
        panel.add(description);
        description.add(shortDescription);
        description.add(cast);
        description.add(director);
        panel.add(buttons);
        buttons.add(myList);
        buttons.add(downl);
        /*
        panel.add(relationated);
        relationated.add(film0);
        relationated.add(film01);
        */
    }
}
Fancy wrapping your lips around "arguably the fanciest premixed drink ever made"? Well, consider this your invitation to the Lemmy Party. This Friday The Catfish will be launching the new Lemmy Party Berliner Weisse from Mash Brewing. Although to call it simply a "Berliner Weisse" is underselling it somewhat, which is why they reckon it's more of a "beer RTD". The beer takes its inspiration from the Whisky Sour cocktail and is actually a special version of the Wizz Fizz sour ale, aged in Starward whisky barrels with added lemon zest and a mixed-culture yeast.

The night will also see the launch of Mash's new Indian Ale, plus the new branding of the twin IPAs Copycat and Challenger. And, on top of that, they'll be slinging cans of the Thornbury lager from sister brewery 3 Ravens – the first time these tinnies will be available to the public.

Get along from 6pm as there'll be a brewer's shout, a coaster-flipping comp and DJ Orgasmatron spinning up some Lemmy Kilmister-approved tunes.
use crate::data::*;
use crate::view::View;
use clipboard::{ClipboardContext, ClipboardProvider};
use std::cmp;
use termion::event::{Event, Key, MouseButton, MouseEvent};

#[derive(Debug, Clone)]
pub enum State {
    Insert,
    Message,
    Prompt(String, String, PromptAction),
    Select(usize),
    Selected,
    Open(String),
    Exit,
}

#[derive(Debug, Clone)]
pub enum PromptAction {
    Save,
    ConfirmExit,
    Open,
    ConfirmOpen(String),
}

const SCROLL_FACTOR: usize = 2;

impl State {
    // Handles a Termion event, consuming the current state and returning the new state
    pub fn handle<T>(self, content: &mut T, view: &mut View, event: Event) -> Self
    where
        T: Editable + Saveable + Undoable + Selectable + Modifiable,
    {
        match self {
            State::Prompt(prompt, message, action) => {
                State::handle_prompt(content, view, event, prompt, message, action)
            }
            State::Select(origin) => State::handle_select(content, view, event, origin),
            State::Insert => State::handle_insert(content, view, event),
            State::Message => State::handle_message(content, view, event),
            State::Selected => State::handle_selected(content, view, event),
            State::Open(_) | State::Exit => panic!("Can't handle exit state"),
        }
    }

    fn handle_message<T>(content: &mut T, view: &mut View, event: Event) -> Self
    where
        T: Editable + Named + Undoable + Modifiable + Saveable,
    {
        view.quiet();
        Self::handle_insert(content, view, event)
    }

    fn handle_insert<T>(content: &mut T, view: &mut View, event: Event) -> Self
    where
        T: Editable + Named + Undoable + Modifiable + Saveable,
    {
        match event {
            Event::Key(Key::Ctrl('q')) | Event::Key(Key::Esc) => {
                if content.was_modified() {
                    let prompt = "Changes not saved do you really want to exit (y/N): ".to_string();
                    let message = "".to_string();
                    view.prompt(&prompt, &message);
                    return State::Prompt(prompt, message, PromptAction::ConfirmExit);
                } else {
                    return State::Exit;
                }
            }
            Event::Key(Key::Ctrl('s')) => {
                if content.name().is_empty() {
                    let prompt = "Save to: ".to_string();
                    view.prompt(&prompt, "");
                    return State::Prompt(prompt, "".to_string(), PromptAction::Save);
                } else {
                    let msg = match content.save() {
                        Err(e) => e.to_string(),
                        Ok(_) => format!("Saved file {}", content.name()),
                    };
                    view.message(&msg);
                    return State::Message;
                }
            }
            Event::Key(Key::Ctrl('o')) => {
                let prompt = "Open file: ".to_string();
                let message = "".to_string();
                view.prompt(&prompt, &message);
                return State::Prompt(prompt, message, PromptAction::Open);
            }
            Event::Mouse(MouseEvent::Press(MouseButton::Left, x, y)) => {
                let (line, col) = view.translate_coordinates(content, x, y);
                content.move_at(line, col);
                return State::Select(content.pos());
            }
            Event::Mouse(MouseEvent::Press(MouseButton::WheelDown, _, _)) => {
                view.scroll_view(SCROLL_FACTOR as isize, content);
            }
            Event::Mouse(MouseEvent::Press(MouseButton::WheelUp, _, _)) => {
                view.scroll_view(-(SCROLL_FACTOR as isize), content);
            }
            Event::Key(Key::Ctrl('z')) => {
                content.undo();
            }
            Event::Key(Key::Ctrl('y')) => {
                content.redo();
            }
            Event::Key(Key::Ctrl('v')) => {
                let mut ctx: ClipboardContext = ClipboardProvider::new().unwrap();
                for c in ctx
                    .get_contents()
                    .unwrap_or_else(|_| "".to_string())
                    .chars()
                {
                    content.insert(c);
                }
            }
            Event::Key(Key::Up) => {
                content.step(Movement::Up);
                view.adjust_view(content.line());
            }
            Event::Key(Key::Down) => {
                content.step(Movement::Down);
                view.adjust_view(content.line());
            }
            Event::Key(Key::Left) => {
                content.step(Movement::Left);
                view.adjust_view(content.line());
            }
            Event::Key(Key::Right) => {
                content.step(Movement::Right);
                view.adjust_view(content.line());
            }
            Event::Key(Key::PageUp) => {
                content.step(Movement::PageUp(view.lines_height() as usize));
                view.center_view(content.line());
            }
            Event::Key(Key::PageDown) => {
                content.step(Movement::PageDown(view.lines_height() as usize));
                view.center_view(content.line());
            }
            Event::Key(Key::Home) => {
                content.step(Movement::LineStart);
            }
            Event::Key(Key::End) => {
                content.step(Movement::LineEnd);
            }
            Event::Key(Key::Backspace) | Event::Key(Key::Ctrl('h')) => {
                content.delete();
                view.adjust_view(content.line());
            }
            Event::Key(Key::Delete) => {
                content.delete_forward();
                view.adjust_view(content.line());
            }
            Event::Key(Key::Char(c)) => {
                content.insert(c);
                view.adjust_view(content.line());
            }
            Event::Unsupported(u) => {
                view.message(&format!("Unsupported escape sequence {:?}", u));
            }
            _ => {}
        }
        State::Insert
    }

    fn handle_prompt<T>(
        content: &mut T,
        view: &mut View,
        event: Event,
        prompt: String,
        mut message: String,
        action: PromptAction,
    ) -> Self
    where
        T: Editable + Saveable + Modifiable,
    {
        match event {
            Event::Key(Key::Char('\n')) => match action {
                PromptAction::Save => {
                    let msg: String;
                    let old_name = content.name().clone();
                    content.set_name(message.clone());
                    msg = match content.save() {
                        Err(e) => {
                            content.set_name(old_name);
                            e.to_string()
                        }
                        Ok(_) => format!("Saved file {}", message),
                    };
                    view.message(&msg);
                    State::Message
                }
                PromptAction::ConfirmExit => {
                    if message.to_lowercase() == "y" {
                        State::Exit
                    } else {
                        view.message("");
                        State::Message
                    }
                }
                PromptAction::Open => {
                    let filename = message;
                    if content.was_modified() {
                        let prompt =
                            "Changes not saved do you really want to open a new file (y/N): "
                                .to_string();
                        let message = "".to_string();
                        view.prompt(&prompt, &message);
                        State::Prompt(prompt, message, PromptAction::ConfirmOpen(filename))
                    } else {
                        State::Open(filename)
                    }
                }
                PromptAction::ConfirmOpen(filename) => {
                    if message.to_lowercase() == "y" {
                        State::Open(filename)
                    } else {
                        view.message("");
                        State::Message
                    }
                }
            },
            Event::Key(Key::Char('\t')) => State::Prompt(prompt, message, action), // TODO: autocompletion
            Event::Key(Key::Char(c)) => {
                message.push(c);
                view.prompt(&prompt, &message);
                State::Prompt(prompt, message, action)
            }
            Event::Key(Key::Backspace) | Event::Key(Key::Delete) => {
                message.pop();
                view.prompt(&prompt, &message);
                State::Prompt(prompt, message, action)
            }
            Event::Key(Key::Ctrl('q')) => State::Exit,
            Event::Key(Key::Esc) => {
                view.quiet();
                State::Insert
            }
            _ => State::Prompt(prompt, message, action),
        }
    }

    fn handle_selected<T>(content: &mut T, view: &mut View, event: Event) -> Self
    where
        T: Selectable + Editable + Named + Undoable + Modifiable + Saveable,
    {
        match event {
            Event::Key(Key::Ctrl('c')) => {
                let (beg, end) = content.sel().unwrap();
                let selection: String = content.iter().skip(beg).take(end - beg + 1).collect();
                let mut ctx: ClipboardContext = ClipboardProvider::new().unwrap();
                ctx.set_contents(selection).unwrap();
                content.reset_sel();
                State::Insert
            }
            Event::Key(Key::Ctrl('x')) => {
                let (beg, end) = content.sel().unwrap();
                let selection: String = content.iter().skip(beg).take(end - beg + 1).collect();
                let mut ctx: ClipboardContext = ClipboardProvider::new().unwrap();
                ctx.set_contents(selection).unwrap();
                delete_sel(content);
                view.adjust_view(content.line());
                content.reset_sel();
                State::Insert
            }
            Event::Key(Key::Backspace) | Event::Key(Key::Delete) => {
                delete_sel(content);
                view.adjust_view(content.line());
                content.reset_sel();
                State::Insert
            }
            Event::Key(Key::Char(_)) => {
                delete_sel(content);
                view.adjust_view(content.line());
                content.reset_sel();
                Self::handle_insert(content, view, event)
            }
            _ => {
                content.reset_sel();
                Self::handle_insert(content, view, event)
            }
        }
    }

    fn handle_select<T>(content: &mut T, view: &mut View, event: Event, origin: usize) -> Self
    where
        T: Editable + Selectable,
    {
        match event {
            Event::Mouse(MouseEvent::Hold(x, y)) => {
                let (line, col) = view.translate_coordinates(content, x, y);
                content.move_at(line, col);
                let sel = (
                    cmp::min(origin, content.pos()),
                    cmp::max(origin, content.pos()),
                );
                content.set_sel(sel);
                State::Select(origin)
            }
            Event::Mouse(MouseEvent::Release(x, y)) => {
                let (line, col) = view.translate_coordinates(content, x, y);
                content.move_at(line, col);
                if origin != content.pos() {
                    let sel = (
                        cmp::min(origin, content.pos()),
                        cmp::max(origin, content.pos()),
                    );
                    content.set_sel(sel);
                    State::Selected
                } else {
                    State::Insert
                }
            }
            _ => State::Select(origin),
        }
    }
}

fn delete_sel<T>(content: &mut T)
where
    T: Selectable + Editable,
{
    let (beg, end) = content.sel().unwrap();
    assert!(beg < end);
    let end = cmp::min(end + 1, content.len() - 1);
    content.move_to(end);
    for _ in beg..end {
        content.delete();
    }
}
Sweden—Recent Changes in Welfare State Arrangements

The Swedish welfare state, once developed to create a new society based on social equality and universal rights, has taken on a partly new direction. Extensive choice reforms have been implemented in social services, and an increasing proportion of social services, including child day care, primary and secondary schools, health care, and care of the elderly, is provided by private entrepreneurs, although still funded by taxes. Private equity firms have drawn considerable profits from these welfare services. The changes have taken place over a 20-year period, but at an accelerated pace in the last decade. Sweden previously had very generous sickness and unemployment insurance, in terms of both duration and benefit levels, but is falling behind in generosity, as indicated by rising levels of relative poverty among those who depend on benefits and transfers. Increasing income inequality over the past 20 years further widens the gaps between population groups. In some respects, Sweden is becoming similar to other Organisation for Economic Co-operation and Development countries. The article describes some of the changes that have occurred. Nevertheless, there is still widespread popular support for publicly provided welfare state services.
import {Animated} from 'react-native';
import {systemWeights} from 'react-native-typography';
import styled from 'styled-components/native';

import {COLOR_WHITE, COLOR_YELLOW} from '../../globals/constants';

interface BackdropProps {
  height: number;
  width: number;
}

export const Backdrop = styled(Animated.View)<BackdropProps>`
  position: absolute;
  align-items: center;
  justify-content: space-around;
  z-index: 1;
  height: ${({height}) => height}px;
  padding: 40px;
  width: ${({width}) => width}px;
  background-color: rgba(0, 0, 0, 0.93);
`;

export const Button = styled.TouchableOpacity`
  align-items: center;
  justify-content: center;
  height: 56px;
  width: 144px;
  border: 3px ${COLOR_YELLOW};
  border-radius: 8px;
`;

export const ButtonText = styled.Text`
  font-size: 22px;
  text-align: center;
  color: ${COLOR_YELLOW};
`;

ButtonText.defaultProps = {
  style: systemWeights.bold,
};

export const Image = styled.Image`
  height: 128px;
  margin-bottom: 40px;
  width: 128px;
`;

export const ImageFrame = styled.View`
  justify-content: center;
  align-items: center;
`;

export const Text = styled.Text`
  font-size: 24px;
  text-align: center;
  color: ${COLOR_WHITE};
`;

Text.defaultProps = {
  style: systemWeights.semibold,
};
/**
 * Represents a section of a SQL statement. Sections are:<br/>
 * SELECT, WHERE, ORDER BY
 *
 * @author The eFaps Team
 * @version $Id$
 */
public abstract class AbstractQSection
{
    /**
     * Append to the SQLSelect.
     *
     * @param _select SQLSelect to be appended to
     * @return this
     * @throws EFapsException on error
     */
    public abstract AbstractQSection appendSQL(SQLSelect _select)
        throws EFapsException;

    /**
     * Prepare this section.
     *
     * @param _query Query this AbstractQSection belongs to
     * @return this
     * @throws EFapsException on error
     */
    public abstract AbstractQSection prepare(final AbstractObjectQuery<?> _query)
        throws EFapsException;
}
/*
** trackEvent
**
** An allocation event has dropped in on us.
** We need to do the right thing and track it.
*/
void
trackEvent(uint32_t aTimeval, char aType, uint32_t aHeapRuntimeCost,
           tmcallsite *aCallsite, uint32_t aHeapID, uint32_t aSize,
           tmcallsite *aOldCallsite, uint32_t aOldHeapID, uint32_t aOldSize)
{
    if (NULL != aCallsite) {
        if (NULL != CALLSITE_RUN(aCallsite) &&
            (NULL == aOldCallsite || NULL != CALLSITE_RUN(aOldCallsite))) {
            STAllocation *allocation = NULL;

            allocation = allocationTracker(aTimeval, aType, aHeapRuntimeCost,
                                           aCallsite, aHeapID, aSize,
                                           aOldCallsite, aOldHeapID, aOldSize);
            if (NULL == allocation) {
                REPORT_ERROR(__LINE__, allocationTracker);
            }
        }
        else {
            REPORT_ERROR(__LINE__, trackEvent);
        }
    }
    else {
        REPORT_ERROR(__LINE__, trackEvent);
    }
}
/**
 * See {@link #combine_float}, but for a group method with a void result.
 * These need to be combined as well, both for synchronization purposes,
 * and for the exceptions.
 */
protected final synchronized void combine_void(BinomialCombiner combiner,
        boolean to_all, int lroot, Exception ex) throws Exception {

    int peer;
    int mask = 1;
    int relrank = (myGroupRank - lroot + groupSize) % groupSize;
    GroupMessage message;

    while (mask < groupSize) {
        if ((mask & relrank) == 0) {
            peer = relrank | mask;
            if (peer < groupSize) {
                peer = (peer + lroot) % groupSize;

                /* receive result */
                message = getMessage(peer);

                /* call the combiner */
                try {
                    combiner.combine(myGroupRank, ex, peer,
                            message.exceptionResult, groupSize);
                    ex = null;
                } catch (Exception e) {
                    ex = e;
                }
                freeMessage(message);
            }
        } else {
            peer = ((relrank & (~mask)) + lroot) % groupSize;

            /* send result */
            int peer_rank = destination.memberRanks[peer];
            int peer_skeleton = destination.memberSkels[peer];

            logger.debug("Sending message to peer " + peer + " on cpu "
                    + peer_rank);

            WriteMessage w = Group.unicast[peer_rank].newMessage();
            w.writeByte(COMBINE);
            w.writeInt(peer_skeleton);
            w.writeInt(myGroupRank);
            if (ex != null) {
                w.writeByte(RESULT_EXCEPTION);
                w.writeObject(ex);
            } else {
                w.writeByte(RESULT_VOID);
            }
            w.finish();
            break;
        }
        mask <<= 1;
    }

    if (to_all) {
        if (myGroupRank == lroot) {
            if (reply_to_all == null) {
                reply_to_all = MulticastGroups.getMulticastSendport(
                        destination.multicastHostsID,
                        destination.multicastHosts);
            }

            /* forward result to all */
            WriteMessage w = reply_to_all.newMessage();
            w.writeByte(COMBINE_RESULT);
            w.writeInt(destination.groupID);
            w.writeInt(lroot);
            if (ex != null) {
                w.writeByte(RESULT_EXCEPTION);
                w.writeObject(ex);
            } else {
                w.writeByte(RESULT_VOID);
            }
            w.finish();
        }

        /* receive result from root */
        message = getMessage(lroot);
        if (message.exceptionResult != null) {
            ex = message.exceptionResult;
        }
        freeMessage(message);
    }

    if (ex != null) {
        throw ex;
    }
}
import argparse import code import io import json import os import re import sys import traceback import typing from abc import ABC, abstractmethod from argparse import ArgumentParser from fnmatch import fnmatch from typing import Any, Callable, Dict, Iterable, List, Optional, TextIO, Tuple, Union from hpc.autoscale import hpclogging as logging from hpc.autoscale import hpctypes as ht from hpc.autoscale.job import demandprinter from hpc.autoscale.job.demand import DemandResult from hpc.autoscale.job.demandcalculator import DemandCalculator, new_demand_calculator from hpc.autoscale.job.demandprinter import OutputFormat from hpc.autoscale.job.driver import SchedulerDriver from hpc.autoscale.job.job import Job from hpc.autoscale.job.schedulernode import SchedulerNode from hpc.autoscale.node import node as nodelib from hpc.autoscale.node import vm_sizes from hpc.autoscale.node.bucket import NodeBucket from hpc.autoscale.node.constraints import NodeConstraint, get_constraints from hpc.autoscale.node.node import Node from hpc.autoscale.node.nodehistory import NodeHistory from hpc.autoscale.node.nodemanager import NodeManager, new_node_manager from hpc.autoscale.results import ( DefaultContextHandler, EarlyBailoutResult, MatchResult, register_result_handler, ) from hpc.autoscale.util import json_dump, load_config, partition_single def _print(*msg: Any) -> None: if os.getenv("SCALELIB_AUTOCOMPLETE_LOG"): log_file = os.getenv("SCALELIB_AUTOCOMPLETE_LOG") assert log_file with open(log_file, "a") as fw: print(*msg, file=fw) def error(msg: Any, *args: Any) -> None: print(str(msg) % args, file=sys.stderr) raise RuntimeError(str(msg) % args) sys.exit(1) def warn(msg: Any, *args: Any) -> None: print(str(msg) % args, file=sys.stderr) def str_list(c: str) -> List[str]: return c.rstrip(",").split(",") def json_type(c: str) -> Dict: try: if os.path.exists(c): with open(c) as fr: return json.load(fr) return json.loads(c) except Exception as e: error("Given json file/literal '{}': {}".format(c, e)) sys.exit(1) def constraint_type(c: Union[str, List[str]]) -> str: assert isinstance(c, str) try: return json.dumps(json.loads(c)) except Exception: ... try: key, value_expr = c.split("=", 1) values = [x.strip() for x in value_expr.split(",")] values_converted = [] for v in values: converted: Union[int, float, bool, str, None] = v if v.startswith('"') and v.endswith('"'): converted = v.lstrip('"').rstrip('"') elif v.lower() in ["true", "false"]: converted = v.lower() == "true" else: try: converted = int(v) except Exception: try: converted = float(v) except Exception: ... 
if v == "null": converted = None values_converted.append(converted) if len(values_converted) == 1: return json.dumps({key: values_converted[0]}) return json.dumps({key: values_converted}) except Exception as e: _print(str(e)) raise def parse_format(c: str) -> str: c = c.lower() if c in ["json", "table", "table_headerless"]: return c print("Expected json, table or table_headerless - got", c, file=sys.stderr) sys.exit(1) class ReraiseAssertionInterpreter(code.InteractiveConsole): def __init__( self, locals: Optional[Dict] = None, filename: str = "<console>", reraise: bool = True, ) -> None: code.InteractiveConsole.__init__(self, locals=locals, filename=filename) self.reraise = reraise hist_file = os.path.expanduser("~/.cyclegehistory") if os.path.exists(hist_file): with open(hist_file) as fr: self.history_lines = fr.readlines() else: self.history_lines = [] self.history_fw = open(hist_file, "a") def raw_input(self, prompt: str = "") -> str: line = super().raw_input(prompt) if line.strip(): self.history_fw.write(line) self.history_fw.write("\n") self.history_fw.flush() return line def showtraceback(self) -> None: if self.reraise: _, value, _ = sys.exc_info() if isinstance(value, AssertionError) or isinstance(value, SyntaxError): raise value return code.InteractiveConsole.showtraceback(self) class ShellDict(dict): def __init__(self, wrapped: Dict[str, Any], prefix: str = "") -> None: super().__init__(wrapped) for key, value in wrapped.items(): # let's replace invalid letters with _ # e.g. ip-012345678 -> ip_012345678 attr_safe_key = re.sub("[^a-zA-Z0-9_]", "_", prefix + key) if not attr_safe_key[0].isalpha(): # default prefix if the user has something like job ids, which # are integers and can't be used as attributes attr_safe_key = "k_" + attr_safe_key setattr(self, attr_safe_key, value) def shell(config: Dict, shell_locals: Dict[str, Any], script: Optional[str],) -> None: """ Provides read only interactive shell. type gehelp() in the shell for more information """ banner = "\nCycleCloud Autoscale Shell" interpreter = ReraiseAssertionInterpreter(locals=shell_locals) try: __import__("readline") # some magic - create a completer that is bound to the locals in this interpreter and not # the __main__ interpreter. interpreter.push("import readline, rlcompleter") interpreter.push('readline.parse_and_bind("tab: complete")') interpreter.push("_completer = rlcompleter.Completer(locals())") interpreter.push("def _complete_helper(text, state):") interpreter.push(" ret = _completer.complete(text, state)") interpreter.push(' ret = ret + ")" if ret[-1] == "(" else ret') interpreter.push(" return ret") interpreter.push("") interpreter.push("readline.set_completer(_complete_helper)") for item in interpreter.history_lines: try: if '"""' in item: interpreter.push( "readline.add_history('''%s''')" % item.rstrip("\n") ) else: interpreter.push( 'readline.add_history("""%s""")' % item.rstrip("\n") ) except Exception: pass interpreter.push("from hpc.autoscale.job.job import Job\n") interpreter.push("from hpc.autoscale import hpclogging as logging\n") except ImportError: banner += ( "\nWARNING: `readline` is not installed, so autocomplete will not work." 
) if script: with open(script) as fr: source = fr.read() # important - if you pass in a separate globals dict then # any locally defined functions will be stored incorrectly # and you will get unknown func errors exec(source, shell_locals, shell_locals) else: interpreter.interact(banner=banner) def disablecommand(f: Callable) -> Callable: setattr(f, "disabled", True) return f class CommonCLI(ABC): def __init__(self, project_name: str) -> None: self.project_name = project_name self.example_nodes: List[Node] = [] self.node_names: List[str] = [] self.hostnames: List[str] = [] self.__node_mgr: Optional[NodeManager] = None def connect(self, config: Dict) -> None: """Tests connection to CycleCloud""" self._node_mgr(config) @abstractmethod def _setup_shell_locals(self, config: Dict) -> Dict: ... def shell_parser(self, parser: ArgumentParser) -> None: parser.set_defaults(read_only=False) parser.add_argument("--script", "-s", required=False) def shell(self, config: Dict, script: Optional[str] = None) -> None: """ Interactive python shell with relevant objects in local scope. Use --script to run python scripts """ shell_locals = self._setup_shell_locals(config) for t in [Job, ht.Memory, ht.Size, Node, SchedulerNode, NodeBucket]: simple_name = t.__name__.split(".")[-1] if simple_name not in shell_locals: shell_locals[simple_name] = t if script and not os.path.exists(script): error("Script does not exist: %s", script) shell(config, shell_locals, script) @abstractmethod def _driver(self, config: Dict) -> SchedulerDriver: ... def _make_example_nodes(self, config: Dict, node_mgr: NodeManager) -> List[Node]: buckets = node_mgr.get_buckets() return [b.example_node for b in buckets] def _get_example_nodes( self, config: Union[List[Dict], Dict], force: bool = False ) -> List[Node]: if self.example_nodes: return self.example_nodes if isinstance(config, list): config = load_config(*config) driver = self._driver(config) cache_file = os.path.join(driver.autoscale_home, ".example_node_cache.json") if os.path.exists(cache_file) and not force: with open(cache_file) as fr: cache = json.load(fr) self.example_nodes = [Node.from_dict(x) for x in cache["example-nodes"]] self.node_names = cache["node-names"] self.hostnames = cache["hostnames"] else: node_mgr = self._node_mgr(config, driver) self.example_nodes = self._make_example_nodes(config, node_mgr) _, scheduler_nodes = driver._read_jobs_and_nodes(config) self.example_nodes.extend(scheduler_nodes) self.node_names = [n.name for n in node_mgr.get_nodes()] self.hostnames = [ n.hostname for n in node_mgr.get_nodes() if n.hostname ] + [x.hostname for x in scheduler_nodes] with open(cache_file, "w") as fw: to_dump = { "example-nodes": self.example_nodes, "node-names": self.node_names, "hostnames": self.hostnames, } json_dump(to_dump, fw) return self.example_nodes def _node_mgr( self, config: Dict, driver: Optional[SchedulerDriver] = None ) -> NodeManager: if self.__node_mgr is not None: return self.__node_mgr driver = driver or self._driver(config) config = driver.preprocess_config(config) jobs, nodes = driver.read_jobs_and_nodes(config) node_mgr = new_node_manager(config, existing_nodes=nodes) driver.preprocess_node_mgr(config, node_mgr) self.__node_mgr = node_mgr return self.__node_mgr def _node_history(self, config: Dict) -> NodeHistory: return self._driver(config).new_node_history(config) def _demand_calc( self, config: Dict, driver: SchedulerDriver ) -> Tuple[DemandCalculator, List[Job]]: node_mgr = self._node_mgr(config, driver) node_history = 
self._node_history(config) driver = self._driver(config) jobs, scheduler_nodes = driver.read_jobs_and_nodes(config) dcalc = new_demand_calculator( config, node_mgr=node_mgr, node_history=node_history, node_queue=driver.new_node_queue(config), singleton_lock=driver.new_singleton_lock(config), existing_nodes=scheduler_nodes, ) return dcalc, jobs def _demand( self, config: Dict, driver: Optional[SchedulerDriver] = None, ctx_handler: Optional[DefaultContextHandler] = None, ) -> DemandCalculator: driver = driver or self._driver(config) if not ctx_handler: ctx_handler = self._ctx_handler(config) register_result_handler(ctx_handler) dcalc, jobs = self._demand_calc(config, driver) logging.info( "Calculating demand for %s jobs: %s", len(jobs), [j.name for j in jobs] ) for job in jobs: ctx_handler.set_context("[Job {}]".format(job.name)) logging.info("Adding %s", job) dcalc.add_job(job) demand = dcalc.get_demand() logging.info( "Done calculating demand. %s new nodes, %s unmatched nodes, %s nodes total", len(demand.new_nodes), len(demand.unmatched_nodes), len(demand.compute_nodes), ) return dcalc def _ctx_handler(self, config: Dict) -> DefaultContextHandler: return DefaultContextHandler("[{}]".format(self.project_name)) def autoscale_parser(self, parser: ArgumentParser) -> None: parser.set_defaults(read_only=False) self._add_output_format(parser) self._add_output_columns(parser) def autoscale( self, config: Dict, output_columns: Optional[List[str]], output_format: OutputFormat, dry_run: bool = False, long: bool = False, ) -> None: """End-to-end autoscale process, including creation, deletion and joining of nodes.""" output_columns = output_columns or self._get_default_output_columns(config) if dry_run: logging.warning("Running gridengine autoscaler in dry run mode") # allow multiple instances config["lock_file"] = None # put in read only mode config["read_only"] = True ctx_handler = self._ctx_handler(config) register_result_handler(ctx_handler) driver = self._driver(config) driver.initialize() config = driver.preprocess_config(config) logging.debug("Driver = %s", driver) invalid_nodes: List[Node] = [] jobs, scheduler_nodes = driver.read_jobs_and_nodes(config) for snode in scheduler_nodes: if snode.marked_for_deletion: invalid_nodes.append(snode) # nodes in error state must also be deleted nodes_to_delete = driver.handle_failed_nodes(invalid_nodes) demand_calculator = self._demand(config, driver, ctx_handler) failed_nodes = demand_calculator.node_mgr.get_failed_nodes() failed_nodes_to_delete = driver.handle_failed_nodes(failed_nodes) nodes_to_delete.extend(failed_nodes_to_delete) demand_result = demand_calculator.finish() if dry_run: demandprinter.print_demand( output_columns, demand_result, output_format=output_format, log=not dry_run, long=long, ) return ctx_handler.set_context("[joining]") # details here are that we pass in nodes that matter (matched) and the driver figures out # which ones are new and need to be added via qconf joined = driver.add_nodes_to_cluster( [x for x in demand_result.compute_nodes if x.exists] ) driver.handle_post_join_cluster(joined) ctx_handler.set_context("[scaling]") # bootup all nodes. Optionally pass in a filtered list if demand_result.new_nodes: # if not dry_run: result = demand_calculator.bootup() logging.info(result) # if not dry_run: demand_calculator.update_history() # we also tell the driver about nodes that are unmatched. It filters them out # and returns a list of ones we can delete. 
idle_timeout = int(config.get("idle_timeout", 300)) boot_timeout = int(config.get("boot_timeout", 3600)) logging.fine("Idle timeout is %s", idle_timeout) unmatched_for_5_mins = demand_calculator.find_unmatched_for( at_least=idle_timeout ) timed_out_booting = demand_calculator.find_booting(at_least=boot_timeout) # I don't care about nodes that have keep_alive=true timed_out_booting = [n for n in timed_out_booting if not n.keep_alive] timed_out_to_deleted: List[Node] = [] unmatched_nodes_to_delete: List[Node] = [] if timed_out_booting: logging.info( "The following nodes have timed out while booting: %s", timed_out_booting, ) timed_out_to_deleted = driver.handle_boot_timeout(timed_out_booting) or [] if unmatched_for_5_mins: node_expr = ", ".join([str(x) for x in unmatched_for_5_mins]) logging.info( "Unmatched for at least %s seconds: %s", idle_timeout, node_expr ) unmatched_nodes_to_delete = ( driver.handle_draining(unmatched_for_5_mins) or [] ) nodes_to_delete = [] for node in timed_out_to_deleted + unmatched_nodes_to_delete: if node.assignments: logging.warning( "%s has jobs assigned to it so we will take no action.", node ) continue nodes_to_delete.append(node) if nodes_to_delete: try: logging.info("Deleting %s", [str(n) for n in nodes_to_delete]) delete_result = demand_calculator.delete(nodes_to_delete) if delete_result: # in case it has anything to do after a node is deleted (usually just remove it from the cluster) driver.handle_post_delete(delete_result.nodes) except Exception as e: logging.warning("Deletion failed, will retry on next iteration: %s", e) logging.exception(str(e)) demandprinter.print_demand( output_columns, demand_result, output_format=output_format, log=not dry_run, long=long, ) try: self.refresh_autocomplete(config) except Exception as e: logging.error( "Ignoring error that occurred while updating autocomplete refresh: %s", e, ) return demand_result def demand_parser(self, parser: ArgumentParser) -> None: self._add_output_format(parser) self._add_output_columns(parser) def demand( self, config: Dict, output_columns: Optional[List[str]], output_format: OutputFormat, long: bool = False, ) -> None: """Dry-run version of autoscale.""" output_columns = output_columns or self._get_default_output_columns(config) self.autoscale(config, output_columns, output_format, dry_run=True, long=long) def jobs(self, config: Dict) -> None: """ Writes out autoscale jobs as json. Note: Running jobs are excluded. 
""" jobs, _ = self._driver(config).read_jobs_and_nodes(config) json.dump( jobs, sys.stdout, indent=2, default=lambda x: x.to_dict() if hasattr(x, "to_dict") else str(x), ) def create_nodes_parser(self, parser: ArgumentParser) -> None: parser.set_defaults(read_only=False) parser.add_argument("-k", "--keep-alive", action="store_true", default=False) parser.add_argument("-n", "--nodes", type=int, default=-1) parser.add_argument("-s", "--slots", type=int, default=-1, required=False) parser.add_argument( # type: ignore "-a", "--nodearray", type=str, required=False ).completer = self._nodearray_completer # type: ignore parser.add_argument( # type: ignore "-v", "--vm-size", type=str, required=False ).completer = self._vm_size_completer # type: ignore parser.add_argument("-p", "--placement-group", type=str, required=False) parser.add_argument( "-S", "--software-configuration", type=json_type, required=False ) parser.add_argument( "-O", "--node-attribute-overrides", type=json_type, required=False ) self._add_constraint_expr(parser) parser.add_argument( "-d", "--dry-run", action="store_true", default=False, required=False ) parser.add_argument( "-x", "--exclusive", action="store_true", default=False, required=False ) parser.add_argument( "-X", "--exclusive-task", action="store_true", default=False, required=False ) self._add_output_format(parser) self._add_output_columns(parser) parser.add_argument("--strategy", "-t", type=str, choices=["pack", "scatter"]) def create_nodes( self, config: Dict, nodes: int, slots: int, constraint_expr: List[str], nodearray: Optional[str], vm_size: Optional[str], placement_group: Optional[str], strategy: Optional[str], exclusive: bool, exclusive_task: bool, software_configuration: Optional[Dict], node_attribute_overrides: Optional[Dict], output_columns: Optional[List[str]], output_format: OutputFormat, long: bool = False, keep_alive: bool = False, dry_run: bool = False, ) -> None: """ Create a set of nodes given various constraints. A CLI version of the nodemanager interface. 
""" if nodes < 0 and slots < 0: nodes = 1 if nodes > 0 and slots > 0: error("Please pick -n/--nodes or -s/--slots, but not both.") if not strategy: strategy = "pack" if slots > 0 else "scatter" if dry_run: config["lock_file"] = None config["read_only"] = True if placement_group: config["nodearrays"] = nodearrays = config.get("nodearrays", {}) nodearrays[nodearray] = na = nodearrays.get(nodearray, {}) na["placement_groups"] = pgs = na.get("placement_groups", []) if placement_group not in pgs: pgs.append(placement_group) cons_dict: Dict[str, Any] = {"node.exists": False} if exclusive_task: cons_dict["exclusive-task"] = True elif exclusive: cons_dict["exclusive"] = True if nodearray: cons_dict["node.nodearray"] = nodearray if vm_size: cons_dict["node.vm_size"] = vm_size # none is also a valid placement group cons_dict["node.placement_group"] = placement_group writer = io.StringIO() self.validate_constraint(config, constraint_expr, writer) validated_cons = json.loads(writer.getvalue()) if not isinstance(validated_cons, list): validated_cons = [validated_cons] unparsed_cons = validated_cons + [cons_dict] parsed_cons = get_constraints(unparsed_cons) node_mgr = self._node_mgr(config) result = node_mgr.allocate( parsed_cons, node_count=nodes, slot_count=slots, allow_existing=False, assignment_id="create_nodes()", ) if result: for node in result.nodes: node.keep_alive = keep_alive if software_configuration: node.node_attribute_overrides[ "Configuration" ] = node.node_attribute_overrides.get("Configuration", {}) node.node_attribute_overrides["Configuration"].update( software_configuration ) if node_attribute_overrides: node.node_attribute_overrides.update(node_attribute_overrides) bootup_result = node_mgr.bootup() if bootup_result: # assert bootup_result.nodes demandprinter.print_demand( columns=output_columns or self._get_default_output_columns(config), demand_result=DemandResult( bootup_result.nodes, bootup_result.nodes, [], [] ), output_format=output_format, long=long, ) return else: error(str(bootup_result)) else: error(str(result)) def _node_name_completer( self, prefix: str, action: argparse.Action, parser: ArgumentParser, parsed_args: argparse.Namespace, ) -> List[str]: self._get_example_nodes(parsed_args.config) output_prefix = "" if prefix.endswith(","): output_prefix = prefix return [output_prefix + x + "," for x in self.node_names] def _hostname_completer( self, prefix: str, action: argparse.Action, parser: ArgumentParser, parsed_args: argparse.Namespace, ) -> List[str]: self._get_example_nodes(parsed_args.config) output_prefix = "" if "," in prefix: left, _right = prefix.rsplit(",", 1) output_prefix = left + "," return [output_prefix + x + "," for x in self.hostnames] def _nodearray_completer( self, prefix: str, action: argparse.Action, parser: ArgumentParser, parsed_args: argparse.Namespace, ) -> List[str]: self._get_example_nodes(parsed_args.config) return list(set([x.nodearray for x in self.example_nodes])) def _output_columns_completer( self, prefix: str, action: argparse.Action, parser: ArgumentParser, parsed_args: argparse.Namespace, ) -> List[str]: try: config = parsed_args.config if isinstance(config, list): config = load_config(*config) self._get_example_nodes(config) _print(action) _print(dir(action)) _print(parser) _print(dir(parser)) _print(parsed_args) _print(dir(parsed_args)) cmd = None if hasattr(parsed_args, "cmd"): cmd = getattr(parsed_args, "cmd") default_output_columns = self._get_default_output_columns(config, cmd) + [] for node in self.example_nodes: for res_name in 
node.resources: if res_name not in default_output_columns: default_output_columns.append(res_name) for meta_name in node.metadata: if meta_name not in default_output_columns: default_output_columns.append(meta_name) for prop in nodelib.QUERYABLE_PROPERTIES: if prop not in default_output_columns: default_output_columns.append(prop) output_prefix = "" if "," in prefix: rest_of_list = prefix[: prefix.rindex(",")] output_prefix = "{},".format(rest_of_list) return ["{}{},".format(output_prefix, x) for x in default_output_columns] # return ["{},".format(x) for x in default_output_columns] except Exception: traceback.print_exc(file=sys.__stderr__) raise def _vm_size_completer( self, prefix: str, action: argparse.Action, parser: ArgumentParser, parsed_args: argparse.Namespace, ) -> List[str]: try: self._get_example_nodes(parsed_args.config) filtered_nodes = self.example_nodes if hasattr(parsed_args, "nodearray") and parsed_args.nodearray: filtered_nodes = [ n for n in self.example_nodes if n.nodearray == parsed_args.nodearray ] return list(set([x.vm_size for x in filtered_nodes])) except Exception: import traceback _print(traceback.format_exc()) raise def join_nodes_parser(self, parser: ArgumentParser) -> None: parser.set_defaults(read_only=False) self._add_hostnames(parser) self._add_nodenames(parser) def join_nodes( self, config: Dict, hostnames: List[str], node_names: List[str] ) -> None: """Adds selected nodes to the scheduler""" driver, demand_calc, nodes = self._find_nodes(config, hostnames, node_names) joined_nodes = driver.add_nodes_to_cluster(nodes) print("Joined the following nodes:") for n in joined_nodes or []: print(" ", n) def retry_failed_nodes_parser(self, parser: ArgumentParser) -> None: parser.set_defaults(read_only=False) def retry_failed_nodes(self, config: Dict) -> None: """Retries all nodes in a failed state.""" node_mgr = self._node_mgr(config) node_mgr.cluster_bindings.retry_failed_nodes() def remove_nodes_parser(self, parser: ArgumentParser) -> None: self._add_hostnames(parser) self._add_nodenames(parser) parser.add_argument("--force", action="store_true", default=False) parser.set_defaults(read_only=False) def remove_nodes( self, config: Dict, hostnames: List[str], node_names: List[str], force: bool = False, ) -> None: """Removes the node from the scheduler without terminating the actual instance.""" self.delete_nodes(config, hostnames, node_names, force, do_delete=False) def delete_nodes_parser(self, parser: ArgumentParser) -> None: self._add_hostnames(parser) self._add_nodenames(parser) parser.add_argument("--force", action="store_true", default=False) parser.set_defaults(read_only=False) def delete_nodes( self, config: Dict, hostnames: List[str], node_names: List[str], force: bool = False, do_delete: bool = True, ) -> None: """Deletes node, including draining post delete handling""" driver, demand_calc, nodes = self._find_nodes(config, hostnames, node_names) if not force: for node in nodes: if node.assignments: error( "%s is currently matched to one or more jobs (%s)." + " Please specify --force to continue.", node, node.assignments, ) if node.keep_alive and do_delete: error( "%s is marked as KeepAlive=true. Please exclude this.", node, ) if node.required: error( "%s is unmatched but is flagged as required." 
+ " Please specify --force to continue.", node, ) drained_nodes = driver.handle_draining(nodes) or [] print("Drained the following nodes that have joined {}:".format(driver.name)) for n in drained_nodes: print(" ", n) if do_delete: result = demand_calc.delete(nodes) print("Deleting the following nodes:") for n in result.nodes or []: print(" ", n) removed_nodes = driver.handle_post_delete(nodes) or [] print("Removed the following nodes from {}:".format(driver.name)) for n in removed_nodes: print(" ", n) def default_output_columns_parser(self, parser: ArgumentParser) -> None: cmds = [ x for x in dir(self) if x[0].isalpha() and hasattr(getattr(self, x), "__call__") ] parser.add_argument("-d", "--command", choices=cmds) def default_output_columns( self, config: Dict, command: Optional[str] = None ) -> None: """ Output what are the default output columns for an optional command. """ self._get_default_output_columns(config) def_cols = self._default_output_columns(config, command) sys.stdout.write("# cli option\n") sys.stdout.write("--output-columns {}\n".format(",".join(def_cols))) sys.stdout.write("# json snippet for autoscale.json\n") sys.stdout.write('"default-output-columns": ') arg_parser = create_arg_parser(self.project_name, self) output_columns = {} if command else {"default": def_cols} assert arg_parser._subparsers assert arg_parser._subparsers._actions for action in arg_parser._subparsers._actions: if not action.choices: continue choices: Dict[str, Any] = action.choices # type: ignore for cmd_name, choice in choices.items(): # if they specified a specific command, filter for it if command and cmd_name != command: continue for action in choice._actions: if "--output-columns" in action.option_strings: output_columns[cmd_name] = self._default_output_columns( config, cmd_name ) json.dump({"output-columns": output_columns}, sys.stdout, indent=2) @abstractmethod def _default_output_columns( self, config: Dict, cmd: Optional[str] = None ) -> List[str]: ... 
def _get_default_output_columns( self, config: Dict, cmd_name: Optional[str] = None ) -> List[str]: cmd_name = cmd_name or traceback.extract_stack()[-2].name cmd_specified = config.get("output-columns", {}).get(cmd_name) if cmd_specified: return cmd_specified default_specified = config.get("output-columns", {}).get("default") if default_specified: return default_specified default_cmd = self._default_output_columns(config, cmd_name) if default_cmd: return default_cmd return self._default_output_columns(config) def nodes_parser(self, parser: ArgumentParser) -> None: self._add_output_columns(parser) self._add_output_format(parser) self._add_constraint_expr(parser) def nodes( self, config: Dict, constraint_expr: List[str], output_columns: List[str], output_format: OutputFormat, long: bool = False, ) -> None: """Query nodes""" writer = io.StringIO() self.validate_constraint(config, constraint_expr, writer=io.StringIO()) validated_constraints = writer.getvalue() driver = self._driver(config) output_columns = output_columns or self._get_default_output_columns(config) demand_calc, _ = self._demand_calc(config, driver) filtered = _query_with_constraints( config, validated_constraints, demand_calc.node_mgr.get_nodes() ) demand_result = DemandResult([], filtered, [], []) demandprinter.print_demand( output_columns, demand_result, output_format=output_format, long=long, ) def buckets_parser(self, parser: ArgumentParser) -> None: self._add_output_columns(parser) self._add_output_format(parser) self._add_constraint_expr(parser) def buckets( self, config: Dict, constraint_expr: List[str], output_format: OutputFormat, long: bool = False, output_columns: Optional[List[str]] = None, ) -> None: """Prints out autoscale bucket information, like limits etc""" writer = io.StringIO() self.validate_constraint(config, constraint_expr, writer=writer) node_mgr = self._node_mgr(config) specified_output_columns = output_columns output_format = output_format or "table" output_columns = output_columns or [ "nodearray", "placement_group", "vm_size", "vcpu_count", "pcpu_count", "memory", "available_count", ] if specified_output_columns is None: # fill in other columns for bucket in node_mgr.get_buckets(): for resource_name in bucket.resources: if resource_name not in output_columns: output_columns.append(resource_name) for bucket in node_mgr.get_buckets(): for attr in dir(bucket.limits): if attr[0].isalpha() and "count" in attr: value = getattr(bucket.limits, attr) if isinstance(value, int): bucket.resources[attr] = value bucket.example_node._resources[attr] = value filtered = _query_with_constraints( config, writer.getvalue(), node_mgr.get_buckets() ) demand_result = DemandResult([], [f.example_node for f in filtered], [], []) config["output_columns"] = output_columns demandprinter.print_demand( output_columns, demand_result, output_format=output_format, long=long, ) def limits_parser(self, parser: ArgumentParser) -> None: self._add_output_format(parser, default="json") def limits( self, config: Dict, output_format: OutputFormat, long: bool = False, ) -> None: """ Writes a detailed set of limits for each bucket. Defaults to json due to number of fields. 
""" node_mgr = self._node_mgr(config) output_format = output_format or "json" output_columns = [ "nodearray", "placement_group", "vm_size", "vm_family", "vcpu_count", "available_count", ] for bucket in node_mgr.get_buckets(): for attr in dir(bucket.limits): if attr[0].isalpha() and "count" in attr: value = getattr(bucket.limits, attr) if isinstance(value, int): bucket.resources[attr] = value bucket.example_node._resources[attr] = value for resource_name in bucket.resources: if resource_name not in output_columns: output_columns.append(resource_name) demand_result = DemandResult( [], [f.example_node for f in node_mgr.get_buckets()], [], [] ) demandprinter.print_demand( output_columns, demand_result, output_format=output_format, long=long, ) def config_parser(self, parser: ArgumentParser) -> None: parser.set_defaults(read_only=False) def config(self, config: Dict, writer: TextIO = sys.stdout) -> None: """Writes the effective autoscale config, after any preprocessing, to stdout""" driver = self._driver(config) driver.preprocess_config(config) json.dump(config, writer, indent=2) def validate_constraint_parser(self, parser: ArgumentParser) -> None: self._add_constraint_expr(parser) def validate_constraint( self, config: Dict, constraint_expr: List[str], writer: TextIO = sys.stdout, quiet: bool = False, ) -> Union[List, Dict]: """ Validates then outputs as json one or more constraints. """ ret: List = [] for expr in constraint_expr: value = json.loads(expr) if len(ret) == 0 or not isinstance(value, dict): ret.append(value) else: last = ret[-1] overlapped = set(value.keys()).intersection(set(last.keys())) if overlapped: # there are conflicts, so just add a new dictionary ret.append(value) else: # no conflicts, just update the last dictionary # and they will be anded together last.update(value) as_cons = get_constraints(ret) if not quiet: if len(as_cons) == 1: # simple case - just a single dictionary json_dump(as_cons[0], writer) else: json_dump(as_cons, writer) writer.write("\n") for cons in as_cons: sys.stderr.write(str(cons)) return ret def refresh_autocomplete(self, config: Dict) -> None: """Refreshes local autocomplete information for cluster specific resources and nodes.""" self._get_example_nodes(config, force=True) def _find_nodes( self, config: Dict, hostnames: List[str], node_names: List[str] ) -> Tuple[SchedulerDriver, DemandCalculator, List[Node]]: hostnames = hostnames or [] node_names = node_names or [] driver = self._driver(config) demand_calc = self._demand(config, driver) demand_result = demand_calc.finish() if hostnames == ["*"] or node_names == ["*"]: return driver, demand_calc, demand_result.compute_nodes by_hostname = partition_single( demand_result.compute_nodes, lambda n: n.hostname_or_uuid.lower() ) by_node_name = partition_single( demand_result.compute_nodes, lambda n: n.name.lower() ) found_nodes = [] for hostname in hostnames: if not hostname: error("Please specify a hostname") if hostname.lower() not in by_hostname: # it doesn't exist in CC, but we still want to delete it # from the cluster by_hostname[hostname.lower()] = SchedulerNode(hostname, {}) found_nodes.append(by_hostname[hostname.lower()]) for node_name in node_names: if not node_name: error("Please specify a node_name") if node_name.lower() not in by_node_name: error( "Could not find a CycleCloud node that has node_name %s." 
+ " Run 'nodes' to see available nodes.", node_name, ) found_nodes.append(by_node_name[node_name.lower()]) return driver, demand_calc, found_nodes @property def autoscale_home(self) -> str: if os.getenv("AUTOSCALE_HOME"): return os.environ["AUTOSCALE_HOME"] return os.path.join("/opt", "cycle", self.project_name) def initconfig_parser(self, parser: ArgumentParser) -> None: parser.add_argument("--cluster-name", required=True) parser.add_argument("--username", required=True) parser.add_argument("--password") parser.add_argument("--url", required=True) default_home = self.autoscale_home parser.add_argument( "--log-config", default=os.path.join(default_home, "logging.conf"), dest="logging__config_file", ).completer = default_completer # type:ignore parser.add_argument( "--lock-file", default=os.path.join(default_home, "scalelib.lock") ).completer = default_completer # type:ignore parser.add_argument( "--default-resource", type=json.loads, action="append", default=[], dest="default_resources", ) parser.add_argument( "--idle-timeout", default=300, type=int, dest="idle_timeout" ) parser.add_argument( "--boot-timeout", default=1800, type=int, dest="boot_timeout" ) parser.add_argument( "--disable-default-resources", required=False, action="store_true", default=False, # help="Disables generation of default resources for ncpus,pcpus,ngpus,mem*b", ) self._initconfig_parser(parser) @abstractmethod def _initconfig_parser(self, parser: ArgumentParser) -> None: ... def initconfig(self, writer: TextIO = sys.stdout, **config: Dict) -> None: """Creates an initial autoscale config. Writes to stdout""" self._initconfig(config) for key in list(config.keys()): if "__" in key: parent, child = key.split("__") if parent not in config: config[parent] = {} config[parent][child] = config.pop(key) json.dump(config, writer, indent=2) @abstractmethod def _initconfig(self, config: Dict) -> None: ... def analyze_parser(self, parser: ArgumentParser) -> None: parser.add_argument("--job-id", "-j", required=True) parser.add_argument("--long", "-l", action="store_true", default=False) def analyze(self, config: Dict, job_id: str, long: bool = False,) -> None: """ Prints out relevant reasons that a job was not matched to any nodes. 
""" if not long: try: _, columns_str = os.popen("stty size", "r").read().split() except Exception: columns_str = "120" columns = int(columns_str) else: columns = 2 ** 31 ctx_handler = DefaultContextHandler("[demand-cli]") register_result_handler(ctx_handler) dcalc = self._demand(config, ctx_handler=ctx_handler) found_nodes = [] for node in dcalc.get_demand().compute_nodes: if job_id in node.assignments: found_nodes.append(node) if found_nodes: print("Job {} is assigned to the following nodes:".format(job_id)) for node in found_nodes: print(" ", node) return if long: jobs, _ = self._driver(config).read_jobs_and_nodes(config) jobs = [x for x in jobs if x.name == job_id] if jobs: sys.stdout.write("Job {}:\n".format(jobs[0].name)) json_dump(jobs[0].to_dict(), sys.stdout) sys.stdout.write("\n") key = "[Job {}]".format(job_id) if key not in ctx_handler.by_context: print("Unknown job id {}".format(job_id), file=sys.stderr) sys.exit(1) results = ctx_handler.by_context[key] for result in results: if isinstance(result, (EarlyBailoutResult, MatchResult)) and result: continue if not long and result.status == "CompoundFailure": continue if not result: whitespace = " " * max(1, 24 - len(result.status)) message_lines = result.message.splitlines() if len(message_lines) > 1: print() prefix = result.status + whitespace + ":" line_columns = max(20, columns - len(prefix) - 1) print(prefix, message_lines[0][:line_columns], end="") print() for line in message_lines[1:]: print(" " * len(prefix), line[:line_columns], end="") print() def _add_constraint_expr(self, parser: ArgumentParser) -> None: parser.add_argument( # type: ignore "--constraint-expr", "-C", default=[], type=constraint_type, action="append", ).completer = self._constraint_completer # type:ignore def _add_hostnames(self, parser: ArgumentParser) -> None: parser.add_argument( # type: ignore "-H", "--hostnames", type=str_list, default=[] ).completer = self._hostname_completer # type: ignore def _add_nodenames(self, parser: ArgumentParser) -> None: parser.add_argument( # type: ignore "-N", "--node-names", type=str_list, default=[] ).completer = self._node_name_completer # type: ignore def _add_output_columns(self, parser: ArgumentParser) -> None: parser.add_argument( # type: ignore "--output-columns", "-o", type=str_list ).completer = self._output_columns_completer # type: ignore parser.add_argument("--long", "-l", action="store_true", default=False) def _add_output_format( self, parser: ArgumentParser, default: OutputFormat = "table" ) -> None: parser.add_argument( "--output-format", "-F", default=default, type=parse_format, choices=["json", "table", "table_headerless"], ) def _constraint_completer( self, prefix: str, action: argparse.Action, parser: ArgumentParser, parsed_args: argparse.Namespace, ) -> List[str]: try: return self.__constraint_completer(prefix, action, parser, parsed_args) except Exception as e: _print(str(e)) traceback.print_exc(file=sys.__stderr__) raise def __constraint_completer( self, prefix: str, action: argparse.Action, parser: ArgumentParser, parsed_args: argparse.Namespace, ) -> List[str]: example_nodes = self._get_example_nodes(parsed_args.config) _print("prefix is", prefix) if "=" in prefix: default_values: List[str] = [] def _convert_default_value(default_value: Any) -> List[str]: if isinstance(default_value, ht.Size): if default_value.magnitude.lower()[0] in "bkm": default_value = default_value.convert_to("g") if isinstance(default_value, ht.Memory): default_value = "memory::%s" % str(default_value) else: default_value = 
"size::%s" % str(default_value) elif isinstance(default_value, bool): # to encourage use of json style bools and not python # as you would do when defining things in autoscale.json default_value = str(default_value).lower() == "true" return [ str(default_value).lower(), str(not default_value).lower(), ] else: _print("not size or bool", default_value) return [str(default_value)] if prefix.startswith("node."): attr = prefix[len("node.") :].split("=")[0] output_prefix = "node.{}=".format(attr) _print("output_prefix", output_prefix) if "," in prefix: _print("output_prefix2", output_prefix) rest_of_list = prefix[prefix.index("=") + 1 : prefix.rindex(",")] output_prefix = "node.{}={},".format(attr, rest_of_list) if attr == "vm_size": poss_vm_sizes = [] # let's use the example nodes' vm_sizes unless they are all unknown if example_nodes: known = [ x.vm_size for x in example_nodes if x.vm_size != "unknown" ] poss_vm_sizes = list(set(known)) if not poss_vm_sizes: poss_vm_sizes = [ x for x in vm_sizes.all_possible_vm_sizes() if not x.startswith("Basic_") ] ret = [ output_prefix + x for x in poss_vm_sizes if x and not x.startswith("Basic") ] return ret if attr == "vm_family": poss_vm_families = [] # same idea as vm_size if example_nodes: known = [ x.vm_family for x in example_nodes if x.vm_family != "unknown" ] poss_vm_families = list(set(known)) if not poss_vm_families: poss_vm_families = [ x for x in vm_sizes.all_possible_vm_families() if x and not x.startswith("basic_") ] return [output_prefix + x for x in poss_vm_families] if attr == "location": poss_locations = [] # same idea as vm_size if example_nodes: known = [ x.location for x in example_nodes if x.location != "unknown" ] poss_locations = list(set(known)) if not poss_locations: poss_locations = [x for x in vm_sizes.all_possible_locations()] return [output_prefix + x for x in poss_locations if x] _print("attr is", attr) for example_node in example_nodes: if hasattr(example_node, attr): _print("hasattr ", attr, str(getattr(example_node, attr))) default_value = getattr(example_node, attr) default_values.extend(_convert_default_value(default_value)) else: res_name = prefix.split("=")[0] output_prefix = "{}=".format(res_name) if "," in prefix: rest_of_list = prefix[prefix.index("=") + 1 : prefix.rindex(",")] output_prefix = "{}={},".format(res_name, rest_of_list) for example_node in example_nodes: if res_name in example_node.resources: default_values.extend( _convert_default_value(example_node.resources[res_name]) ) _print("Returning", [output_prefix + x for x in default_values]) return [output_prefix + x for x in default_values] from hpc.autoscale.node.node import QUERYABLE_PROPERTIES node_attrs = ["node.%s=" % x for x in QUERYABLE_PROPERTIES] resources = [] for example_node in example_nodes: for expr in ["%s=" % x for x in example_node.resources.keys()]: if expr not in resources: resources.append(expr) return sorted(node_attrs + resources) def _invoke_autocomplete(self, parser: ArgumentParser) -> None: try: import argcomplete argcomplete.autocomplete( parser, validator=fnmatch_validator, always_complete_options="long" ) except ImportError: pass NodeLike = typing.TypeVar("NodeLike", Node, NodeBucket) def _query_with_constraints( config: Dict, constraint_expr: str, targets: List[NodeLike] ) -> List[NodeLike]: constraints = _parse_constraint(constraint_expr) filtered: List[NodeLike] = [] append: NodeLike for targ in targets: satisfied = True for c in constraints: if isinstance(targ, Node): result = c.satisfied_by_node(targ) elif isinstance(targ, 
NodeBucket):
                result = c.satisfied_by_node(targ.example_node)
            else:
                raise TypeError(
                    "Expected Node or NodeBucket, got {}".format(type(targ))
                )
            if not result:
                satisfied = False
                logging.warning(result)
                break
        if satisfied:
            assert isinstance(targ, (Node, NodeBucket))
            filtered.append(targ)
    return filtered


def _parse_constraint(constraint_expr: str) -> List[NodeConstraint]:
    try:
        if constraint_expr:
            constraint_parsed = json.loads(constraint_expr)
        else:
            constraint_parsed = []
    except Exception as e:
        print(
            "Could not parse constraint as json '{}' - {}".format(constraint_expr, e),
            file=sys.stderr,
        )
        sys.exit(1)

    if not isinstance(constraint_parsed, list):
        constraint_parsed = [constraint_parsed]

    return get_constraints(constraint_parsed)


def default_completer(
    prefix: str,
    action: argparse.Action,
    parser: ArgumentParser,
    parsed_args: argparse.Namespace,
) -> List[str]:
    if isinstance(action.default, list):
        return action.default
    return [action.default]


def create_arg_parser(
    project_name: str, module: Any, default_config: Optional[str] = None
) -> ArgumentParser:
    parser = ArgumentParser()
    sub_parsers = parser.add_subparsers()

    def csv_list(x: str) -> List[str]:
        return [x.strip() for x in x.split(",")]

    help_msg = io.StringIO()

    default_install_dir = os.path.join("/", "opt", "cycle", project_name)

    def add_parser(
        name: str,
        func: Callable,
        read_only: bool = True,
        skip_config: bool = False,
        default_config: Optional[str] = None,
    ) -> ArgumentParser:
        doc_str = (func.__doc__ or "").strip()
        doc_str = " ".join([x.strip() for x in doc_str.splitlines()])
        help_msg.write("\n {:20} - {}".format(name, doc_str))

        default_config = default_config or os.path.join(
            default_install_dir, "autoscale.json"
        )
        if not os.path.exists(default_config):
            default_config = None

        new_parser = sub_parsers.add_parser(name)

        new_parser.set_defaults(func=func, cmd=name, read_only=read_only)

        if skip_config:
            return new_parser

        new_parser.add_argument(
            "--config",
            "-c",
            default=default_config,
            required=not bool(default_config),
        ).completer = default_completer  # type: ignore
        return new_parser

    configure_parser_functions = {}

    for attr_name in dir(module):
        # only consider public attributes whose names start with a lowercase letter
        if attr_name[0].isalpha() and attr_name[0].islower():
            if attr_name.endswith("_parser"):
                cli_name = attr_name[: -len("_parser")]
                configure_parser_functions[cli_name] = getattr(module, attr_name)
            else:
                attr = getattr(module, attr_name)
                if hasattr(attr, "__call__"):
                    if hasattr(attr, "disabled") and getattr(attr, "disabled"):
                        continue
                    configure_parser_functions[attr_name] = lambda parser: 0

    for cli_name, ap_func in configure_parser_functions.items():
        func = getattr(module, cli_name)
        if hasattr(func, "disabled") and getattr(func, "disabled"):
            continue
        child_parser = add_parser(
            cli_name,
            func,
            skip_config=cli_name == "initconfig",
            default_config=default_config,
        )
        ap_func(child_parser)

    parser.usage = help_msg.getvalue()

    if hasattr(module, "_invoke_autocomplete"):
        module._invoke_autocomplete(parser)

    return parser


def main(
    argv: Iterable[str],
    project_name: str,
    module: Any,
    default_config: Optional[str] = None,
) -> None:
    parser = create_arg_parser(project_name, module, default_config)

    args = parser.parse_args(list(argv))

    if not hasattr(args, "func") or not hasattr(args, "cmd"):
        parser.print_help()
        sys.exit(1)

    # parse list of config paths to a single config
    if hasattr(args, "config"):
        args.config = load_config(args.config)

        logging.initialize_logging(args.config)

    # if applicable, set read_only/lock_file
    if args.read_only:
        args.config["read_only"] = True
        args.config["lock_file"] = None

    kwargs = {}
    for k in
dir(args): if k[0].islower() and k not in ["read_only", "func", "cmd"]: kwargs[k] = getattr(args, k) if hasattr(module, "_initialize") and hasattr(args, "config"): getattr(module, "_initialize")(args.cmd, args.config) try: args.func(**kwargs) except AssertionError: raise except Exception as e: print("Error '%s': See the rest in the log file" % str(e), file=sys.stderr) if hasattr(e, "message"): print(getattr(e, "message"), file=sys.stderr) logging.debug("Full stacktrace", exc_info=sys.exc_info()) sys.exit(1) def fnmatch_validator(keyword_to_check_against: str, current_input: str) -> bool: if "*" not in current_input: current_input = current_input + "*" return fnmatch(keyword_to_check_against, current_input)
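To make the command-discovery contract above concrete, here is a minimal sketch of an object as `create_arg_parser`/`main` would see it: any lowercase callable becomes a subcommand, and an optional `<name>_parser` hook configures its flags. The `greet` command and its `--name` flag are purely hypothetical, and the sketch assumes it lives in this same module so that `main` (and the `load_config` it uses) are in scope.

# --- illustrative only: a hypothetical command object for this CLI framework ---
class ExampleCLI:
    def greet_parser(self, parser: ArgumentParser) -> None:
        # configures the flags for the `greet` subcommand
        parser.add_argument("--name", default="world")

    def greet(self, config: Dict, name: str) -> None:
        """Print a greeting (the docstring becomes the command's usage text)."""
        print("hello", name, "read_only =", config.get("read_only"))


if __name__ == "__main__":
    # e.g. `python example_cli.py greet --config ./autoscale.json --name Ada`
    main(sys.argv[1:], "example", ExampleCLI())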
class ElfDynamic:

    """
    An object to represent an Elf dynamic entry.
    (linker/loader directives)
    """

    # NOTE: DT_NEEDED/DT_SONAME and the dt_types tag-name map are expected to
    # come from the accompanying ELF constants module (not shown in this file).
    has_string = [DT_NEEDED, DT_SONAME]

    def __init__(self, bytes=None):
        # d_tag/d_value are populated by the struct-parsing machinery
        # elsewhere; `bytes` is accepted for API compatibility.
        self.name = ""

    def __repr__(self):
        name = self.getName()
        if not name:
            name = hex(self.d_value)
        return "%s %s" % (name, self.getTypeName())

    def getName(self):
        return self.name

    def setName(self, name):
        self.name = name

    def getTypeName(self):
        return dt_types.get(self.d_tag, "Unknown: %s" % hex(self.d_tag))
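A short usage sketch for the class above, assuming it sits next to the ELF constant definitions it references (which must exist before the class body is evaluated). The two tag values are the standard ELF ones; the two-entry `dt_types` map is a deliberately tiny stand-in for the full table the real constants module would provide.

# Hypothetical, minimal stand-ins for the constants module (values per the ELF spec)
DT_NEEDED = 1
DT_SONAME = 14
dt_types = {DT_NEEDED: "DT_NEEDED", DT_SONAME: "DT_SONAME"}

dyn = ElfDynamic()
dyn.d_tag = DT_NEEDED   # normally parsed out of the .dynamic section
dyn.d_value = 0x200     # offset into the dynamic string table
dyn.setName("libc.so.6")
print(repr(dyn))        # -> libc.so.6 DT_NEEDED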
// --------------------------------------------------------------------- // // Copyright (c) 2014 - 2020 by the IBAMR developers // All rights reserved. // // This file is part of IBAMR. // // IBAMR is free software and is distributed under the 3-clause BSD // license. The full text of the license can be found in the file // COPYRIGHT at the top level directory of IBAMR. // // --------------------------------------------------------------------- /////////////////////////////// INCLUDES ///////////////////////////////////// #include "ibtk/CartCellDoubleLinearCFInterpolation.h" #include "BoundaryBox.h" #include "BoxArray.h" #include "CartesianPatchGeometry.h" #include "CellData.h" #include "CoarseFineBoundary.h" #include "GridGeometry.h" #include "Patch.h" #include "PatchHierarchy.h" #include "PatchLevel.h" #include "RefineOperator.h" #include "tbox/Array.h" #include <memory> #include <set> #include <string> #include <vector> #include "ibtk/namespaces.h" // IWYU pragma: keep // FORTRAN ROUTINES #if (NDIM == 2) #define CC_LINEAR_NORMAL_INTERPOLATION_FC IBTK_FC_FUNC(cclinearnormalinterpolation2d, CCLINEARNORMALINTERPOLATION2D) #endif #if (NDIM == 3) #define CC_LINEAR_NORMAL_INTERPOLATION_FC IBTK_FC_FUNC(cclinearnormalinterpolation3d, CCLINEARNORMALINTERPOLATION3D) #endif // Function interfaces extern "C" { void CC_LINEAR_NORMAL_INTERPOLATION_FC(double* U, const int& U_gcw, const int& ilower0, const int& iupper0, const int& ilower1, const int& iupper1, #if (NDIM == 3) const int& ilower2, const int& iupper2, #endif const int& loc_index, const int& ratio, const int* blower, const int* bupper); } /////////////////////////////// NAMESPACE //////////////////////////////////// namespace IBTK { /////////////////////////////// STATIC /////////////////////////////////////// namespace { static const int REFINE_OP_STENCIL_WIDTH = 1; static const int GHOST_WIDTH_TO_FILL = 1; } // namespace /////////////////////////////// PUBLIC /////////////////////////////////////// CartCellDoubleLinearCFInterpolation::~CartCellDoubleLinearCFInterpolation() { clearPatchHierarchy(); return; } // ~CartCellDoubleLinearCFInterpolation void CartCellDoubleLinearCFInterpolation::setPhysicalBoundaryConditions(Patch<NDIM>& /*patch*/, const double /*fill_time*/, const IntVector<NDIM>& /*ghost_width_to_fill*/) { // intentionally blank return; } // setPhysicalBoundaryConditions IntVector<NDIM> CartCellDoubleLinearCFInterpolation::getRefineOpStencilWidth() const { #if !defined(NDEBUG) TBOX_ASSERT(d_refine_op->getStencilWidth().max() <= REFINE_OP_STENCIL_WIDTH); #endif return REFINE_OP_STENCIL_WIDTH; } // getRefineOpStencilWidth void CartCellDoubleLinearCFInterpolation::preprocessRefine(Patch<NDIM>& /*fine*/, const Patch<NDIM>& /*coarse*/, const Box<NDIM>& /*fine_box*/, const IntVector<NDIM>& /*ratio*/) { // intentionally blank return; } // preprocessRefine void CartCellDoubleLinearCFInterpolation::postprocessRefine(Patch<NDIM>& fine, const Patch<NDIM>& coarse, const Box<NDIM>& fine_box, const IntVector<NDIM>& ratio) { for (const auto& patch_data_index : d_patch_data_indices) { d_refine_op->refine(fine, coarse, patch_data_index, patch_data_index, fine_box, ratio); } return; } // postprocessRefine void CartCellDoubleLinearCFInterpolation::setConsistentInterpolationScheme(const bool consistent_type_2_bdry) { d_consistent_type_2_bdry = consistent_type_2_bdry; return; } // setConsistentInterpolationScheme void CartCellDoubleLinearCFInterpolation::setPatchDataIndex(const int patch_data_index) { std::set<int> patch_data_indices; 
patch_data_indices.insert(patch_data_index); setPatchDataIndices(patch_data_indices); return; } // setPatchDataIndex void CartCellDoubleLinearCFInterpolation::setPatchDataIndices(const std::set<int>& patch_data_indices) { d_patch_data_indices.clear(); d_patch_data_indices = patch_data_indices; return; } // setPatchDataIndices void CartCellDoubleLinearCFInterpolation::setPatchDataIndices(const ComponentSelector& patch_data_indices) { std::set<int> patch_data_index_set; for (int l = 0; l < patch_data_indices.getSize(); ++l) { if (patch_data_indices.isSet(l)) { const int patch_data_index = l; patch_data_index_set.insert(patch_data_index); } } setPatchDataIndices(patch_data_index_set); return; } // setPatchDataIndices void CartCellDoubleLinearCFInterpolation::setPatchHierarchy(Pointer<PatchHierarchy<NDIM> > hierarchy) { #if !defined(NDEBUG) TBOX_ASSERT(hierarchy); #endif if (d_hierarchy) clearPatchHierarchy(); d_hierarchy = hierarchy; const int finest_level_number = d_hierarchy->getFinestLevelNumber(); d_cf_boundary.resize(finest_level_number + 1); const IntVector<NDIM>& max_ghost_width = getRefineOpStencilWidth(); for (int ln = 0; ln <= finest_level_number; ++ln) { d_cf_boundary[ln] = CoarseFineBoundary<NDIM>(*d_hierarchy, ln, max_ghost_width); } Pointer<GridGeometry<NDIM> > grid_geom = d_hierarchy->getGridGeometry(); const BoxArray<NDIM>& domain_boxes = grid_geom->getPhysicalDomain(); d_domain_boxes.resize(finest_level_number + 1); d_periodic_shift.resize(finest_level_number + 1); for (int ln = 0; ln <= finest_level_number; ++ln) { Pointer<PatchLevel<NDIM> > level = d_hierarchy->getPatchLevel(ln); const IntVector<NDIM>& ratio = level->getRatio(); d_domain_boxes[ln] = BoxArray<NDIM>(domain_boxes); d_domain_boxes[ln].refine(ratio); d_periodic_shift[ln] = grid_geom->getPeriodicShift(ratio); } return; } // setPatchHierarchy void CartCellDoubleLinearCFInterpolation::clearPatchHierarchy() { d_hierarchy.setNull(); d_cf_boundary.clear(); d_domain_boxes.clear(); d_periodic_shift.clear(); return; } // clearPatchHierarchy void CartCellDoubleLinearCFInterpolation::computeNormalExtension(Patch<NDIM>& patch, const IntVector<NDIM>& ratio, const IntVector<NDIM>& /*ghost_width_to_fill*/) { #if !defined(NDEBUG) TBOX_ASSERT(d_hierarchy); TBOX_ASSERT(!d_consistent_type_2_bdry); TBOX_ASSERT(ratio.min() == ratio.max()); #endif // Ensure that the fine patch is located on the expected destination level; // if not, we are not guaranteed to have appropriate coarse-fine interface // boundary box information. if (!patch.inHierarchy()) return; // Get the co-dimension 1 cf boundary boxes. const int patch_num = patch.getPatchNumber(); const int patch_level_num = patch.getPatchLevelNumber(); #if !defined(NDEBUG) Pointer<PatchLevel<NDIM> > level = d_hierarchy->getPatchLevel(patch_level_num); TBOX_ASSERT(&patch == level->getPatch(patch_num).getPointer()); #endif const Array<BoundaryBox<NDIM> >& cf_bdry_codim1_boxes = d_cf_boundary[patch_level_num].getBoundaries(patch_num, 1); const int n_cf_bdry_codim1_boxes = cf_bdry_codim1_boxes.size(); // Check to see if there are any co-dimension 1 coarse-fine boundary boxes // associated with the patch; if not, there is nothing to do. if (n_cf_bdry_codim1_boxes == 0) return; // Get the patch data. 
for (const auto& patch_data_index : d_patch_data_indices) { Pointer<CellData<NDIM, double> > data = patch.getPatchData(patch_data_index); #if !defined(NDEBUG) TBOX_ASSERT(data); #endif const int U_ghosts = (data->getGhostCellWidth()).max(); #if !defined(NDEBUG) if (U_ghosts != (data->getGhostCellWidth()).min()) { TBOX_ERROR("CartCellDoubleLinearCFInterpolation::computeNormalExtension():\n" << " patch data does not have uniform ghost cell widths" << std::endl); } #endif const int data_depth = data->getDepth(); const IntVector<NDIM> ghost_width_to_fill = GHOST_WIDTH_TO_FILL; Pointer<CartesianPatchGeometry<NDIM> > pgeom = patch.getPatchGeometry(); const Box<NDIM>& patch_box = patch.getBox(); for (int k = 0; k < n_cf_bdry_codim1_boxes; ++k) { const BoundaryBox<NDIM>& bdry_box = cf_bdry_codim1_boxes[k]; const Box<NDIM> bc_fill_box = pgeom->getBoundaryFillBox(bdry_box, patch_box, ghost_width_to_fill); const unsigned int location_index = bdry_box.getLocationIndex(); for (int depth = 0; depth < data_depth; ++depth) { double* const U = data->getPointer(depth); const int r = ratio.min(); CC_LINEAR_NORMAL_INTERPOLATION_FC(U, U_ghosts, patch_box.lower(0), patch_box.upper(0), patch_box.lower(1), patch_box.upper(1), #if (NDIM == 3) patch_box.lower(2), patch_box.upper(2), #endif location_index, r, bc_fill_box.lower(), bc_fill_box.upper()); } } } return; } // computeNormalExtension /////////////////////////////// PROTECTED //////////////////////////////////// /////////////////////////////// PRIVATE ////////////////////////////////////// /////////////////////////////// NAMESPACE //////////////////////////////////// } // namespace IBTK //////////////////////////////////////////////////////////////////////////////
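For intuition, here is a small Python sketch, not the exact Fortran stencil invoked above, of the linear interpolation that fills one cell-centered ghost value along the interface normal. For refinement ratio r, the coarse neighbor's cell center sits (r+1)/2 fine cell widths from the first fine interior cell center, and the ghost cell center sits one fine width past that interior center.

# Conceptual 1-D illustration of linear coarse-fine ghost filling
# (cell-centered data, refinement ratio r; not the production stencil).
def linear_normal_ghost(u_fine_interior, u_coarse_neighbor, r):
    d = (r + 1) / 2.0  # center-to-center distance, in fine cell widths
    # evaluate the line through the two centers at the ghost center,
    # which is 1 fine cell width past the fine interior center
    return u_fine_interior + (u_coarse_neighbor - u_fine_interior) * (1.0 / d)

print(linear_normal_ghost(1.0, 3.0, r=2))  # -> 2.333..., between the two values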
/** * Serialize the tree to the file * @param randomAccessFile * @throws StorageManagerException */ public void writeToStream(final RandomAccessFile randomAccessFile) throws StorageManagerException { nodesQueue.clear(); try { randomAccessFile.write(SSTableConst.MAGIC_BYTES_SPATIAL_RTREE_INDEX); final ByteBuffer nodeSizeBytes = DataEncoderHelper.intToByteBuffer(maxNodeSize); randomAccessFile.write(nodeSizeBytes.array()); nodesQueue.push(rootNode); while(! nodesQueue.isEmpty()) { final RTreeDirectoryNode node = nodesQueue.pop(); handleNewNode(randomAccessFile, node); } updateIndexNodePointer(randomAccessFile); } catch (IOException e) { throw new StorageManagerException(e); } }
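A companion sketch of how a reader might validate the header written above: the magic bytes followed by the 4-byte max-node-size integer. It is written in Python for brevity; the magic constant shown is a placeholder for the real SSTableConst value, and big-endian encoding is an assumption carried over from java.nio.ByteBuffer's default byte order.

# Sketch: read back the index header (placeholder magic; big-endian assumed)
import struct

MAGIC_BYTES_SPATIAL_RTREE_INDEX = b"rtree"  # placeholder for the SSTableConst value

def read_header(path):
    with open(path, "rb") as f:
        magic = f.read(len(MAGIC_BYTES_SPATIAL_RTREE_INDEX))
        if magic != MAGIC_BYTES_SPATIAL_RTREE_INDEX:
            raise ValueError("not a spatial r-tree index file")
        (max_node_size,) = struct.unpack(">i", f.read(4))  # int32, big-endian
        return max_node_size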
<reponame>AlexeySushkov/Exonum-Demo // Copyright 2018 The Exonum Team // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Tests that compare exonum collections and corresponding rust types using proptest. extern crate exonum; #[macro_use] extern crate proptest; extern crate modifier; use modifier::Modifier; use std::collections::HashMap; mod list_index; mod map_index; mod proof_list_index; mod proof_map_index; // Max size of the generated sequence of actions. const ACTIONS_MAX_LEN: usize = 100; #[derive(Debug, Clone)] enum ListAction<V> { Push(V), Pop, Extend(Vec<V>), // Applied with argument modulo `collection.len()`. Truncate(u64), // Applied to index modulo `collection.len()`. Set(u64, V), Clear, MergeFork, } #[derive(Debug, Clone)] enum MapAction<K, V> { // Should be applied to a small subset of keys (like modulo 8 for int). Put(K, V), // Should be applied to a small subset of keys (like modulo 8 for int). Remove(K), Clear, MergeFork, } impl<V> Modifier<Vec<V>> for ListAction<V> { fn modify(self, list: &mut Vec<V>) { match self { ListAction::Push(val) => { list.push(val); } ListAction::Pop => { list.pop(); } ListAction::Extend(vec) => { list.extend(vec); } ListAction::Truncate(size) => { let len = list.len(); if len > 0 { list.truncate(size as usize % len); } } ListAction::Set(idx, val) => { let len = list.len(); if len > 0 { list[idx as usize % len] = val; } } ListAction::Clear => { list.clear(); } _ => unreachable!(), } } } impl<V> Modifier<HashMap<u8, V>> for MapAction<u8, V> { fn modify(self, map: &mut HashMap<u8, V>) { match self { MapAction::Put(k, v) => { let k = k % 8; map.insert(k, v); } MapAction::Remove(k) => { let k = k % 8; map.remove(&k); } MapAction::Clear => { map.clear(); } _ => unreachable!(), } } } impl<V> Modifier<HashMap<[u8; 32], V>> for MapAction<[u8; 32], V> { fn modify(self, map: &mut HashMap<[u8; 32], V>) { match self { MapAction::Put(mut k, v) => { k[0] = k[0] % 8; map.insert(k, v); } MapAction::Remove(mut k) => { k[0] = k[0] % 8; map.remove(&k); } MapAction::Clear => { map.clear(); } _ => unreachable!(), } } }
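The same model-based pattern in miniature, sketched in Python: drive the system under test and a trivially correct model with one random action sequence, then compare. A plain dict stands in for the exonum MapIndex here so the sketch is self-contained, and the key space is kept small (modulo 8) as in the strategies above.

# Minimal model-based test loop (illustrative stand-in for the proptest harness)
import random

ACTIONS_MAX_LEN = 100

def run_map_actions(seed):
    rng = random.Random(seed)
    impl, model = {}, {}  # `impl` would be the exonum MapIndex in the real tests
    for _ in range(rng.randrange(ACTIONS_MAX_LEN)):
        action = rng.choice(["put", "remove", "clear"])
        if action == "put":
            k, v = rng.randrange(8), rng.randrange(256)
            impl[k] = v
            model[k] = v
        elif action == "remove":
            k = rng.randrange(8)
            impl.pop(k, None)
            model.pop(k, None)
        else:
            impl.clear()
            model.clear()
    assert impl == model  # implementation and model must agree

run_map_actions(42)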
“Everything is Either Sent by God or Used by God”: An Exploratory Study on the Impact of COVID-19 Upon the Religious Lives of Black Families Living with Dementia

The purpose of this research study was to explore the impact of COVID-19 on church engagement for Black families affected by dementia in the USA. Semi-structured interviews were conducted with current caregivers, church leaders, and persons with dementia (n = 16). The following themes emerged: (a) Ability to continue religious practices, (b) Increased church engagement, (c) Importance of fellowship, (d) Role of technology, and (e) New normal. As the Internet became the new church building, online worship services enabled more families affected by dementia to engage. Many church leaders expressed the intent of continuing to provide online worship services post-pandemic. Families highlighted their need to fellowship with other congregants. Technology was perceived as a double-edged sword, serving as both a motivator and a barrier to religious engagement. These findings will support faith leaders in understanding the needs of their congregants during the COVID-19 pandemic and in allowing families living with dementia to continue engaging in religious practices and living in meaningful ways.

Introduction

In 2020, the unprecedented COVID-19 pandemic caused 1,798,154 deaths worldwide (Worldometer, 2020). One demographic group that is particularly susceptible to the SARS-CoV-2 virus is older adults with preexisting conditions like dementia (D'Adamo et al., 2020). Persons living with dementia (PLWDs) are twice as likely to contract COVID-19 as individuals without dementia, partly because the memory impairment associated with dementia may interfere with their ability to adhere to COVID-19 preventative measures (Wang et al., 2021). Dementia is also a strong predictor of COVID-related deaths due to aging and chronic comorbidities such as cerebrovascular diseases (Abootalebi et al., 2020). PLWDs may have experienced worse dementia symptoms, such as declines in cognitive function, aggravation of behavioral symptoms, and impairment in motor function, as a consequence of the pandemic quarantine (Rainero et al., 2021).

The COVID-19 pandemic has also affected the psychological well-being of PLWDs and their caregivers. The requirements for social distancing have placed a physical barrier between families living with dementia and their support system. The lack of in-person support may induce an increasing sense of loneliness and depression in PLWDs and caregivers (Grenade & Boldy, 2008; Ye & Zhang, 2019). Furthermore, multiple studies suggest such psychological conditions can potentially increase the risk of delirium and aggressive behaviors for PLWDs, which may result in self-harm or injuries to others (LaHue et al., 2020; Kales et al., 2019). In addition, caring for a PLWD during a pandemic may lead to higher levels of burden, psychological morbidity, and financial stress (Rainero et al., 2021).

During the pandemic, Black American families living with dementia may have experienced a heightened dual burden, being at high risk for both COVID-19 and dementia. Black American older adults have the highest rate of dementia illnesses and are twice as likely to be diagnosed with Alzheimer's disease or other forms of dementia as White Americans of the same age (Alzheimer's Association, 2021). Black Americans across the USA have also been disproportionately affected by COVID-19 (Millett et al., 2020).
They are three times more likely to be hospitalized and two times more likely to die from COVID-19 than White Americans (Center for Disease Control & Prevention, 2021). As a result of the COVID-19 pandemic, Black Americans are particularly vulnerable to negative mental consequences and suffer from stress, anxiety, and depression (Ibrahimi et al., 2020; Novacek et al., 2020). Existing health disparities have heightened stress and affected the emotional well-being of Black families living with dementia (Phillips et al., 2020).

While there is no perfect solution to the dual burden of dementia and COVID-19, faith and religiosity may serve as a protective factor against stressors for PLWDs and caregivers (Koenig, 2012). Religious coping encompasses a wide array of religious behaviors, including praying, seeking support from congregations and clergies, and attending religious services (Chatters et al., 2008). Engaging in religious activities may help Black individuals alter the psychological perceptions and consequences of unfavorable situations (Taylor et al., 2004). Traditionally, churches have been the backbone of the Black community, providing much-needed support to families in distress (Assari, 2013). However, physical closures of Black churches during the pandemic may have forced families to opt for a home-based approach to practicing their faith and religious traditions (McGowan, 2020). A few studies have highlighted that the closure of churches due to the COVID-19 pandemic has negatively impacted the expression of spirituality and the mental health of individuals (Chatters et al., 2020; DeSouza et al., 2021; Galiatsatos et al., 2020; Imber-Black, 2020). However, the extent of those effects on families living with dementia, especially Black families, is unknown (Bavel et al., 2020; Wang et al., 2020). Therefore, the purpose of this research was to explore the impact of COVID-19 on the religious lives of Black families living with dementia. The findings from this study may identify how the spiritual needs of Black PLWDs and their caregivers were or were not addressed during the COVID-19 pandemic.

Methods

The phenomena of interest for this study were the firsthand experiences of Black PLWDs, dementia caregivers, and church leaders as they strove to continue their religious practices in the middle of a pandemic. The current study extended from a larger qualitative research project aiming to design faith-based home activities for families affected by dementia. For this study, we employed a qualitative descriptive design to investigate the COVID-related impact on religious engagement for Black families living with dementia. A qualitative descriptive design is characterized by low levels of interpretation from researchers, which makes it useful for gaining firsthand insights into a poorly understood phenomenon (Colorafi & Evans, 2016; Sandelowski, 2000). Institutional approval was received for this study (IRB# 00115228).

Purposeful, non-probability sampling strategies were used to recruit a convenience sample of participants who were members of predominantly Black churches for the main research project. Participants from the main project were included in this study if they met the following inclusion criteria: (1) identifying as Black or African American; (2) identifying as a PLWD, current caregiver for a PLWD, and/or church leader during the pandemic; (3) being able to communicate in English; (4) having access to technology for video conferencing.
Caregivers who had provided care to a family member with dementia only before March 2020 were not included in this study.

Data Collection

Black family caregivers, PLWDs, and church leaders in metropolitan areas of Georgia and Illinois were interviewed for this study. Before data collection, verbal informed consent was obtained from participants, or from a legal representative for those deemed ineligible to provide consent. Participants were asked to participate in a semi-structured, one-on-one interview lasting up to one hour. Interviews were conducted between July 2020 and September 2020. All interviews were conducted remotely through videoconference by the PI (F.E.). The questions for the interviews were reviewed by experts and piloted by the research team prior to being used in the study. Participants were queried on the unique spiritual needs and difficulties of families living with dementia during the COVID-19 pandemic. Caregivers and PLWDs were asked (1) how the pandemic has impacted their participation in religious practices, (2) what religious practices they were unable to engage in during the pandemic, (3) what challenges they faced while practicing their religion during the pandemic, and (4) what support they received from their faith communities. Church leaders were queried on (1) changes in format when delivering religious practices and receiving feedback from their members, and (2) how the church has supported members of their congregation affected by dementia during the pandemic. Additional information was collected using probes during interviews. Interviews were audio-recorded and transcribed verbatim. Pseudonyms and alpha-numeric codes (P1, P2, P3, etc.) were used to maintain the anonymity of participants. Field notes, such as observations and immediate impressions, were taken by the researcher during each interview.

Data Analysis

All interview data, including field notes and transcripts, were analyzed using thematic analysis and inductive coding (Braun & Clarke, 2006). Researchers first read all field notes to familiarize themselves with the data. The field notes of different researchers were compared to reveal inconsistencies in interpretation. Transcripts were then reviewed independently by members of the research team (F.E., Y.G., and M.S.) to identify codes and resolve any discrepancies among the individually developed codes. Microsoft Word and Excel were used to organize data and extract statements from the narrative interviews. Fourteen codes were identified. Figure 1 illustrates the grouping of codes, which resulted in five thematic categories.

To enhance the rigor and credibility of data collection and analysis, participants were provided with a summary of findings to verify interpretive accuracy (Carlson, 2010). Results of the study were shared with the project design team established by the PI for feedback (Lincoln & Guba, 1986). Members of the project design team directly experienced the phenomenon under investigation, each identifying as a Black American Christian and either a caregiver, PLWD, or church leader. A journal was maintained by the interviewer where biases, preconceptions, and thought processes were recorded throughout the study to achieve research reflexivity. The interviewer's reflexive journal included: (1) assumptions regarding how churches can support PLWDs during a pandemic; (2) role as a Black American Christian; (3) personal value system; and (4) potential role conflicts with participants (Tufford & Newman, 2012).
In addition, an audit trail including the context of the study, data analysis protocols, and methodological decisions was employed to achieve confirmability of the qualitative study findings (Carcary, 2009; Rodgers & Cowles, 1993).

Participant demographics, % (n):
Age: … (3); 50-59: 6 (1); 60-69: 44 (7); 70 or older: 25 (4)
What best describes you? (Select all that apply): Persons living with dementia: 19 (3); Caregiver to persons living with dementia: 44 (7); Church leader: 50 (8)
Denomination: African Methodist Episcopal (AME): 19 (3); Baptist: 31 (5); Holiness: 6 (1); Jehovah's Witness: 6 (1); Presbyterian: 13 (2); United Methodist: 6 (1); United Church of Christ: 6 (1); Non-denominational: 13 (2)

Themes

Five thematic categories emerged from analysis of transcripts: (1) Ability to continue religious practices, (2) Increased church engagement, (3) Importance of fellowship, (4) Role of technology, and (5) New normal.

Ability to Continue Religious Practices

Participants shared that families had remained connected to their faith due to the ability to continue religious traditions and practices remotely. Since the physical closure of Black churches, worship services were adjusted to online delivery. Most churches utilized various video conferencing platforms (e.g., Facebook Live, Zoom, and church websites) to invite congregants to engage at home. Many caregivers preferred this online transition as it enabled them to fully participate and receive more spiritual nourishment. One caregiver described her experience worshiping from home:

I feel less distracted. You know how when you in church and everybody jumping up, shouting, and hollering and stuff, and you can't really get the sermon in? But now that he's on the screen, and it's nobody in the church but him, it's just like he's talking just to me. That's… that's the way it seems to me. But I sort of… I, I like it (P5).

As churches adjusted to delivering services online, the length of online worship services was also reduced significantly. Caregivers believed condensed services were ideal for PLWDs due to their limited attention span. Likewise, a church leader received positive feedback from her congregants:

They liked that it's shorter. They liked that it's trying… it's-and, and I'll just use the term, truncated…We needed to get to the meat. So, they really appreciated that we go to the meat of the-the meat of the issue, um, which was getting there and hearing the word, worshipping, fellowshipping, and getting out (P3).

In addition to the provision of online services, some churches offered drive-in or outdoor services for special occasions. Church leaders received positive feedback from the community as "they indicated that they loved it, and that they want it to happen again and again while the weather is nice" (P11). However, one church leader pointed out the limitation of drive-in services for families living with dementia as she shared, "we actually did have some caregivers, a son, bring his dad. You know, and he was just excited. But that's not always possible due to physical health" (P3).

Furthermore, participants recognized the importance of continuing religious practices such as the Holy Communion during the COVID-19 pandemic. One church leader summarized, "for people who are just used to having communion, their world is gone, shut down if they don't have communion" (P3). To meet the needs of families during the pandemic, churches have been delivering consecrated communion packages to families' doorsteps.
A pastor provided a detailed description of how families living with dementia receive communion at home:

We give communion… we give you the bulletin… it outlines and asks whoever the head of the household is to be the person who does the communion… We're all priests of our household. And, um, but I have consecrated it before we give it out. And that they actually take it and then they physically give it to the other members of their household. And that is something that is very familiar to those who have dementia because they're used to receiving communion from someone that they see as an authority for them (P3).

Families living with dementia were able to remain connected to God and to their faith. One caregiver stated, "it hasn't stopped me from praising The Lord. It hasn't stopped me from going to church. It hasn't stopped me from knowing that I need to know The Lord on Sunday morning" (P2). Additionally, one participant believed that the adjustment in church services "has just been another way that God has showed us how to worship" (P5). One caregiver echoed this sentiment as she extended her gratitude to God:

I'm thankful; I just get so overwhelmed sometimes you know, with a good, in a good way, you know, thinking about, um-My pastor use to always say, "Everything is either sent by God or used by God." You know? He doesn't necessarily send a tornado, you know, that's just part of nature and things are, you know, but he can use it. You know, just like this COVID. You know, it's being used for some good stuff (P14).

Increased Church Engagement

Participants reported increased engagement in religious activities since the COVID-19 pandemic. As the pandemic pushed many Black churches online, caregivers and PLWDs unable to attend in-person worship services pre-pandemic were now able to actively engage in all church events. A caregiver, whose spouse had been homebound for years due to dementia, discussed the beneficial changes of churches adapting and hosting more of their services online:

So, about four years ago, she stopped attending the meeting. But I will say now with Zoom, you know, we have our midweek meeting at 7:30 like we usually do, and our Sunday meetings at 10 like we usually do, and we have 'em on Zoom now. So, she's able to sit in. Sometimes she, you know, she's agitated and pacing… and sometimes she's sleep… She's not able to participate, but she's able to view it. And the friends are always glad to see her (P3).

Church leaders also reported significant growth in online engagement with church activities during the pandemic. The increased online engagement potentially reflected increased attendance by PLWDs and caregivers. Many participants attributed this growth to the flexibility of time that recorded services provide. The replay feature of church services was especially beneficial for caregivers with busy schedules. As one pastor explained, "if they weren't able to get to the service, um, then they were able to go back and watch the service later" (P3). This same pastor also substantiated this growing trend by utilizing data gathered from her church's online platform:

We have more members who were not coming to service online, and connected-not just in the Sunday service, but also in Bible study, which is phenomenal for me. But our numbers in the sanctuary would be anywhere from 90 to 150 on a Sunday, right?
But every… every… all of the data from the, um, different platforms-like it's usually, at minimum, 450 people, somewhere in there every… you know, on, on a Sunday (P3).

The increase in religious engagement was also manifested in the daily lives of PLWDs and caregivers. Caregivers shared that they started to establish new religious routines or unique family traditions during the pandemic. One caregiver disclosed that her family had designated every Sunday to playing Gospel music since the beginning of quarantine. She shared, "we would go out on the deck and put on the gospel. Just play just gospel all day long. And she is content and we sit and talk and just listen to the gospel all day on Sunday" (P1). For another caregiver, social isolation enabled her to devote private time to developing new routines, such as Bible reading and prayer.

Participants also recognized the role churches played in promoting religious engagement among congregants during the pandemic. Churches developed new outreach programs to help families living with dementia remain spiritually connected. Multiple church leaders mentioned the use of communication structures like texting groups or calling trees to check on their congregants. One pastor emphasized the outreach system for youths to contact older congregants and dementia caregivers:

We started like a phone call log where we would just call and check up on the older folks, before stores were opening and things like that. Finding out what they needed. Um, and it was interesting 'cause so many of them were active, but checking on the ones who are caregivers, like, do you need a break, do you need us to run to the store (P7).

Aside from telephone communication, some churches also increased their efforts to physically support their congregants by delivering care packages to their doorsteps. The packages often contained monthly church bulletins, exercise tips, church service updates, communion kits, and resources for food and household supplies. Church leaders reported this allowed homebound members to see a familiar face.

Importance of Fellowship

Despite all the strategies churches used to help congregants stay connected online, many participants expressed that they missed the fellowship in-person worship services had offered. Traditionally, churches had served as a place for congregants to socialize with their friends and neighbors. One caregiver caring for her 79-year-old mother highlighted the importance of physical contact and face-to-face socialization for older adults in particular:

The only reason I said people go to church is to physically fellowship and say, "hey, how you doing?", that kind of thing. And for the older generation, that's the only way they know how to do it. They don't know how to do it virtual. They've got to have that physical contact to do that… And that's a part of how they were raised and going to church. You go every Sunday. We're going to see how Mr. Jones is doing and maybe barter and get the beans and you can take some sweet potatoes. And that bartering is what they grew up with (P13).

Missing physical touch was a sentiment shared by many participants. Another caregiver expressed, "I miss my church family. I miss seeing them. I miss touching 'cause I'm a hugger. I miss that" (P16). In addition, another caregiver and PLWD dyad pointed out that in-person church fellowship served as a safe haven for emotional expression.
As they shared, "It (online worship) is not the same…It's not the fellowship…touching and reaching out…and running and crying and sobbing" (P2). Furthermore, one participant suggested that, for PLWDs, the physical space of a church can evoke memories of interacting with their friends and fellow congregants:

My mom had a seat in her church that she sat in for…forever. And so the people in the pews in front of her, the people in the pews behind her, they, they were all there. And then, like when, so you, you would chat with people before the service (P13).

In response to the absence of fellowship, one participant believed that friendships and companionship, previously established through the church, could still be maintained through remote connections such as telephone calls:

But that's been one of the big things, you know, for me is realizing, you don't even really miss going to church every Sunday, you know. I stay so busy… I don't necessarily miss the fellowship 'cause I still call my friends. I still call and check on people, and they still call and check on me (P14).

Role of Technology

Participants also discussed the role of technology as both a motivator and a barrier for religious engagement. One caregiver believed technology accelerates innovation. With the help of technology, she was able to attend multiple services from different churches during the pandemic:

Like so many people, I've been able to visit more churches, you know, online you know, and, and I love it! I absolutely love that! I still look at my church as well, but I really do-I like innovative things, you know, that's why I like Bill Roberts, you know, um, really innovative. I like to see churches really involved in the community, you know, because we can still be a part of that, even with COVID, you know (P14).

For a caregiver who is a Jehovah's Witness, technology helped his wife living with dementia connect with other members around the world during the pandemic. Their denomination's global website encompassed diverse topics including songs, The Watchtower, COVID updates, prerecorded conventions, public talks, etc. According to him, "once you start … digging into the website you'll see that there's something in there for everybody, for every situation … for everything" (P6). The online community established through the Jehovah's Witnesses website helped his wife to "feel like she's a part of a worldwide organization instead of just, ah, you know, just she and I" (P6).

On the other hand, some participants were concerned that technology may hinder older adults from accessing church services. Church leaders reported that their older congregants had experienced difficulties with using technology. One church leader also described her ministry's establishment of an outreach team to help with technology issues:

So whether or not they could actually get online was then the concern. And so… when they would go out, they would make sure that the caregivers, many of whom themselves did not know how to connect … via the internet; they didn't have Facebook to watch Facebook Live. They didn't understand how to get on the website. They just were not technologically literate. And so, that team, then, was able to help…the household connect with faces at the church (P3).

To address the problem of limited digital fluency, several participants received help from younger generations.
One caregiver described the ways her grandson assisted her with setting up the TV for worship services:

When it first started, I did because aging people and electronics don't always agree with one another. But thank God they got young people in the house that know how to do that. So, they teach you. So, now I can get on by myself… the very first one, I had to come downstairs and sit down with Bruce and watch him on television. 'Cause Bruce put it on our TV in the living room (P2).

Other participants pointed out that some families did not have the equipment to access technology, including a TV, computer, or the Internet. Churches were seeking ways to accommodate those families' needs:

Two of our members … could not watch online, so that, if the person is not able to have connection in that manner, it's a technical aspect that was a concern. So, we actually purchased two TVs for two of our members that has the DVD player in it, so that they can watch the DVDs because they don't have internet…So, then we're working on what we do on Sundays is recording that and making sure that we have DVDs for them to watch those services, 'cause our services are pre-recorded (P10).

New Normal

All participants agreed that the COVID-19 pandemic had forever changed the way families and ministries worship. As one participant summarized, "there may be a return to the sanctuary, but there's never a go back to what we used to" (P8). All church leaders interviewed expressed the intent of continuing online worship services in the post-pandemic era. One church leader shared the vision of her ministry:

One of the things that we are committed to is when-not if-but when we're able to go back into the church space, how do we continue with our live streaming so that those family members who might've felt like, "How do I get home? How do I get to church? I can't leave my family member. Oh, I can't watch it until next week when it's finally up on the… on the website," they can fully participate. And so, the pandemic has helped us be able to provide service to those who were home ridden and weren't able to before (P7).

Moreover, numerous churches invested in improving their information technology equipment to deliver high-quality remote services in the long run:

As we built our budget, or we're building our budget for this next conference year, we're including technology in it because we will have to make sure that …our church is able to deliver virtually as well as live and in person. So, we're gonna have to have some media specialists…we're gonna have to have a production manager that helps us to put together a professional presentation for when we are delivering live…so that means that I'm gonna be talking to a teleprompter, as well as talking directly to an audience so, may not be using a handheld mic, may have to use a, you know, a lapel mic (P8).

Discussion

Churches play an integral role in Black communities (Assari, 2013). However, the COVID-19 pandemic has presented challenges to the Black community, resulting in widespread disruptions of religious practices (DeFranza et al., 2020). This study sought to explore the impact of the COVID-19 pandemic on the religious lives of Black families living with dementia. The personal accounts of Black PLWDs, caregivers, and church leaders were gathered to provide a comprehensive view of the religious experience of these families in a time of crisis.
The analysis of participants' interviews indicated that families with dementia continued religious practices during the pandemic through innovative formats. In response to the cancellation of in-person worship, churches adopted online worship services that enabled PLWDs who had been homebound for years to actively participate. Online delivery formats may already be familiar to individuals affected by dementia, as a previous study showed that religious programs on television, podcasts, and live-streaming connections were used as alternative methods of religious engagement for PLWDs who could not physically attend worship services even before the pandemic (Epps & Williams, 2018). Additionally, most participants preferred services that were more concise and shorter in length, partly due to the reduced attention span of PLWDs. This finding is consistent with previous research reporting that the simplicity and conciseness of worship services are essential in promoting engagement and participation for PLWDs (Epps et al., 2020a).

Participants reported an overall increased engagement in religious activities, including church services, Bible studies, and prayers, during the pandemic. This may indicate a potential increase in church engagement for families living with dementia in particular. The engagement parallels literature examining how Black cancer survivors increased religious participation to cope with COVID-19 (Hamilton et al., 2021). The significant increase in religious engagement highlights the function of spirituality as solace and support in the face of calamity (DeFranza et al., 2020). Moreover, participants reported that social isolation gave caregivers ample space and time for spiritual nourishment and self-reflection. Some caregivers were able to use this opportunity to engage in learning about their own faith and establish new religious routines. This sentiment echoes the findings of previous studies, which reported that for some Christian families spending time away from the church community can ultimately strengthen one's faith (VanderWeele, 2020).

With the increasing use of technology by families and churches during the pandemic, several participants emphasized the role of technology as a double-edged sword. Some participants perceived technology as an innovative tool that helped PLWDs and caregivers stay spiritually connected without time and location as constraints. However, other participants raised concerns about technology being a barrier for older adults to access online church services. With older PLWDs and caregivers not having the relevant equipment and skills for accessing technology, churches played increasingly important roles in providing technical support to their congregants. Additionally, several caregivers received assistance from their grandchildren with setting up technology. From this perspective, technology use as a result of the pandemic may help facilitate intergenerational bonding. This may be favorable for families affected by dementia, as previous literature suggests that intergenerational bonding has mutual benefits for older adults with dementia and young participants (Chung, 2009).

Overall, Black churches and their congregants adapted to a "new normal" as a result of the pandemic. Many church leaders were planning to continue providing online worship services even when ministries reopen church doors to their congregants.
This would be particularly beneficial to PLWDs and their caregivers who were unable to attend in-person worship services. Furthermore, most participants demonstrated an optimistic attitude toward changes in their religious practices as a result of the pandemic. Many participants realized that the physical closure of churches did not inhibit an individual's ability to engage in religious practices. Many congregants believe that God has allowed the pandemic to happen for a reason. Positive religious coping, that is, acceptance of the pandemic as God's plan, helped caregivers and PLWDs find their purpose and overcome the loneliness associated with social isolation (Dolcos et al., 2021; Koenig, 2020).

Limitations

There were several limitations in this study. Participants were recruited from a small convenience sample, with many having connections to the primary investigator, possibly resulting in selection bias. Secondly, more than half of the participants interviewed were Baptists and Methodists residing in similar geographical locations. Further, due to social distancing mandates, all interviews were conducted through video conferencing platforms; the research team may therefore have selected participants who were more technology-savvy.

Future Research and Implications

We recommend that future studies widen the scope of this research to include other ethnicities and religions outside of Christianity. Future research should also further explore the effect of COVID-19 on promoting intergenerational bonding in Black families. Faith leaders and churches may use these findings to design worship services and outreach programs specifically tailored to families living with dementia during the COVID-19 pandemic. This study's findings may also apply to homebound individuals living with other chronic conditions and their families during a pandemic. Additionally, educational efforts should target clinicians who work with older adults, to assist their patients with identifying sources of spiritual support during the pandemic, thus contributing to person-centered care (Epps et al., 2020b; Kowalczyk et al., 2020). Incorporating religious practices and activities into treatment plans has been shown to improve health outcomes (Koenig, 2012).

Conclusion

Overall, the COVID-19 pandemic has created lasting changes for Black families living with dementia and their church communities. It is critical for churches to meet the needs of their congregants and communities during the COVID-19 pandemic and future public health crises (Monson et al., 2021; Williams et al., 2021). The findings from this study contribute to an understanding of the spiritual needs of Black PLWDs and their caregivers when experiencing social isolation. More specifically, these findings have the potential to support families living with dementia who wish to continue engaging in religious practices and living in meaningful ways.
# Copyright 2016 Brocade Communications System, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import uuid

import mock

from tacker import context
from tacker.db.nfvo import nfvo_db
from tacker.nfvo import nfvo_plugin
from tacker.tests.unit.db import base as db_base

SECRET_PASSWORD = '***'


class FakeDriverManager(mock.Mock):
    def invoke(self, *args, **kwargs):
        if 'create' in args:
            return str(uuid.uuid4())


class TestNfvoPlugin(db_base.SqlTestCase):
    def setUp(self):
        super(TestNfvoPlugin, self).setUp()
        self.addCleanup(mock.patch.stopall)
        self.context = context.get_admin_context()
        self._mock_driver_manager()
        mock.patch('tacker.nfvo.nfvo_plugin.NfvoPlugin.__run__').start()
        self.nfvo_plugin = nfvo_plugin.NfvoPlugin()

    def _mock_driver_manager(self):
        self._driver_manager = mock.Mock(wraps=FakeDriverManager())
        self._driver_manager.__contains__ = mock.Mock(return_value=True)
        fake_driver_manager = mock.Mock()
        fake_driver_manager.return_value = self._driver_manager
        self._mock('tacker.common.driver_manager.DriverManager',
                   fake_driver_manager)

    def _insert_dummy_vim(self):
        session = self.context.session
        vim_db = nfvo_db.Vim(
            id='6261579e-d6f3-49ad-8bc3-a9cb974778ff',
            tenant_id='ad7ebc56538745a08ef7c5e97f8bd437',
            name='fake_vim',
            description='fake_vim_description',
            type='openstack',
            status='Active',
            placement_attr={'regions': ['RegionOne']})
        vim_auth_db = nfvo_db.VimAuth(
            vim_id='6261579e-d6f3-49ad-8bc3-a9cb974778ff',
            password='<PASSWORD>',
            auth_url='http://localhost:5000',
            vim_project={'name': 'test_project'},
            auth_cred={'username': 'test_user',
                       'user_domain_id': 'default',
                       'project_domain_id': 'default'})
        session.add(vim_db)
        session.add(vim_auth_db)
        session.flush()

    def test_create_vim(self):
        vim_dict = {'vim': {'type': 'openstack',
                            'auth_url': 'http://localhost:5000',
                            'vim_project': {'name': 'test_project'},
                            'auth_cred': {'username': 'test_user',
                                          'password': '<PASSWORD>'},
                            'name': 'VIM0',
                            'tenant_id': 'test-project'}}
        vim_type = 'openstack'
        res = self.nfvo_plugin.create_vim(self.context, vim_dict)
        self._driver_manager.invoke.assert_any_call(
            vim_type, 'register_vim', vim_obj=vim_dict['vim'])
        self._driver_manager.invoke.assert_any_call(
            'openstack', 'vim_status', auth_url='http://localhost:5000')
        self.assertIsNotNone(res)
        self.assertEqual(SECRET_PASSWORD, res['auth_cred']['password'])
        self.assertIn('id', res)
        self.assertIn('placement_attr', res)

    def test_delete_vim(self):
        self._insert_dummy_vim()
        vim_type = 'openstack'
        vim_id = '6261579e-d6f3-49ad-8bc3-a9cb974778ff'
        self.nfvo_plugin.delete_vim(self.context, vim_id)
        self._driver_manager.invoke.assert_called_once_with(
            vim_type, 'deregister_vim', vim_id=vim_id)

    def test_update_vim(self):
        vim_dict = {'vim': {'id': '6261579e-d6f3-49ad-8bc3-a9cb974778ff',
                            'vim_project': {'name': 'new_project'},
                            'auth_cred': {'username': 'new_user',
                                          'password': '<PASSWORD>'}}}
        vim_type = 'openstack'
        vim_auth_username = vim_dict['vim']['auth_cred']['username']
        vim_project = vim_dict['vim']['vim_project']
        self._insert_dummy_vim()
        res = self.nfvo_plugin.update_vim(self.context, vim_dict['vim']['id'],
                                          vim_dict)
        self._driver_manager.invoke.assert_called_once_with(
            vim_type, 'register_vim', vim_obj=mock.ANY)
        self.assertIsNotNone(res)
        self.assertIn('id', res)
        self.assertIn('placement_attr', res)
        self.assertEqual(vim_project, res['vim_project'])
        self.assertEqual(vim_auth_username, res['auth_cred']['username'])
        self.assertEqual(SECRET_PASSWORD, res['auth_cred']['password'])
// Parse makes some checks based on a torrent file.
// Maybe further checks should be made beyond these.
func (m *MetaInfo) Parse() error {
	err := m.Info.Parse()
	if err != nil {
		return fmt.Errorf("metainfo parse: %w", err)
	}
	return nil
}
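The `%w` verb above wraps the underlying error so callers can later inspect it with `errors.Is`/`errors.As`. For readers more familiar with Python, the closest analogue is exception chaining; a minimal sketch (the `MetaInfoError` type and the inner `info.parse()` call are illustrative, not part of the original code):

class MetaInfoError(Exception):
    """Raised when a torrent's metainfo fails validation."""

def parse_metainfo(info):
    try:
        info.parse()  # hypothetical inner parser that raises on bad data
    except ValueError as err:
        # `raise ... from err` keeps the original error attached, much like
        # Go's %w keeps the wrapped error available to errors.Is/errors.As.
        raise MetaInfoError("metainfo parse") from err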
def chuck_with(word, prefix) -> str:
    """Prefix `word` and insert a gershayim (״) before its last letter,
    as is done in Hebrew abbreviations."""
    word = remove_chucks(word)
    word = prefix + word
    word = word[:-1] + '״' + word[-1]
    return word
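The helper above depends on a `remove_chucks` function that is not shown. A minimal sketch of how the pair might fit together, assuming `remove_chucks` simply strips existing geresh/gershayim marks so they are not duplicated (the real implementation may differ):

# Hypothetical stand-in for the missing helper: strip Hebrew
# geresh (׳) and gershayim (״) marks from the word.
def remove_chucks(word: str) -> str:
    return word.replace('׳', '').replace('״', '')

# Example: attaching the definite-article prefix to an abbreviation.
print(chuck_with('מנכ״ל', 'ה'))  # -> המנכ״ל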
/* Copyright IBM Corp. All Rights Reserved. SPDX-License-Identifier: Apache-2.0 */ package membership import ( "crypto" "crypto/x509" ) // KeyInfo represents a (secret) key that is either already stored // in the bccsp/keystore or key material to be imported to the // bccsp key-store. In later versions it may contain also a // keystore identifier. type KeyInfo struct { // Identifier of the key inside the default keystore; this for // the case of Software BCCSP as well as the HSM BCCSP would be // the SKI of the key. KeyIdentifier string // KeyMaterial (optional) for the key to be imported; this // must be a supported PKCS#8 private key type of either // *rsa.PrivateKey, *ecdsa.PrivateKey, or ed25519.PrivateKey. KeyMaterial crypto.PrivateKey } // SigningIdentityInfo represents the configuration information // related to the signing identity the peer is to use for generating // endorsements. type SigningIdentityInfo struct { // PublicSigner carries the public information of the signing // identity. For an X.509 provider this would be represented by // an X.509 certificate. PublicSigner *x509.Certificate // PrivateSigner denotes a reference to the private key of the // peer's signing identity. PrivateSigner KeyInfo } // CryptoConfig contains configuration parameters // for the cryptographic algorithms used by the MSP // this configuration refers to. type CryptoConfig struct { // SignatureHashFamily is a string representing the hash family to be used // during sign and verify operations. // Allowed values are "SHA2" and "SHA3". SignatureHashFamily string // IdentityIdentifierHashFunction is a string representing the hash function // to be used during the computation of the identity identifier of an MSP identity. // Allowed values are "SHA256", "SHA384" and "SHA3_256", "SHA3_384". IdentityIdentifierHashFunction string } // OUIdentifier represents an organizational unit and // its related chain of trust identifier. type OUIdentifier struct { // Certificate represents the second certificate in a certification chain. // (Notice that the first certificate in a certification chain is supposed // to be the certificate of an identity). // It must correspond to the certificate of root or intermediate CA // recognized by the MSP this message belongs to. // Starting from this certificate, a certification chain is computed // and bound to the OrganizationUnitIdentifier specified. Certificate *x509.Certificate // OrganizationUnitIdentifier defines the organizational unit under the // MSP identified with MSPIdentifier. OrganizationalUnitIdentifier string } // NodeOUs contains configuration to tell apart clients from peers from orderers // based on OUs. If NodeOUs recognition is enabled then an msp identity // that does not contain any of the specified OU will be considered invalid. type NodeOUs struct { // If true then an msp identity that does not contain any of the specified OU will be considered invalid. Enable bool // OU Identifier of the clients. ClientOUIdentifier OUIdentifier // OU Identifier of the peers. PeerOUIdentifier OUIdentifier // OU Identifier of the admins. AdminOUIdentifier OUIdentifier // OU Identifier of the orderers. OrdererOUIdentifier OUIdentifier }
/**
 * A parser for <a href="http://tools.ietf.org/html/rfc2616#section-3.3">RFC2616
 * Date format</a>.
 *
 * @author slandelle
 */
public class RFC2616DateParser {

    private final String string;
    private final int offset;
    private final int length;

    /**
     * @param string a string that will be fully parsed
     */
    public RFC2616DateParser(String string) {
        this(string, 0, string.length());
    }

    /**
     * @param string the string to be parsed
     * @param offset the offset where to start parsing
     * @param length the number of chars to parse
     */
    public RFC2616DateParser(String string, int offset, int length) {
        if (offset + length > string.length())
            throw new IllegalArgumentException("String length doesn't match offset and length");
        this.string = string;
        this.offset = offset;
        this.length = length;
    }

    private static class Tokens {
        public final int[] starts;
        public final int[] ends;
        public final int length;

        public Tokens(int[] starts, int[] ends, int length) {
            this.starts = starts;
            this.ends = ends;
            this.length = length;
        }
    }

    private Tokens tokenize() {
        int[] starts = new int[8];
        int[] ends = new int[8];
        boolean inToken = false;
        int tokenCount = 0;
        int end = offset + length;
        for (int i = offset; i < end; i++) {
            char c = string.charAt(i);
            if (c == ' ' || c == ',' || c == '-' || c == ':') {
                if (inToken) {
                    ends[tokenCount++] = i;
                    inToken = false;
                }
            } else if (!inToken) {
                starts[tokenCount] = i;
                inToken = true;
            }
        }

        // finish last token
        if (inToken)
            ends[tokenCount++] = end;

        return new Tokens(starts, ends, tokenCount);
    }

    /**
     * @return null if the string is not a valid RFC2616 date
     */
    public RFC2616Date parse() {
        Tokens tokens = tokenize();

        if (tokens.length != 7 && tokens.length != 8)
            return null;

        // 1st token is ignored: ignore day of week
        // 8th token is ignored: supposed to always be GMT
        if (isDigit(string.charAt(tokens.starts[1])))
            return buildDate(tokens);
        else
            return buildANSICDate(tokens);
    }

    private RFC2616Date buildDate(Tokens tokens) {
        // Sun, 06 Nov 1994 08:49:37 GMT
        Builder dateBuilder = new Builder();

        if (isValidDayOfMonth(tokens.starts[1], tokens.ends[1], dateBuilder) && //
                isValidMonth(tokens.starts[2], tokens.ends[2], dateBuilder) && //
                isValidYear(tokens.starts[3], tokens.ends[3], dateBuilder) && //
                isValidHour(tokens.starts[4], tokens.ends[4], dateBuilder) && //
                isValidMinuteSecond(tokens.starts[5], tokens.ends[5], dateBuilder, true) && //
                isValidMinuteSecond(tokens.starts[6], tokens.ends[6], dateBuilder, false)) {
            return dateBuilder.build();
        }
        return null;
    }

    private RFC2616Date buildANSICDate(Tokens tokens) {
        // Sun Nov 6 08:49:37 1994
        Builder dateBuilder = new Builder();

        if (isValidMonth(tokens.starts[1], tokens.ends[1], dateBuilder) && //
                isValidDayOfMonth(tokens.starts[2], tokens.ends[2], dateBuilder) && //
                isValidHour(tokens.starts[3], tokens.ends[3], dateBuilder) && //
                isValidMinuteSecond(tokens.starts[4], tokens.ends[4], dateBuilder, true) && //
                isValidMinuteSecond(tokens.starts[5], tokens.ends[5], dateBuilder, false) && //
                isValidYear(tokens.starts[6], tokens.ends[6], dateBuilder)) {
            return dateBuilder.build();
        }
        return null;
    }

    private boolean isValid1DigitDayOfMonth(char c0, Builder dateBuilder) {
        if (isDigit(c0)) {
            dateBuilder.setDayOfMonth(getNumericValue(c0));
            return true;
        }
        return false;
    }

    private boolean isValid2DigitsDayOfMonth(char c0, char c1, Builder dateBuilder) {
        if (isDigit(c0) && isDigit(c1)) {
            int i0 = getNumericValue(c0);
            int i1 = getNumericValue(c1);
            int day = i0 * 10 + i1;
            if (day <= 31) {
                dateBuilder.setDayOfMonth(day);
                return true;
            }
        }
        return false;
    }

    private boolean isValidDayOfMonth(int start, int end, Builder dateBuilder) {
        int tokenLength = end - start;
        if (tokenLength == 1) {
            char c0 = string.charAt(start);
            return isValid1DigitDayOfMonth(c0, dateBuilder);
        } else if (tokenLength == 2) {
            char c0 = string.charAt(start);
            char c1 = string.charAt(start + 1);
            return isValid2DigitsDayOfMonth(c0, c1, dateBuilder);
        }
        return false;
    }

    private boolean isValidJanuaryJuneJuly(char c0, char c1, char c2, Builder dateBuilder) {
        if (c0 == 'J' || c0 == 'j')
            if (c1 == 'a' || c1 == 'A') {
                if (c2 == 'n' || c2 == 'N') {
                    dateBuilder.setJanuary();
                    return true;
                }
            } else if (c1 == 'u' || c1 == 'U') {
                if (c2 == 'n' || c2 == 'N') {
                    dateBuilder.setJune();
                    return true;
                } else if (c2 == 'l' || c2 == 'L') {
                    dateBuilder.setJuly();
                    return true;
                }
            }
        return false;
    }

    private boolean isValidFebruary(char c0, char c1, char c2, Builder dateBuilder) {
        if ((c0 == 'F' || c0 == 'f') && (c1 == 'e' || c1 == 'E') && (c2 == 'b' || c2 == 'B')) {
            dateBuilder.setFebruary();
            return true;
        }
        return false;
    }

    private boolean isValidMarchMay(char c0, char c1, char c2, Builder dateBuilder) {
        if ((c0 == 'M' || c0 == 'm') && (c1 == 'a' || c1 == 'A')) {
            if (c2 == 'r' || c2 == 'R') {
                dateBuilder.setMarch();
                return true;
            } else if (c2 == 'y' || c2 == 'Y') {
                dateBuilder.setMay();
                return true;
            }
        }
        return false;
    }

    private boolean isValidAprilAugust(char c0, char c1, char c2, Builder dateBuilder) {
        if (c0 == 'A' || c0 == 'a')
            if ((c1 == 'p' || c1 == 'P') && (c2 == 'r' || c2 == 'R')) {
                dateBuilder.setApril();
                return true;
            } else if ((c1 == 'u' || c1 == 'U') && (c2 == 'g' || c2 == 'G')) {
                dateBuilder.setAugust();
                return true;
            }
        return false;
    }

    private boolean isValidSeptember(char c0, char c1, char c2, Builder dateBuilder) {
        if ((c0 == 'S' || c0 == 's') && (c1 == 'e' || c1 == 'E') && (c2 == 'p' || c2 == 'P')) {
            dateBuilder.setSeptember();
            return true;
        }
        return false;
    }

    private boolean isValidOctober(char c0, char c1, char c2, Builder dateBuilder) {
        if ((c0 == 'O' || c0 == 'o') && (c1 == 'c' || c1 == 'C') && (c2 == 't' || c2 == 'T')) {
            dateBuilder.setOctobre();
            return true;
        }
        return false;
    }

    private boolean isValidNovember(char c0, char c1, char c2, Builder dateBuilder) {
        if ((c0 == 'N' || c0 == 'n') && (c1 == 'o' || c1 == 'O') && (c2 == 'v' || c2 == 'V')) {
            dateBuilder.setNovembre();
            return true;
        }
        return false;
    }

    private boolean isValidDecember(char c0, char c1, char c2, Builder dateBuilder) {
        if (c0 == 'D' || c0 == 'd')
            if (c1 == 'e' || c1 == 'E') {
                if (c2 == 'c' || c2 == 'C') {
                    dateBuilder.setDecember();
                    return true;
                }
            }
        return false;
    }

    private boolean isValidMonth(int start, int end, Builder dateBuilder) {
        if (end - start != 3)
            return false;

        char c0 = string.charAt(start);
        char c1 = string.charAt(start + 1);
        char c2 = string.charAt(start + 2);

        return isValidJanuaryJuneJuly(c0, c1, c2, dateBuilder) || //
                isValidFebruary(c0, c1, c2, dateBuilder) || //
                isValidMarchMay(c0, c1, c2, dateBuilder) || //
                isValidAprilAugust(c0, c1, c2, dateBuilder) || //
                isValidSeptember(c0, c1, c2, dateBuilder) || //
                isValidOctober(c0, c1, c2, dateBuilder) || //
                isValidNovember(c0, c1, c2, dateBuilder) || //
                isValidDecember(c0, c1, c2, dateBuilder);
    }

    private boolean isValid2DigitsYear(char c0, char c1, Builder dateBuilder) {
        if (isDigit(c0) && isDigit(c1)) {
            int i0 = getNumericValue(c0);
            int i1 = getNumericValue(c1);
            int year = i0 * 10 + i1;
            year = year < 70 ? year + 2000 : year + 1900;
            return setValidYear(year, dateBuilder);
        }
        return false;
    }

    private boolean isValid4DigitsYear(char c0, char c1, char c2, char c3, Builder dateBuilder) {
        if (isDigit(c0) && isDigit(c1) && isDigit(c2) && isDigit(c3)) {
            int i0 = getNumericValue(c0);
            int i1 = getNumericValue(c1);
            int i2 = getNumericValue(c2);
            int i3 = getNumericValue(c3);
            int year = i0 * 1000 + i1 * 100 + i2 * 10 + i3;
            return setValidYear(year, dateBuilder);
        }
        return false;
    }

    private boolean setValidYear(int year, Builder dateBuilder) {
        if (year >= 1601) {
            dateBuilder.setYear(year);
            return true;
        }
        return false;
    }

    private boolean isValidYear(int start, int end, Builder dateBuilder) {
        int length = end - start;
        if (length == 2) {
            char c0 = string.charAt(start);
            char c1 = string.charAt(start + 1);
            return isValid2DigitsYear(c0, c1, dateBuilder);
        } else if (length == 4) {
            char c0 = string.charAt(start);
            char c1 = string.charAt(start + 1);
            char c2 = string.charAt(start + 2);
            char c3 = string.charAt(start + 3);
            return isValid4DigitsYear(c0, c1, c2, c3, dateBuilder);
        }
        return false;
    }

    private boolean isValid1DigitHour(char c0, Builder dateBuilder) {
        if (isDigit(c0)) {
            int hour = getNumericValue(c0);
            dateBuilder.setHour(hour);
            return true;
        }
        return false;
    }

    private boolean isValid2DigitsHour(char c0, char c1, Builder dateBuilder) {
        if (isDigit(c0) && isDigit(c1)) {
            int i0 = getNumericValue(c0);
            int i1 = getNumericValue(c1);
            int hour = i0 * 10 + i1;
            if (hour <= 23) {
                dateBuilder.setHour(hour);
                return true;
            }
        }
        return false;
    }

    private boolean isValidHour(int start, int end, Builder dateBuilder) {
        int length = end - start;
        if (length == 1) {
            char c0 = string.charAt(start);
            return isValid1DigitHour(c0, dateBuilder);
        } else if (length == 2) {
            char c0 = string.charAt(start);
            char c1 = string.charAt(start + 1);
            return isValid2DigitsHour(c0, c1, dateBuilder);
        }
        return false;
    }

    private boolean isValid1DigitMinuteSecond(char c0, Builder dateBuilder, boolean minuteOrSecond) {
        if (isDigit(c0)) {
            int value = getNumericValue(c0);
            if (minuteOrSecond)
                dateBuilder.setMinute(value);
            else
                dateBuilder.setSecond(value);
            return true;
        }
        return false;
    }

    private boolean isValid2DigitsMinuteSecond(char c0, char c1, Builder dateBuilder, boolean minuteOrSecond) {
        if (isDigit(c0) && isDigit(c1)) {
            int i0 = getNumericValue(c0);
            int i1 = getNumericValue(c1);
            int value = i0 * 10 + i1;
            if (value <= 60) {
                if (minuteOrSecond)
                    dateBuilder.setMinute(value);
                else
                    dateBuilder.setSecond(value);
                return true;
            }
        }
        return false;
    }

    private boolean isValidMinuteSecond(int start, int end, Builder dateBuilder, boolean minuteOrSecond) {
        int length = end - start;
        if (length == 1) {
            char c0 = string.charAt(start);
            return isValid1DigitMinuteSecond(c0, dateBuilder, minuteOrSecond);
        } else if (length == 2) {
            char c0 = string.charAt(start);
            char c1 = string.charAt(start + 1);
            return isValid2DigitsMinuteSecond(c0, c1, dateBuilder, minuteOrSecond);
        }
        return false;
    }

    private boolean isDigit(char c) {
        return c >= '0' && c <= '9';
    }

    private int getNumericValue(char c) {
        return (int) c - 48;
    }
}
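For a quick cross-check of the two input shapes this parser accepts (the preferred RFC 1123 form and the legacy ANSI C asctime() form), Python's standard library parses the same strings; a small sketch:

from email.utils import parsedate_to_datetime

# Both forms handled by the hand-rolled parser above:
for s in ("Sun, 06 Nov 1994 08:49:37 GMT",
          "Sun Nov 6 08:49:37 1994"):
    print(parsedate_to_datetime(s))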
# Likely a memory-augmented (NTM-style) RNN cell's state description:
# per-example shapes for the controller state, read vectors, memory
# matrix, and attention/write weightings. In TensorFlow cells this is
# conventionally exposed as a read-only @property.
def state_size(self):
    return (tf.TensorShape([self._num_units]),
            tf.TensorShape([self._num_read_heads, self._embedding_size]),
            tf.TensorShape([self._memory_size, self._embedding_size]),
            tf.TensorShape([1, self._memory_size, 1]),
            tf.TensorShape([self._num_write_heads, self._memory_size, 1]))
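Given a composite `state_size` like the tuple above, frameworks typically materialize the initial state by allocating zeros of each shape with the batch dimension prepended. A minimal sketch of that step (the sizes are illustrative placeholders for the cell's constructor arguments):

import tensorflow as tf

# Illustrative sizes standing in for the cell's configuration.
num_units, num_read_heads, num_write_heads = 128, 1, 1
embedding_size, memory_size = 64, 32

state_size = (tf.TensorShape([num_units]),
              tf.TensorShape([num_read_heads, embedding_size]),
              tf.TensorShape([memory_size, embedding_size]),
              tf.TensorShape([1, memory_size, 1]),
              tf.TensorShape([num_write_heads, memory_size, 1]))

def zero_state(batch_size):
    # Prepend the batch dimension to every per-example state shape.
    return tuple(tf.zeros([batch_size] + s.as_list()) for s in state_size)

states = zero_state(4)
print([tuple(t.shape) for t in states])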
import org.springframework.stereotype.Service;

// TODO: Implement all methods
@Service
public class StatServiceImpl implements StatService {

    @Override
    public boolean areImported() {
        return false;
    }

    @Override
    public String readStatsFileContent() {
        return null;
    }

    @Override
    public String importStats() {
        return null;
    }
}
Since he’s become the presumptive Republican presidential nominee, pro-life voters are starting to move in Donald Trump’s direction, especially knowing the alternative is abortion activist Hillary Clinton. But many pro-life voters and some pro-life leaders remain skeptical about supporting Trump given some of his misstatements and past support for abortion. To gain better insight into his abortion views and to potentially be able to endorse his candidacy, a group of top pro-life leaders plans to meet with Trump this month. On Tuesday, June 21, national Christian conservative leaders will gather in New York City for a private summit with the businessman regarding issues of importance to the pro-life community. Ohio Right to Life President Mike Gonidakis has been invited to attend the meeting as a representative of Ohio’s pro-life movement. He told LifeNews.com that he hopes to get Trump to understand the importance of pro-life issues to voters and to defeating Clinton. “After seven long years of pro-abortion policies and hundreds of millions of our tax dollars poured into Planned Parenthood’s coffers, it is critical for Mr. Trump to understand the positive impact a pro-life president can make to save lives,” said Gonidakis. “Ohio is the key battleground state and engaging our state’s social conservatives must be priority number one for Trump to win Ohio.” Gonidakis told LifeNews.com that the goal of the meeting is to seek unity by allowing pro-life leaders and Trump to learn more about each other. The group will not be making any joint or collective endorsements, decisions or statements. “As we consider the candidacy of radical pro-abortion nominee Hillary Clinton, the pro-life community is looking for assurance that they have a pro-life alternative,” said Gonidakis. “Human lives hang in the balance. Ohio Right to Life is committed to getting Election 2016 right, and we believe this meeting will go a long way in ensuring that we do.” Students for Life of America president Kristan Hawkins will also be attending the meeting. She wrote about the meeting at LifeNews: A couple of weeks ago, I was presented with the opportunity to join with other key leaders in both the pro-life and Christian spheres to convene a meeting this June with Donald Trump and Dr. Ben Carson, so I took it. It’s just that – a meeting, or rather, a conversation where no big rallies or speeches will happen, but where a constructive dialogue will hopefully take place as we are able to share our deep concerns. While I know many feel conflicted right now about the upcoming election, we all agree that this November could change the trajectory of legal abortion in our nation. Planned Parenthood must be held accountable and, at the very least, be defunded of our taxpayer dollars. The Supreme Court must have a pro-life jurist replace Justice Scalia. This may be our only opportunity to sit with the potential leader of the free world and impress upon him the importance of sticking to his promises in helping us to end abortion and educate him on how to talk about our “issue” in a loving and compassionate, yet effective way. Recently, Trump released a well-received list of 11 potential Supreme Court nominees, a list pro-life groups hailed for having strong supporters of the Constitution. Trump also recently hired a key pro-life advocate as his domestic policy director.
If Trump is the nominee, he would present a stark contrast on abortion to pro-abortion Democrat Hillary Clinton. Trump has specifically promised he would sign a bill as president to de-fund Planned Parenthood. In an interview with David Brody of CBN, Trump made that promise: David Brody: “As a President Trump, if a bill came to your desk that would defund Planned Parenthood you would support that, you would sign that?” Donald Trump: “Yes, because as long as they do the abortion I am not for funding Planned Parenthood… As long as they’re involved with abortion, as far as I’m concerned forget it, I wouldn’t fund them regardless. I would defund Planned Parenthood because of their view and the fact of their work on abortion…. I am for defunding Planned Parenthood as long as they are involved with abortion.” As far as Trump’s comments on Planned Parenthood funding are concerned, Trump has fairly consistently said he opposes taxpayer funding, but he’s also made some remarks about the “good things” Planned Parenthood does that have alarmed pro-life voters, as if any “good thing” could make up for the fact that Planned Parenthood kills 330,000 unborn babies a year in abortions and then sells their body parts for profit. Meanwhile, Trump said he thinks the Roe v. Wade Supreme Court case that ushered in an era of 48 million abortions was “wrongly decided.” Trump said he would appoint “very good judges” who would ultimately “change it,” but he opposed Roe without specifically saying it should be overturned. Here are some of the headlines we’ve carried at LifeNews.com in recent months that provide further details on what Trump has said regarding Planned Parenthood funding: August 4: Donald Trump: Shut Down the Federal Government to De-Fund Planned Parenthood August 11: Donald Trump: Planned Parenthood is an “Abortion Factory” August 17: Donald Trump: Videos of Planned Parenthood Selling Aborted Babies Were “Disgusting,” De-Fund It August 26: Donald Trump: Planned Parenthood is an “Abortion Factory” That Sells Baby Parts Like Automobiles October 19: Donald Trump: “Planned Parenthood Should Absolutely be De-Funded” December 2: Donald Trump: De-Fund Planned Parenthood and “Look Carefully at” Overturning Roe v. Wade December 22: Donald Trump: Unless Planned Parenthood Stops Doing Abortions, We Should De-Fund It
Oxford graduate launches erotic website as alternative to hardcore porn to prevent men becoming 'inconsiderate lovers'

Cindy Gallop, 52, believes the site is the sex education aid of the future
She aims to 'reform porn' and 'rehabilitate' the younger generation

An Oxford graduate has set up an erotic website aiming to promote a tasteful style of adult video in a challenge to hardcore pornography. British businesswoman Cindy Gallop wants to urge more people to 'make love, not porn', believing her site is the sex education aid of the future. The 52-year-old claims that too many people, particularly young men, are learning their bedroom techniques chiefly from viewing hardcore porn on the internet, which makes them inconsiderate lovers. She aims to 'reform porn' and 'rehabilitate' the younger generation with an alternative campaign for tasteful erotica. 'When you have sex with younger men you see the creeping ubiquity of hardcore porn in the culture, in an era where it is more freely and widely available than ever before and kids are accessing it younger and younger,' Gallop told The Observer. Her new website, MakeLoveNotPorn.tv, seeks to offer an alternative to hardcore porn. Miss Gallop, who studied English literature at Oxford and now lives in New York, believes parents and sex education teachers still give too little guidance about how to develop healthy sexual relationships. Miss Gallop, who is enthusiastically single and unashamed to date men less than half her age, said hardcore porn had become the de facto sex education for men in their early twenties. She aims to re-educate people via the internet, 'so that young men don't think that's always the normal way of behaving in the bedroom and their girlfriends don't have to pretend to like it'. Her website features couples and individuals engaging in real-life sexual activities with the kind of genuine passion and intimacy missing from most porn. One popular video shows a couple who work in the pornography business, showing for the website how they make love together outside their work, with each partner enjoying equal pleasure and control. 'I wanted to separate the myths of hardcore porn behaviour from the reality of healthy but hot sexual relationships,' she said. MakeLoveNotPorn.tv is expected to go fully live before the end of the year. Invited members pay $5 per video chosen from the menu, and contributors who star in their own erotic show submit videos for inclusion on the site. The website is for over-18s only, but Miss Gallop eventually wants to create gift vouchers that can be bought for younger teenagers.
package agent_test

import (
	"testing"

	"k8s.io/client-go/kubernetes/fake"

	"github.com/flatcar-linux/flatcar-linux-update-operator/pkg/agent"
	"github.com/flatcar-linux/flatcar-linux-update-operator/pkg/updateengine"
)

//nolint:funlen // Just many subtests.
func Test_Creating_new_agent_returns_error_when(t *testing.T) {
	t.Parallel()

	t.Run("no_clientset_is_configured", func(t *testing.T) {
		t.Parallel()

		configWithoutClientset := testConfig()
		configWithoutClientset.Clientset = nil

		client, err := agent.New(configWithoutClientset)
		if err == nil {
			t.Fatalf("Expected error creating new agent")
		}

		if client != nil {
			t.Fatalf("No client should be returned when New failed")
		}
	})

	t.Run("no_status_receiver_is_configured", func(t *testing.T) {
		t.Parallel()

		configWithoutStatusReceiver := testConfig()
		configWithoutStatusReceiver.StatusReceiver = nil

		client, err := agent.New(configWithoutStatusReceiver)
		if err == nil {
			t.Fatalf("Expected error creating new agent")
		}

		if client != nil {
			t.Fatalf("No client should be returned when New failed")
		}
	})

	t.Run("no_rebooter_is_configured", func(t *testing.T) {
		t.Parallel()

		configWithoutStatusReceiver := testConfig()
		configWithoutStatusReceiver.Rebooter = nil

		client, err := agent.New(configWithoutStatusReceiver)
		if err == nil {
			t.Fatalf("Expected error creating new agent")
		}

		if client != nil {
			t.Fatalf("No client should be returned when New failed")
		}
	})

	t.Run("empty_node_name_is_given", func(t *testing.T) {
		t.Parallel()

		configWithoutStatusReceiver := testConfig()
		configWithoutStatusReceiver.NodeName = ""

		client, err := agent.New(configWithoutStatusReceiver)
		if err == nil {
			t.Fatalf("Expected error creating new agent")
		}

		if client != nil {
			t.Fatalf("No client should be returned when New failed")
		}
	})
}

func testConfig() *agent.Config {
	return &agent.Config{
		Clientset:      fake.NewSimpleClientset(),
		StatusReceiver: &mockStatusReceiver{},
		Rebooter:       &mockRebooter{},
		NodeName:       "testNodeName",
	}
}

type mockStatusReceiver struct{}

func (m *mockStatusReceiver) ReceiveStatuses(rcvr chan<- updateengine.Status, stop <-chan struct{}) {}

type mockRebooter struct{}

func (m *mockRebooter) Reboot(bool) {}
“There really has been an exponential increase of media interest in what’s happening. I think that’s the result of new research, (and) the result of some major international conferences that are really establishing the field of psychedelic science and medicine.” Brad Burge of MAPS.

It seems we’re finally at a turning point in The War on Drugs. All it took was a few decades of indoctrination, mass incarceration, astronomical price tags and straight-up horrific body counts. Yet, society’s transition into a deeper understanding of these substances has been far from smooth. Yes, the people have clearly spoken on the subject of marijuana, and nearly half of all U.S. states have taken notice, putting some sort of marijuana-friendly law on the books. However, when it comes to Mary Jane’s more potent psychedelic cousins, the conversation is quite a bit more nuanced and controversial. Thankfully, for the first time in decades, the dialogue surrounding psychedelics is evolving. For that, we have organizations like MAPS to thank.

As your eyeballs wiggle their way through these words, MAPS (the Multidisciplinary Association for Psychedelic Studies) is in the process of conducting a whole host of FDA-sanctioned studies using psychedelic substances on real, live human beings. The therapeutic use of MDMA, for example, has been shown to be a highly effective treatment for PTSD. A slow-clap-worthy 83 percent of participants were, in fact, totally PTSD-free at their two-month follow-up exams.

While these developments are genuinely encouraging, the bleak fact of the matter is that psychedelic therapies are still a long way from legal, meaning very few of those in need are actually receiving these potentially powerful treatments. To dig further into this, I spoke with Brad Burge of MAPS, along with three-tour Army Ranger veteran and psychedelic success story Tim Amoroso.
Urban explorers from Ukraine say they have discovered logging operations inside the Chornobyl exclusion zone and warn that irradiated wood is being sold to unsuspecting consumers.

"The first time we saw forests and the second time it wasn't there." -Artur Kalmykov

"I wouldn't want to live in such a house," Artur Kalmykov tells As It Happens host Carol Off. The computer programmer from Kiev visits the exclusion zone frequently because he says it relaxes him.

A radiation warning sign is placed near the checkpoint 'Maidan' of the state radiation ecology reserve inside the 30 km exclusion zone around the Chornobyl nuclear reactor. (Sergei Grits/AP)

On a recent trip to the zone, Kalmykov and his fellow explorers, who call themselves "stalkers," found that an area they had visited a month or so earlier had been completely clear-cut by loggers. "The first time we saw forests and the second time it wasn't there," says Kalmykov.

Thirty years ago this week, an accident at the Chornobyl nuclear power plant in Ukraine dumped lethal amounts of radiation in the area surrounding the reactor, killing dozens within hours and thousands more since; the exact number is still a hotly debated topic.

FILE - A 1986 file photo of an aerial view of the Chornobyl nuclear plant in Chornobyl, Ukraine, showing damage from an explosion and fire in reactor four on April 26, 1986 that sent large amounts of radioactive material into the atmosphere. (Volodymyr/AP)

A 30-kilometre exclusion zone was established around the reactor to minimize people's exposure to radiation; the zone now covers 2,600 square kilometres. Now, it appears that parts of the forest are being logged for consumers to unwittingly buy.

Kalmykov took his discovery to Stop Corruption, a political watchdog group. The group accuses the agency in charge of the exclusion zone of corruption and says irradiated wood from the zone could wind up in people's homes. In an interview with The New York Times, the director of the exclusion zone, Vitalii V. Petruk, claimed illegal logging had not taken place since he assumed the job in the fall.

An abandoned kindergarten in the deserted city of Pripyat, which was built to house the workers of the Chornobyl nuclear power station. Residents were told they would be returning within a matter of days, so they would not bring a lot of personal items that may have been contaminated by the fallout. (Efrem Lukatsky/AP)

"Stop Corruption made a film about it (illegal logging). They spoke with the manager of the exclusion zone... he told them he doesn't know who is cutting down the forest," says Kalmykov. Kalmykov also recently met some loggers working in the zone. They told him they didn't know who their bosses were either.

A playground in the deserted town of Pripyat, Ukraine, about 3 km from the Chornobyl nuclear plant in Ukraine. (Efrem Lukatsky/AP)

Petruk has proposed increased logging in the area to feed a steam power plant that would reduce the need for Russian natural gas. In the aftermath of the disaster, an area of forest in the path of the fallout absorbed so much radiation that within days all the trees had turned red, earning it the nickname "the Red Forest." These trees were eventually cut down and buried.
Fear of novelty in infant rats predicts adult corticosterone dynamics and an early death

Individuals who are fearful of novelty have a larger hypothalamic-pituitary-adrenal axis response than do nonfearful individuals. We hypothesized that a fearful behavioral style emerging early in life would be associated with life-long altered adrenal activity. Because there is ample physiological evidence of both costs and benefits of adrenal activation, we determined whether such a stable emotional-neuroendocrine trait was associated with differential morbidity and mortality. To conduct such lifespan work, we studied a relatively short-lived mammal: the Norway rat. We first established that an animal's hesitation or willingness to explore a novel environment (“neophobia” and “neophilia,” respectively) is an identifiable and stable behavioral trait in young-adult males and that neophobia, compared with neophilia, was associated with a greater glucocorticoid response to novelty. Second, we were able to detect behavioral differences among infant rats within a family, and this behavioral disposition in infancy predicted the magnitude of the glucocorticoid response in late middle age. Males identified as neophobic during infancy died sooner than their less fearful brothers. Although both types of males died with similar pathologies (tumors), neophobic males were 60% more likely to die at any point in time. This lifespan study identifies an emotional trait in infancy that predicts an early death and an associated neuroendocrine trait in adulthood that is a potential mechanism underlying the relationship between behavioral style and longevity.
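The phrase "60% more likely to die at any point in time" corresponds to a hazard ratio of about 1.6 from a proportional-hazards model. A sketch of how such a ratio is estimated from survival data, using the lifelines library; the column names and values below are fabricated purely for illustration:

import pandas as pd
from lifelines import CoxPHFitter

# Toy survival data: lifespan in days, death indicator, and a 0/1
# neophobia flag. Values are made up for illustration only.
df = pd.DataFrame({
    "lifespan_days": [620, 700, 540, 810, 760, 590, 880, 650],
    "died":          [1,   1,   1,   1,   1,   1,   0,   1],
    "neophobic":     [1,   0,   1,   0,   0,   1,   0,   1],
})

cph = CoxPHFitter()
cph.fit(df, duration_col="lifespan_days", event_col="died")
# exp(coef) for `neophobic` is the hazard ratio; a value near 1.6 would
# mean a neophobic animal is ~60% more likely to die at any given moment.
cph.print_summary()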
{-
   Author  : <NAME>.
   Date    : 6/10/2018
   Problem : Modified run-length encoding. Modify the result of problem 10
             in such a way that if an element has no duplicates it is simply
             copied into the result list. Only elements with duplicates are
             transferred as (N E) lists.
-}

module Main where

import Data.List

data ListItem x = Single x
                | Multiple Int x
                deriving (Show)

modify :: (Eq x) => [x] -> [ListItem x]
modify = map h . f
  where
    h (1, x) = Single x
    h (n, x) = Multiple n x

f :: (Eq x) => [x] -> [(Int, x)]
f = map (\x -> (length x, head x)) . group

main = do
  -- Expected: [Multiple 4 'a',Single 'b',Multiple 2 'c',Multiple 2 'a',Single 'd',Multiple 4 'e']
  print $ modify "aaaabccaadeeee"
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.flink.table.store.file.data;

import org.apache.flink.core.fs.FileStatus;
import org.apache.flink.core.fs.FileSystem;
import org.apache.flink.core.fs.Path;
import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.binary.BinaryRowDataUtil;
import org.apache.flink.table.runtime.typeutils.RowDataSerializer;
import org.apache.flink.table.store.file.KeyValue;
import org.apache.flink.table.store.file.KeyValueSerializerTest;
import org.apache.flink.table.store.file.TestKeyValueGenerator;
import org.apache.flink.table.store.file.format.FlushingFileFormat;
import org.apache.flink.table.store.file.stats.StatsTestUtils;
import org.apache.flink.table.store.file.utils.FailingAtomicRenameFileSystem;
import org.apache.flink.table.store.file.utils.FileStorePathFactory;
import org.apache.flink.table.store.file.utils.RecordReaderIterator;
import org.apache.flink.table.types.logical.BigIntType;
import org.apache.flink.table.types.logical.IntType;
import org.apache.flink.table.types.logical.LogicalType;
import org.apache.flink.table.types.logical.RowType;
import org.apache.flink.table.types.logical.VarCharType;
import org.apache.flink.util.CloseableIterator;

import org.junit.jupiter.api.RepeatedTest;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;

import java.io.IOException;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.ThreadLocalRandom;
import java.util.function.Function;

import static org.assertj.core.api.Assertions.assertThat;

/** Tests for {@link DataFileReader} and {@link DataFileWriter}.
*/ public class DataFileTest { private final DataFileTestDataGenerator gen = DataFileTestDataGenerator.builder().memTableCapacity(20).build(); @TempDir java.nio.file.Path tempDir; @RepeatedTest(10) public void testWriteAndReadDataFileWithStatsCollectingRollingFile() throws Exception { testWriteAndReadDataFileImpl("avro"); } @RepeatedTest(10) public void testWriteAndReadDataFileWithFileExtractingRollingFile() throws Exception { testWriteAndReadDataFileImpl("avro-extract"); } private void testWriteAndReadDataFileImpl(String format) throws Exception { DataFileTestDataGenerator.Data data = gen.next(); DataFileWriter writer = createDataFileWriter(tempDir.toString(), format); DataFileMetaSerializer serializer = new DataFileMetaSerializer( TestKeyValueGenerator.KEY_TYPE, TestKeyValueGenerator.ROW_TYPE); List<DataFileMeta> actualMetas = writer.write(CloseableIterator.fromList(data.content, kv -> {}), 0); checkRollingFiles(data.meta, actualMetas, writer.suggestedFileSize()); DataFileReader reader = createDataFileReader(tempDir.toString(), format, null, null); assertData( data, actualMetas, TestKeyValueGenerator.KEY_SERIALIZER, TestKeyValueGenerator.ROW_SERIALIZER, serializer, reader, kv -> kv); } @RepeatedTest(10) public void testCleanUpForException() throws IOException { FailingAtomicRenameFileSystem.get().reset(1, 10); DataFileTestDataGenerator.Data data = gen.next(); DataFileWriter writer = createDataFileWriter( FailingAtomicRenameFileSystem.getFailingPath(tempDir.toString()), "avro"); try { writer.write(CloseableIterator.fromList(data.content, kv -> {}), 0); } catch (Throwable e) { assertThat(e) .isExactlyInstanceOf(FailingAtomicRenameFileSystem.ArtificialException.class); Path root = new Path(tempDir.toString()); FileSystem fs = root.getFileSystem(); for (FileStatus bucketStatus : fs.listStatus(root)) { assertThat(bucketStatus.isDir()).isTrue(); assertThat(fs.listStatus(bucketStatus.getPath())).isEmpty(); } } } @Test public void testKeyProjection() throws Exception { DataFileTestDataGenerator.Data data = gen.next(); DataFileWriter dataFileWriter = createDataFileWriter(tempDir.toString(), "avro"); DataFileMetaSerializer serializer = new DataFileMetaSerializer( TestKeyValueGenerator.KEY_TYPE, TestKeyValueGenerator.ROW_TYPE); List<DataFileMeta> actualMetas = dataFileWriter.write(CloseableIterator.fromList(data.content, kv -> {}), 0); // projection: (shopId, orderId) -> (orderId) DataFileReader fileReader = createDataFileReader(tempDir.toString(), "avro", new int[][] {new int[] {1}}, null); RowType projectedKeyType = RowType.of(new LogicalType[] {new BigIntType(false)}, new String[] {"key_orderId"}); RowDataSerializer projectedKeySerializer = new RowDataSerializer(projectedKeyType); assertData( data, actualMetas, projectedKeySerializer, TestKeyValueGenerator.ROW_SERIALIZER, serializer, fileReader, kv -> new KeyValue() .replace( GenericRowData.of(kv.key().getLong(1)), kv.sequenceNumber(), kv.valueKind(), kv.value())); } @Test public void testValueProjection() throws Exception { DataFileTestDataGenerator.Data data = gen.next(); DataFileWriter dataFileWriter = createDataFileWriter(tempDir.toString(), "avro"); DataFileMetaSerializer serializer = new DataFileMetaSerializer( TestKeyValueGenerator.KEY_TYPE, TestKeyValueGenerator.ROW_TYPE); List<DataFileMeta> actualMetas = dataFileWriter.write(CloseableIterator.fromList(data.content, kv -> {}), 0); // projection: // (dt, hr, shopId, orderId, itemId, priceAmount, comment) -> // (shopId, itemId, dt, hr) DataFileReader fileReader = createDataFileReader( 
tempDir.toString(), "avro", null, new int[][] {new int[] {2}, new int[] {4}, new int[] {0}, new int[] {1}}); RowType projectedValueType = RowType.of( new LogicalType[] { new IntType(false), new BigIntType(), new VarCharType(false, 8), new IntType(false) }, new String[] {"shopId", "itemId", "dt", "hr"}); RowDataSerializer projectedValueSerializer = new RowDataSerializer(projectedValueType); assertData( data, actualMetas, TestKeyValueGenerator.KEY_SERIALIZER, projectedValueSerializer, serializer, fileReader, kv -> new KeyValue() .replace( kv.key(), kv.sequenceNumber(), kv.valueKind(), GenericRowData.of( kv.value().getInt(2), kv.value().isNullAt(4) ? null : kv.value().getLong(4), kv.value().getString(0), kv.value().getInt(1)))); } private DataFileWriter createDataFileWriter(String path, String format) { FileStorePathFactory pathFactory = new FileStorePathFactory(new Path(path)); int suggestedFileSize = ThreadLocalRandom.current().nextInt(8192) + 1024; return new DataFileWriter.Factory( TestKeyValueGenerator.KEY_TYPE, TestKeyValueGenerator.ROW_TYPE, // normal format will buffer changes in memory and we can't determine // if the written file size is really larger than suggested, so we use a // special format which flushes for every added element new FlushingFileFormat(format), pathFactory, suggestedFileSize) .create(BinaryRowDataUtil.EMPTY_ROW, 0); } private DataFileReader createDataFileReader( String path, String format, int[][] keyProjection, int[][] valueProjection) { FileStorePathFactory pathFactory = new FileStorePathFactory(new Path(path)); DataFileReader.Factory factory = new DataFileReader.Factory( TestKeyValueGenerator.KEY_TYPE, TestKeyValueGenerator.ROW_TYPE, new FlushingFileFormat(format), pathFactory); if (keyProjection != null) { factory.withKeyProjection(keyProjection); } if (valueProjection != null) { factory.withValueProjection(valueProjection); } return factory.create(BinaryRowDataUtil.EMPTY_ROW, 0); } private void assertData( DataFileTestDataGenerator.Data data, List<DataFileMeta> actualMetas, RowDataSerializer keySerializer, RowDataSerializer projectedValueSerializer, DataFileMetaSerializer dataFileMetaSerializer, DataFileReader fileReader, Function<KeyValue, KeyValue> toExpectedKv) throws Exception { Iterator<KeyValue> expectedIterator = data.content.iterator(); for (DataFileMeta meta : actualMetas) { // check the contents of data file CloseableIterator<KeyValue> actualKvsIterator = new RecordReaderIterator(fileReader.read(meta.fileName())); while (actualKvsIterator.hasNext()) { assertThat(expectedIterator.hasNext()).isTrue(); KeyValue actualKv = actualKvsIterator.next(); assertThat( KeyValueSerializerTest.equals( toExpectedKv.apply(expectedIterator.next()), actualKv, keySerializer, projectedValueSerializer)) .isTrue(); } actualKvsIterator.close(); // check that each data file meta is serializable assertThat(dataFileMetaSerializer.fromRow(dataFileMetaSerializer.toRow(meta))) .isEqualTo(meta); } assertThat(expectedIterator.hasNext()).isFalse(); } private void checkRollingFiles( DataFileMeta expected, List<DataFileMeta> actual, long suggestedFileSize) { // all but last file should be no smaller than suggestedFileSize for (int i = 0; i + 1 < actual.size(); i++) { assertThat(actual.get(i).fileSize() >= suggestedFileSize).isTrue(); } // expected.rowCount == sum(rowCount) assertThat(actual.stream().mapToLong(DataFileMeta::rowCount).sum()) .isEqualTo(expected.rowCount()); // expected.minKey == firstFile.minKey assertThat(actual.get(0).minKey()).isEqualTo(expected.minKey()); // 
expected.maxKey == lastFile.maxKey assertThat(actual.get(actual.size() - 1).maxKey()).isEqualTo(expected.maxKey()); // check stats for (int i = 0; i < expected.keyStats().length; i++) { int idx = i; StatsTestUtils.checkRollingFileStats( expected.keyStats()[i], actual, m -> m.keyStats()[idx]); } for (int i = 0; i < expected.valueStats().length; i++) { int idx = i; StatsTestUtils.checkRollingFileStats( expected.valueStats()[i], actual, m -> m.valueStats()[idx]); } // expected.minSequenceNumber == min(minSequenceNumber) assertThat(actual.stream().mapToLong(DataFileMeta::minSequenceNumber).min().orElse(-1)) .isEqualTo(expected.minSequenceNumber()); // expected.maxSequenceNumber == max(maxSequenceNumber) assertThat(actual.stream().mapToLong(DataFileMeta::maxSequenceNumber).max().orElse(-1)) .isEqualTo(expected.maxSequenceNumber()); // expected.level == eachFile.level for (DataFileMeta meta : actual) { assertThat(meta.level()).isEqualTo(expected.level()); } } }
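The `checkRollingFiles` helper above encodes the invariants a rolling file writer must preserve: every file but the last is at least the suggested size, the per-file row counts sum to the total, and the first and last files carry the global minimum and maximum keys. The same checks in a compact Python sketch (the field names are illustrative stand-ins for DataFileMeta's accessors):

from dataclasses import dataclass

@dataclass
class FileMeta:  # illustrative stand-in for DataFileMeta
    size: int
    row_count: int
    min_key: int
    max_key: int

def check_rolling(files, suggested_size, total_rows, min_key, max_key):
    # All but the last file must be no smaller than the suggested size.
    assert all(f.size >= suggested_size for f in files[:-1])
    # Row counts across the rolled files must sum to the expected total.
    assert sum(f.row_count for f in files) == total_rows
    # The first/last files carry the global min/max keys.
    assert files[0].min_key == min_key
    assert files[-1].max_key == max_key

check_rolling([FileMeta(1100, 10, 0, 9), FileMeta(300, 3, 10, 12)],
              suggested_size=1024, total_rows=13, min_key=0, max_key=12)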
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import asyncio
import logging
import pathlib
import sys
from typing import List, Optional

ENCODING = sys.getdefaultencoding()


async def _read_stream(
    stream: Optional[asyncio.StreamReader], preamble: str, logger: logging.Logger
) -> None:
    """
    This is a utility method for reading an `asyncio.StreamReader` into a
    given logger. It will run infinitely until the stream is invalidated or
    has run out of data to send.
    """
    logger.debug("Listening to a new StreamReader")
    while True:
        if not stream:
            break
        line = await stream.readline()
        if not line:
            break
        logger.info(f"{preamble}: {line.decode(ENCODING).strip()}")


async def run_command(
    command: List[str], operating_dir: pathlib.Path, logger: logging.Logger
) -> asyncio.subprocess.Process:
    """
    This is a utility method for running a subprocess command. Both `stdout`
    and `stderr` will be redirected to the given logger.
    """
    logger.info("Running new subprocess command")
    process = await asyncio.create_subprocess_exec(
        *command,
        cwd=operating_dir,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
    )
    logger.info(f"Started subprocess `{' '.join(command)}` (pid={process.pid})")
    try:
        while process.returncode is None:
            await asyncio.wait(
                [
                    _read_stream(process.stdout, "stdout", logger),
                    _read_stream(process.stderr, "stderr", logger),
                ]
            )
    finally:
        if process.returncode is None:
            logger.warning(f"Killing process: {process}")
            process.terminate()
        logger.info(f"pid={process.pid} exited with return code {process.returncode}")
    return process


async def run_commands(
    commands: List[List[str]],
    operating_dirs: List[pathlib.Path],
    loggers: List[logging.Logger],
    timeout: Optional[int] = None,
) -> List[asyncio.subprocess.Process]:
    """
    This is a utility method that runs a set of commands by internally
    calling `run_command` in a loop.
    """
    procs = [
        run_command(command, operating_dir, logger)
        for command, operating_dir, logger in zip(commands, operating_dirs, loggers)
    ]
    tasks = [asyncio.create_task(proc) for proc in procs]
    waits = [asyncio.wait_for(t, timeout=timeout) for t in tasks]
    return list(await asyncio.gather(*waits, return_exceptions=False))
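A possible invocation of the helpers above, driving two short shell commands concurrently from a synchronous entry point. This assumes the snippet is appended to (or imports from) the module above; the commands, logger names, and timeout are illustrative:

import asyncio
import logging
import pathlib

logging.basicConfig(level=logging.INFO)
cwd = pathlib.Path(".")

async def main():
    # run_commands drives both subprocesses concurrently and returns
    # the finished Process objects once both have exited.
    procs = await run_commands(
        commands=[["echo", "hello"], ["echo", "world"]],
        operating_dirs=[cwd, cwd],
        loggers=[logging.getLogger("cmd1"), logging.getLogger("cmd2")],
        timeout=30,
    )
    return [p.returncode for p in procs]

print(asyncio.run(main()))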
use itertools::Itertools; use std::fmt; use std::str; use crate::bitboard::Factory as BBFactory; use crate::{Bitboard, Color, Hand, Move, MoveError, Piece, PieceType, SfenError, Square}; /// MoveRecord stores information necessary to undo the move. #[derive(Debug)] pub enum MoveRecord { Normal { from: Square, to: Square, placed: Piece, captured: Option<Piece>, promoted: bool, }, Drop { to: Square, piece: Piece, }, } impl MoveRecord { /// Converts the move into SFEN formatted string. pub fn to_sfen(&self) -> String { match *self { MoveRecord::Normal { from, to, promoted, .. } => format!("{}{}{}", from, to, if promoted { "+" } else { "" }), MoveRecord::Drop { to, piece: Piece { piece_type, .. }, } => format!("{}*{}", piece_type.to_string().to_uppercase(), to), } } } impl PartialEq<Move> for MoveRecord { fn eq(&self, other: &Move) -> bool { match (self, other) { ( &MoveRecord::Normal { from: f1, to: t1, promoted, .. }, &Move::Normal { from: f2, to: t2, promote, }, ) => f1 == f2 && t1 == t2 && promote == promoted, (&MoveRecord::Drop { to: t1, piece, .. }, &Move::Drop { to: t2, piece_type }) => { t1 == t2 && piece.piece_type == piece_type } _ => false, } } } struct PieceGrid([Option<Piece>; 81]); impl PieceGrid { pub fn get(&self, sq: Square) -> &Option<Piece> { &self.0[sq.index()] } pub fn set(&mut self, sq: Square, pc: Option<Piece>) { self.0[sq.index()] = pc; } } impl fmt::Debug for PieceGrid { fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> { write!(fmt, "PieceGrid {{ ")?; for pc in self.0.iter() { write!(fmt, "{:?} ", pc)?; } write!(fmt, "}}") } } /// Represents a state of the game. /// /// # Examples /// /// ``` /// use shogi::{Move, Position}; /// use shogi::bitboard::Factory as BBFactory; /// use shogi::square::consts::*; /// /// BBFactory::init(); /// let mut pos = Position::new(); /// pos.set_sfen("lnsgkgsnl/1r5b1/ppppppppp/9/9/9/PPPPPPPPP/1B5R1/LNSGKGSNL b - 1").unwrap(); /// /// let m = Move::Normal{from: SQ_7G, to: SQ_7F, promote: false}; /// pos.make_move(m).unwrap(); /// /// assert_eq!("lnsgkgsnl/1r5b1/ppppppppp/9/9/9/PPPPPPPPP/1B5R1/LNSGKGSNL b - 1 moves 7g7f", pos.to_sfen()); /// ``` #[derive(Debug)] pub struct Position { board: PieceGrid, hand: Hand, ply: u16, side_to_move: Color, move_history: Vec<MoveRecord>, sfen_history: Vec<(String, u16)>, occupied_bb: Bitboard, color_bb: [Bitboard; 2], type_bb: [Bitboard; 14], } ///////////////////////////////////////////////////////////////////////////// // Type implementation ///////////////////////////////////////////////////////////////////////////// impl Position { /// Creates a new instance of `Position` with an empty board. pub fn new() -> Position { Default::default() } ///////////////////////////////////////////////////////////////////////// // Accessors ///////////////////////////////////////////////////////////////////////// /// Returns a piece at the given square. pub fn piece_at(&self, sq: Square) -> &Option<Piece> { self.board.get(sq) } /// Returns a bitboard containing pieces of the given player. pub fn player_bb(&self, c: Color) -> &Bitboard { &self.color_bb[c.index()] } /// Returns a bitboard containing pieces of the given type. pub fn piece_bb(&self, p: PieceType) -> &Bitboard { &self.type_bb[p.index()] } /// Returns the number of the given piece in hand. pub fn hand(&self, p: Piece) -> u8 { self.hand.get(p) } /// Returns the side to make a move next. pub fn side_to_move(&self) -> Color { self.side_to_move } /// Returns the number of plies already completed by the current state. 
pub fn ply(&self) -> u16 { self.ply } /// Returns a history of all moves made since the beginning of the game. pub fn move_history(&self) -> &[MoveRecord] { &self.move_history } /// Checks if a player with the given color can declare winning. /// /// See the section 25 in http://www.computer-shogi.org/wcsc26/rule.pdf for more detail. pub fn try_declare_winning(&self, c: Color) -> bool { if c != self.side_to_move { return false; } let king_pos = self.find_king(c); if king_pos.is_none() { return false; } let king_pos = king_pos.unwrap(); if king_pos.relative_rank(c) >= 3 { return false; } let (mut point, count) = PieceType::iter() .filter(|&pt| pt != PieceType::King) .fold((0, 0), |accum, pt| { let unit = match pt { PieceType::Rook | PieceType::Bishop | PieceType::ProRook | PieceType::ProBishop => 5, _ => 1, }; let bb = &(&self.type_bb[pt.index()] & &self.color_bb[c.index()]) & &BBFactory::promote_zone(c); let count = bb.count() as u8; let point = count * unit; (accum.0 + point, accum.1 + count) }); if count < 10 { return false; } point += PieceType::iter() .filter(|pt| pt.is_hand_piece()) .fold(0, |acc, pt| { let num = self.hand.get(Piece { piece_type: pt, color: c, }); let pp = match pt { PieceType::Rook | PieceType::Bishop => 5, _ => 1, }; acc + num * pp }); let lowerbound = match c { Color::Black => 28, Color::White => 27, }; if point < lowerbound { return false; } if self.in_check(c) { return false; } true } /// Checks if the king with the given color is in check. pub fn in_check(&self, c: Color) -> bool { if let Some(king_sq) = self.find_king(c) { self.is_attacked_by(king_sq, c.flip()) } else { false } } /// Sets a piece at the given square. fn set_piece(&mut self, sq: Square, p: Option<Piece>) { self.board.set(sq, p); } fn is_attacked_by(&self, sq: Square, c: Color) -> bool { PieceType::iter().any(|pt| self.get_attackers_of_type(pt, sq, c).is_any()) } fn get_attackers_of_type(&self, pt: PieceType, sq: Square, c: Color) -> Bitboard { let bb = &self.type_bb[pt.index()] & &self.color_bb[c.index()]; if bb.is_empty() { return bb; } let attack_pc = Piece { piece_type: pt, color: c, }; &bb & &self.move_candidates(sq, attack_pc.flip()) } pub fn find_king(&self, c: Color) -> Option<Square> { let mut bb = &self.type_bb[PieceType::King.index()] & &self.color_bb[c.index()]; if bb.is_any() { Some(bb.pop()) } else { None } } fn log_position(&mut self) { // TODO: SFEN string is used to represent a state of position, but any transformation which uniquely distinguish positions can be used here. // Consider light-weight option if generating SFEN string for each move is time-consuming. let sfen = self.generate_sfen().split(' ').take(3).join(" "); let in_check = self.in_check(self.side_to_move()); let continuous_check = if in_check { let past = if self.sfen_history.len() >= 2 { let record = self.sfen_history.get(self.sfen_history.len() - 2).unwrap(); record.1 } else { 0 }; past + 1 } else { 0 }; self.sfen_history.push((sfen, continuous_check)); } ///////////////////////////////////////////////////////////////////////// // Making a move ///////////////////////////////////////////////////////////////////////// /// Makes the given move. Returns `Err` if the move is invalid or any special condition is met. 
pub fn make_move(&mut self, m: Move) -> Result<(), MoveError> { let res = match m { Move::Normal { from, to, promote } => self.make_normal_move(from, to, promote)?, Move::Drop { to, piece_type } => self.make_drop_move(to, piece_type)?, }; self.move_history.push(res); Ok(()) } fn make_normal_move( &mut self, from: Square, to: Square, promoted: bool, ) -> Result<MoveRecord, MoveError> { let stm = self.side_to_move(); let opponent = stm.flip(); let moved = self .piece_at(from) .ok_or(MoveError::Inconsistent("No piece found"))?; let captured = *self.piece_at(to); if moved.color != stm { return Err(MoveError::Inconsistent( "The piece is not for the side to move", )); } if promoted && !from.in_promotion_zone(stm) && !to.in_promotion_zone(stm) { return Err(MoveError::Inconsistent("The piece cannot promote")); } if !self.move_candidates(from, moved).any(|sq| sq == to) { return Err(MoveError::Inconsistent("The piece cannot move to there")); } if !promoted && !moved.is_placeable_at(to) { return Err(MoveError::NonMovablePiece); } let placed = if promoted { match moved.promote() { Some(promoted) => promoted, None => return Err(MoveError::Inconsistent("This type of piece cannot promote")), } } else { moved }; self.set_piece(from, None); self.set_piece(to, Some(placed)); self.occupied_bb ^= from; self.occupied_bb ^= to; self.type_bb[moved.piece_type.index()] ^= from; self.type_bb[placed.piece_type.index()] ^= to; self.color_bb[moved.color.index()] ^= from; self.color_bb[placed.color.index()] ^= to; if let Some(ref cap) = captured { self.occupied_bb ^= to; self.type_bb[cap.piece_type.index()] ^= to; self.color_bb[cap.color.index()] ^= to; let pc = cap.flip(); let pc = match pc.unpromote() { Some(unpromoted) => unpromoted, None => pc, }; self.hand.increment(pc); } if self.in_check(stm) { // Undo-ing the move. self.set_piece(from, Some(moved)); self.set_piece(to, captured); self.occupied_bb ^= from; self.occupied_bb ^= to; self.type_bb[moved.piece_type.index()] ^= from; self.type_bb[placed.piece_type.index()] ^= to; self.color_bb[moved.color.index()] ^= from; self.color_bb[placed.color.index()] ^= to; if let Some(ref cap) = captured { self.occupied_bb ^= to; self.type_bb[cap.piece_type.index()] ^= to; self.color_bb[cap.color.index()] ^= to; let pc = cap.flip(); let pc = match pc.unpromote() { Some(unpromoted) => unpromoted, None => pc, }; self.hand.decrement(pc); } return Err(MoveError::InCheck); } self.side_to_move = opponent; self.ply += 1; self.log_position(); self.detect_repetition()?; Ok(MoveRecord::Normal { from, to, placed, captured, promoted, }) } fn make_drop_move(&mut self, to: Square, pt: PieceType) -> Result<MoveRecord, MoveError> { let stm = self.side_to_move(); let opponent = stm.flip(); if self.piece_at(to).is_some() { return Err(MoveError::Inconsistent("There is already a piece in `to`")); } let pc = Piece { piece_type: pt, color: stm, }; if self.hand(pc) == 0 { return Err(MoveError::Inconsistent("The piece is not in the hand")); } if !pc.is_placeable_at(to) { return Err(MoveError::NonMovablePiece); } if pc.piece_type == PieceType::Pawn { // Nifu check. for i in 0..9 { if let Some(fp) = *self.piece_at(Square::new(to.file(), i).unwrap()) { if fp == pc { return Err(MoveError::Nifu); } } } // Uchifuzume check. if let Some(king_sq) = to.shift(0, if stm == Color::Black { -1 } else { 1 }) { // Is the dropped pawn attacking the opponent's king? if let Some( pc @ Piece { piece_type: PieceType::King, .. 
}, ) = *self.piece_at(king_sq) { if pc.color == opponent { // can any opponent's piece attack the dropped pawn? let pinned = self.pinned_bb(opponent); let not_attacked = PieceType::iter() .filter(|&pt| pt != PieceType::King) .flat_map(|pt| self.get_attackers_of_type(pt, to, opponent)) .all(|sq| (&pinned & sq).is_any()); if not_attacked { // can the opponent's king evade? let is_attacked = |sq| { if let Some(pc) = *self.piece_at(sq) { if pc.color == opponent { return true; } } self.is_attacked_by(sq, stm) }; if self.move_candidates(king_sq, pc).all(is_attacked) { return Err(MoveError::Uchifuzume); } } } } } } self.set_piece(to, Some(pc)); self.occupied_bb ^= to; self.type_bb[pc.piece_type.index()] ^= to; self.color_bb[pc.color.index()] ^= to; if self.in_check(stm) { // Undo-ing the move. self.set_piece(to, None); self.occupied_bb ^= to; self.type_bb[pc.piece_type.index()] ^= to; self.color_bb[pc.color.index()] ^= to; return Err(MoveError::InCheck); } self.hand.decrement(pc); self.side_to_move = opponent; self.ply += 1; self.log_position(); self.detect_repetition()?; Ok(MoveRecord::Drop { to, piece: pc }) } fn pinned_bb(&self, c: Color) -> Bitboard { let ksq = self.find_king(c); if ksq.is_none() { return Bitboard::empty(); } let ksq = ksq.unwrap(); [ ( PieceType::Rook, BBFactory::rook_attack(ksq, &Bitboard::empty()), ), ( PieceType::ProRook, BBFactory::rook_attack(ksq, &Bitboard::empty()), ), ( PieceType::Bishop, BBFactory::bishop_attack(ksq, &Bitboard::empty()), ), ( PieceType::ProBishop, BBFactory::bishop_attack(ksq, &Bitboard::empty()), ), ( PieceType::Lance, BBFactory::lance_attack(c, ksq, &Bitboard::empty()), ), ] .iter() .fold(Bitboard::empty(), |mut accum, &(pt, ref mask)| { let bb = &(&self.type_bb[pt.index()] & &self.color_bb[c.flip().index()]) & mask; for psq in bb { let between = &BBFactory::between(ksq, psq) & &self.occupied_bb; if between.count() == 1 && (&between & &self.color_bb[c.index()]).is_any() { accum |= &between; } } accum }) } /// Undoes the last move. pub fn unmake_move(&mut self) -> Result<(), MoveError> { if self.move_history.is_empty() { // TODO: error? 
return Ok(()); } let last = self.move_history.pop().unwrap(); match last { MoveRecord::Normal { from, to, ref placed, ref captured, promoted, } => { if *self.piece_at(from) != None { return Err(MoveError::Inconsistent( "`from` of the move is filled by another piece", )); } let moved = if promoted { match placed.unpromote() { Some(unpromoted) => unpromoted, None => return Err(MoveError::Inconsistent("Cannot unpromoted the piece")), } } else { *placed }; if *self.piece_at(to) != Some(*placed) { return Err(MoveError::Inconsistent( "Expected piece is not found in `to`", )); } self.set_piece(from, Some(moved)); self.set_piece(to, *captured); self.occupied_bb ^= from; self.occupied_bb ^= to; self.type_bb[moved.piece_type.index()] ^= from; self.type_bb[placed.piece_type.index()] ^= to; self.color_bb[moved.color.index()] ^= from; self.color_bb[placed.color.index()] ^= to; if let Some(ref cap) = *captured { self.occupied_bb ^= to; self.type_bb[cap.piece_type.index()] ^= to; self.color_bb[cap.color.index()] ^= to; let unpromoted_cap = cap.unpromote().unwrap_or(*cap); self.hand.decrement(unpromoted_cap.flip()); } } MoveRecord::Drop { to, piece } => { if *self.piece_at(to) != Some(piece) { return Err(MoveError::Inconsistent( "Expected piece is not found in `to`", )); } self.set_piece(to, None); self.occupied_bb ^= to; self.type_bb[piece.piece_type.index()] ^= to; self.color_bb[piece.color.index()] ^= to; self.hand.increment(piece); } }; self.side_to_move = self.side_to_move.flip(); self.ply -= 1; self.sfen_history.pop(); Ok(()) } /// Returns a list of squares to where the given pieve at the given square can move. pub fn move_candidates(&self, sq: Square, p: Piece) -> Bitboard { let bb = match p.piece_type { PieceType::Rook => BBFactory::rook_attack(sq, &self.occupied_bb), PieceType::Bishop => BBFactory::bishop_attack(sq, &self.occupied_bb), PieceType::Lance => BBFactory::lance_attack(p.color, sq, &self.occupied_bb), PieceType::ProRook => { &BBFactory::rook_attack(sq, &self.occupied_bb) | &BBFactory::attacks_from(PieceType::King, p.color, sq) } PieceType::ProBishop => { &BBFactory::bishop_attack(sq, &self.occupied_bb) | &BBFactory::attacks_from(PieceType::King, p.color, sq) } PieceType::ProSilver | PieceType::ProKnight | PieceType::ProLance | PieceType::ProPawn => BBFactory::attacks_from(PieceType::Gold, p.color, sq), pt => BBFactory::attacks_from(pt, p.color, sq), }; &bb & &!&self.color_bb[p.color.index()] } fn detect_repetition(&self) -> Result<(), MoveError> { if self.sfen_history.len() < 9 { return Ok(()); } let cur = self.sfen_history.last().unwrap(); let mut cnt = 0; for (i, entry) in self.sfen_history.iter().rev().enumerate() { if entry.0 == cur.0 { cnt += 1; if cnt == 4 { let prev = self.sfen_history.get(self.sfen_history.len() - 2).unwrap(); if cur.1 * 2 >= (i as u16) { return Err(MoveError::PerpetualCheckLose); } else if prev.1 * 2 >= (i as u16) { return Err(MoveError::PerpetualCheckWin); } else { return Err(MoveError::Repetition); } } } } Ok(()) } ///////////////////////////////////////////////////////////////////////// // SFEN serialization / deserialization ///////////////////////////////////////////////////////////////////////// pub fn from_sfen(s: &str) -> Result<Position, SfenError> { let mut pos = Position::new(); let r = pos.set_sfen(s); if r.is_ok() { Ok(pos) } else { Err(SfenError {}) } } /// Parses the given SFEN string and updates the game state. 
pub fn set_sfen(&mut self, sfen_str: &str) -> Result<(), SfenError> { let u_sfen = sfen_str.replace("_", " "); let mut parts = u_sfen.split_whitespace(); // Build the initial position, only the board part is required, other must be valid or missing parts .next() .ok_or(SfenError {}) .and_then(|s| self.parse_sfen_board(s))?; let color = parts.next().unwrap_or("b"); self.parse_sfen_stm(color)?; let hand = parts.next().unwrap_or("-"); self.parse_sfen_hand(hand)?; let ply = parts.next().unwrap_or("1"); self.parse_sfen_ply(ply)?; self.sfen_history.clear(); self.log_position(); // Make moves following the initial position, optional. if let Some("moves") = parts.next() { for m in parts { if let Some(m) = Move::from_sfen(m) { // Stop if any error occurrs. match self.make_move(m) { Ok(_) => { self.log_position(); } Err(_) => break, } } else { return Err(SfenError {}); } } } Ok(()) } /// Converts the current state into SFEN formatted string. pub fn to_sfen(&self) -> String { if self.sfen_history.is_empty() { return self.generate_sfen(); } if self.move_history.is_empty() { return format!("{} {}", self.sfen_history.first().unwrap().0, self.ply); } let mut sfen = format!( "{} {} moves", &self.sfen_history.first().unwrap().0, self.ply - self.move_history.len() as u16 ); for m in self.move_history.iter() { sfen.push_str(&format!(" {}", &m.to_sfen())); } sfen } fn parse_sfen_board(&mut self, s: &str) -> Result<(), SfenError> { let rows = s.split('/'); self.occupied_bb = Bitboard::empty(); self.color_bb = Default::default(); self.type_bb = Default::default(); for (i, row) in rows.enumerate() { if i >= 9 { return Err(SfenError {}); } let mut j = 0; let mut is_promoted = false; for c in row.chars() { match c { '+' => { is_promoted = true; } n if n.is_digit(10) => { if let Some(n) = n.to_digit(10) { for _ in 0..n { if j >= 9 { return Err(SfenError {}); } let sq = Square::new(8 - j, i as u8).unwrap(); self.set_piece(sq, None); j += 1; } } } s => match Piece::from_sfen(s) { Some(mut piece) => { if j >= 9 { return Err(SfenError {}); } if is_promoted { if let Some(promoted) = piece.piece_type.promote() { piece.piece_type = promoted; } else { return Err(SfenError {}); } } let sq = Square::new(8 - j, i as u8).unwrap(); self.set_piece(sq, Some(piece)); self.occupied_bb |= sq; self.color_bb[piece.color.index()] |= sq; self.type_bb[piece.piece_type.index()] |= sq; j += 1; is_promoted = false; } None => return Err(SfenError {}), }, } } } Ok(()) } fn parse_sfen_stm(&mut self, s: &str) -> Result<(), SfenError> { self.side_to_move = match s { "b" => Color::Black, "w" => Color::White, _ => return Err(SfenError {}), }; Ok(()) } fn parse_sfen_hand(&mut self, s: &str) -> Result<(), SfenError> { if s == "-" { self.hand.clear(); return Ok(()); } let mut num_pieces: u8 = 0; for c in s.chars() { match c { n if n.is_digit(10) => { if let Some(n) = n.to_digit(10) { num_pieces = num_pieces * 10 + (n as u8); } } s => { match Piece::from_sfen(s) { Some(p) => self .hand .set(p, if num_pieces == 0 { 1 } else { num_pieces }), None => return Err(SfenError {}), }; num_pieces = 0; } } } Ok(()) } fn parse_sfen_ply(&mut self, s: &str) -> Result<(), SfenError> { self.ply = s.parse()?; Ok(()) } fn generate_sfen(&self) -> String { let board = (0..9) .map(|row| { let mut s = String::new(); let mut num_spaces = 0; for file in (0..9).rev() { match *self.piece_at(Square::new(file, row).unwrap()) { Some(pc) => { if num_spaces > 0 { s.push_str(&num_spaces.to_string()); num_spaces = 0; } s.push_str(&pc.to_string()); } None => num_spaces += 1, } } if 
num_spaces > 0 { s.push_str(&num_spaces.to_string()); } s }) .join("/"); let color = if self.side_to_move == Color::Black { "b" } else { "w" }; let mut hand = [Color::Black, Color::White] .iter() .map(|c| { PieceType::iter() .filter(|pt| pt.is_hand_piece()) .map(|pt| { let pc = Piece { piece_type: pt, color: *c, }; let n = self.hand.get(pc); if n == 0 { "".to_string() } else if n == 1 { format!("{}", pc) } else { format!("{}{}", n, pc) } }) .join("") }) .join(""); if hand.is_empty() { hand = "-".to_string(); } format!("{} {} {} {}", board, color, hand, self.ply) } } ///////////////////////////////////////////////////////////////////////////// // Trait implementations ///////////////////////////////////////////////////////////////////////////// impl Default for Position { fn default() -> Position { Position { side_to_move: Color::Black, board: PieceGrid([None; 81]), hand: Default::default(), ply: 1, move_history: Default::default(), sfen_history: Default::default(), occupied_bb: Default::default(), color_bb: Default::default(), type_bb: Default::default(), } } } impl str::FromStr for Position { type Err = SfenError; fn from_str(s: &str) -> Result<Position, Self::Err> { Position::from_sfen(s) } } impl fmt::Display for Position { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { writeln!(f, " 9 8 7 6 5 4 3 2 1")?; writeln!(f, "+---+---+---+---+---+---+---+---+---+")?; for row in 0..9 { write!(f, "|")?; for file in (0..9).rev() { if let Some(ref piece) = *self.piece_at(Square::new(file, row).unwrap()) { write!(f, "{:>3}|", piece.to_string())?; } else { write!(f, " |")?; } } writeln!(f, " {}", (('a' as usize + row as usize) as u8) as char)?; writeln!(f, "+---+---+---+---+---+---+---+---+---+")?; } writeln!( f, "Side to move: {}", if self.side_to_move == Color::Black { "Black" } else { "White" } )?; let fmt_hand = |color: Color, f: &mut fmt::Formatter| -> fmt::Result { for pt in PieceType::iter().filter(|pt| pt.is_hand_piece()) { let pc = Piece { piece_type: pt, color, }; let n = self.hand.get(pc); if n > 0 { write!(f, "{}{} ", pc, n)?; } } Ok(()) }; write!(f, "Hand (Black): ")?; fmt_hand(Color::Black, f)?; writeln!(f)?; write!(f, "Hand (White): ")?; fmt_hand(Color::White, f)?; writeln!(f)?; write!(f, "Ply: {}", self.ply)?; Ok(()) } } #[cfg(test)] mod tests { use super::*; use crate::square::consts::*; fn setup() { BBFactory::init(); } #[test] fn new() { setup(); let pos = Position::new(); for i in 0..9 { for j in 0..9 { let sq = Square::new(i, j).unwrap(); assert_eq!(None, *pos.piece_at(sq)); } } } #[test] fn in_check() { setup(); let test_cases = [ ( "lnsgkgsnl/1r5b1/ppppppppp/9/9/9/PPPPPPPPP/1B5R1/LNSGKGSNL b - 1", false, false, ), ("9/3r5/9/9/6B2/9/9/9/3K5 b P 1", true, false), ( "ln2r1knl/2gb1+Rg2/4Pp1p1/p1pp1sp1p/1N2pN1P1/2P2PP2/PP1G1S2R/1SG6/LK6L w 2PSp 1", false, true, ), ( "lnsg1gsnl/1r5b1/ppppppppp/9/9/9/PPPPPPPPP/1B5R1/LNSG1GSNL b - 1", false, false, ), ]; let mut pos = Position::new(); for case in test_cases.iter() { pos.set_sfen(case.0).expect("failed to parse SFEN string"); assert_eq!(case.1, pos.in_check(Color::Black)); assert_eq!(case.2, pos.in_check(Color::White)); } } #[test] fn player_bb() { setup(); let cases: &[(&str, &[Square], &[Square])] = &[ ( "R6gk/9/8p/9/4p4/9/9/8L/B8 b - 1", &[SQ_9A, SQ_1H, SQ_9I], &[SQ_2A, SQ_1A, SQ_1C, SQ_5E], ), ("9/3r5/9/9/6B2/9/9/9/3K5 b P 1", &[SQ_3E, SQ_6I], &[SQ_6B]), ]; let mut pos = Position::new(); for case in cases { pos.set_sfen(case.0).expect("faled to parse SFEN string"); let black = pos.player_bb(Color::Black); let white = 
pos.player_bb(Color::White); assert_eq!(case.1.len(), black.count() as usize); for sq in case.1 { assert!((black & *sq).is_any()); } assert_eq!(case.2.len(), white.count() as usize); for sq in case.2 { assert!((white & *sq).is_any()); } } } #[test] fn pinned_bb() { setup(); let cases: &[(&str, &[Square], &[Square])] = &[( "R6gk/9/8p/9/4p4/9/9/8L/B8 b - 1", &[], &[SQ_2A, SQ_1C, SQ_5E], )]; let mut pos = Position::new(); for case in cases { pos.set_sfen(case.0).expect("faled to parse SFEN string"); let black = pos.pinned_bb(Color::Black); let white = pos.pinned_bb(Color::White); assert_eq!(case.1.len(), black.count()); for sq in case.1 { assert!((&black & *sq).is_any()); } assert_eq!(case.2.len(), white.count()); for sq in case.2 { assert!((&white & *sq).is_any()); } } } #[test] fn move_candidates() { setup(); let mut pos = Position::new(); pos.set_sfen("lnsgkgsnl/1r5b1/ppppppppp/9/9/9/PPPPPPPPP/1B5R1/LNSGKGSNL b - 1") .expect("failed to parse SFEN string"); let mut sum = 0; for sq in Square::iter() { let pc = pos.piece_at(sq); if let Some(pc) = *pc { if pc.color == pos.side_to_move() { sum += pos.move_candidates(sq, pc).count(); } } } assert_eq!(30, sum); } #[test] fn make_normal_move() { setup(); let base_sfen = "l6nl/5+P1gk/2np1S3/p1p4Pp/3P2Sp1/1PPb2P1P/P5GS1/R8/LN4bKL w GR5pnsg 1"; let test_cases = [ (SQ_2B, SQ_2C, false, true), (SQ_7C, SQ_6E, false, true), (SQ_3I, SQ_4H, true, true), (SQ_6F, SQ_9I, true, true), (SQ_2B, SQ_2C, false, true), (SQ_9C, SQ_9D, false, false), (SQ_9B, SQ_8B, false, false), (SQ_9B, SQ_9D, false, false), (SQ_2B, SQ_2C, true, false), ]; let mut pos = Position::new(); for case in test_cases.iter() { pos.set_sfen(base_sfen) .expect("failed to parse SFEN string"); assert_eq!(case.3, pos.make_normal_move(case.0, case.1, case.2).is_ok()); } // Leaving the checked king is illegal. 
pos.set_sfen("9/3r5/9/9/6B2/9/9/9/3K5 b P 1") .expect("failed to parse SFEN string"); assert!(pos.make_normal_move(SQ_6I, SQ_6H, false).is_err()); pos.set_sfen("9/3r5/9/9/6B2/9/9/9/3K5 b P 1") .expect("failed to parse SFEN string"); assert!(pos.make_normal_move(SQ_6I, SQ_7I, false).is_ok()); } #[test] fn make_drop_move() { setup(); let base_sfen = "l6nl/5+P1gk/2np1S3/p1p4Pp/3P2Sp1/1PPb2P1P/P5GS1/R8/LN4bKL w GR5pnsg 1"; let test_cases = [ (SQ_5E, PieceType::Pawn, true), (SQ_5E, PieceType::Rook, false), (SQ_9A, PieceType::Pawn, false), (SQ_6F, PieceType::Pawn, false), (SQ_9B, PieceType::Pawn, false), (SQ_5I, PieceType::Pawn, false), ]; let mut pos = Position::new(); for case in test_cases.iter() { pos.set_sfen(base_sfen) .expect("failed to parse SFEN string"); assert_eq!( case.2, pos.make_move(Move::Drop { to: case.0, piece_type: case.1, }) .is_ok() ); } } #[test] fn nifu() { setup(); let ng_cases = [( "ln1g5/1ks1g3l/1p2p1n2/p1pGs2rp/1P1N1ppp1/P1SB1P2P/1S1p1bPP1/LKG6/4R2NL \ w 2Pp 91", SQ_6C, )]; let ok_cases = [( "ln1g5/1ks1g3l/1p2p1n2/p1pGs2rp/1P1N1ppp1/P1SB1P2P/1S1+p1bPP1/LKG6/4R2NL \ w 2Pp 91", SQ_6C, )]; let mut pos = Position::new(); for (i, case) in ng_cases.iter().enumerate() { pos.set_sfen(case.0).expect("failed to parse SFEN string"); assert_eq!( Some(MoveError::Nifu), pos.make_move(Move::Drop { to: case.1, piece_type: PieceType::Pawn, }) .err(), "failed at #{}", i ); } for (i, case) in ok_cases.iter().enumerate() { pos.set_sfen(case.0).expect("failed to parse SFEN string"); assert!( pos.make_move(Move::Drop { to: case.1, piece_type: PieceType::Pawn, }) .is_ok(), "failed at #{}", i ); } } #[test] fn uchifuzume() { setup(); let ng_cases = [ ("9/9/7sp/6ppk/9/7G1/9/9/9 b P 1", SQ_1E), ("7nk/9/7S1/6b2/9/9/9/9/9 b P 1", SQ_1B), ("7nk/7g1/6BS1/9/9/9/9/9/9 b P 1", SQ_1B), ("R6gk/9/7S1/9/9/9/9/9/9 b P 1", SQ_1B), ]; let ok_cases = [ ("9/9/7pp/6psk/9/7G1/7N1/9/9 b P 1", SQ_1E), ("7nk/9/7Sg/6b2/9/9/9/9/9 b P 1", SQ_1B), ( "9/8p/3pG1gp1/2p2kl1N/3P1p1s1/lPP6/2SGBP3/PK1S2+p2/LN7 w RSL3Prbg2n4p 1", SQ_8G, ), ]; let mut pos = Position::new(); for (i, case) in ng_cases.iter().enumerate() { pos.set_sfen(case.0).expect("failed to parse SFEN string"); assert_eq!( Some(MoveError::Uchifuzume), pos.make_move(Move::Drop { to: case.1, piece_type: PieceType::Pawn, }) .err(), "failed at #{}", i ); } for (i, case) in ok_cases.iter().enumerate() { pos.set_sfen(case.0).expect("failed to parse SFEN string"); assert!( pos.make_move(Move::Drop { to: case.1, piece_type: PieceType::Pawn, }) .is_ok(), "failed at #{}", i ); } } #[test] fn repetition() { setup(); let mut pos = Position::new(); pos.set_sfen("ln7/ks+R6/pp7/9/9/9/9/9/9 b Ss 1") .expect("failed to parse SFEN string"); for _ in 0..2 { assert!(pos.make_drop_move(SQ_7A, PieceType::Silver).is_ok()); assert!(pos.make_drop_move(SQ_7C, PieceType::Silver).is_ok()); assert!(pos.make_normal_move(SQ_7A, SQ_8B, true).is_ok()); assert!(pos.make_normal_move(SQ_7C, SQ_8B, false).is_ok()); } assert!(pos.make_drop_move(SQ_7A, PieceType::Silver).is_ok()); assert!(pos.make_drop_move(SQ_7C, PieceType::Silver).is_ok()); assert!(pos.make_normal_move(SQ_7A, SQ_8B, true).is_ok()); assert_eq!( Some(MoveError::Repetition), pos.make_normal_move(SQ_7C, SQ_8B, false).err() ); } #[test] fn percetual_check() { setup(); // Case 1. Starting from a check move. 
let mut pos = Position::new(); pos.set_sfen("8l/6+P2/6+Rpk/8p/9/7S1/9/9/9 b - 1") .expect("failed to parse SFEN string"); for _ in 0..2 { assert!(pos.make_normal_move(SQ_3C, SQ_2B, false).is_ok()); assert!(pos.make_normal_move(SQ_1C, SQ_2D, false).is_ok()); assert!(pos.make_normal_move(SQ_2B, SQ_3C, false).is_ok()); assert!(pos.make_normal_move(SQ_2D, SQ_1C, false).is_ok()); } assert!(pos.make_normal_move(SQ_3C, SQ_2B, false).is_ok()); assert!(pos.make_normal_move(SQ_1C, SQ_2D, false).is_ok()); assert!(pos.make_normal_move(SQ_2B, SQ_3C, false).is_ok()); assert_eq!( Some(MoveError::PerpetualCheckWin), pos.make_normal_move(SQ_2D, SQ_1C, false).err() ); // Case 2. Starting from an escape move. pos.set_sfen("6p1k/9/8+R/9/9/9/9/9/9 w - 1") .expect("failed to parse SFEN string"); for _ in 0..2 { assert!(pos.make_normal_move(SQ_1A, SQ_2A, false).is_ok()); assert!(pos.make_normal_move(SQ_1C, SQ_2C, false).is_ok()); assert!(pos.make_normal_move(SQ_2A, SQ_1A, false).is_ok()); assert!(pos.make_normal_move(SQ_2C, SQ_1C, false).is_ok()); } assert!(pos.make_normal_move(SQ_1A, SQ_2A, false).is_ok()); assert!(pos.make_normal_move(SQ_1C, SQ_2C, false).is_ok()); assert!(pos.make_normal_move(SQ_2A, SQ_1A, false).is_ok()); assert_eq!( Some(MoveError::PerpetualCheckLose), pos.make_normal_move(SQ_2C, SQ_1C, false).err() ); } #[test] fn unmake_move() { setup(); let mut pos = Position::new(); let base_sfen = "l6nl/4+p+P1gk/2n2S3/p1p4Pp/3P2Sp1/1PPb2P1P/4+P1GS1/R8/LN4bKL w RG5gsnp 1"; pos.set_sfen(base_sfen) .expect("failed to parse SFEN string"); let base_state = format!("{}", pos); println!("{}", base_state); let test_cases = [ Move::Drop { to: SQ_5E, piece_type: PieceType::Pawn, }, // No capture by unpromoted piece Move::Normal { from: SQ_6F, to: SQ_7G, promote: false, }, // No capture by promoting piece Move::Normal { from: SQ_6F, to: SQ_7G, promote: true, }, // No capture by promoted piece Move::Normal { from: SQ_5B, to: SQ_5A, promote: false, }, // Capture of unpromoted piece by unpromoted piece Move::Normal { from: SQ_6F, to: SQ_9I, promote: false, }, // Capture of unpromoted piece by promoting piece Move::Normal { from: SQ_6F, to: SQ_9I, promote: true, }, // Capture of unpromoted piece by promoted piece Move::Normal { from: SQ_5B, to: SQ_4C, promote: false, }, // Capture of promoted piece by unpromoted piece Move::Normal { from: SQ_6F, to: SQ_5G, promote: false, }, // Capture of promoted piece by promoting piece Move::Normal { from: SQ_6F, to: SQ_5G, promote: true, }, // Capture of promoted piece by promoted piece Move::Normal { from: SQ_5B, to: SQ_4B, promote: false, }, ]; for case in test_cases.iter() { pos.set_sfen(base_sfen) .expect("failed to parse SFEN string"); pos.make_move(*case) .unwrap_or_else(|_| panic!("failed to make a move: {}", case)); pos.unmake_move() .unwrap_or_else(|_| panic!("failed to unmake a move: {}", case)); assert_eq!( base_sfen, pos.to_sfen(), "{}", format!("sfen unmatch for {}", case).as_str() ); assert_eq!( base_state, format!("{}", pos), "{}", format!("state unmatch for {}", case).as_str() ); } } #[test] fn try_declare_winning() { setup(); let mut pos = Position::new(); pos.set_sfen("lnsgkgsnl/1r5b1/ppppppppp/9/9/9/PPPPPPPPP/1B5R1/LNSGKGSNL b - 1") .expect("failed to parse SFEN string"); assert!(!pos.try_declare_winning(Color::Black)); assert!(!pos.try_declare_winning(Color::White)); pos.set_sfen("1K7/+NG+N+NGG3/P+S+P+P+PS3/9/7s1/9/+b+rppp+p+s1+p/3+p1+bk2/9 b R4L7Pgnp 1") .expect("failed to parse SFEN string"); assert!(pos.try_declare_winning(Color::Black)); 
assert!(!pos.try_declare_winning(Color::White)); pos.set_sfen( "1K6l/1+N7/+PG2+Ns1p1/2+N5p/6p2/3+b4P/4+p+p+bs1/+r1s4+lk/1g1g3+r1 w \ Gns2l11p 1", ) .expect("failed to parse SFEN string"); assert!(!pos.try_declare_winning(Color::Black)); assert!(pos.try_declare_winning(Color::White)); pos.set_sfen( "1K6l/1+N7/+PG2+Ns1p1/2+N5p/6p2/3+b4P/4+p+p+bs1/+r1s4+lk/1g1g3+r1 b \ Gns2l11p 1", ) .expect("failed to parse SFEN string"); assert!(!pos.try_declare_winning(Color::Black)); assert!(!pos.try_declare_winning(Color::White)); pos.set_sfen( "1K6l/1+N7/+PG2+Ns1p1/2+N5p/6p2/3+b4P/4+p+p+bs1/+r1s4+l1/1g1g3+r1 b \ Gns2l11p 1", ) .expect("failed to parse SFEN string"); assert!(!pos.try_declare_winning(Color::Black)); assert!(!pos.try_declare_winning(Color::White)); pos.set_sfen( "1K6l/1+N7/+PG2+Ns1p1/2+N5p/6p2/1k1+b4P/4+p+p+bs1/+r1s4+l1/1g1g3+r1 b \ Gns2l11p 1", ) .expect("failed to parse SFEN string"); assert!(!pos.try_declare_winning(Color::Black)); assert!(!pos.try_declare_winning(Color::White)); pos.set_sfen( "1K6l/1+N7/+PG2+Ns1p1/2+N5p/6p2/3+b4P/4+p+p+bs1/+r1s4+lk/1g1g3+rG w \ ns2l11p 1", ) .expect("failed to parse SFEN string"); assert!(!pos.try_declare_winning(Color::Black)); assert!(!pos.try_declare_winning(Color::White)); pos.set_sfen("1K6l/1+N7/+PG2+Ns1p1/2+N5p/6p2/3+b4P/5+p+bs1/+r1s4+lk/1g1g3+rG w ns2l12p 1") .expect("failed to parse SFEN string"); assert!(!pos.try_declare_winning(Color::Black)); assert!(!pos.try_declare_winning(Color::White)); } #[test] fn set_sfen_normal() { setup(); let mut pos = Position::new(); pos.set_sfen("lnsgkgsnl/1r5b1/ppppppppp/9/9/9/PPPPPPPPP/1B5R1/LNSGKGSNL b - 1") .expect("failed to parse SFEN string"); let filled_squares = [ (0, 0, PieceType::Lance, Color::White), (1, 0, PieceType::Knight, Color::White), (2, 0, PieceType::Silver, Color::White), (3, 0, PieceType::Gold, Color::White), (4, 0, PieceType::King, Color::White), (5, 0, PieceType::Gold, Color::White), (6, 0, PieceType::Silver, Color::White), (7, 0, PieceType::Knight, Color::White), (8, 0, PieceType::Lance, Color::White), (7, 1, PieceType::Rook, Color::White), (1, 1, PieceType::Bishop, Color::White), (0, 2, PieceType::Pawn, Color::White), (1, 2, PieceType::Pawn, Color::White), (2, 2, PieceType::Pawn, Color::White), (3, 2, PieceType::Pawn, Color::White), (4, 2, PieceType::Pawn, Color::White), (5, 2, PieceType::Pawn, Color::White), (6, 2, PieceType::Pawn, Color::White), (7, 2, PieceType::Pawn, Color::White), (8, 2, PieceType::Pawn, Color::White), (0, 6, PieceType::Pawn, Color::Black), (1, 6, PieceType::Pawn, Color::Black), (2, 6, PieceType::Pawn, Color::Black), (3, 6, PieceType::Pawn, Color::Black), (4, 6, PieceType::Pawn, Color::Black), (5, 6, PieceType::Pawn, Color::Black), (6, 6, PieceType::Pawn, Color::Black), (7, 6, PieceType::Pawn, Color::Black), (8, 6, PieceType::Pawn, Color::Black), (7, 7, PieceType::Bishop, Color::Black), (1, 7, PieceType::Rook, Color::Black), (0, 8, PieceType::Lance, Color::Black), (1, 8, PieceType::Knight, Color::Black), (2, 8, PieceType::Silver, Color::Black), (3, 8, PieceType::Gold, Color::Black), (4, 8, PieceType::King, Color::Black), (5, 8, PieceType::Gold, Color::Black), (6, 8, PieceType::Silver, Color::Black), (7, 8, PieceType::Knight, Color::Black), (8, 8, PieceType::Lance, Color::Black), ]; let empty_squares = [ (0, 1, 1), (2, 1, 5), (8, 1, 1), (0, 3, 9), (0, 4, 9), (0, 5, 9), (0, 7, 1), (2, 7, 5), (8, 7, 1), ]; let hand_pieces = [ (PieceType::Pawn, 0), (PieceType::Lance, 0), (PieceType::Knight, 0), (PieceType::Silver, 0), (PieceType::Gold, 0), (PieceType::Rook, 0), 
(PieceType::Bishop, 0), ]; for case in filled_squares.iter() { let (file, row, pt, c) = *case; assert_eq!( Some(Piece { piece_type: pt, color: c, }), *pos.piece_at(Square::new(file, row).unwrap()) ); } for case in empty_squares.iter() { let (file, row, len) = *case; for i in file..(file + len) { assert_eq!(None, *pos.piece_at(Square::new(i, row).unwrap())); } } for case in hand_pieces.iter() { let (pt, n) = *case; assert_eq!( n, pos.hand(Piece { piece_type: pt, color: Color::Black, }) ); assert_eq!( n, pos.hand(Piece { piece_type: pt, color: Color::White, }) ); } assert_eq!(Color::Black, pos.side_to_move()); assert_eq!(1, pos.ply()); } #[test] fn to_sfen() { setup(); let test_cases = [ "7k1/9/7P1/9/9/9/9/9/9 b G2r2b3g4s4n4l17p 1", "lnsgkgsnl/1r5b1/ppppppppp/9/9/9/PPPPPPPPP/1B5R1/LNSGKGSNL b - 1", "lnsgk+Lpnl/1p5+B1/p1+Pps1ppp/9/9/9/P+r1PPpPPP/1R7/LNSGKGSN1 w BGP2p \ 1024", ]; let mut pos = Position::new(); for case in test_cases.iter() { pos.set_sfen(case).expect("failed to parse SFEN string"); assert_eq!(*case, pos.to_sfen()); } } #[test] fn set_sfen_custom() { setup(); let mut pos = Position::new(); pos.set_sfen("lnsgk+Lpnl/1p5+B1/p1+Pps1ppp/9/9/9/P+r1PPpPPP/1R7/LNSGKGSN1 w BGP2p 1024") .expect("failed to parse SFEN string"); let filled_squares = [ (8, 0, PieceType::Lance, Color::White), (7, 0, PieceType::Knight, Color::White), (6, 0, PieceType::Silver, Color::White), (5, 0, PieceType::Gold, Color::White), (4, 0, PieceType::King, Color::White), (3, 0, PieceType::ProLance, Color::Black), (2, 0, PieceType::Pawn, Color::White), (1, 0, PieceType::Knight, Color::White), (0, 0, PieceType::Lance, Color::White), (7, 1, PieceType::Pawn, Color::White), (1, 1, PieceType::ProBishop, Color::Black), (8, 2, PieceType::Pawn, Color::White), (6, 2, PieceType::ProPawn, Color::Black), (5, 2, PieceType::Pawn, Color::White), (4, 2, PieceType::Silver, Color::White), (2, 2, PieceType::Pawn, Color::White), (1, 2, PieceType::Pawn, Color::White), (0, 2, PieceType::Pawn, Color::White), (8, 6, PieceType::Pawn, Color::Black), (7, 6, PieceType::ProRook, Color::White), (5, 6, PieceType::Pawn, Color::Black), (4, 6, PieceType::Pawn, Color::Black), (3, 6, PieceType::Pawn, Color::White), (2, 6, PieceType::Pawn, Color::Black), (1, 6, PieceType::Pawn, Color::Black), (0, 6, PieceType::Pawn, Color::Black), (7, 7, PieceType::Rook, Color::Black), (8, 8, PieceType::Lance, Color::Black), (7, 8, PieceType::Knight, Color::Black), (6, 8, PieceType::Silver, Color::Black), (5, 8, PieceType::Gold, Color::Black), (4, 8, PieceType::King, Color::Black), (3, 8, PieceType::Gold, Color::Black), (2, 8, PieceType::Silver, Color::Black), (1, 8, PieceType::Knight, Color::Black), ]; let empty_squares = [ (0, 1, 1), (2, 1, 5), (8, 1, 1), (3, 2, 1), (7, 2, 1), (0, 3, 9), (0, 4, 9), (0, 5, 9), (6, 6, 1), (0, 7, 7), (8, 7, 1), (0, 8, 1), ]; let hand_pieces = [ ( Piece { piece_type: PieceType::Pawn, color: Color::Black, }, 1, ), ( Piece { piece_type: PieceType::Gold, color: Color::Black, }, 1, ), ( Piece { piece_type: PieceType::Bishop, color: Color::Black, }, 1, ), ( Piece { piece_type: PieceType::Pawn, color: Color::White, }, 2, ), ]; for case in filled_squares.iter() { let (file, row, pt, c) = *case; assert_eq!( Some(Piece { piece_type: pt, color: c, }), *pos.piece_at(Square::new(file, row).unwrap()) ); } for case in empty_squares.iter() { let (file, row, len) = *case; for i in file..(file + len) { assert_eq!(None, *pos.piece_at(Square::new(i, row).unwrap())); } } for case in hand_pieces.iter() { let (p, n) = *case; assert_eq!(n, pos.hand(p)); } 
assert_eq!(Color::White, pos.side_to_move()); assert_eq!(1024, pos.ply()); } }
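A minimal, hedged usage sketch of the Position API defined above, mirroring the doctest and the unmake_move test; the shogi::... import paths are assumed to match the doctest, and the round-trip assertion follows the behavior the tests exercise:

use shogi::{Move, Position};
use shogi::bitboard::Factory as BBFactory;
use shogi::square::consts::*;

fn main() {
    BBFactory::init();
    let mut pos = Position::new();
    pos.set_sfen("lnsgkgsnl/1r5b1/ppppppppp/9/9/9/PPPPPPPPP/1B5R1/LNSGKGSNL b - 1")
        .unwrap();

    let sfen_before = pos.to_sfen();
    pos.make_move(Move::Normal { from: SQ_7G, to: SQ_7F, promote: false })
        .unwrap();
    // unmake_move restores the previous state, as the unmake_move test asserts.
    pos.unmake_move().unwrap();
    assert_eq!(sfen_before, pos.to_sfen());
}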
<reponame>NDNLink/NDN-Chord /* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */ /* * Copyright (c) 2005 INRIA * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation; * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Author: <NAME> <<EMAIL>> */ #include "ns3/log.h" #include "ns3/pointer.h" #include "dca-txop.h" #include "dcf-manager.h" #include "dcf-state.h" #include "wifi-mac-queue.h" #include "mac-tx-middle.h" #undef NS_LOG_APPEND_CONTEXT #define NS_LOG_APPEND_CONTEXT if (m_low != 0) { std::clog << "[mac=" << m_low->GetAddress () << "] "; } namespace ns3 { NS_LOG_COMPONENT_DEFINE ("DcaTxop"); NS_OBJECT_ENSURE_REGISTERED (DcaTxop); TypeId DcaTxop::GetTypeId (void) { static TypeId tid = TypeId ("ns3::DcaTxop") .SetParent<ns3::Object> () .SetGroupName ("Wifi") .AddConstructor<DcaTxop> () .AddAttribute ("MinCw", "The minimum value of the contention window.", UintegerValue (15), MakeUintegerAccessor (&DcaTxop::SetMinCw, &DcaTxop::GetMinCw), MakeUintegerChecker<uint32_t> ()) .AddAttribute ("MaxCw", "The maximum value of the contention window.", UintegerValue (1023), MakeUintegerAccessor (&DcaTxop::SetMaxCw, &DcaTxop::GetMaxCw), MakeUintegerChecker<uint32_t> ()) .AddAttribute ("Aifsn", "The AIFSN: the default value conforms to simple DCA.", UintegerValue (2), MakeUintegerAccessor (&DcaTxop::SetAifsn, &DcaTxop::GetAifsn), MakeUintegerChecker<uint32_t> ()) .AddAttribute ("TxopLimit", "The TXOP limit: the default value conforms to simple DCA.", TimeValue (MilliSeconds (0)), MakeTimeAccessor (&DcaTxop::SetTxopLimit, &DcaTxop::GetTxopLimit), MakeTimeChecker ()) .AddAttribute ("Queue", "The WifiMacQueue object", PointerValue (), MakePointerAccessor (&DcaTxop::GetQueue), MakePointerChecker<WifiMacQueue> ()) ; return tid; } DcaTxop::DcaTxop () : m_manager (0), m_currentPacket (0) { NS_LOG_FUNCTION (this); m_dcf = CreateObject<DcfState> (this); m_queue = CreateObject<WifiMacQueue> (); m_rng = CreateObject<UniformRandomVariable> (); } DcaTxop::~DcaTxop () { NS_LOG_FUNCTION (this); } void DcaTxop::DoDispose (void) { NS_LOG_FUNCTION (this); m_queue = 0; m_low = 0; m_stationManager = 0; m_dcf = 0; m_rng = 0; m_txMiddle = 0; } void DcaTxop::SetManager (const Ptr<DcfManager> manager) { NS_LOG_FUNCTION (this << manager); m_manager = manager; m_manager->Add (m_dcf); } void DcaTxop::SetTxMiddle (const Ptr<MacTxMiddle> txMiddle) { NS_LOG_FUNCTION (this); m_txMiddle = txMiddle; } void DcaTxop::SetLow (const Ptr<MacLow> low) { NS_LOG_FUNCTION (this << low); m_low = low; } void DcaTxop::SetWifiRemoteStationManager (const Ptr<WifiRemoteStationManager> remoteManager) { NS_LOG_FUNCTION (this << remoteManager); m_stationManager = remoteManager; } void DcaTxop::SetTxOkCallback (TxOk callback) { NS_LOG_FUNCTION (this << &callback); m_txOkCallback = callback; } void DcaTxop::SetTxFailedCallback (TxFailed callback) { NS_LOG_FUNCTION (this << &callback); m_txFailedCallback = callback; } void DcaTxop::SetTxDroppedCallback (TxDropped callback) { NS_LOG_FUNCTION (this << &callback); 
m_txDroppedCallback = callback; m_queue->TraceConnectWithoutContext ("Drop", MakeCallback (&DcaTxop::TxDroppedPacket, this)); } void DcaTxop::TxDroppedPacket (Ptr<const WifiMacQueueItem> item) { if (!m_txDroppedCallback.IsNull ()) { m_txDroppedCallback (item->GetPacket ()); } } Ptr<WifiMacQueue > DcaTxop::GetQueue () const { NS_LOG_FUNCTION (this); return m_queue; } void DcaTxop::SetMinCw (uint32_t minCw) { NS_LOG_FUNCTION (this << minCw); m_dcf->SetCwMin (minCw); } void DcaTxop::SetMaxCw (uint32_t maxCw) { NS_LOG_FUNCTION (this << maxCw); m_dcf->SetCwMax (maxCw); } void DcaTxop::SetAifsn (uint32_t aifsn) { NS_LOG_FUNCTION (this << aifsn); m_dcf->SetAifsn (aifsn); } void DcaTxop::SetTxopLimit (Time txopLimit) { NS_LOG_FUNCTION (this << txopLimit); m_dcf->SetTxopLimit (txopLimit); } uint32_t DcaTxop::GetMinCw (void) const { return m_dcf->GetCwMin (); } uint32_t DcaTxop::GetMaxCw (void) const { return m_dcf->GetCwMax (); } uint32_t DcaTxop::GetAifsn (void) const { return m_dcf->GetAifsn (); } Time DcaTxop::GetTxopLimit (void) const { return m_dcf->GetTxopLimit (); } void DcaTxop::Queue (Ptr<const Packet> packet, const WifiMacHeader &hdr) { NS_LOG_FUNCTION (this << packet << &hdr); m_stationManager->PrepareForQueue (hdr.GetAddr1 (), &hdr, packet); m_queue->Enqueue (Create<WifiMacQueueItem> (packet, hdr)); StartAccessIfNeeded (); } int64_t DcaTxop::AssignStreams (int64_t stream) { NS_LOG_FUNCTION (this << stream); m_rng->SetStream (stream); return 1; } void DcaTxop::RestartAccessIfNeeded (void) { NS_LOG_FUNCTION (this); if ((m_currentPacket != 0 || !m_queue->IsEmpty ()) && !m_dcf->IsAccessRequested ()) { m_manager->RequestAccess (m_dcf); } } void DcaTxop::StartAccessIfNeeded (void) { NS_LOG_FUNCTION (this); if (m_currentPacket == 0 && !m_queue->IsEmpty () && !m_dcf->IsAccessRequested ()) { m_manager->RequestAccess (m_dcf); } } Ptr<MacLow> DcaTxop::GetLow (void) const { NS_LOG_FUNCTION (this); return m_low; } void DcaTxop::DoInitialize () { NS_LOG_FUNCTION (this); m_dcf->ResetCw (); m_dcf->StartBackoffNow (m_rng->GetInteger (0, m_dcf->GetCw ())); } bool DcaTxop::NeedRtsRetransmission (Ptr<const Packet> packet, const WifiMacHeader &hdr) { NS_LOG_FUNCTION (this); return m_stationManager->NeedRtsRetransmission (hdr.GetAddr1 (), &hdr, packet); } bool DcaTxop::NeedDataRetransmission (Ptr<const Packet> packet, const WifiMacHeader &hdr) { NS_LOG_FUNCTION (this); return m_stationManager->NeedDataRetransmission (hdr.GetAddr1 (), &hdr, packet); } bool DcaTxop::NeedFragmentation (void) const { NS_LOG_FUNCTION (this); return m_stationManager->NeedFragmentation (m_currentHdr.GetAddr1 (), &m_currentHdr, m_currentPacket); } void DcaTxop::NextFragment (void) { NS_LOG_FUNCTION (this); m_fragmentNumber++; } uint32_t DcaTxop::GetFragmentSize (void) const { NS_LOG_FUNCTION (this); return m_stationManager->GetFragmentSize (m_currentHdr.GetAddr1 (), &m_currentHdr, m_currentPacket, m_fragmentNumber); } bool DcaTxop::IsLastFragment (void) const { NS_LOG_FUNCTION (this); return m_stationManager->IsLastFragment (m_currentHdr.GetAddr1 (), &m_currentHdr, m_currentPacket, m_fragmentNumber); } uint32_t DcaTxop::GetNextFragmentSize (void) const { NS_LOG_FUNCTION (this); return m_stationManager->GetFragmentSize (m_currentHdr.GetAddr1 (), &m_currentHdr, m_currentPacket, m_fragmentNumber + 1); } uint32_t DcaTxop::GetFragmentOffset (void) const { NS_LOG_FUNCTION (this); return m_stationManager->GetFragmentOffset (m_currentHdr.GetAddr1 (), &m_currentHdr, m_currentPacket, m_fragmentNumber); } Ptr<Packet> DcaTxop::GetFragmentPacket 
(WifiMacHeader *hdr) { NS_LOG_FUNCTION (this << hdr); *hdr = m_currentHdr; hdr->SetFragmentNumber (m_fragmentNumber); uint32_t startOffset = GetFragmentOffset (); Ptr<Packet> fragment; if (IsLastFragment ()) { hdr->SetNoMoreFragments (); } else { hdr->SetMoreFragments (); } fragment = m_currentPacket->CreateFragment (startOffset, GetFragmentSize ()); return fragment; } void DcaTxop::NotifyAccessGranted (void) { NS_LOG_FUNCTION (this); if (m_currentPacket == 0) { if (m_queue->IsEmpty ()) { NS_LOG_DEBUG ("queue empty"); return; } Ptr<WifiMacQueueItem> item = m_queue->Dequeue (); NS_ASSERT (item != 0); m_currentPacket = item->GetPacket (); m_currentHdr = item->GetHeader (); NS_ASSERT (m_currentPacket != 0); uint16_t sequence = m_txMiddle->GetNextSequenceNumberFor (&m_currentHdr); m_currentHdr.SetSequenceNumber (sequence); m_stationManager->UpdateFragmentationThreshold (); m_currentHdr.SetFragmentNumber (0); m_currentHdr.SetNoMoreFragments (); m_currentHdr.SetNoRetry (); m_fragmentNumber = 0; NS_LOG_DEBUG ("dequeued size=" << m_currentPacket->GetSize () << ", to=" << m_currentHdr.GetAddr1 () << ", seq=" << m_currentHdr.GetSequenceControl ()); } m_currentParams.DisableOverrideDurationId (); if (m_currentHdr.GetAddr1 ().IsGroup ()) { m_currentParams.DisableRts (); m_currentParams.DisableAck (); m_currentParams.DisableNextData (); GetLow ()->StartTransmission (m_currentPacket, &m_currentHdr, m_currentParams, this); NS_LOG_DEBUG ("tx broadcast"); } else { m_currentParams.EnableAck (); if (NeedFragmentation ()) { WifiMacHeader hdr; Ptr<Packet> fragment = GetFragmentPacket (&hdr); if (IsLastFragment ()) { NS_LOG_DEBUG ("fragmenting last fragment size=" << fragment->GetSize ()); m_currentParams.DisableNextData (); } else { NS_LOG_DEBUG ("fragmenting size=" << fragment->GetSize ()); m_currentParams.EnableNextData (GetNextFragmentSize ()); } GetLow ()->StartTransmission (fragment, &hdr, m_currentParams, this); } else { m_currentParams.DisableNextData (); GetLow ()->StartTransmission (m_currentPacket, &m_currentHdr, m_currentParams, this); } } } void DcaTxop::NotifyInternalCollision (void) { NS_LOG_FUNCTION (this); NotifyCollision (); } void DcaTxop::NotifyCollision (void) { NS_LOG_FUNCTION (this); m_dcf->StartBackoffNow (m_rng->GetInteger (0, m_dcf->GetCw ())); RestartAccessIfNeeded (); } void DcaTxop::NotifyChannelSwitching (void) { NS_LOG_FUNCTION (this); m_queue->Flush (); m_currentPacket = 0; } void DcaTxop::NotifySleep (void) { NS_LOG_FUNCTION (this); if (m_currentPacket != 0) { m_queue->PushFront (Create<WifiMacQueueItem> (m_currentPacket, m_currentHdr)); m_currentPacket = 0; } } void DcaTxop::NotifyWakeUp (void) { NS_LOG_FUNCTION (this); RestartAccessIfNeeded (); } void DcaTxop::MissedCts (void) { NS_LOG_FUNCTION (this); NS_LOG_DEBUG ("missed cts"); if (!NeedRtsRetransmission (m_currentPacket, m_currentHdr)) { NS_LOG_DEBUG ("Cts Fail"); m_stationManager->ReportFinalRtsFailed (m_currentHdr.GetAddr1 (), &m_currentHdr); if (!m_txFailedCallback.IsNull ()) { m_txFailedCallback (m_currentHdr); } //to reset the dcf. m_currentPacket = 0; m_dcf->ResetCw (); } else { m_dcf->UpdateFailedCw (); } m_dcf->StartBackoffNow (m_rng->GetInteger (0, m_dcf->GetCw ())); RestartAccessIfNeeded (); } void DcaTxop::GotAck (void) { NS_LOG_FUNCTION (this); if (!NeedFragmentation () || IsLastFragment ()) { NS_LOG_DEBUG ("got ack. tx done."); if (!m_txOkCallback.IsNull ()) { m_txOkCallback (m_currentHdr); } /* we are not fragmenting or we are done fragmenting * so we can get rid of that packet now. 
*/ m_currentPacket = 0; m_dcf->ResetCw (); m_dcf->StartBackoffNow (m_rng->GetInteger (0, m_dcf->GetCw ())); RestartAccessIfNeeded (); } else { NS_LOG_DEBUG ("got ack. tx not done, size=" << m_currentPacket->GetSize ()); } } void DcaTxop::MissedAck (void) { NS_LOG_FUNCTION (this); NS_LOG_DEBUG ("missed ack"); if (!NeedDataRetransmission (m_currentPacket, m_currentHdr)) { NS_LOG_DEBUG ("Ack Fail"); m_stationManager->ReportFinalDataFailed (m_currentHdr.GetAddr1 (), &m_currentHdr); if (!m_txFailedCallback.IsNull ()) { m_txFailedCallback (m_currentHdr); } //to reset the dcf. m_currentPacket = 0; m_dcf->ResetCw (); } else { NS_LOG_DEBUG ("Retransmit"); m_currentHdr.SetRetry (); m_dcf->UpdateFailedCw (); } m_dcf->StartBackoffNow (m_rng->GetInteger (0, m_dcf->GetCw ())); RestartAccessIfNeeded (); } void DcaTxop::StartNextFragment (void) { NS_LOG_FUNCTION (this); NS_LOG_DEBUG ("start next packet fragment"); /* this callback is used only for fragments. */ NextFragment (); WifiMacHeader hdr; Ptr<Packet> fragment = GetFragmentPacket (&hdr); m_currentParams.EnableAck (); m_currentParams.DisableRts (); m_currentParams.DisableOverrideDurationId (); if (IsLastFragment ()) { m_currentParams.DisableNextData (); } else { m_currentParams.EnableNextData (GetNextFragmentSize ()); } GetLow ()->StartTransmission (fragment, &hdr, m_currentParams, this); } void DcaTxop::Cancel (void) { NS_LOG_FUNCTION (this); NS_LOG_DEBUG ("transmission cancelled"); } void DcaTxop::EndTxNoAck (void) { NS_LOG_FUNCTION (this); NS_LOG_DEBUG ("a transmission that did not require an ACK just finished"); m_currentPacket = 0; m_dcf->ResetCw (); m_dcf->StartBackoffNow (m_rng->GetInteger (0, m_dcf->GetCw ())); StartAccessIfNeeded (); } bool DcaTxop::IsEdca () { return false; } void DcaTxop::StartNextPacket (void) { NS_LOG_WARN ("StartNext should not be called for non QoS!"); } void DcaTxop::GotBlockAck (const CtrlBAckResponseHeader *blockAck, Mac48Address recipient, double rxSnr, WifiMode txMode, double dataSnr) { NS_LOG_WARN ("GotBlockAck should not be called for non QoS!"); } void DcaTxop::MissedBlockAck (uint8_t nMpdus) { NS_LOG_WARN ("MissedBlockAck should not be called for non QoS!"); } bool DcaTxop::HasTxop (void) const { return false; } } //namespace ns3
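A hedged configuration sketch based solely on the attribute list in GetTypeId above; in a real simulation the Wi-Fi MAC creates and owns the DcaTxop, so this standalone construction is illustrative only (standard ns-3 idioms, not taken from the source):

// Illustrative only: the values shown are the documented defaults.
Ptr<DcaTxop> dca = CreateObject<DcaTxop> ();
dca->SetAttribute ("MinCw", UintegerValue (15));
dca->SetAttribute ("MaxCw", UintegerValue (1023));
dca->SetAttribute ("Aifsn", UintegerValue (2));
dca->SetAttribute ("TxopLimit", TimeValue (MilliSeconds (0)));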
from platform import architecture


def is_64bit():
    # True when the interpreter reports a 64-bit build, e.g. ("64bit", "ELF").
    return architecture()[0] == "64bit"
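An equivalent check often used in practice (not from the source) inspects sys.maxsize instead of the architecture string; the helper name here is hypothetical:

import sys


def is_64bit_via_maxsize():
    # sys.maxsize is 2**63 - 1 on 64-bit CPython builds and 2**31 - 1 on 32-bit ones.
    return sys.maxsize > 2**32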
#include <immintrin.h> // AVX intrinsics (_mm256_*)

/// Returns whether all elements of `xs` represent valid spins, i.e. are
/// exactly +1.0f or -1.0f.
static auto is_valid_spin(__m256 const xs) noexcept -> bool {
    // Each compare yields an all-ones lane where the element matches; OR-ing the
    // two results and taking the per-lane sign-bit mask gives 0xFF exactly when
    // every one of the 8 lanes is +1.0f or -1.0f.
    return _mm256_movemask_ps(_mm256_or_ps(
               _mm256_cmp_ps(xs, _mm256_set1_ps(1.0f), _CMP_EQ_OQ),
               _mm256_cmp_ps(xs, _mm256_set1_ps(-1.0f), _CMP_EQ_OQ)))
           == 0xFF;
}
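For reference, a scalar version of the same predicate; is_valid_spin_scalar is a hypothetical name, and the array-of-eight view of the vector is the stated assumption:

// Hypothetical scalar equivalent: each of the 8 lanes must be exactly +/-1.0f.
static auto is_valid_spin_scalar(float const (&xs)[8]) noexcept -> bool {
    for (float const x : xs) {
        if (x != 1.0f && x != -1.0f) { return false; }
    }
    return true;
}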
package fast3d.complex.light; import fast3d.graphics.Color; import fast3d.graphics.Texture; /** * combines all data used to render under light * * @author <NAME> */ public class Material extends java.lang.Object { /** * the color used to illuminate with ambient light (background-light) */ public final Color ambient; /** * the color used to illuminate with diffuse light (directional/point-light) */ public final Color diffuse; /** * the color used to illuminate with specular light (shine-light) */ public final Color specular; /** * the factor how wide the shine-dot of the shine-light is */ public double shininess; /** * the color that is visible at minimum even if no light source shines to * the surfaces with this material */ public final Color emissive; /** * the alpha-component of the colors is not respected due to illumination * has to be on solid surfaces - this alpha is applied to the visible color * as result of the lighting-calculation<br> * opaque is 1d, transparent is 0d */ public double alpha = 1; /** * if not null an AdvTriangle has a lighted texture on it */ public Texture texture; /** * all colors will be white, shininess=1, alpha=1, no texture */ public Material() { this(new Color(1, 1, 1), new Color(1, 1, 1), new Color(1, 1, 1), new Color(1, 1, 1), 1, null); } /** * initiates the materials data * * @param ambient * the ambient-field value * @param diffuse * the diffuse-field value * @param specular * the specular-field value * @param emissive * the emissive-field value * @param shininess * the shininess-field value * @param texture * the texture-field value */ public Material(final Color ambient, final Color diffuse, final Color specular, final Color emissive, final double shininess, final Texture texture) { this.ambient = ambient; this.diffuse = diffuse; this.specular = specular; this.emissive = emissive; this.shininess = shininess; this.texture = texture; } /** * initiates the materials data * * @param ambient * the ambient-field value * @param diffuse * the diffuse-field value * @param specular * the specular-field value * @param emissive * the emissive-field value * @param shininess * the shininess-field value * @param alpha * the alpha-field value * @param texture * the texture-field value */ public Material(final Color ambient, final Color diffuse, final Color specular, final Color emissive, final double shininess, final double alpha, final Texture texture) { this.ambient = ambient; this.diffuse = diffuse; this.specular = specular; this.emissive = emissive; this.shininess = shininess; this.alpha = alpha; this.texture = texture; } /** * initiates the materials data, opaque, no texture * * @param ambient * the ambient-field value * @param diffuse * the diffuse-field value * @param specular * the specular-field value * @param emissive * the emissive-field value * @param shininess * the shininess-field value */ public Material(Color ambient, Color diffuse, Color specular, Color emissive, double shininess) { this(ambient, diffuse, specular, emissive, shininess, null); } /** * initiates the materials data, no texture * * @param ambient * the ambient-field value * @param diffuse * the diffuse-field value * @param specular * the specular-field value * @param emissive * the emissive-field value * @param shininess * the shininess-field value * @param alpha * the alpha-field value */ public Material(Color ambient, Color diffuse, Color specular, Color emissive, double shininess, double alpha) { this(ambient, diffuse, specular, emissive, shininess, alpha, null); } /** * @return the 
shininess-field value */ public double getShininess() { return shininess; } /** * @return the alpha-field value */ public double getAlpha() { return alpha; } /** * @return a clone of the ambient-field value */ public Color getAmbient() { return ambient.clone(); } /** * @return a clone of the diffuse-field value */ public Color getDiffuse() { return diffuse.clone(); } /** * @return a clone of the specular-field value */ public Color getSpecular() { return specular.clone(); } /** * @return a clone of the emissive-field value */ public Color getEmissive() { return emissive.clone(); } /** * clones the field data */ @Override public Material clone() { return new Material(ambient.clone(), diffuse.clone(), specular.clone(), emissive.clone(), shininess, alpha, texture); } /** * @return true if the given Material has equal field data */ @Override public boolean equals(java.lang.Object obj) { if (this == obj) return true; if (obj instanceof Material) { final Material m = (Material) obj; return ambient.equals(m.ambient) && diffuse.equals(m.diffuse) && specular.equals(m.specular) && emissive.equals(m.emissive) && shininess == m.shininess && alpha == m.alpha && (texture == null ? m.texture == null : texture.equals(m.texture)); } else return false; } /** * * @return fast3d.complex.light.Material[_parameter_] **/ @Override public String toString() { return "fast3d.complex.light.Material[ambient=" + ambient + ";diffuse=" + diffuse + ";specular=" + specular + ";emissive=" + emissive + ";shininess=" + shininess + ";a=" + alpha + ";texture=" + texture + "]"; } /** * copies the field-data of the parameter to this * * @param mat * the new field-data */ public void setTo(final Material mat) { if (mat == null) return; if (mat.ambient != null) this.ambient.set(mat.ambient); if (mat.diffuse != null) this.diffuse.set(mat.diffuse); if (mat.specular != null) this.specular.set(mat.specular); if (mat.emissive != null) this.emissive.set(mat.emissive); this.texture = mat.texture; this.shininess = mat.shininess; this.alpha = mat.alpha; } }
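A short usage sketch for Material; the RGB triples are illustrative assumptions, and the three-argument Color constructor is taken from the class's own no-arg constructor:

// Illustrative values: a dull red, untextured, slightly transparent material.
Material red = new Material(
        new Color(0.2, 0.0, 0.0), // ambient
        new Color(0.8, 0.1, 0.1), // diffuse
        new Color(1.0, 1.0, 1.0), // specular
        new Color(0.0, 0.0, 0.0), // emissive
        32,                       // shininess
        0.9,                      // alpha
        null);                    // no texture
Material copy = red.clone();
assert copy.equals(red); // clone() preserves all field data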
<filename>src/app/ai/ultimate-tic-tac-toe.ts import { GameMove, GameWinner, SearchableGame } from './searchable-game'; import { TicTacToe, TicTacToeMark, TicTacToeMove } from './tic-tac-toe'; export class UltimateTicTacToeMove implements GameMove { constructor(public row: number, public col: number, public submove: TicTacToeMove) { } } export class UltimateTicTacToe implements SearchableGame { public state: Array<Array<TicTacToe>>; public scores: Array<number>; public boardActive: Array<Array<boolean>>; private moveCount: number; private currPlayer: number; private static maxPlayers: number = 2; private static boardSize: number = 3; constructor() { this.state = [ [new TicTacToe(), new TicTacToe, new TicTacToe], [new TicTacToe(), new TicTacToe, new TicTacToe], [new TicTacToe(), new TicTacToe, new TicTacToe] ]; this.boardActive = [ [true, true, true], [true, true, true], [true, true, true] ]; //[row1, row2, row3, col1, col2, col3, diag1, diag2]. this.scores = [0, 0, 0, 0, 0, 0, 0, 0]; this.moveCount = 0; this.currPlayer = 1; } getCurrentPlayer() { return this.currPlayer; } getHeuristicValue(player: number) { let sum = this.scores.reduce((a, b) => a + b) * 27; for (let i = 0; i < this.state.length; i++) { for (let j = 0; j < this.state[i].length; j++) { sum += this.state[i][j].getHeuristicValue(player); } } if (player == 1) { return sum; } return -sum; } getMoves() { let moves = []; for (let i = 0; i < this.state.length; i++) { for (let j = 0; j < this.state[i].length; j++) { if (this.boardActive[i][j] && this.state[i][j].getWinner() == GameWinner.inProgress) this.state[i][j].getMoves().forEach(move => { moves.push(new UltimateTicTacToeMove(i, j, move)); }) } } //console.log(moves); return moves; } clone(): UltimateTicTacToe { let clone = new UltimateTicTacToe(); clone.moveCount = this.moveCount; clone.currPlayer = this.currPlayer; for (let i = 0; i < this.state.length; i++) { for (let j = 0; j < this.state[i].length; j++) { clone.state[i][j] = this.state[i][j].clone(); clone.boardActive[i][j] = this.boardActive[i][j]; } } for (let i = 0; i < this.scores.length; i++) { clone.scores[i] = this.scores[i]; } //console.log(this, clone); return clone; } executeMove(move: UltimateTicTacToeMove) { let player = this.currPlayer; let subgame = this.state[move.row][move.col]; subgame.executeMove(move.submove); this.moveCount++; //console.log(subgame.getWinner(), GameWinner.inProgress) if (subgame.getWinner() > GameWinner.inProgress) { let score = this.scores; let point = player == 1 ? 
1 : -1; score[move.row] += point; score[UltimateTicTacToe.boardSize + move.col] += point; if (move.row == move.col) score[2 * UltimateTicTacToe.boardSize] += point; if (UltimateTicTacToe.boardSize - 1 - move.col == move.row) score[2 * UltimateTicTacToe.boardSize + 1] += point; } this.currPlayer = (UltimateTicTacToe.maxPlayers + 1) - player; let nextGameDone = this.state[move.submove.row][move.submove.col].getWinner() != GameWinner.inProgress; for (let r = 0; r < UltimateTicTacToe.boardSize; r++) { for (let c = 0; c < UltimateTicTacToe.boardSize; c++) { this.state[r][c].setCurrentPlayer(this.currPlayer); this.boardActive[r][c] = nextGameDone || (r == move.submove.row && c == move.submove.col); } } } private static angles = [ { from: { r: 0.5, c: 0 }, to: { r: 0.5, c: 3 } }, // row1 { from: { r: 1.5, c: 0 }, to: { r: 1.5, c: 3 } }, // row2 { from: { r: 2.5, c: 0 }, to: { r: 2.5, c: 3 } }, // row3 { from: { r: 0, c: 0.5 }, to: { r: 3, c: 0.5 } }, // col1 { from: { r: 0, c: 1.5 }, to: { r: 3, c: 1.5 } }, // col2 { from: { r: 0, c: 2.5 }, to: { r: 3, c: 2.5 } }, // col3 { from: { r: 0, c: 0 }, to: { r: 3, c: 3 } }, // diag1 { from: { r: 0, c: 3 }, to: { r: 3, c: 0 } }, // diag2 ]; getWinningAngle() { let winning = this.scores.findIndex(score => Math.abs(score) == UltimateTicTacToe.boardSize); return UltimateTicTacToe.angles[winning]; } getWinner() { for (let i = 0; i < this.scores.length; i++) { if (this.scores[i] == UltimateTicTacToe.boardSize) { return 1; } else if (this.scores[i] == -UltimateTicTacToe.boardSize) { return 2; } } if (this.moveCount >= Math.pow(UltimateTicTacToe.boardSize, 4)) { return -1; } return 0; } toString() { return this.state.map(row => row.map(cell => { return cell.toString(); }).join('|') ).join('\n-+-+-\n') } }
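A minimal driver sketch for the class above, using only methods defined in it; getWinner() returns 0 while the game is in progress, -1 for a draw, and the player number otherwise:

const game = new UltimateTicTacToe();
while (game.getWinner() === 0) {
  // Pick any legal move; getMoves() only returns moves on active, unfinished boards.
  const moves = game.getMoves();
  const move = moves[Math.floor(Math.random() * moves.length)];
  game.executeMove(move);
}
console.log(`winner: ${game.getWinner()}`);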
// NewHTTPReqDatum creates a datum from a HttpReq
func NewHTTPReqDatum(httpreq *HTTPReqDatum) *Datum {
	return &Datum{
		Val: &Datum_HttpReq{
			HttpReq: httpreq,
		},
	}
}
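A hedged usage sketch; the fields of HTTPReqDatum are not shown in the source, so the struct is left at its zero value here, and the GetHttpReq accessor is assumed from standard protobuf oneof code generation:

// Hypothetical usage of the constructor above.
d := NewHTTPReqDatum(&HTTPReqDatum{})
if req := d.GetHttpReq(); req != nil {
	// The Datum now carries the HTTP request payload in its oneof Val field.
}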
Synthesis of some acrylophenones with N-methylpiperazine and evaluation of their cytotoxicities Abstract In this study, compounds with an acrylophenone scaffold, 1-aryl-2-(N-methylpiperazinomethyl)-2-propen-1-one dihydrochlorides, were synthesized, and their chemical structures were confirmed by 1H NMR, 13C NMR and HRMS spectra. The cytotoxicities of the compounds were tested against the tumor cell lines Ca9-22 (human gingival carcinoma), HSC-2, HSC-3 and HSC-4 (human oral squamous carcinoma) and the non-tumor cell lines HGF (gingival fibroblasts), HPLF (periodontal ligament fibroblasts) and HPC (pulp cells). The potency-selectivity expression (PSE) of compound TA2, which bears a methyl substituent on the phenyl ring, identified TA2 as a lead compound for further evaluation.
import { action, computed, observable } from "mobx"; import { Cell, ICellSnapshot } from "./cell"; import { getDefaultConfig, ISimulationConfig, getUrlConfig } from "../config"; import { cellAtGrid, getCellNeighbors4, getCellNeighbors8 } from "./utils/grid-utils"; import { FloodingEngine } from "./engine/flooding-engine"; import { FloodingEngineGPU } from "./engine/flooding-engine-gpu"; import EventEmitter from "eventemitter3"; import { populateCellsData } from "./utils/load-and-initialize-cells"; import { log } from "@concord-consortium/lara-interactive-api"; const MIN_RAIN_DURATION_IN_DAYS = 1; const MAX_RAIN_DURATION_IN_DAYS = 4; export enum RainIntensity { light, medium, heavy, extreme } export enum RiverStage { low = 0, medium = 1/3, high = 2/3, crest = 1 } export type Weather = "sunny" | "partlyCloudy" | "lightRain" | "mediumRain" | "heavyRain" | "extremeRain"; export type Event = "hourChange" | "restart" | "start"; export interface ICrossSectionState { centerCell: Cell; leftCell: Cell; rightCell: Cell; leftLeveeCell: Cell; rightLeveeCell: Cell; } export interface ISimulationSnapshot { time: number; cellSnapshots: ICellSnapshot[]; } // This class is responsible for data loading and general setup. It's more focused // on management and interactions handling. Core calculations are delegated to FloodingEngine. // Also, all the observable properties should be here, so the view code can observe them. export class SimulationModel { public config: ISimulationConfig; public dataReadyPromise: Promise<void>; public engineCPU: FloodingEngine | null = null; public engineGPU: FloodingEngineGPU | null = null; // Cells are not directly observable. Changes are broadcasted using cellsSimulationStateFlag and cellsBaseStateFlag. public cells: Cell[] = []; public riverCells: Cell[] = []; public edgeCells: Cell[] = []; @observable public riverBankSegments: Cell[][] = []; @observable public time = 0; @observable public dataReady = false; @observable public simulationStarted = false; @observable public simulationRunning = false; // Simulation parameters. @observable public rainIntensity: RainIntensity = RainIntensity.medium; @observable public rainDurationInDays = 2; @observable public _initialRiverStage: number = RiverStage.medium; // Simulation outputs. @observable public crossSectionState: ICrossSectionState[] = []; // These flags can be used by view to trigger appropriate rendering. Theoretically, view could/should check // every single cell and re-render when it detects some changes. In practice, we perform these updates in very // specific moments and usually for all the cells, so this approach can be way more efficient. 
@observable public cellsSimulationStateFlag = 0; @observable public cellsBaseStateFlag = 0; @observable public leveesCount = 0; private emitter = new EventEmitter(); constructor(presetConfig: Partial<ISimulationConfig>) { this.load(presetConfig); } @computed public get ready() { return this.dataReady; } @computed public get gridWidth() { return this.config.gridWidth; } @computed public get gridHeight() { return this.config.gridHeight; } @computed public get timeInHours() { return Math.floor(this.time * this.config.modelTimeToHours); } @computed public get timeInDays() { return Math.floor(this.time * this.config.modelTimeToHours) / 24; } @computed public get initialWaterSaturation() { return this._initialRiverStage; } @computed public get remainingLevees() { return this.config.maxLevees - this.leveesCount; } public get floodArea() { // in square meters return this.engineCPU?.floodArea || 0; } public get crossSections() { return this.config.crossSections; } public getCrossSectionCell(index: number, type: "riverGauge" | "leftLevee" | "rightLevee" | "leftLandGauge" | "rightLandGauge") { const cs = this.crossSections[index]; const coords = cs[type]; return this.cellAtGrid(Math.round(this.config.gridWidth * coords.x), Math.round(this.config.gridHeight * coords.y)); } public getRiverDepth(gaugeIndex: number) { const cs = this.crossSections[gaugeIndex]; if (!cs) { return 0; } const gaugeCell = this.getCrossSectionCell(gaugeIndex, "riverGauge"); if (!gaugeCell) { return 0; } return cs.minRiverDepth + (cs.maxRiverDepth - cs.minRiverDepth) * gaugeCell.waterSaturation + gaugeCell.waterDepth; } public getCrossSectionState(index: number) { return { centerCell: this.getCrossSectionCell(index, "riverGauge"), leftCell: this.getCrossSectionCell(index, "leftLandGauge"), rightCell: this.getCrossSectionCell(index, "rightLandGauge"), leftLeveeCell: this.getCrossSectionCell(index, "leftLevee"), rightLeveeCell: this.getCrossSectionCell(index, "rightLevee") } as ICrossSectionState; } // Update observable crossSectionState array, so cross-section view can re-render itself. // It's impossible to observe method results directly (e.g. getRiverDepth). public updateCrossSectionStates() { this.crossSectionState = this.crossSections.map((g, idx) => this.getCrossSectionState(idx)); } @computed public get weather(): Weather { const rainStart = this.config.rainStartDay; if (this.timeInDays < rainStart) { return "partlyCloudy"; } if (this.timeInDays >= rainStart && this.timeInDays < rainStart + this.rainDurationInDays) { if (this.rainIntensity === RainIntensity.light) { return "lightRain"; } if (this.rainIntensity === RainIntensity.medium) { return "mediumRain"; } if (this.rainIntensity === RainIntensity.heavy) { return "heavyRain"; } return "extremeRain"; } return "sunny"; } public get currentRiverWaterIncrement() { const weather = this.weather; if (weather === "lightRain") { return this.config.rainStrength[0]; } if (weather === "mediumRain") { return this.config.rainStrength[1]; } if (weather === "heavyRain") { return this.config.rainStrength[2]; } if (weather === "extremeRain") { return this.config.rainStrength[3]; } if (weather === "partlyCloudy") { return 0; } // Sunny. 
return -0.0025; } public get engine() { return this.engineGPU || this.engineCPU; } public get waterDepthTexture() { return this.engineGPU?.getWaterDepthTexture(); } public on(event: Event, callback: any) { this.emitter.on(event, callback); } public off(event: Event, callback: any) { this.emitter.off(event, callback); } public emit(event: Event) { this.emitter.emit(event); } public cellAtGrid(gridX: number, gridY: number) { return cellAtGrid(gridX, gridY, this.cells, this.config.gridWidth, this.config.gridHeight); } public getCellNeighbors4(cell: Cell) { return getCellNeighbors4(cell, this.cells, this.config.gridWidth, this.config.gridHeight); } public getCellNeighbors8(cell: Cell) { return getCellNeighbors8(cell, this.cells, this.config.gridWidth, this.config.gridHeight); } public cellAt(xInM: number, yInM: number) { const gridX = Math.floor(xInM / this.config.cellSize); const gridY = Math.floor(yInM / this.config.cellSize); return this.cellAtGrid(gridX, gridY); } @action.bound public setRainIntensity(value: number) { this.rainIntensity = value; } @action.bound public setRainDurationInDays(value: number) { this.rainDurationInDays = Math.max(MIN_RAIN_DURATION_IN_DAYS, Math.min(MAX_RAIN_DURATION_IN_DAYS, value)); } @action.bound public setInitialWaterSaturation(value: number) { this._initialRiverStage = value; for (const cell of this.cells) { cell.initialWaterSaturation = value; cell.waterSaturation = value; } // Update observable crossSectionState array, so cross-section view can re-render itself. this.updateCrossSectionStates(); } // Adds or removes levee in the provided river bank. @action.bound public toggleLevee(riverBankIdx: number) { const leveeHeight = this.config.leveeHeight; this.riverBankSegments[riverBankIdx].forEach(cell => { cell.leveeHeight = cell.isLevee ? 0 : leveeHeight; }); // Don't use first or last one cell in segment, as they are shared between segments. const segmentCell = this.riverBankSegments[riverBankIdx][1]; const isLevee = segmentCell?.isLevee; this.leveesCount += isLevee ? 1 : -1; this.updateCellsBaseStateFlag(); this.updateCrossSectionStates(); if (isLevee) { log("LeveeAdded", { x: segmentCell.x / this.config.gridWidth, y: segmentCell.y / this.config.gridHeight }); } else { log("LeveeRemoved", { x: segmentCell.x / this.config.gridWidth, y: segmentCell.y / this.config.gridHeight }); } } @action.bound public async load(presetConfig: Partial<ISimulationConfig>) { this.dataReadyPromise = (async () => { // Configuration are joined together. Default values can be replaced by preset, and preset values can be replaced // by URL parameters. 
this.config = Object.assign(getDefaultConfig(), presetConfig, getUrlConfig()); await this.populateCellsData(); this.setDefaultInputs(); this.restart(); })(); return this.dataReadyPromise; } @action.bound public async populateCellsData() { this.dataReady = false; return populateCellsData(this.config).then(result => { this.cells = result.cells; this.edgeCells = result.edgeCells; this.riverCells = result.riverCells; this.riverBankSegments = result.riverBankSegments; if (this.config.useGPU) { this.engineGPU = new FloodingEngineGPU(this.cells, this.config); } else { this.engineCPU = new FloodingEngine(this.cells, this.config); } this.updateCellsBaseStateFlag(); this.updateCellsSimulationStateFlag(); this.updateCrossSectionStates(); this.dataReady = true; }); } @action.bound public start() { if (!this.ready) { return; } if (!this.simulationStarted) { this.simulationStarted = true; } this.simulationRunning = true; requestAnimationFrame(this.rafCallback); this.emit("start"); } @action.bound public stop() { this.simulationRunning = false; } @action.bound public setDefaultInputs() { this.setRainIntensity(RainIntensity[this.config.rainIntensity]); this.setRainDurationInDays(this.config.rainDuration); this.setInitialWaterSaturation(RiverStage[this.config.startingWaterLevel]); this.cells.forEach(c => { c.leveeHeight = 0; }); this.leveesCount = 0; this.updateCellsBaseStateFlag(); } @action.bound public restart() { this.simulationRunning = false; this.simulationStarted = false; this.cells.forEach(cell => cell.reset()); this.time = 0; if (this.config.useGPU) { this.engineGPU = new FloodingEngineGPU(this.cells, this.config); } else { this.engineCPU = new FloodingEngine(this.cells, this.config); } this.updateCrossSectionStates(); this.updateCellsSimulationStateFlag(); this.emit("restart"); // used by graphs } @action.bound public reload() { this.setDefaultInputs(); this.restart(); } @action.bound public rafCallback() { if (this.timeInDays >= this.config.simulationLength) { this.stop(); } if (!this.simulationRunning) { return; } requestAnimationFrame(this.rafCallback); this.tick(); } @action.bound public tick() { if (this.engine) { const oldTimeInHours = this.timeInHours; if (this.timeInHours === 0) { // Used by graphs. Make sure that initial point (0) is handled by graphs. this.emit("hourChange"); } for (let i = 0; i < this.config.speedMult; i += 1) { this.time += this.config.timeStep; if (this.time > this.config.rainStartDay) { // this._riverStage += this.currentRiverWaterIncrement * this.config.riverStageIncreaseSpeed; this.engine.waterSaturationIncrement = this.currentRiverWaterIncrement; } this.engine.update(); } if (this.timeInHours !== oldTimeInHours) { // Copy data from GPU to CPU. if (this.engineGPU) { const { waterDepth, waterSaturation } = this.engineGPU?.readWaterOutput(); const cellsCount = waterDepth.length; for (let i = 0; i < cellsCount; i += 1) { this.cells[i].waterDepth = waterDepth[i]; this.cells[i].waterSaturation = waterSaturation[i]; } } this.emit("hourChange"); // used by graphs } } this.updateCellsSimulationStateFlag(); // Update observable crossSectionState array, so cross-section view can re-render itself. 
this.updateCrossSectionStates(); } @action.bound public updateCellsBaseStateFlag() { this.cellsBaseStateFlag += 1; } @action.bound public updateCellsSimulationStateFlag() { this.cellsSimulationStateFlag += 1; } public snapshot(): ISimulationSnapshot { return { time: this.time, cellSnapshots: this.cells.map(c => c.snapshot()) }; } public restoreSnapshot(snapshot: ISimulationSnapshot) { this.time = snapshot.time; snapshot.cellSnapshots.forEach((cellSnapshot, idx) => { this.cells[idx].restoreSnapshot(cellSnapshot); }); this.updateCellsBaseStateFlag(); this.updateCellsSimulationStateFlag(); this.updateCrossSectionStates(); } }
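// A minimal usage sketch (illustrative; the preset fields shown and the MobX
// autorun import are assumptions layered on this file's public API):
//
//   import { autorun } from "mobx";
//
//   const model = new SimulationModel({ gridWidth: 100, gridHeight: 100 });
//   model.dataReadyPromise.then(() => {
//     model.setRainIntensity(RainIntensity.heavy);
//     model.start();
//   });
//   autorun(() => console.log("day", model.timeInDays, "weather", model.weather));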
/**
 * A change has occurred in the selection; update the action bar.
 */
@Override
public void sync() {
    final Set<Editable> edits = data.getEditables();
    vbox.getChildren().clear();
    vbox.getChildren().add(selector.createLinkedControlBox());

    Editable current = selector.current();
    if (edits.isEmpty()) {
        syncAddables();
        return;
    }
    if (edits.size() == 1) {
        current = edits.iterator().next();
    }
    if (current != null) {
        syncSingle(current);
    } else {
        syncCommon(edits);
    }
}
input1 = raw_input()
input2 = raw_input()

if input1 == input2[::-1]:
    print 'YES'
else:
    print 'NO'
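# Example (hypothetical input): with the lines "abc" and "cba" the script
# prints YES; with "abc" and "abd" (or any pair of unequal length) it prints
# NO, since strings of different lengths never compare equal, so no separate
# length check is needed.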
//%2005//////////////////////////////////////////////////////////////////////// // // Copyright (c) 2000, 2001, 2002 BMC Software; Hewlett-Packard Development // Company, L.P.; IBM Corp.; The Open Group; Tivoli Systems. // Copyright (c) 2003 BMC Software; Hewlett-Packard Development Company, L.P.; // IBM Corp.; EMC Corporation, The Open Group. // Copyright (c) 2004 BMC Software; Hewlett-Packard Development Company, L.P.; // IBM Corp.; EMC Corporation; VERITAS Software Corporation; The Open Group. // Copyright (c) 2005 Hewlett-Packard Development Company, L.P.; IBM Corp.; // EMC Corporation; VERITAS Software Corporation; The Open Group. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to // deal in the Software without restriction, including without limitation the // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or // sell copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // THE ABOVE COPYRIGHT NOTICE AND THIS PERMISSION NOTICE SHALL BE INCLUDED IN // ALL COPIES OR SUBSTANTIAL PORTIONS OF THE SOFTWARE. THE SOFTWARE IS PROVIDED // "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT // LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR // PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT // HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN // ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION // WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. // //============================================================================== // // Author: <NAME>, Hewlett-Packard Company (<EMAIL>) // // Modified By: <NAME>, Hewlett-Packard Company (<EMAIL>) // <NAME>, IBM (amita<EMAIL>) for PEP#101 // <NAME>, Hewlett-Packard Company (<EMAIL>) // <NAME>, IBM (<EMAIL>) for Bug#2498 // <NAME>, Hewlett-Packard Company (<EMAIL>) // //%///////////////////////////////////////////////////////////////////////////// #include <Pegasus/Common/Config.h> #include <Pegasus/Common/Tracer.h> #include <Pegasus/Common/Thread.h> #include <Pegasus/Common/IPC.h> #include <Pegasus/Common/System.h> PEGASUS_USING_STD; PEGASUS_NAMESPACE_BEGIN // Set the trace levels // These levels will be compared against a trace level mask to determine // if a specific trace level is enabled const Uint32 Tracer::LEVEL1 = (1 << 0); const Uint32 Tracer::LEVEL2 = (1 << 1); const Uint32 Tracer::LEVEL3 = (1 << 2); const Uint32 Tracer::LEVEL4 = (1 << 3); // Set the return codes const Boolean Tracer::_SUCCESS = 1; const Boolean Tracer::_FAILURE = 0; // Set the Enter and Exit messages const char Tracer::_METHOD_ENTER_MSG[] = "Entering method"; const char Tracer::_METHOD_EXIT_MSG[] = "Exiting method"; // Set Log messages const char Tracer::_LOG_MSG[] = "LEVEL1 may only be used with trace macros PEG_METHOD_ENTER/PEG_METHOD_EXIT."; // Initialize singleton instance of Tracer Tracer* Tracer::_tracerInstance = 0; // Set component separator const char Tracer::_COMPONENT_SEPARATOR = ','; // Set the number of defined components const Uint32 Tracer::_NUM_COMPONENTS = sizeof(TRACE_COMPONENT_LIST)/sizeof(TRACE_COMPONENT_LIST[0]); // Set the line maximum const Uint32 Tracer::_STRLEN_MAX_UNSIGNED_INT = 21; // Set the max PID and Thread ID Length const Uint32 Tracer::_STRLEN_MAX_PID_TID = 20; 
//////////////////////////////////////////////////////////////////////////////// // Tracer constructor // Constructor is private to preclude construction of Tracer objects // Single Instance of Tracer is maintained for each process. //////////////////////////////////////////////////////////////////////////////// Tracer::Tracer() { // Initialize Trace File Handler _traceHandler.reset(new TraceFileHandler()); _traceLevelMask=0; _traceComponentMask.reset(new Boolean[_NUM_COMPONENTS]); // Initialize ComponentMask array to false for (Uint32 index=0;index < _NUM_COMPONENTS; (_traceComponentMask.get())[index++]=false); } //////////////////////////////////////////////////////////////////////////////// //Tracer destructor //////////////////////////////////////////////////////////////////////////////// Tracer::~Tracer() { delete _tracerInstance; } //////////////////////////////////////////////////////////////////////////////// //Traces the given message //////////////////////////////////////////////////////////////////////////////// void Tracer::_trace( const Uint32 traceComponent, const Uint32 traceLevel, const char* fmt, va_list argList) { if ( traceLevel == LEVEL1 ) { trace( traceComponent, Tracer::LEVEL4, "%s", _LOG_MSG ); } else { if (_isTraceEnabled(traceComponent,traceLevel)) { _trace(traceComponent,"",fmt,argList); } } } //////////////////////////////////////////////////////////////////////////////// //Traces the given message - Overloaded for including FileName and Line number //////////////////////////////////////////////////////////////////////////////// void Tracer::_trace( const char* fileName, const Uint32 lineNum, const Uint32 traceComponent, const Uint32 traceLevel, const char* fmt, va_list argList) { char* message; if ( traceLevel == LEVEL1 ) { trace( traceComponent, Tracer::LEVEL4, "%s", _LOG_MSG ); } else { if (_isTraceEnabled(traceComponent,traceLevel)) { // // Allocate memory for the message string // Needs to be updated if additional info is added // message = new char[ strlen(fileName) + _STRLEN_MAX_UNSIGNED_INT + (_STRLEN_MAX_PID_TID * 2) + 8 ]; sprintf( message, #if defined(PEGASUS_OS_VMS) // // pegasus_thread_self returns long-long-unsigned. 
// "[%d:%llu:%s:%u]: ", // "[%x:%llx:%s:%u]: ", System::getPID(), pegasus_thread_self(), #else "[%d:%u:%s:%u]: ", System::getPID(), Uint32(pegasus_thread_self()), #endif fileName, lineNum); _trace(traceComponent,message,fmt,argList); delete []message; } } } //////////////////////////////////////////////////////////////////////////////// //Traces the given buffer //////////////////////////////////////////////////////////////////////////////// void Tracer::_traceBuffer( const Uint32 traceComponent, const Uint32 traceLevel, const char* data, const Uint32 size) { if ( traceLevel == LEVEL1 ) { trace( traceComponent, Tracer::LEVEL4, "%s", _LOG_MSG ); } else { if (_isTraceEnabled(traceComponent,traceLevel)) { char* tmpBuf = new char[size+1]; strncpy( tmpBuf, data, size ); tmpBuf[size] = '\0'; trace(traceComponent,traceLevel,"%s",tmpBuf); delete []tmpBuf; } } } //////////////////////////////////////////////////////////////////////////////// //Traces the given buffer - Overloaded for including FileName and Line number //////////////////////////////////////////////////////////////////////////////// void Tracer::_traceBuffer( const char* fileName, const Uint32 lineNum, const Uint32 traceComponent, const Uint32 traceLevel, const char* data, const Uint32 size) { if ( traceLevel == LEVEL1 ) { trace( traceComponent, Tracer::LEVEL4, "%s", _LOG_MSG ); } else { if ( _isTraceEnabled( traceComponent, traceLevel ) ) { char* tmpBuf = new char[size+1]; strncpy( tmpBuf, data, size ); tmpBuf[size] = '\0'; trace(fileName,lineNum,traceComponent,traceLevel,"%s",tmpBuf); delete []tmpBuf; } } } //////////////////////////////////////////////////////////////////////////////// //Traces the given string //////////////////////////////////////////////////////////////////////////////// void Tracer::_traceString( const Uint32 traceComponent, const Uint32 traceLevel, const String& traceString) { if ( traceLevel == LEVEL1 ) { trace( traceComponent, Tracer::LEVEL4, "%s", _LOG_MSG ); } else { if (_isTraceEnabled(traceComponent,traceLevel)) { trace(traceComponent,traceLevel,"%s", (const char *)traceString.getCString()); } } } //////////////////////////////////////////////////////////////////////////////// //Traces the given string - Overloaded to include the fileName and line number //of trace origin. //////////////////////////////////////////////////////////////////////////////// void Tracer::_traceString( const char* fileName, const Uint32 lineNum, const Uint32 traceComponent, const Uint32 traceLevel, const String& traceString) { if ( traceLevel == LEVEL1 ) { trace( traceComponent, Tracer::LEVEL4, "%s", _LOG_MSG ); } else { if ( _isTraceEnabled( traceComponent, traceLevel ) ) { trace(fileName,lineNum,traceComponent,traceLevel,"%s", (const char *)traceString.getCString()); } } } //////////////////////////////////////////////////////////////////////////////// //Traces the message in the given CIMException object. 
//////////////////////////////////////////////////////////////////////////////// void Tracer::_traceCIMException( const Uint32 traceComponent, const Uint32 traceLevel, CIMException cimException) { if ( traceLevel == LEVEL1 ) { trace( traceComponent, Tracer::LEVEL4, "%s", _LOG_MSG ); } else { if ( _isTraceEnabled( traceComponent, traceLevel ) ) { // get the CIMException trace message string String traceMsg = TraceableCIMException(cimException).getTraceDescription(); // trace the string _traceString(traceComponent, traceLevel, traceMsg); } } } //////////////////////////////////////////////////////////////////////////////// //Traces method entry //////////////////////////////////////////////////////////////////////////////// void Tracer::_traceEnter( const char* fileName, const Uint32 lineNum, const Uint32 traceComponent, const char* fmt, ...) { va_list argList; char* message; if (_isTraceEnabled(traceComponent,LEVEL1)) { va_start(argList,fmt); // // Allocate memory for the message string // Needs to be updated if additional info is added // message = new char[ strlen(fileName) + _STRLEN_MAX_UNSIGNED_INT + (_STRLEN_MAX_PID_TID * 2) + 8 ]; #if defined(PEGASUS_OS_VMS) // // pegasus_thread_self returns long-long-unsigned. // sprintf( message, "[%d:%llu:%s:%u]: ", System::getPID(), pegasus_thread_self(), fileName, lineNum); #else sprintf( message, "[%d:%u:%s:%u]: ", System::getPID(), Uint32(pegasus_thread_self()), fileName, lineNum); #endif _trace(traceComponent,message,fmt,argList); va_end(argList); delete []message; } } //////////////////////////////////////////////////////////////////////////////// //Traces method exit //////////////////////////////////////////////////////////////////////////////// void Tracer::_traceExit( const char* fileName, const Uint32 lineNum, const Uint32 traceComponent, const char* fmt ...) { va_list argList; char* message; if (_isTraceEnabled(traceComponent,LEVEL1)) { va_start(argList,fmt); // // Allocate memory for the message string // Needs to be updated if additional info is added // message = new char[ strlen(fileName) + _STRLEN_MAX_UNSIGNED_INT + (_STRLEN_MAX_PID_TID * 2) + 8 ]; #if defined(PEGASUS_OS_VMS) // // pegasus_thread_self returns long-long-unsigned. 
// sprintf( message, "[%d:%llu:%s:%u]: ", System::getPID(), pegasus_thread_self(), fileName, lineNum); #else sprintf( message, "[%d:%u:%s:%u]: ", System::getPID(), Uint32(pegasus_thread_self()), fileName, lineNum); #endif _trace(traceComponent,message,fmt,argList); va_end(argList); delete []message; } } //////////////////////////////////////////////////////////////////////////////// //Checks if trace is enabled for the given component and level //////////////////////////////////////////////////////////////////////////////// Boolean Tracer::_isTraceEnabled(const Uint32 traceComponent, const Uint32 traceLevel) { Tracer* instance = _getInstance(); if (traceComponent >= _NUM_COMPONENTS) { return false; } return (((instance->_traceComponentMask.get())[traceComponent]) && (traceLevel & instance->_traceLevelMask)); } //////////////////////////////////////////////////////////////////////////////// //Called by all trace interfaces to log message to trace file //////////////////////////////////////////////////////////////////////////////// void Tracer::_trace( const Uint32 traceComponent, const char* message, const char* fmt, va_list argList) { char* msgHeader; // Get the current system time and prepend to message String currentTime = System::getCurrentASCIITime(); CString timeStamp = currentTime.getCString(); // // Allocate messageHeader. // Needs to be updated if additional info is added // // Construct the message header // The message header is in the following format // timestamp: <component name> [file name:line number] if (*message != '\0') { // << Wed Jul 16 10:58:40 2003 mdd >> _STRLEN_MAX_PID_TID is not used in this format string msgHeader = new char [strlen(message) + strlen(TRACE_COMPONENT_LIST[traceComponent]) + strlen(timeStamp) + _STRLEN_MAX_PID_TID + 5]; sprintf(msgHeader,"%s: %s %s",(const char*)timeStamp, TRACE_COMPONENT_LIST[traceComponent] ,message); //delete [] msgHeader; } else { // // Since the message is blank form a string using the pid and tid // char* tmpBuffer; // // Allocate messageHeader. // Needs to be updated if additional info is added // tmpBuffer = new char[_STRLEN_MAX_PID_TID + 6]; #if defined(PEGASUS_OS_VMS) // // pegasus_thread_self returns long-long-unsigned. // sprintf(tmpBuffer, "[%u:%llu]: ", System::getPID(), pegasus_thread_self()); #else sprintf(tmpBuffer, "[%u:%u]: ", System::getPID(), Uint32(pegasus_thread_self())); #endif msgHeader = new char [ strlen(timeStamp) + strlen(TRACE_COMPONENT_LIST[traceComponent]) + strlen(tmpBuffer) + 1 + 5 ]; sprintf(msgHeader,"%s: %s %s ",(const char*)timeStamp, TRACE_COMPONENT_LIST[traceComponent] ,tmpBuffer ); delete []tmpBuffer; //delete [] msgHeader; } // Call trace file handler to write message _getInstance()->_traceHandler->handleMessage(msgHeader,fmt,argList); delete [] msgHeader; } //////////////////////////////////////////////////////////////////////////////// //Validate the trace file //////////////////////////////////////////////////////////////////////////////// Boolean Tracer::isValidFileName(const char* filePath) { String moduleName = _getInstance()->_moduleName; if (moduleName == String::EMPTY) { return (_getInstance()->_traceHandler->isValidFilePath(filePath)); } else { String extendedFilePath = String(filePath) + "." 
+ moduleName; return (_getInstance()->_traceHandler->isValidFilePath( extendedFilePath.getCString())); } } //////////////////////////////////////////////////////////////////////////////// //Validate the trace components //////////////////////////////////////////////////////////////////////////////// Boolean Tracer::isValidComponents(const String& traceComponents) { String invalidComponents; return isValidComponents(traceComponents, invalidComponents); } Boolean Tracer::isValidComponents( const String& traceComponents, String& invalidComponents) { // Validate the trace components and modify the traceComponents argument // to reflect the invalid components Uint32 position=0; Uint32 index=0; String componentName = String::EMPTY; String componentStr = String::EMPTY; Boolean validComponent=false; Boolean retCode=true; componentStr = traceComponents; invalidComponents = String::EMPTY; if (componentStr != String::EMPTY) { // Check if ALL is specified if (String::equalNoCase(componentStr,"ALL")) { return _SUCCESS; } // Append _COMPONENT_SEPARATOR to the end of the traceComponents componentStr.append(_COMPONENT_SEPARATOR); while (componentStr != String::EMPTY) { // // Get the Component name from traceComponents. // Components are separated by _COMPONENT_SEPARATOR // position = componentStr.find(_COMPONENT_SEPARATOR); componentName = componentStr.subString(0,(position)); // Lookup the index for Component name in TRACE_COMPONENT_LIST index = 0; validComponent = false; while (index < _NUM_COMPONENTS) { if (String::equalNoCase( componentName, TRACE_COMPONENT_LIST[index])) { // Found component, break from the loop validComponent = true; break; } else { index++; } } // Remove the searched componentname from the traceComponents componentStr.remove(0,position+1); if ( !validComponent ) { invalidComponents.append(componentName); invalidComponents.append(_COMPONENT_SEPARATOR); } } } else { // trace components is empty, it is a valid value so return true return _SUCCESS; } if ( invalidComponents != String::EMPTY ) { retCode = false; // // Remove the extra ',' at the end // invalidComponents.remove( invalidComponents.reverseFind(_COMPONENT_SEPARATOR)); } return retCode; } //////////////////////////////////////////////////////////////////////////////// //Set the name of the module being traced //////////////////////////////////////////////////////////////////////////////// void Tracer::setModuleName(const String& moduleName) { _getInstance()->_moduleName = moduleName; } //////////////////////////////////////////////////////////////////////////////// //Returns the Singleton instance of the Tracer //////////////////////////////////////////////////////////////////////////////// Tracer* Tracer::_getInstance() { if (_tracerInstance == 0) { _tracerInstance = new Tracer(); } return _tracerInstance; } // PEGASUS_REMOVE_TRACE defines the compile time inclusion of the Trace // interfaces. If defined the interfaces map to empty functions #ifndef PEGASUS_REMOVE_TRACE //////////////////////////////////////////////////////////////////////////////// //Set the trace file //////////////////////////////////////////////////////////////////////////////// Uint32 Tracer::setTraceFile(const char* traceFile) { if (*traceFile == 0) { return 1; } String moduleName = _getInstance()->_moduleName; if (moduleName == String::EMPTY) { return (_getInstance()->_traceHandler->setFileName(traceFile)); } else { String extendedTraceFile = String(traceFile) + "." 
+ moduleName; return (_getInstance()->_traceHandler->setFileName( extendedTraceFile.getCString())); } } //////////////////////////////////////////////////////////////////////////////// //Set the trace level //////////////////////////////////////////////////////////////////////////////// Uint32 Tracer::setTraceLevel(const Uint32 traceLevel) { Uint32 retCode = 0; switch (traceLevel) { case LEVEL1: _getInstance()->_traceLevelMask = 0x01; break; case LEVEL2: _getInstance()->_traceLevelMask = 0x03; break; case LEVEL3: _getInstance()->_traceLevelMask = 0x07; break; case LEVEL4: _getInstance()->_traceLevelMask = 0x0F; break; default: _getInstance()->_traceLevelMask = 0; retCode = 1; } return retCode; } //////////////////////////////////////////////////////////////////////////////// // Set components to be traced. //////////////////////////////////////////////////////////////////////////////// void Tracer::setTraceComponents(const String& traceComponents) { Uint32 position = 0; Uint32 index = 0; String componentName = String::EMPTY; String componentStr = traceComponents; String invalidComponents = String::EMPTY; if (componentStr != String::EMPTY) { // Check if ALL is specified if (String::equalNoCase(componentStr,"ALL")) { for (index=0; index < _NUM_COMPONENTS; (_getInstance()->_traceComponentMask.get())[index++] = true); return ; } // initialise ComponentMask array to False for (index = 0;index < _NUM_COMPONENTS; (_getInstance()->_traceComponentMask.get())[index++] = false); // Append _COMPONENT_SEPARATOR to the end of the traceComponents componentStr.append(_COMPONENT_SEPARATOR); while (componentStr != String::EMPTY) { // Get the Component name from traceComponents. // Components are separated by _COMPONENT_SEPARATOR position = componentStr.find(_COMPONENT_SEPARATOR); componentName = componentStr.subString(0,(position)); // Lookup the index for Component name in TRACE_COMPONENT_LIST index = 0; while (index < _NUM_COMPONENTS) { if (String::equalNoCase( componentName,TRACE_COMPONENT_LIST[index])) { (_getInstance()->_traceComponentMask.get())[index]=true; // Found component, break from the loop break; } else { index++; } } // Remove the searched componentname from the traceComponents componentStr.remove(0,position+1); } } else { // initialise ComponentMask array to False for (Uint32 index = 0;index < _NUM_COMPONENTS; (_getInstance()->_traceComponentMask.get())[index++] = false); } return ; } #endif PEGASUS_NAMESPACE_END
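// A minimal configuration sketch (not part of this file) using the public
// interfaces defined above; the trace-file path is a placeholder:
//
//     Tracer::setTraceFile("/tmp/pegasus.trc");   // hypothetical path
//     Tracer::setTraceLevel(Tracer::LEVEL3);      // sets mask 0x07: LEVEL1..LEVEL3
//     Tracer::setTraceComponents("ALL");          // or a comma-separated component list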
// Test that a `limit` of 0 is valid

#![recursion_limit = "0"]

macro_rules! test {
    () => {};
    ($tt:tt) => {
        test!();
    };
}

test!(test); //~ ERROR recursion limit reached while expanding `test!`

fn main() {}
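// Note (editorial): the `//~ ERROR ...` marker above is the rustc compiletest
// convention; the test harness asserts that compilation fails on that line
// with the given message, so this file is intentionally expected not to compile.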
/** * * Keeps track of monitoring modules; it's an old class somehow related with error handling from the initial Thread pool executor * Somehow outdated ... * * @since the beginning * @author Iosif Legrand, ramiro */ public class TaskManager implements ResultNotification { /** The Logger */ private static final Logger logger = Logger.getLogger(TaskManager.class.getName()); /** defulat repeat time for a job */ private static final long DEFAULT_REPET_TIME = TimeUnit.SECONDS.toMillis(30); private final SchJobExecutor ThP; final MFarm farm; final FarmMonitor main; final Map<String, MonModuleInfo> moduleInfo; final Map<String, ModuleParams> mpHash; private final Map<String, MonitoringModule> activeModules; // boolean debug; private volatile URLClassLoader my_loader; public TaskManager(FarmMonitor main, MFarm farm, Map<String, MonModuleInfo> moduleInfo, Map<String, ModuleParams> mpHash) throws Exception { this.farm = farm; this.main = main; this.moduleInfo = moduleInfo; this.mpHash = mpHash; // all accesses are synchornized activeModules = new HashMap<String, MonitoringModule>(); URLClassLoader tLoader = null; try { URL[] list = getExternalURLs(); if (list != null) { tLoader = new URLClassLoader(list, Class.forName(getClass().getName()).getClassLoader()); } } catch (Throwable e2) { logger.log(Level.SEVERE, "\n\n FAILED to initialize ClassLoader", e2); } my_loader = tLoader; ThP = new SchJobExecutor(2, 30, this); } public void notifyResult(final SchJobInt j, Object result, Throwable ex) { if (ex == null) { if (result != null) { // buffer the Results main.addResult(result); } // if (j.get_repet_time() > 0) { // re-schedule the job // // if (j instanceof MonitoringModule) { // final MonitoringModule job = (MonitoringModule) j; // job.getInfo().setErrorCount(0); // job.getNode().error_count = 0; // } // // j.set_exec_time(System.currentTimeMillis() + j.get_repet_time()); // // ThP.addJob(j); // } // // if (j.getExeTime() > 0) { // total_eff_time += j.getExeTime(); // total_jobs_eff_time++; // } return; } if (logger.isLoggable(Level.FINER)) { logger.log(Level.FINER, " TaskManager got exc from module", ex); } // if (j instanceof MonitoringModule) { // final MonitoringModule job = (MonitoringModule) j; // MNode node = job.getNode(); // if (node == null) { // return; // } // node.error_count++; // // MonModuleInfo info = job.getInfo(); // info.addErrorCount(); // info.setErrorDesc(ex.toString()); // // String tname = job.getTaskName(); // // if (logger.isLoggable(Level.FINER)) { // logger.log(Level.FINER, "ERROR Monitoring Node " + node + " task =" + tname + " ErrCount =" + info.error_count, ex); // } // // if (info.getErrorCount() >= MAX_MONITORING_ERRORS_COUNT) { // if (job.canSuspend()) { // logger.log(Level.WARNING, "SUSPEND module =" + job.getTaskName() + " at node " + node + " for " + (DEFAULT_MODULE_ERROR_TIMEOUT / 1000 / 60) + " min "); // job.set_exec_time(System.currentTimeMillis() + DEFAULT_MODULE_ERROR_TIMEOUT); // ThP.addJob(job); // return; // } // if (logger.isLoggable(Level.FINER)) { // logger.log(Level.FINER, "Module =" + job.getTaskName() + " at node " + node + " MAX_MONITORING_ERRORS_COUNT [ " + MAX_MONITORING_ERRORS_COUNT + " ] reached, but job cannot be suspended. Setting error count to 0!"); // job.getNode().error_count = 0; // } // } // // if (job.get_repet_time() > 0) { // job.set_exec_time(System.currentTimeMillis() + job.get_repet_time()); // ThP.addJob(job); // return; // } // // return; // } // logger.log(Level.SEVERE, "\n\n Not a MonitoringModule Job ???? 
Job class = " + j.getClass(), ex); } public void startMonitoring() { ThP.startMonitoring(); } synchronized MonModuleInfo createExModule(String module, MNode mn, String arg, long reptime) { MonitoringModule job = null; long sch_time; try { Class<MonitoringModule> cjob = (Class<MonitoringModule>) Class.forName("lia.Monitor.modules." + module); job = cjob.newInstance(); } catch (Throwable t) { if (logger.isLoggable(Level.FINE)) { logger.log(Level.FINE, "Cannot instantiate lia.Monitor.modules." + module, t); } } if (job == null) { // try external class loader try { job = (MonitoringModule) (my_loader.loadClass(module).newInstance()); } catch (Throwable t) { if (logger.isLoggable(Level.FINEST)) { logger.log(Level.FINEST, "Cannot instantiate " + module + " from external URL", t); } } } if (job == null) { logger.log(Level.INFO, "Failed to load class ! " + module); return null; } MonModuleInfo info = job.init(mn, arg); long repeat = 30000; if (reptime > 0) { repeat = reptime; } // TRACEPATH - HACK if (module != null && module.equals("monTracepath")) { boolean limitTracepathRate = true; try { limitTracepathRate = Boolean.valueOf(AppConfig.getProperty("lia.Monitor.Farm.TaskManager.LIMIT_TRACEPATH_RATE", "true")).booleanValue(); } catch (Throwable ignore) { if (logger.isLoggable(Level.FINER)) { logger.log(Level.FINER, " [ TaskManager ] - LIMIT_TRACEPATH_RATE Exc: ", ignore); } limitTracepathRate = true; } long tracepathRate = 120 * 1000; try { long ltr = Long.valueOf(AppConfig.getProperty("lia.Monitor.Farm.TaskManager.TRACEPATH_RATE", "120")).longValue(); tracepathRate = ltr * 1000; } catch (Throwable ignore) { if (logger.isLoggable(Level.FINER)) { logger.log(Level.FINER, " [ TaskManager ] - LIMIT_TRACEPATH_RATE Exc: ", ignore); } tracepathRate = 120 * 1000; } if (limitTracepathRate) { if (repeat < tracepathRate) { repeat = tracepathRate; } logger.log(Level.INFO, " [ TaskManager ] Limit tracepath rate @ [ " + repeat + " ] ms"); } } long now = System.currentTimeMillis(); double jit = repeat * Math.random(); sch_time = now + (long) jit; if (job.isRepetitive()) { job.set_exec_time(sch_time); job.set_repet_time(repeat); job.set_max_time(repeat); } else { job.set_exec_time(sch_time); job.set_repet_time(0); job.set_max_time(DEFAULT_REPET_TIME); } ThP.addJob(job); activeModules.put("XXXX" + module, job); if (logger.isLoggable(Level.FINE)) { logger.log(Level.FINE, " Added EXTERNAL module " + module + " arg:" + arg + "reptime: " + repeat); } if (job instanceof dbStore) { logger.log(Level.INFO, "Adding a dbStore from a module: " + module); main.addOtherDBStores((dbStore) job); } return info; } synchronized MonModuleInfo createModule(String module, MNode node) { return createModule(module, node, -1); } synchronized MonModuleInfo createModule(String module, MNode node, long repTime) { MonitoringModule job = null; try { Class<MonitoringModule> cjob = (Class<MonitoringModule>) Class.forName("lia.Monitor.modules." + module); job = cjob.newInstance(); } catch (ClassNotFoundException cne) { if (logger.isLoggable(Level.FINER)) { logger.log(Level.FINER, " [ TaskManager ] Unable to load module: " + module + ". Not found in the standard jar files. Will try the external path.", cne); } else { logger.log(Level.INFO, " [ TaskManager ] Unable to load module: " + module + ". Not found in the standard jar files. Will try the external path."); } } catch (MLModuleInstantiationException mlmie) { logger.log(Level.INFO, " [ TaskManager ] The module " + module + " failed to initialize from MonALISA jar files. 
Cause: ", mlmie); node.removeModule(module); return null; } catch (Throwable t) { logger.log(Level.WARNING, " [ TaskManager ] The module " + module + " failed to initialize from MonALISA jar files. Cause: ", t); } if (job == null) { try { if (module.indexOf("!") > 0) { // first remove the old naming URL[] oldurl = my_loader != null ? my_loader.getURLs() : new URL[0]; URL newurl = new URL(module.substring(0, module.indexOf("!"))); module = module.substring(module.lastIndexOf("!") + 1); boolean bExists = false; logger.log(Level.INFO, "new url : " + newurl.toString()); for (int i = 0; i < oldurl.length; i++) { logger.log(Level.INFO, "Old URL : " + oldurl[i].toString()); if (oldurl[i].equals(newurl)) { if (logger.isLoggable(Level.FINE)) { logger.log(Level.FINE, "already exists, skipping"); } bExists = true; break; } } if (!bExists) { if (logger.isLoggable(Level.FINE)) { logger.log(Level.FINE, "adding the new url"); } URL[] newurls = new URL[oldurl.length + 1]; for (int j = 0; j < oldurl.length; j++) { newurls[j] = oldurl[j]; } newurls[newurls.length - 1] = newurl; my_loader = new URLClassLoader(newurls, Class.forName("lia.Monitor.Farm.TaskManager").getClassLoader()); } } job = (MonitoringModule) (my_loader.loadClass(module).newInstance()); } catch (Throwable t) { logger.log(Level.WARNING, " [ TaskManager ] Failed to load module " + module + " from external path also. Cause: ", t); } } if (job == null) { logger.log(Level.WARNING, " Failed to load class ! " + module); node.removeModule(module); return null; } StringTokenizer st = new StringTokenizer(module, ".\\/"); while (st.hasMoreTokens()) { module = st.nextToken(); } String key = node.getKey(module); String param = null; // to be changed long repeat = 30000; // boolean byRequest = true; if ((key != null) && (mpHash.containsKey(key))) { ModuleParams minfo = mpHash.remove(node.getKey(module)); param = minfo.param; repeat = minfo.repeat; } MonModuleInfo info = job.init(node, param); info.name = module; if (job.isRepetitive()) { job.set_repet_time((repTime < 0) ? 
repeat : repTime); job.set_max_time(repeat); } else { job.set_repet_time(0); job.set_max_time(DEFAULT_REPET_TIME); } if (byRequest) { node.addModule(module); } node.addParameters(job.ResTypes()); ThP.addJob(job); activeModules.put(node.getKey(module), job); if (logger.isLoggable(Level.FINEST)) { logger.log(Level.FINEST, " Added module " + module + " for Node [ " + node + ", " + node.getIPaddress() + " ]"); } moduleInfo.put(node.getKey(module), info); if (job instanceof dbStore) { logger.log(Level.INFO, "Adding a dbStore from a module: " + module); main.addOtherDBStores((dbStore) job); } return info; } synchronized void deleteModule(String module, MNode node) { String key = node.getKey(module); if (activeModules.containsKey(key)) { MonitoringModule job = activeModules.remove(key); MonModuleInfo info = moduleInfo.remove(key); job.stop(); job.set_repet_time(-1); main.ed.removeModule(node, module, info.ResTypes); if (logger.isLoggable(Level.FINER)) { logger.log(Level.FINER, " removed module " + module + " for " + node); } } else { if (logger.isLoggable(Level.FINER)) { logger.log(Level.FINER, " Failed to remove MOdule " + module + " from node" + node); } } } public String getIPaddress(MNode n, boolean checkNodeIPAddress) { String ad = null; try { ad = (InetAddress.getByName(n.getName())).getHostAddress(); n.ipAddress = ad; } catch (Throwable t) { if (checkNodeIPAddress) { logger.log(Level.WARNING, " Can not get ip for " + n, t); } else { logger.log(Level.FINE, " Can not get ip for " + n, t); } } return ad; } public synchronized void task_init() { final boolean checkNodeIPAddress = AppConfig.getb("lia.Monitor.Farm.TaskManager.checkNodeIPAddress", true); if (logger.isLoggable(Level.FINE)) { logger.log(Level.FINE, " [ TaskManager ] [ task_init ] lia.Monitor.Farm.TaskManager.checkNodeIPAddress = " + checkNodeIPAddress); } Vector<MCluster> clus = farm.getClusters(); for (final MCluster cl : clus) { if (cl.externalModule != null) { MNode mn = new MNode(cl.externalNode, null, cl, farm); String nodeIP = getIPaddress(mn, checkNodeIPAddress); if (checkNodeIPAddress && nodeIP == null) { continue; } int indTime = cl.externalParam.indexOf("%^&"); long reptime = 0; if (indTime != -1) { String sTime = cl.externalParam.substring(indTime + 3); try { reptime = Long.parseLong(sTime) * 1000; } catch (Throwable t) { } cl.externalParam = cl.externalParam.substring(0, indTime); } createExModule(cl.externalModule, mn, cl.externalParam, reptime); } } Vector<MNode> nodes = farm.getNodes(); StringBuilder sb = new StringBuilder(); for (final MNode n : nodes) { String nodeIP = getIPaddress(n, checkNodeIPAddress); if (checkNodeIPAddress && nodeIP == null) { logger.log(Level.WARNING, "Can not find the IP address for Node =" + n + " REMOVED it! 
"); main.ed.removeNode(n.getClusterName(), n); continue; } Vector<String> modules = (Vector<String>) n.getModuleList().clone(); for (final String mod : modules) { MonModuleInfo infoc = createModule(mod, n); if (infoc == null) { main.ed.removeModule(n, mod, null); sb.append("\nErrors adding Module " + mod + " for Node [ " + n + "/" + n.getIPaddress() + " ] "); } else { sb.append("\nModule " + mod + " ADDED for Node " + n + "/" + n.getIPaddress() + " ] "); } } } // external modules for a farm if (main != null && main.externalModules != null && main.externalModules.length > 0) { for (int iem = 0; iem < main.externalModules.length; iem++) { try { MNode mn = new MNode(); mn.farm = farm; createExModule(main.externalModules[iem], mn, main.externalModParams[iem], Long.valueOf(main.externalModRTime[iem]).longValue()); logger.log(Level.INFO, "Created external param for a Farm [ " + main.externalModules[iem] + ", " + main.externalModParams[iem] + ", " + main.externalModRTime[iem] + " ]"); } catch (Throwable tt) { logger.log(Level.WARNING, "Cannot create external param for a Farm [ " + main.externalModules[iem] + ", " + main.externalModParams[iem] + ", " + main.externalModRTime[iem] + " ]", tt); } } } logger.log(Level.CONFIG, sb.toString()); // self monitoring part ! new MCluster("MonaLisa", farm); try { main.ConfigAdd("MonaLisa", "localhost", null, 0); } catch (Exception e) { if (logger.isLoggable(Level.FINEST)) { logger.log(Level.FINEST, "General Exception in ConfigAdd", e); } } MNode me = (farm.getCluster("MonaLisa")).getNode("localhost"); String nodeIP = getIPaddress(me, true); if (nodeIP == null) { logger.log(Level.SEVERE, " localhost does not have IP ??!?!"); } myMon self = new myMon(main); self.init(me, null); me.addModule("mona"); me.addParameters(self.ResTypes()); long now = System.currentTimeMillis(); self.set_exec_time(now + 1115); self.set_repet_time(60 * 1000); self.set_max_time(DEFAULT_REPET_TIME); ThP.addJob(self); new MCluster("MonaLisa_LocalSysMon", farm); try { main.ConfigAdd("MonaLisa_LocalSysMon", "localhost", null, 0); } catch (Exception e) { if (logger.isLoggable(Level.FINEST)) { logger.log(Level.FINEST, "General Exception in ConfigAdd", e); } } MNode meMon = (farm.getCluster("MonaLisa_LocalSysMon")).getNode("localhost"); String nodeIP2 = getIPaddress(meMon, true); if (nodeIP2 == null) { logger.log(Level.SEVERE, " localhost does not have IP ??!?!"); } if(AppConfig.getb("lia.util.threads.exportmon", true)) { MCluster cl = new MCluster("MonaLisa_ThPStat", farm); MNode mn = new MNode(cl.externalNode, null, cl, farm); createExModule("monThPStat", mn, null, 60 * 1000); } if (Boolean.valueOf(AppConfig.getProperty("lia.monitor.Farm.use_SNMP", "false")).booleanValue()) { createModule("snmp_Load", me); createModule("snmp_IO", me); createModule("snmp_CPU", me); } else { createModule("monProcLoad", me, 60 * 1000); createModule("monProcIO", me, 60 * 1000); createModule("monProcStat", me, 60 * 1000); createModule("monDiskIOStat", meMon, 60 * 1000); } createModule("monMLStat", me, 60 * 1000); } public synchronized void changeRepTime(MNode node, String module, long time) { MonitoringModule job = activeModules.get(node.getKey(module)); if (job != null) { job.set_repet_time(time); if (logger.isLoggable(Level.FINER)) { logger.log(Level.FINER, "Changing repetition time for " + node.toString() + " " + module); } } else { if (logger.isLoggable(Level.FINER)) { logger.log(Level.FINER, " Can not find node / module tochange repetition time "); } } } private static URL[] getExternalURLs() { try { 
            List<URL> _returnURLs = new LinkedList<URL>();
            String[] strURL = AppConfig.getVectorProperty("lia.Monitor.CLASSURLs");
            if (strURL != null && strURL.length > 0) {
                for (final String possibleURL : strURL) {
                    try {
                        _returnURLs.add(new URL(possibleURL));
                        logger.log(Level.INFO, "[ lia.Monitor.CLASSURLs ] external URL: " + possibleURL + " added to URLs");
                    } catch (MalformedURLException ex) {
                        logger.log(Level.WARNING, " [ lia.Monitor.CLASSURLs ] GOT A BAD URL" + possibleURL + " ...SKIPPING IT!!", ex);
                    } catch (Throwable t) {
                        if (logger.isLoggable(Level.FINER)) {
                            logger.log(Level.FINER, "GOT a general Exception", t);
                        }
                    }
                }
            }
            return _returnURLs.toArray(new URL[_returnURLs.size()]);
        } catch (Throwable t) { /* this should not happen */
            if (logger.isLoggable(Level.FINER)) {
                logger.log(Level.FINER, "GOT a general Exception", t);
            }
        }
        return null;
    }
}
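// A minimal start-up sketch (hypothetical wiring; FarmMonitor, MFarm and the
// two maps come from the surrounding MonALISA code base):
//
//     TaskManager tm = new TaskManager(main, farm, moduleInfo, mpHash);
//     tm.task_init();        // instantiates internal + configured monitoring modules
//     tm.startMonitoring();  // hands scheduling to the SchJobExecutor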
// ------------------------------------------------------------ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License (MIT). See License.txt in the repo root for license information. // ------------------------------------------------------------ #pragma once namespace Reliability { namespace ReconfigurationAgentComponent { class ReconfigurationAgentProxy; struct ActionListInfo { ProxyActionsListTypes::Enum Name; bool ImpactsServiceAvailability; void WriteTo(Common::TextWriter & w, Common::FormatOptions const &) const; }; // Represents a FailoverUnit in RA inside the service host class FailoverUnitProxy : public std::enable_shared_from_this<FailoverUnitProxy> , public ReplicationComponent::IReplicatorHealthClient { DENY_COPY(FailoverUnitProxy); public: typedef std::map<Federation::NodeId, Reliability::ReplicaDescription> ConfigurationReplicaStore; class ConfigurationUtility; FailoverUnitProxy( Reliability::FailoverUnitDescription const & failoverUnitDescription, ReconfigurationAgentProxy & reconfigurationAgentProxy, std::wstring const & runtimeId, Common::AsyncOperationSPtr const & root) : isOpenForBusiness_(true), isMarkedForClose_(false), isMarkedForAbort_(false), currentlyExecutingActionsLists_(), lock_(), remoteReplicas_(), configurationReplicas_(), messageStage_(ProxyMessageStage::None), replicaState_(FailoverUnitProxyStates::Closed), replicatorState_(ReplicatorStates::Closed), failoverUnitDescription_(failoverUnitDescription), runtimeId_(runtimeId), replicaDescription_(), serviceDescription_(), currentServiceRole_(ReplicaRole::Unknown), currentReplicatorRole_(ReplicaRole::Unknown), currentReplicaState_(ReplicaStates::InCreate), lastUpdateEpochPrimaryEpochValue_(Epoch::InvalidEpoch()), replicaOpenMode_(ReplicaOpenMode::Invalid), statelessService_(), statefulService_(), statefulServicePartition_(), statelessServicePartition_(), replicator_(), configurationStage_(ProxyConfigurationStage::Current), catchupResult_(CatchupResult::NotStarted), isCatchupCancel_(false), reconfigurationAgentProxy_(reconfigurationAgentProxy), root_(root), isServiceAvailabilityImpacted_(false), isDeleted_(false) {} virtual ~FailoverUnitProxy(); __declspec(property(get = get_IsDeleted)) bool IsDeleted; bool get_IsDeleted() const { return isDeleted_; } __declspec(property(get=get_ReplicatorFactoryObj)) Reliability::ReplicationComponent::IReplicatorFactory & ReplicatorFactory; Reliability::ReplicationComponent::IReplicatorFactory & get_ReplicatorFactoryObj() const { return reconfigurationAgentProxy_.ReplicatorFactory; } __declspec(property(get=get_TransactionalReplicatorFactoryObj)) TxnReplicator::ITransactionalReplicatorFactory & TransactionalReplicatorFactory; TxnReplicator::ITransactionalReplicatorFactory & get_TransactionalReplicatorFactoryObj() const { return reconfigurationAgentProxy_.TransactionalReplicatorFactory; } __declspec(property(get=get_ApplicationHostObj)) Hosting2::IApplicationHost & ApplicationHostObj; Hosting2::IApplicationHost & get_ApplicationHostObj() const { return reconfigurationAgentProxy_.ApplicationHostObj; } __declspec(property(get=get_ReconfigurationAgentProxyId)) ReconfigurationAgentProxyId const & RAPId; ReconfigurationAgentProxyId const & get_ReconfigurationAgentProxyId() const { return reconfigurationAgentProxy_.Id; } // Property accessors __declspec(property(get=get_IsOpened)) bool const IsOpened; bool const get_IsOpened() const { return state_ == FailoverUnitProxyStates::Opened; }
__declspec(property(get=get_IsOpening)) bool const IsOpening; bool const get_IsOpening() const { return state_ == FailoverUnitProxyStates::Opening; } __declspec(property(get=get_IsClosed)) bool const IsClosed; bool const get_IsClosed() const { return state_ == FailoverUnitProxyStates::Closed; } __declspec(property(get=get_IsClosing)) bool const IsClosing; bool const get_IsClosing() const { return state_ == FailoverUnitProxyStates::Closing; } __declspec(property(get=get_IsValid)) bool const IsValid; bool const get_IsValid() const { return failoverUnitDescription_.CurrentConfigurationEpoch != Epoch::InvalidEpoch(); } __declspec(property(get=get_IsServiceCreated)) bool const IsServiceCreated; bool const get_IsServiceCreated() { return statelessService_ || statefulService_; } __declspec(property(get=get_IsReplicatorCreated)) bool const IsReplicatorCreated; bool const get_IsReplicatorCreated() const { return (replicator_ != nullptr); } __declspec(property(get = get_DoesReplicatorSupportQueries)) bool const DoesReplicatorSupportQueries; bool const get_DoesReplicatorSupportQueries() const { return replicator_ && replicator_->DoesReplicatorSupportQueries; } __declspec(property(get=get_CurrentConfigurationEpoch)) Reliability::Epoch const CurrentConfigurationEpoch; Reliability::Epoch const get_CurrentConfigurationEpoch() const { return failoverUnitDescription_.CurrentConfigurationEpoch; } __declspec(property(get=get_IsReconfiguring)) bool IsReconfiguring; bool get_IsReconfiguring() const { return failoverUnitDescription_.PreviousConfigurationEpoch != Epoch::InvalidEpoch(); } __declspec(property(get=get_IsSwapPrimary)) bool IsSwapPrimary; bool get_IsSwapPrimary() const { return IsReconfiguring && CurrentConfigurationRole == ReplicaRole::Secondary && PreviousConfigurationRole == ReplicaRole::Primary; } __declspec(property(get=get_FailoverUnitId)) Reliability::FailoverUnitId const FailoverUnitId; Reliability::FailoverUnitId const get_FailoverUnitId() const { return failoverUnitDescription_.FailoverUnitId; } __declspec(property(get=get_CurrentConfigurationRole)) ReplicaRole::Enum const CurrentConfigurationRole; ReplicaRole::Enum const get_CurrentConfigurationRole() const { return replicaDescription_.CurrentConfigurationRole; } __declspec(property(get=get_PreviousConfigurationRole)) ReplicaRole::Enum PreviousConfigurationRole; ReplicaRole::Enum get_PreviousConfigurationRole() const { return replicaDescription_.PreviousConfigurationRole; } __declspec(property(get=get_CurrentReplicatorRole)) ReplicaRole::Enum const CurrentReplicatorRole; ReplicaRole::Enum const get_CurrentReplicatorRole() const { return currentReplicatorRole_; } __declspec(property(get=get_CurrentServiceRole)) ReplicaRole::Enum const CurrentServiceRole; ReplicaRole::Enum const get_CurrentServiceRole() const { return currentServiceRole_; } __declspec(property(get = get_AreServiceAndReplicatorRoleCurrent)) bool AreServiceAndReplicatorRoleCurrent; bool get_AreServiceAndReplicatorRoleCurrent() const { return currentServiceRole_ == replicaDescription_.CurrentConfigurationRole && currentReplicatorRole_ == replicaDescription_.CurrentConfigurationRole; } __declspec(property(get = get_IsStatefulService)) bool IsStatefulService; bool get_IsStatefulService() const { return serviceDescription_.IsStateful; } __declspec(property(get=get_State, put=set_State)) FailoverUnitProxyStates::Enum State; FailoverUnitProxyStates::Enum get_State() const { return state_; } void set_State(FailoverUnitProxyStates::Enum const & state) { state_ = state; } 
__declspec(property(get=get_IsRunningActions)) bool IsRunningActions; bool get_IsRunningActions() const { return currentlyExecutingActionsLists_.size() > 0; } __declspec(property(get=get_ConfigurationStage, put=set_ConfigurationStage)) ProxyConfigurationStage::Enum ConfigurationStage; ProxyConfigurationStage::Enum get_ConfigurationStage() const { return configurationStage_; } void set_ConfigurationStage(ProxyConfigurationStage::Enum const & configurationStage) { configurationStage_ = configurationStage; } __declspec(property(get=get_IsCatchupCancel)) bool IsCatchupCancel; bool get_IsCatchupCancel() const { return isCatchupCancel_; } __declspec(property(get=get_IsCatchupPending)) bool IsCatchupPending; bool get_IsCatchupPending() const; __declspec(property(get = get_CatchupResult)) CatchupResult::Enum CatchupResult; CatchupResult::Enum get_CatchupResult() const { return catchupResult_; } __declspec(property(get=get_ReplicaDescription, put=set_ReplicaDescription)) ReplicaDescription ReplicaDescription; Reliability::ReplicaDescription const& get_ReplicaDescription() const{ return replicaDescription_; } void set_ReplicaDescription(Reliability::ReplicaDescription const & replicaDescription) { replicaDescription_ = replicaDescription; } __declspec(property(get=get_ServiceDescription, put=set_ServiceDescription)) ServiceDescription ServiceDescription; Reliability::ServiceDescription const & get_ServiceDescription() const { return serviceDescription_; } void set_ServiceDescription(Reliability::ServiceDescription const & serviceDescription) { serviceDescription_ = serviceDescription; } __declspec(property(get=get_FailoverUnitDescription, put=set_FailoverUnitDescription)) FailoverUnitDescription FailoverUnitDescription; Reliability::FailoverUnitDescription const & get_FailoverUnitDescription() const { return failoverUnitDescription_; } Reliability::FailoverUnitDescription & get_FailoverUnitDescription() { return failoverUnitDescription_; } void set_FailoverUnitDescription(Reliability::FailoverUnitDescription const & failoverUnitDescription) { failoverUnitDescription_ = failoverUnitDescription; } __declspec(property(get=get_CurrentReplicaState, put=set_CurrentReplicaState)) ReplicaStates::Enum CurrentReplicaState; ReplicaStates::Enum const get_CurrentReplicaState() { return currentReplicaState_; } void set_CurrentReplicaState(ReplicaStates::Enum const & currentReplicaState) { currentReplicaState_ = currentReplicaState; } __declspec(property(get=get_ReplicaOpenMode, put=set_ReplicaOpenMode)) Reliability::ReplicaOpenMode::Enum ReplicaOpenMode; Reliability::ReplicaOpenMode::Enum & get_ReplicaOpenMode() { return replicaOpenMode_; } void set_ReplicaOpenMode(Reliability::ReplicaOpenMode::Enum const & replicaOpenMode) { replicaOpenMode_ = replicaOpenMode; } __declspec(property(get=get_AreOperationManagersOpenForBusiness)) bool AreOperationManagersOpenForBusiness; bool get_AreOperationManagersOpenForBusiness() const { return isOpenForBusiness_; } __declspec(property(get = get_IsPreWriteStatusCatchupEnabled)) bool IsPreWriteStatusCatchupEnabled; bool get_IsPreWriteStatusCatchupEnabled() const { return FailoverConfig::GetConfig().IsPreWriteStatusRevokeCatchupEnabled ; } __declspec(property(get = get_RuntimeId)) std::wstring const & RuntimeId; std::wstring const & get_RuntimeId() const { return runtimeId_; } bool TryAddToCurrentlyExecutingActionsLists( ProxyActionsListTypes::Enum const & plannedActions, bool impactServiceAvailability, FailoverUnitProxyContext<ProxyRequestMessageBody> & msgContext); bool 
TryAddToCurrentlyExecutingActionsLists( ProxyActionsListTypes::Enum const & plannedActions, bool impactServiceAvailability, FailoverUnitProxyContext<ProxyRequestMessageBody> & msgContext, __out bool & cancelNeeded); ProxyOutgoingMessageUPtr ComposeReadWriteStatusRevokedNotification(Common::AcquireExclusiveLock & lock); void SendReadWriteStatusRevokedNotification(ProxyOutgoingMessageUPtr &&); void DoneCancelReplicatorCatchupReplicaSet(); void Reuse(Reliability::FailoverUnitDescription const & failoverUnitDescription, std::wstring const & runtimeId); bool TryMarkForAbort(); // Delete implies that the FUP shared_ptr has been removed from the LFUPM bool TryDelete(); void AcquireLock(); void ReleaseLock(); Common::AsyncOperationSPtr BeginOpenInstance( __in Hosting2::IApplicationHost & applicationHost, Common::AsyncCallback const & callback, Common::AsyncOperationSPtr const & parent); ProxyErrorCode EndOpenInstance(Common::AsyncOperationSPtr const & asyncOperation, __out std::wstring & serviceLocation); Common::AsyncOperationSPtr BeginCloseInstance( Common::AsyncCallback const & callback, Common::AsyncOperationSPtr const & parent); ProxyErrorCode EndCloseInstance(Common::AsyncOperationSPtr const & asyncOperation); Common::AsyncOperationSPtr BeginOpenReplica( __in Hosting2::IApplicationHost & applicationHost, Common::AsyncCallback const & callback, Common::AsyncOperationSPtr const & parent); ProxyErrorCode EndOpenReplica(Common::AsyncOperationSPtr const & asyncOperation); Common::AsyncOperationSPtr BeginOpenReplicator( Common::AsyncCallback const & callback, Common::AsyncOperationSPtr const & parent); ProxyErrorCode EndOpenReplicator(Common::AsyncOperationSPtr const & asyncOperation, __out std::wstring & replicationEndpoint); Common::AsyncOperationSPtr BeginChangeReplicaRole( Common::AsyncCallback const & callback, Common::AsyncOperationSPtr const & parent); ProxyErrorCode EndChangeReplicaRole(Common::AsyncOperationSPtr const & asyncOperation, __out std::wstring & serviceLocation); Common::AsyncOperationSPtr BeginChangeReplicatorRole( Common::AsyncCallback const & callback, Common::AsyncOperationSPtr const & parent); ProxyErrorCode EndChangeReplicatorRole(Common::AsyncOperationSPtr const & asyncOperation); Common::AsyncOperationSPtr BeginCloseReplica( Common::AsyncCallback const & callback, Common::AsyncOperationSPtr const & parent); ProxyErrorCode EndCloseReplica(Common::AsyncOperationSPtr const & asyncOperation); Common::AsyncOperationSPtr BeginCloseReplicator( Common::AsyncCallback const & callback, Common::AsyncOperationSPtr const & parent); ProxyErrorCode EndCloseReplicator(Common::AsyncOperationSPtr const & asyncOperation); void AbortPartition(); void AbortReplicator(); void AbortReplica(); void AbortInstance(); void Abort(bool keepFUPOpen); void CancelOperations(); void Cleanup(); void DoneExecutingActionsList(ProxyActionsListTypes::Enum const & completedActions); void PublishEndpoint(Transport::MessageUPtr && outMsg); void ProcessReplicaEndpointUpdatedReply(Reliability::ReplicaDescription const & msgReplica); void ProcessReadWriteStatusRevokedNotificationReply(Reliability::ReplicaDescription const & msgReplica); void UpdateServiceDescription(Reliability::ServiceDescription const & newServiceDescription); ProxyErrorCode UpdateConfiguration( ProxyRequestMessageBody const & msgBody, UpdateConfigurationReason::Enum reason); void OnReconfigurationStarting(); void OnReconfigurationEnding(); void OpenPartition(); void AssertCatchupNotStartedCallerHoldsLock() const; Common::AsyncOperationSPtr 
BeginReplicatorBuildIdleReplica( Reliability::ReplicaDescription const & idleReplicaDescription, Common::AsyncCallback const & callback, Common::AsyncOperationSPtr const & parent); ProxyErrorCode EndReplicatorBuildIdleReplica(Common::AsyncOperationSPtr const & asyncOperation); Common::AsyncOperationSPtr BeginReplicatorOnDataLoss( Common::AsyncCallback const & callback, Common::AsyncOperationSPtr const & parent); ProxyErrorCode EndReplicatorOnDataLoss(Common::AsyncOperationSPtr const & asyncOperation, int64 & lastLSN); ProxyErrorCode ReplicatorRemoveIdleReplica(Reliability::ReplicaDescription const & idleReplicaDescription); Common::AsyncOperationSPtr BeginReplicatorUpdateEpoch( Common::AsyncCallback const & callback, Common::AsyncOperationSPtr const & parent); ProxyErrorCode EndReplicatorUpdateEpoch(Common::AsyncOperationSPtr const & asyncOperation); ProxyErrorCode ReplicatorGetStatus(__out FABRIC_SEQUENCE_NUMBER & firstLsn, __out FABRIC_SEQUENCE_NUMBER & lastLsn); ProxyErrorCode ReplicatorGetQuery(__out ServiceModel::ReplicatorStatusQueryResultSPtr & result); Common::ErrorCode ReplicaGetQuery(__out ServiceModel::ReplicaStatusQueryResultSPtr & result) const; Common::AsyncOperationSPtr BeginReplicatorCatchupReplicaSet( CatchupType::Enum type, Common::AsyncCallback const & callback, Common::AsyncOperationSPtr const & parent); ProxyErrorCode EndReplicatorCatchupReplicaSet(Common::AsyncOperationSPtr const & asyncOperation); ProxyErrorCode CancelReplicatorCatchupReplicaSet(); Common::AsyncOperationSPtr BeginMarkForCloseAndDrainOperations( bool isAbort, Common::AsyncCallback const & callback, Common::AsyncOperationSPtr const & parent); Common::ErrorCode EndMarkForCloseAndDrainOperations(Common::AsyncOperationSPtr const & asyncOperation); // This method does not honor the locks. // It is the caller's responsibility to ensure that there are no concurrent operations against the FUP Common::AsyncOperationSPtr BeginClose( Common::AsyncCallback const & callback, Common::AsyncOperationSPtr const & parent); Common::ErrorCode EndClose(Common::AsyncOperationSPtr const & asyncOperation); void CleanupEventHandler(); void RegisterFailoverUnitProxyForCleanupEvent(); void UnregisterFailoverUnitProxyForCleanupEvent(); Common::ErrorCode ReportLoad(std::vector<LoadBalancingComponent::LoadMetric> &&); Common::ErrorCode ReportReplicatorHealth( Common::SystemHealthReportCode::Enum reportCode, std::wstring const & dynamicProperty, std::wstring const & extraDescription, FABRIC_SEQUENCE_NUMBER sequenceNumber, Common::TimeSpan const & timeToLive); Common::ErrorCode ReportFault(FaultType::Enum); Common::ErrorCode ReportHealth( ServiceModel::HealthReport && healthReport, ServiceModel::HealthReportSendOptionsUPtr && sendOptions); Common::ApiMonitoring::ApiCallDescriptionSPtr CreateApiCallDescription( Common::ApiMonitoring::ApiNameDescription && nameDescription, Reliability::ReplicaDescription const & replicaDescription, bool isHealthReportEnabled, bool traceServiceType); void StartOperationMonitoring( Common::ApiMonitoring::ApiCallDescriptionSPtr const & description); void StopOperationMonitoring( Common::ApiMonitoring::ApiCallDescriptionSPtr const & description, Common::ErrorCode const & error); void TraceBeforeOperation( Common::AcquireExclusiveLock &, Common::ApiMonitoring::InterfaceName::Enum, Common::ApiMonitoring::ApiName::Enum api) const; void TraceBeforeOperation( Common::AcquireExclusiveLock &, Common::ApiMonitoring::InterfaceName::Enum, Common::ApiMonitoring::ApiName::Enum api, Common::TraceCorrelatedEventBase
const &) const; void WriteTo(Common::TextWriter&, Common::FormatOptions const &) const; std::wstring ToString() const; static std::string AddField(Common::TraceEvent & traceEvent, std::string const & name); void FillEventData(Common::TraceEventContext & context) const; Common::ErrorCode IsQueryAllowed(bool & isReplicatorQueryRequired) const; ServiceModel::DeployedServiceReplicaDetailQueryResult GetQueryResult() const; bool IsConfigurationMessageBodyStaleCallerHoldsLock( std::vector<Reliability::ReplicaDescription> const & replicaDescriptions) const; bool CheckConfigurationMessageBodyForUpdatesCallerHoldsLock( std::vector<Reliability::ReplicaDescription> const & replicaDescriptions, bool shouldApply); void UpdateReadWriteStatus(); ProxyErrorCode FinalizeDemoteToSecondary(); private: // FUP async operations class CloseAsyncOperation; class WaitForDrainAsyncOperation; class UserApiInvoker; class UserApiInvokerAsyncOperationBase; // Service async operations class OpenInstanceAsyncOperation; class CloseInstanceAsyncOperation; class OpenReplicaAsyncOperation; class ChangeReplicaRoleAsyncOperation; class CloseReplicaAsyncOperation; // Replicator async operations class OpenReplicatorAsyncOperation; class ChangeReplicatorRoleAsyncOperation; class CloseReplicatorAsyncOperation; class ReplicatorBuildIdleReplicaAsyncOperation; class ReplicatorCatchupReplicaSetAsyncOperation; class ReplicatorOnDataLossAsyncOperation; class ReplicatorUpdateEpochAsyncOperation; class ReadWriteStatusCalculator; static const Common::Global<ReadWriteStatusCalculator> ReadWriteStatusCalculatorObj; // This class is used to access fup under lock friend class LockedFailoverUnitProxyPtr; // ProxyOutgoingMessage queries fup for message resend friend class ProxyOutgoingMessage; bool ShouldResendMessage(Transport::MessageUPtr const& message); void GetReplicaSetConfiguration( ::FABRIC_REPLICA_SET_CONFIGURATION & replicaSetConfiguration, std::vector<::FABRIC_REPLICA_INFORMATION> & replicas, int setCount, int setNonDroppedCount); void UpdateReadAndWriteStatus(Common::AcquireExclusiveLock &); bool HasMinReplicaSetAndWriteQuorum(Common::AcquireExclusiveLock &, bool includePCCheck) const; void CancelBuildIdleReplicaOperations(); void OpenForBusiness(); void CloseForBusiness(bool isAbort); bool IsStatefulServiceFailoverUnitProxy() const { return serviceDescription_.IsStateful && statefulService_; } bool IsStatelessServiceFailoverUnitProxy() const { return !serviceDescription_.IsStateful && statelessService_; } void AddActionList(ProxyActionsListTypes::Enum const & plannedActions, bool impactsServiceAvailability); void TransitionReplicatorToClosed(Common::AcquireExclusiveLock &); void TransitionServiceToClosed(Common::AcquireExclusiveLock &); // Class member variables std::vector<ActionListInfo> currentlyExecutingActionsLists_; bool isServiceAvailabilityImpacted_; mutable Common::ExclusiveLock lock_; Reliability::FailoverUnitDescription failoverUnitDescription_; Reliability::ReplicaDescription replicaDescription_; Reliability::ServiceDescription serviceDescription_; std::wstring runtimeId_; ConfigurationReplicaStore configurationReplicas_; std::map<Federation::NodeId, Reliability::ReconfigurationAgentComponent::ReplicaProxy> remoteReplicas_; FailoverUnitProxyLifeCycleState state_; ReplicatorStates::Enum replicatorState_; FailoverUnitProxyStates::Enum replicaState_; ProxyMessageStage::Enum messageStage_; Reliability::ReplicaRole::Enum currentServiceRole_; Reliability::ReplicaRole::Enum currentReplicatorRole_; ReplicaStates::Enum 
currentReplicaState_; Epoch lastUpdateEpochPrimaryEpochValue_; Reliability::ReplicaOpenMode::Enum replicaOpenMode_; ProxyConfigurationStage::Enum configurationStage_; CatchupResult::Enum catchupResult_; bool isCatchupCancel_; // Used to keep track of whether a catchup cancel has been requested bool isOpenForBusiness_; bool isMarkedForClose_; bool isMarkedForAbort_; Common::AsyncOperationSPtr drainAsyncOperation_; ServiceOperationManagerUPtr serviceOperationManager_; ReplicatorOperationManagerUPtr replicatorOperationManager_; // FailoverUnitProxy holds on to the root, which keeps it alive until it is destructed, // which in turn keeps the ReconfigurationAgentProxy alive ReconfigurationAgentProxy & reconfigurationAgentProxy_; Common::AsyncOperationSPtr root_; ComProxyStatelessServiceUPtr statelessService_; ComProxyStatefulServiceUPtr statefulService_; mutable Common::ComPointer<ComStatefulServicePartition> statefulServicePartition_; Common::ComPointer<ComStatelessServicePartition> statelessServicePartition_; ComProxyReplicatorUPtr replicator_; bool isDeleted_; ReadWriteStatusState readWriteStatusState_; // Used to cache the loads reported by the FT for query class ReportedLoadStore { public: void AddLoad(std::vector<LoadBalancingComponent::LoadMetric> const & loads); void OnFTChangeRole(); void OnFTOpen(); std::vector<ServiceModel::LoadMetricReport> GetForQuery() const; private: void Clear(); class Data { public: Data(); Data(uint value, Common::DateTime timestamp); ServiceModel::LoadMetricReport ToLoadValue(std::wstring const & metricName) const; private: Common::DateTime timeStamp_; uint value_; }; std::map<std::wstring, Data> loads_; }; ReportedLoadStore reportedLoadStore_; }; } }
Brain xenografts: the effect of cyclosporin A on graft survival. Animal models of Parkinson's disease and Alzheimer's disease have shown dramatic functional improvement after transplantation of embryonic neurons into denervated regions of the adult brain. Because of the ethical and logistic problems associated with the use of human embryonic brain tissue, cross-species transplants are an attractive alternative. An experimental model of cross-species brain transplantation was developed to evaluate cell survival in untreated and cyclosporin A (CyA)-treated animals. Cholinergic ventral neurons from embryonic mice were transplanted into the frontal lobes of 18 adult Sprague-Dawley rats using a cell suspension technique. Nine animals were treated for 13 days with CyA (10 mg/kg/day) and nine were not treated. Twelve weeks after transplantation, frozen sections through the transplant volume were obtained. Alternate sections were stained with hematoxylin and eosin and for acetylcholinesterase. Cell counts through a 2 cu mm volume incorporating the transplant were compared to a contralateral control volume. Eight of the nine untreated transplants were successful (mean transplant cells +/- standard error of the mean: 90.7 +/- 19.4 per 2 cu mm). All nine CyA-treated transplants survived, with mean transplant counts 28.7 cells/2 cu mm greater than in untreated transplants (p ≤ 0.05, Wilcoxon matched-pairs signed-ranks test). It is concluded that: 1) this model is useful for quantitating transplant cell survival; 2) untreated xenografts survive well; and 3) a 13-day course of CyA improved long-term graft survival.
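The group comparison named above can be sketched with SciPy's implementation of the Wilcoxon matched-pairs signed-ranks test. The per-animal counts below are hypothetical stand-ins, since the abstract reports only summary statistics; this illustrates the method, not a reanalysis:

# Sketch of the Wilcoxon matched-pairs signed-ranks test named in the abstract.
# The paired cell counts per 2 cu mm are hypothetical; only summaries are given above.
from scipy.stats import wilcoxon

cya_treated = [118, 95, 130, 102, 141, 99, 125, 110, 134]
untreated = [90, 72, 101, 88, 95, 61, 97, 84, 93]

statistic, p_value = wilcoxon(cya_treated, untreated)
print(f"W = {statistic}, p = {p_value:.3f}")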
import pandas as pd
from tqdm import tqdm


def load_gallery_features(path_in, gal):
    # Collect the unique media ids (third element of each gallery entry) and
    # load the pickled feature encodings stored under each id's directory.
    mid_set = sorted(set(el[2] for el in gal))
    return {
        mid: pd.read_pickle(path_in.joinpath(mid).joinpath("encodings.pkl"))
        for mid in tqdm(mid_set)
    }
package com.springboot.cloud.gateway.admin.models.entities; import lombok.Getter; import lombok.Setter; import javax.persistence.*; /** * @author chonmb Email:<EMAIL> * @date 2021/4/21 9:50 */ @Entity @Table(name = "gateway_route") @Getter @Setter public class GatewayRoute { @Id @GeneratedValue(strategy = GenerationType.IDENTITY) private Long id; @Column(nullable = false, unique = true) private String name; @Column(nullable = false) private String uri; @Column(nullable = false, columnDefinition = "TEXT") private String predicates; @Column(columnDefinition = "TEXT", nullable = false) private String filters; @Column(nullable = false) private Integer ordered; }
// ListenAndServe starts a new dogstatsd server, listening for UDP datagrams on // addr and forwarding the metrics to handler. func ListenAndServe(addr string, handler Handler) (err error) { var conn net.PacketConn if conn, err = net.ListenPacket("udp", addr); err != nil { return } err = Serve(conn, handler) return }
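A server like this can be exercised with any client that emits the dogstatsd wire format over UDP. A minimal Python sketch, assuming the server listens on 127.0.0.1:8125 (the conventional statsd port); the metric name and tag are made up:

# Send one counter increment in the dogstatsd wire format ("name:value|type|#tags").
# The address, metric name, and tag are placeholders for illustration.
import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
    sock.sendto(b"page.views:1|c|#env:dev", ("127.0.0.1", 8125))
finally:
    sock.close()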
{-# OPTIONS -Wall -Werror #-}
import Data.List (group)

{- (**) Determine the prime factors of a given positive integer.
   Construct a list containing the prime factors and their multiplicity.
   Example in Haskell:
   λ> prime_factors_mult 315
   [(3,2),(5,1),(7,1)] -}

-- Almost the same as Q35; only the tuple bookkeeping is added.
prime_factors_mult :: Int -> [(Int, Int)]
prime_factors_mult n = helper n 2
  where
    helper 1 _ = []
    helper num f
      | f*f > num = [(num, 1)]
      | otherwise = if num `mod` f == 0
                      then f `addTup` helper (num `div` f) f
                      else helper num (f+1)
      where
        x `addTup` [] = [(x, 1)]
        x `addTup` tups@((z,w):ls)
          | x == z    = (z, w+1):ls
          | otherwise = (x, 1):tups

-- Model solution
prime_factors_mult' :: Int -> [(Int, Int)]
prime_factors_mult' n = map swap $ encode $ primeFactors n
  where swap (x,y) = (y,x)

-- from Q35
primeFactors :: Int -> [Int]
primeFactors n = primeFactorsHelper' n 2
  where
    primeFactorsHelper' num f
      | f * f > num      = [num]
      | num `mod` f == 0 = f : primeFactorsHelper' (num `div` f) f
      | otherwise        = primeFactorsHelper' num (f + 1)

-- from Q10
-- import Data.List (group)
encode :: Eq a => [a] -> [(Int, a)]
encode xs = map (\x -> (length x, head x)) $ group xs

-- Alternative solution (nearly identical, just written differently)
prime_factors_mult'' :: Int -> [(Int, Int)]
prime_factors_mult'' = map pairUp . group . primeFactors
  where pairUp xs = (head xs, length xs)
// Checks if the mouse is hovering over a region
bool region_hit(int x, int y, int w, int h)
{
    return gui.mouse.x >= x && gui.mouse.y >= y &&
           gui.mouse.x < x + w && gui.mouse.y < y + h;
}
/** * Sends a given token to the player's start position. * @param tIndex int number [0-3] representing the index of the token in the tokens array * @see #tokens */ public void start(int tIndex) { this.tokens[tIndex].setPosition(this.pIndex * START_DISTANCE); this.tokens[tIndex].setOut(true); this.tokensOut.add(tIndex); }
import { TeacherEntity } from './../teacher.entity';
import { RoleType } from './../../../common/constants/role-type';
import { ApiProperty, ApiPropertyOptional } from '@nestjs/swagger';
import { AbstractDto } from './../../../common/dto/AbstractDto';

export class TeacherDto extends AbstractDto {
    @ApiProperty()
    name: string;

    @ApiProperty({ type: 'enum', enum: RoleType, default: RoleType.TEACHER })
    role: RoleType;

    @ApiPropertyOptional()
    avatarLink: string;

    @ApiProperty()
    teacherCode: string;

    @ApiProperty()
    email: string;

    constructor(teacher: TeacherEntity) {
        super(teacher);
        this.name = teacher.name;
        this.role = teacher.role;
        this.avatarLink = teacher.avatarLink;
        this.email = teacher.email;
        this.teacherCode = teacher.teacherCode;
    }
}
import logging
from pprint import pformat

logger = logging.getLogger(__name__)


def log_multiline(log_function, log_text, title=None, prefix=''):
    logger.debug('log_multiline(%s, %s, %s, %s) called', log_function, repr(log_text), repr(title), repr(prefix))
    if isinstance(log_text, str):
        logger.debug('log_text is type str')
        log_list = log_text.splitlines()
    elif isinstance(log_text, list) and isinstance(log_text[0], str):
        logger.debug('log_text is type list with first element of type text')
        log_list = log_text
    else:
        logger.debug('log_text is type ' + type(log_text).__name__)
        log_list = pformat(log_text).splitlines()
    log_function(prefix + '=' * 80)
    if title:
        log_function(prefix + title)
        log_function(prefix + '-' * 80)
    for line in log_list:
        log_function(prefix + line)
    log_function(prefix + '=' * 80)
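A short usage sketch for the helper above; the logger name and payload are illustrative:

# Route a pretty-printed dict through a standard-library logger.
import logging

logging.basicConfig(level=logging.INFO)
example_logger = logging.getLogger("example")

log_multiline(example_logger.info, {"run_id": 42, "status": "ok"},
              title="Job summary", prefix="  ")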
In his final New Rule of the night, Bill Maher disputed the idea thrown out by some that atheism is a religion. It’s a refrain you hear from people who can’t stand both groups and believe that dogma is dogma, plain and simple, but Maher argued that treating atheism like a religion would be like saying “abstinence is a sex position.” After all, when was the last time a non-believer ever claimed to see the silhouette of Christopher Hitchens on the side of a tree? RELATED: Bill Maher Round Table Discusses The Dangers Of Religion Even though Maher produced a documentary four years ago whose title was literally a portmanteau of “religion” and “ridiculous”, he admitted that atheism isn’t even a hobby of his. “That’s the nice thing about being an atheist, it takes up so little of your time,” he said. But Maher expressed concerns about the “growing trend” of people trying to equate religion, which is based in faith and things claiming to be beyond normal human comprehension, with evidence-based science. Charles Darwin may look like the kind of person we’ve always pictured a god to be, but when was the last time someone organized meetings or built huge buildings in reverence to Darwinism? Maher did concede that atheism is not necessarily a bastion of pure intellectualism, but equating belief with “nonbelief” is not rational thinking. “We’re not two sides of the same coin, and you don’t get to put your unreason up on the same shelf with my reason. Your stuff has to go over there, on the shelf with Zeus and Thor and the Kraken, with the stuff that is not evidence-based, stuff that religious people never change their mind about, no matter what happens.” Maher said he would be perfectly willing to believe in a god or Jesus if there was any evidence to back up their existence, but until he sees any sign that they exist, he’s perfectly content to stay on the side of doubt. But, Maher concluded, if people are going to keep insisting atheism is a religion, then atheists should logically be able to get away with the same things religious people do on a daily basis. He brought up how Mitt Romney‘s late father-in-law was posthumously baptized, and donned a magician’s hat to perform an “unbaptism” ceremony. An unbaptism, for those unfamiliar with the practice, is very much like an unbirthday, except with far less tea. RELATED: Bill Maher: All Religions Are ‘Magic Tricks’ But Mormonism Is ‘Novelty Shop’ Magic Trick Maher mumbled a string of nonsense words, including what I’m pretty sure was a line from Harry Potter, and finally declared, “I call upon the Mormon spirits to leave your body the fuck alone.” Watch the video below, courtesy of HBO:
/** * Implementation of the Iterator. * */ private static class IntRangeIterator extends RangeIterator { int end; Operator operator; int start; int step; int value; IntRangeIterator(IntRange r) { this.value = this.start = r.start; this.end = r.end; this.operator = r.operator; if (operator == null) { throw new IllegalArgumentException("operator must be \"*\" or \"+\""); } this.step = r.step; } public boolean hasNext() { return value <= end; } public Object next() { int currentValue = value; value = step(); return new Integer(currentValue); } public int step() { if (operator == Operator.MULTIPLY) { return value * step; } else { return value + step; } } }
export * from "./src/SQLite";
export * from "./src/SQLiteORM";
package no.dusken.momus.service; import no.dusken.momus.dto.PageOrder; import no.dusken.momus.exceptions.RestException; import no.dusken.momus.model.LayoutStatus; import no.dusken.momus.model.Page; import no.dusken.momus.dto.PageContent; import no.dusken.momus.dto.PageId; import no.dusken.momus.model.Publication; import no.dusken.momus.model.websocket.Action; import no.dusken.momus.service.repository.*; import org.springframework.stereotype.Service; import javax.servlet.http.HttpServletResponse; import javax.transaction.Transactional; import java.util.ArrayList; import java.util.HashSet; import java.util.List; @Service @Transactional public class PageService { private final PageRepository pageRepository; private final LayoutStatusRepository layoutStatusRepository; private final ArticleRepository articleRepository; private final AdvertRepository advertRepository; private final MessagingService messagingService; public PageService( PageRepository pageRepository, LayoutStatusRepository layoutStatusRepository, ArticleRepository articleRepository, AdvertRepository advertRepository, MessagingService messagingService ) { this.pageRepository = pageRepository; this.layoutStatusRepository = layoutStatusRepository; this.articleRepository = articleRepository; this.advertRepository = advertRepository; this.messagingService = messagingService; } public Page getPageById(Long id) { return pageRepository.findById(id).orElseThrow(() -> new RestException("Not found", HttpServletResponse.SC_NOT_FOUND)); } public List<Page> getPagesInPublication(Long publicationId) { return pageRepository.findByPublicationId(publicationId); } public PageOrder getPageOrderInPublication(Long publicationId) { return new PageOrder(publicationId, pageRepository.getPageOrderByPublicationId(publicationId)); } public List<Page> createEmptyPagesInPublication(Long publicationId, Integer afterPage, Integer numPages) { List<PageId> pageOrder = pageRepository.getPageOrderByPublicationId(publicationId); List<Page> createdPages = new ArrayList<>(); Publication publication = new Publication(); publication.setId(publicationId); LayoutStatus layoutStatus = layoutStatusRepository.findByName("Ukjent"); for (int i = 0; i < numPages; i++) { Page newPage = Page.builder() .pageNr(afterPage + i + 1) .publication(publication) .layoutStatus(layoutStatus) .build(); newPage = pageRepository.save(newPage); createdPages.add(newPage); messagingService.broadcastEntityAction(newPage, Action.CREATE); pageOrder.add(afterPage + i, new PageId(newPage.getId())); } pageRepository.flush(); setPageOrder(pageOrder, publicationId); return createdPages; } public Page updateMetadata(Long id, Page page) { Page existing = getPageById(id); existing.setNote(page.getNote()); existing.setLayoutStatus(page.getLayoutStatus()); existing.setDone(page.isDone()); Page saved = pageRepository.saveAndFlush(existing); messagingService.broadcastEntityAction(saved, Action.UPDATE); return saved; } public void setPageOrder(PageOrder pageOrder) { setPageOrder(pageOrder.getOrder(), pageOrder.getPublicationId()); } public void setPageOrder(List<PageId> pageOrder, Long publicationId) { Integer pageNr = 1; for (PageId page : pageOrder) { pageRepository.updatePageNr(pageNr++, page.getId()); } messagingService.broadcastEntityAction(new PageOrder(publicationId, pageOrder), Action.UPDATE); } public void setContent(Long id, PageContent content) { Page existing = getPageById(id); existing.setArticles(new HashSet<>(articleRepository.findAllById(content.getArticles()))); existing.setAdverts(new 
HashSet<>(advertRepository.findAllById(content.getAdverts()))); pageRepository.saveAndFlush(existing); messagingService.broadcastEntityAction(content, Action.UPDATE); } public void delete(Long id) { Page page = pageRepository.findById(id).orElseThrow(() -> new RestException("Not found", HttpServletResponse.SC_NOT_FOUND)); Long publicationId = page.getPublication().getId(); List<PageId> order = pageRepository.getPageOrderByPublicationId(publicationId); order.remove(new PageId(id)); setPageOrder(order, publicationId); messagingService.broadcastEntityAction(page, Action.DELETE); pageRepository.deleteById(id); } }
def resolve_one(self, name):
    # Depth-first resolution: resolve all dependencies first, then compute
    # this node exactly once. Assumes the dependency graph is acyclic.
    if name in self.resolved:
        return
    for dep in self.deps.get(name, ()):
        self.resolve_one(dep)
    self.compute(name)
    self.resolved.add(name)
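The method reads like part of a dependency-resolution class with deps, resolved, and compute attributes. A minimal self-contained harness showing the post-order traversal in action; the class name, dependency table, and compute stub are all hypothetical:

# Hypothetical harness for resolve_one: deps maps each name to its dependencies,
# compute is stubbed out, and resolution happens depth-first (no cycle detection,
# matching the method above).
class Resolver:
    def __init__(self, deps):
        self.deps = deps
        self.resolved = set()

    def compute(self, name):
        print("computing", name)

    def resolve_one(self, name):
        if name in self.resolved:
            return
        for dep in self.deps.get(name, ()):
            self.resolve_one(dep)
        self.compute(name)
        self.resolved.add(name)

resolver = Resolver({"app": ["lib", "config"], "lib": ["config"]})
resolver.resolve_one("app")  # prints: config, lib, app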
High-fat diets are detrimental to the lipid profile, glucose metabolism and body composition of Wistar rats: the role of fatty acid type and consumption duration Purpose The purpose of this study is to evaluate the nutritional effects in Wistar rats of supplementation with stand-alone saturated fatty acid (SFA) or monounsaturated fatty acid (MUFA), the replacement of SFA by MUFA and the combination of both (SFA + MUFA) over a long period of time (13 weeks). Design/methodology/approach In total, 30 Wistar rats were used and randomly assigned to receive (n = 6): control – lab chow; lard (L20%) – lab chow with added lard (20%); olive oil (O20%) – lab chow with added olive oil (20%); lard replacement with olive oil (L20%–O20%) – lab chow with added lard (20%) for the first six weeks, replaced by lab chow with added olive oil (20%) for the final seven weeks of the trial; lard combination with olive oil (L10% + O10%) – lab chow with added lard (10%) and olive oil (10%). Food and caloric intake, weight gain, food and energy efficiency, body mass index, bone mineral composition and blood biochemistry were evaluated. Findings All diets with added fatty acids showed higher energy intake (p < 0.001), weight gain (p = 0.01), accumulation of adipose tissue (p = 0.02) and food and energy efficiency (p = 0.01) compared to the control group. All groups exhibited higher levels of blood triglycerides compared to the control group (p = 0.02). In addition, the L10% + O10% group developed hyperglycemia (p < 0.001); the L20% group showed higher levels of non-HDL cholesterol (p = 0.04); and the L20%–O20% group exhibited a higher triglyceride/HDL-c ratio (p = 0.04) relative to the control. Originality/value These results indicate that, regardless of fatty acid type, consumption of fatty acids in large quantities over long periods can cause obesity and dyslipidemia.
package snowball import ( "testing" ) var ( username = "username" password = "password" ) func TestClient_GetDetail(t *testing.T) { client := New(username, password) client.Login() list := client.GetDetail("AMD,RGSE") if len(list) != 2 { t.Error("failed") } }
// Acquire will get one of the idle resources, or create a new one. func (rp *ResourcePool) Acquire(ctx context.Context) (resource Resource, err error) { if rp.IsClosed() { return nil, ErrPoolClosed } select { case <-ctx.Done(): return nil, ctx.Err() default: } acq := acquireMessage{ ctx: ctx, rch: make(chan Resource), ech: make(chan error), } select { case rp.acqchan <- acq: case <-ctx.Done(): return nil, ctx.Err() } select { case resource := <-acq.rch: return resource, nil case err := <-acq.ech: return nil, err case <-ctx.Done(): return nil, ctx.Err() } }
Here’s a quick heads up for those who pre-purchased Dragon Quest 8: Journey of the Cursed King through the Nintendo eShop. If you live in North America, you can now pre-download the game. You already noticed that by reading the headline, but I have to fill this space with something. According to Nintendo Everything, the file will take up 24,413 blocks, which is just over 3GB so you may want to make some room if needed. Dragon Quest 8 is the retelling of Level-5’s classic PS2 RPG and features two new playable characters, more side quests, dungeons and full voice acting during cutscenes which feature important plot details. There’s much more to it than that though, and you can find out everything we know about it so far through the game’s release date post. Dragon Quest 8: Journey of the Cursed King will be released for 3DS in Europe and North America on January 20.
“Yes … everyone seems to be asleep,” Trent Reznor whispers at the opening of the ostentatiously-titled “Dear World,” the second song on the new Nine Inch Nails EP, his voice digitally teased like the anodyne textbot in Radiohead’s “Fitter Happier.” The line returns at the end of the song as a kind of provocation: Even the most loyal Nine Inch Nails apologist might consider whether such gestures might not have been better left in the ‘90s, where they felt more essential to the times. Today’s listeners have Echo Dots in their homes to bleat similarly toneless directives. Shouldn’t Reznor be able to plump deeper than this Palahniukian mumbo-jumbo fifty-one years into his life, nearly thirty years into his career, on the other side of a visionary, definitively mature stint soundtracking films? But as the song wears on, something approaching self-awareness begins to reveal itself through a new sense of high-dramatic extroversion. This is not the filter-less, flagellating Reznor of old, purging himself on record as on 1994’s The Downward Spiral and 1999’s The Fragile. Here, he’s a veteran Hamlet, donning the cape and snatching up the skull once again, but by now, he knows the cues well. The outlook here feels more measured and removed. That’s not to say that NIN is shamelessly retreading old territory on Not the Actual Events—just that they have a focused, well-reasoned idea of what the project is designed to do. Reznor and now-diehard collaborator Atticus Ross (currently the band’s only other member) are working as they do when writing for film. They’re not exorcising personal trauma open-endedly, as Reznor did on the band’s best-known work—they’re painting more universal impressions in broad, dissipated strokes. On any self-respecting NIN release, Reznor always finds some reason to justify razing modern civilization, but on Not the Actual Events—one of the first definitively post-Trump projects from a major musical artist—his argument feels more convincing than ever. “It is coming, and you didn’t even notice,” Reznor murmurs hoarsely, mentioning a “trajectory in decline,” chastising both himself and a world he doesn’t “recognize anymore.” One can’t help but think: Same, man. Not the Actual Events is probably the grimiest Nine Inch Nails release since The Fragile. Rather than running the gamut between overdriven steamrolling and receding, glitchy ambience as on most of the work Reznor loosed between 1994 and 2008, the EP realizes a specific, portentous mood from several equivalent angles. Even for the band who made surveillance-state rock opera Year Zero, this is unusually theatrical, broad-stroke music. Its most striking song, “She’s Gone,” is driven by a spectral timpani tattoo, recalling something the undead pirate crew in a Pirates of the Caribbean sequel might bang out from the poop deck. A close reference point for Reznor’s vocal delivery in the verses is Blood Money-era Tom Waits, and Mariqueen Maandig’s groaned, cloying chorus finds the overlap point between grindcore and “The Monster Mash.” This sounds like a disaster waiting to happen—NIN turned goth-Plastic Ono Band—but, somehow, it is as playful as it is ominous, transportive rather than kitschy. Much like the other four songs on the EP, it’s a huge, somewhat cartoonish piece of music forming a logical and controlled simulacrum of the contemporary themes it speaks to: self-denial, bull-headedness, greed, and chaos—horrifying or glorious, always inevitable. 
A five-song EP might feel slight for NIN, regents of the double-or-quadruple-album. But the band has been given to meaningful concision once before: Not the Actual Events bears some similarities to 1992 mini-LP Broken, by embracing a fuzzier, scrappier sonic landscape than the album that preceded it, and factoring in something approaching the blues. On EP highlight “Burning Bright (Field on Fire)” Reznor edges toward stoner metal with a riff that sounds like a decelerated permutation of “Smells Like Teen Spirit” by way of Dave Navarro. The lyrics fit the Cobain reference point well, focusing on alienation and self-hatred, and burning the whole world down in the wake of a storm of locusts—standard Trent stuff, and what millions of people in this country feel when they read about the president-elect’s newest cabinet appointment. It all works pretty well. Reznor elevates his droning riff with the help of sneering, tape-echo-drowned speak-song—a fresh device for him. As a succinct document of the gymnastics he and Ross are capable of, Not the Actual Events is a brief but impressive audition tape. It’s hard to think of another noted studio nut who could pull off feats like smelting the dazzling electrofunk arpeggiator-Tetris of “Dear World”—catnip for “Closer” fans—down into a rippling pond of delay, or reducing unforgiving, Grohl-driven fastball “The Idea of You” into yowling sonic booms. It’s a hopeful omen for a fertile 2017, in which Nine Inch Nails, by Reznor’s account, is slated to release “two new major works.” With Ross’s help, Reznor has been able to expand the library of styles he can compress and tease out of recognition. If there’s any time Nine Inch Nails might be poised to pull off two whole non-tedious releases in one year, it’s now.
Mere days before video footage of Democratic presidential nominee Hillary Clinton seizing and apparently fainting before having her limp body shoved into a black van hit the media, comedian and left-wing activist Sarah Silverman told TMZ that only "f*cking a**holes" worry about the former secretary of state's health. Real unfortunate timing for Sarah, here. After asking the abortion-loving and former (sellout) Bernie Sanders supporter about Burning Man, TMZ asked Silverman about Hillary's health. "What do you think about Hillary Clinton's health? Do you think she's healthy?" asked TMZ, in a video posted Thursday. "Yeah, I think she's healthy," answered Silverman, now facing the camera. "I think anyone bringing up her health is a f*cking a**hole." An annoyed Silverman unconvincingly added: "She f*cking, she's as healthy as…Believe me, she's fine." Hilariously, when the TMZ cameraman asked Silverman to confirm her statement--which she is clearly feeling uncomfortable with--the blue-mouthed comedian essentially recanted: "I have no idea," she said in anger, before dropping the paparazzo. Inquiring about Hillary's health has been labeled as conspiratorial, sexist and off-limits by those on the left, the media especially. But this Sunday is throwing a wrench in such a narrative. As The Daily Wire reported, Hillary abruptly left a 9/11 commemorative ceremony at Ground Zero in New York City after reports of a "medical episode." Video footage soon emerged of the Democratic nominee shaking, fainting and then being shoved into a black van: Hillary Clinton 9/11 NYC pic.twitter.com/q9YnsjTxss — Zdenek Gazda (@zgazda66) September 11, 2016 Hillary was not taken to the hospital, but to her daughter Chelsea's nearby apartment. The Hillary camp later admitted that the candidate was diagnosed with pneumonia on Friday, after repeatedly blaming "allergies" for Hillary's odd visible ailments and abnormally long coughing fits. Earlier this month, released notes from the Hillary interview with the FBI over her damning email scandal also contributed to the mounting health concerns of the former First Lady. "In December of 2012, Clinton suffered a concussion and then around the New Year had a blood clot," stated the FBI report. "Based on her doctor's advice, she could only work at State for a few hours a day and could not recall every briefing she received." Although Silverman would like to bury all questions surrounding her new candidate's health, the concerns are glaringly legitimate. No, you're not a "f*cking asshole" for inquiring, nor are you a sexist. (Unless Senator John McCain retroactively "identifies" as a woman, the "sexist" theory holds zero validity per recent election history.)
import logging import salt.exceptions # Define the module's virtual name __virtualname__ = "clock" log = logging.getLogger(__name__) def __virtual__(): return __virtualname__ def help(): """ Shows this help information. """ return __salt__["sys.doc"](__virtualname__) def status(): """ Show current time settings. """ ret = {} res = __salt__["cmd.run"]("timedatectl status") pairs = (l.split(": ") for l in res.splitlines()) for k, v in pairs: ret[k.strip().lower().replace(" ", "_")] = v.strip() return ret def set(value, adjust_system_clock=False): """ Set system time. Arguments: - value (str): Time string to set. Optional arguments: - adjust_system_clock (bool): Default is 'False'. """ ret = {} cmd = ["timedatectl"] if adjust_system_clock: cmd.append("--adjust-system-clock") cmd.append("set-time '{:s}'".format(value)) res = __salt__["cmd.run_all"](" ".join(cmd)) if res["retcode"] != 0: raise salt.exceptions.CommandExecutionError(res["stderr"]) return ret def ntp(enable=True): """ Enable or disable network time synchronization. Optional arguments: - enable (bool): Default is 'True'. """ ret = {} res = __salt__["cmd.run_all"]("timedatectl set-ntp '{:d}'".format(enable)) if res["retcode"] != 0: raise salt.exceptions.CommandExecutionError(res["stderr"]) return ret
import numpy as np

# Note: domino_correction is a helper defined elsewhere in the original module.
def domino_rotation(asolns, brd):
    # Append each 90/180/270-degree rotation of brd to asolns unless an equal
    # board is already present.
    nsoln = domino_correction(np.rot90(brd, 1))
    if True not in [np.array_equal(nsoln, soln) for soln in asolns]:
        asolns.append(nsoln)
    nsoln = np.rot90(brd, 2)
    if True not in [np.array_equal(nsoln, soln) for soln in asolns]:
        asolns.append(nsoln)
    nsoln = domino_correction(np.rot90(brd, 3))
    if True not in [np.array_equal(nsoln, soln) for soln in asolns]:
        asolns.append(nsoln)
    return asolns
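A usage sketch for the function above. Since domino_correction lives elsewhere in the original module, an identity stand-in is defined here purely so the snippet runs:

# Identity stand-in for the real domino_correction helper (defined elsewhere).
import numpy as np

def domino_correction(board):
    return board

board = np.array([[1, 1],
                  [2, 2]])
solutions = domino_rotation([board], board)
print(len(solutions))  # the original board plus its distinct rotations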
// Cleanup cleans up the emulator state. func Cleanup(state *EmulatorState) { state.process.Kill() err := os.RemoveAll(state.dataDir) if err != nil { log.Panicf("Failed to remove emulator data dir: %v", err) } }
/** * * Holds a 3x3 matrix. * * @author cix_foo <[email protected]> * @version $Revision$ * $Id$ */ public class Matrix3f extends Matrix implements Serializable { private static final long serialVersionUID = 1L; public float m00, m01, m02, m10, m11, m12, m20, m21, m22; /** * Constructor for Matrix3f. Matrix is initialised to the identity. */ public Matrix3f() { super(); setIdentity(); } /** * Load from another matrix * @param src The source matrix * @return this */ public Matrix3f load(Matrix3f src) { return load(src, this); } /** * Copy source matrix to destination matrix * @param src The source matrix * @param dest The destination matrix, or null if a new matrix is to be created * @return The copied matrix */ public static Matrix3f load(Matrix3f src, Matrix3f dest) { if (dest == null) dest = new Matrix3f(); dest.m00 = src.m00; dest.m10 = src.m10; dest.m20 = src.m20; dest.m01 = src.m01; dest.m11 = src.m11; dest.m21 = src.m21; dest.m02 = src.m02; dest.m12 = src.m12; dest.m22 = src.m22; return dest; } /** * Load from a float buffer. The buffer stores the matrix in column major * (OpenGL) order. * * @param buf A float buffer to read from * @return this */ public Matrix load(FloatBuffer buf) { m00 = buf.get(); m01 = buf.get(); m02 = buf.get(); m10 = buf.get(); m11 = buf.get(); m12 = buf.get(); m20 = buf.get(); m21 = buf.get(); m22 = buf.get(); return this; } /** * Load from a float buffer. The buffer stores the matrix in row major * (maths) order. * * @param buf A float buffer to read from * @return this */ public Matrix loadTranspose(FloatBuffer buf) { m00 = buf.get(); m10 = buf.get(); m20 = buf.get(); m01 = buf.get(); m11 = buf.get(); m21 = buf.get(); m02 = buf.get(); m12 = buf.get(); m22 = buf.get(); return this; } /** * Store this matrix in a float buffer. The matrix is stored in column * major (openGL) order. * @param buf The buffer to store this matrix in */ public Matrix store(FloatBuffer buf) { buf.put(m00); buf.put(m01); buf.put(m02); buf.put(m10); buf.put(m11); buf.put(m12); buf.put(m20); buf.put(m21); buf.put(m22); return this; } /** * Store this matrix in a float buffer. The matrix is stored in row * major (maths) order. * @param buf The buffer to store this matrix in */ public Matrix storeTranspose(FloatBuffer buf) { buf.put(m00); buf.put(m10); buf.put(m20); buf.put(m01); buf.put(m11); buf.put(m21); buf.put(m02); buf.put(m12); buf.put(m22); return this; } /** * Add two matrices together and place the result in a third matrix. * @param left The left source matrix * @param right The right source matrix * @param dest The destination matrix, or null if a new one is to be created * @return the destination matrix */ public static Matrix3f add(Matrix3f left, Matrix3f right, Matrix3f dest) { if (dest == null) dest = new Matrix3f(); dest.m00 = left.m00 + right.m00; dest.m01 = left.m01 + right.m01; dest.m02 = left.m02 + right.m02; dest.m10 = left.m10 + right.m10; dest.m11 = left.m11 + right.m11; dest.m12 = left.m12 + right.m12; dest.m20 = left.m20 + right.m20; dest.m21 = left.m21 + right.m21; dest.m22 = left.m22 + right.m22; return dest; } /** * Subtract the right matrix from the left and place the result in a third matrix.
* @param left The left source matrix * @param right The right source matrix * @param dest The destination matrix, or null if a new one is to be created * @return the destination matrix */ public static Matrix3f sub(Matrix3f left, Matrix3f right, Matrix3f dest) { if (dest == null) dest = new Matrix3f(); dest.m00 = left.m00 - right.m00; dest.m01 = left.m01 - right.m01; dest.m02 = left.m02 - right.m02; dest.m10 = left.m10 - right.m10; dest.m11 = left.m11 - right.m11; dest.m12 = left.m12 - right.m12; dest.m20 = left.m20 - right.m20; dest.m21 = left.m21 - right.m21; dest.m22 = left.m22 - right.m22; return dest; } /** * Multiply the right matrix by the left and place the result in a third matrix. * @param left The left source matrix * @param right The right source matrix * @param dest The destination matrix, or null if a new one is to be created * @return the destination matrix */ public static Matrix3f mul(Matrix3f left, Matrix3f right, Matrix3f dest) { if (dest == null) dest = new Matrix3f(); float m00 = left.m00 * right.m00 + left.m10 * right.m01 + left.m20 * right.m02; float m01 = left.m01 * right.m00 + left.m11 * right.m01 + left.m21 * right.m02; float m02 = left.m02 * right.m00 + left.m12 * right.m01 + left.m22 * right.m02; float m10 = left.m00 * right.m10 + left.m10 * right.m11 + left.m20 * right.m12; float m11 = left.m01 * right.m10 + left.m11 * right.m11 + left.m21 * right.m12; float m12 = left.m02 * right.m10 + left.m12 * right.m11 + left.m22 * right.m12; float m20 = left.m00 * right.m20 + left.m10 * right.m21 + left.m20 * right.m22; float m21 = left.m01 * right.m20 + left.m11 * right.m21 + left.m21 * right.m22; float m22 = left.m02 * right.m20 + left.m12 * right.m21 + left.m22 * right.m22; dest.m00 = m00; dest.m01 = m01; dest.m02 = m02; dest.m10 = m10; dest.m11 = m11; dest.m12 = m12; dest.m20 = m20; dest.m21 = m21; dest.m22 = m22; return dest; } /** * Transform a Vector by a matrix and return the result in a destination * vector. 
* @param left The left matrix * @param right The right vector * @param dest The destination vector, or null if a new one is to be created * @return the destination vector */ public static Vector3f transform(Matrix3f left, Vector3f right, Vector3f dest) { if (dest == null) dest = new Vector3f(); float x = left.m00 * right.x + left.m10 * right.y + left.m20 * right.z; float y = left.m01 * right.x + left.m11 * right.y + left.m21 * right.z; float z = left.m02 * right.x + left.m12 * right.y + left.m22 * right.z; dest.x = x; dest.y = y; dest.z = z; return dest; } /** * Transpose this matrix * @return this */ public Matrix transpose() { return transpose(this, this); } /** * Transpose this matrix and place the result in another matrix * @param dest The destination matrix or null if a new matrix is to be created * @return the transposed matrix */ public Matrix3f transpose(Matrix3f dest) { return transpose(this, dest); } /** * Transpose the source matrix and place the result into the destination matrix * @param src The source matrix to be transposed * @param dest The destination matrix or null if a new matrix is to be created * @return the transposed matrix */ public static Matrix3f transpose(Matrix3f src, Matrix3f dest) { if (dest == null) dest = new Matrix3f(); float m00 = src.m00; float m01 = src.m10; float m02 = src.m20; float m10 = src.m01; float m11 = src.m11; float m12 = src.m21; float m20 = src.m02; float m21 = src.m12; float m22 = src.m22; dest.m00 = m00; dest.m01 = m01; dest.m02 = m02; dest.m10 = m10; dest.m11 = m11; dest.m12 = m12; dest.m20 = m20; dest.m21 = m21; dest.m22 = m22; return dest; } /** * @return the determinant of the matrix */ public float determinant() { float f = m00 * (m11 * m22 - m12 * m21) + m01 * (m12 * m20 - m10 * m22) + m02 * (m10 * m21 - m11 * m20); return f; } /** * Returns a string representation of this matrix */ public String toString() { StringBuilder buf = new StringBuilder(); buf.append(m00).append(' ').append(m10).append(' ').append(m20).append(' ').append('\n'); buf.append(m01).append(' ').append(m11).append(' ').append(m21).append(' ').append('\n'); buf.append(m02).append(' ').append(m12).append(' ').append(m22).append(' ').append('\n'); return buf.toString(); } /** * Invert this matrix * @return this if successful, null otherwise */ public Matrix invert() { return invert(this, this); } /** * Invert the source matrix and put the result into the destination matrix * @param src The source matrix to be inverted * @param dest The destination matrix, or null if a new one is to be created * @return The inverted matrix if successful, null otherwise */ public static Matrix3f invert(Matrix3f src, Matrix3f dest) { float determinant = src.determinant(); if (determinant != 0) { if (dest == null) dest = new Matrix3f(); /* do it the ordinary way * * inv(A) = 1/det(A) * adj(T), where adj(T) = transpose(Conjugate Matrix) * * m00 m01 m02 * m10 m11 m12 * m20 m21 m22 */ float determinant_inv = 1f/determinant; // get the conjugate matrix float t00 = src.m11 * src.m22 - src.m12* src.m21; float t01 = - src.m10 * src.m22 + src.m12 * src.m20; float t02 = src.m10 * src.m21 - src.m11 * src.m20; float t10 = - src.m01 * src.m22 + src.m02 * src.m21; float t11 = src.m00 * src.m22 - src.m02 * src.m20; float t12 = - src.m00 * src.m21 + src.m01 * src.m20; float t20 = src.m01 * src.m12 - src.m02 * src.m11; float t21 = -src.m00 * src.m12 + src.m02 * src.m10; float t22 = src.m00 * src.m11 - src.m01 * src.m10; dest.m00 = t00*determinant_inv; dest.m11 = t11*determinant_inv; dest.m22 = 
t22*determinant_inv; dest.m01 = t10*determinant_inv; dest.m10 = t01*determinant_inv; dest.m20 = t02*determinant_inv; dest.m02 = t20*determinant_inv; dest.m12 = t21*determinant_inv; dest.m21 = t12*determinant_inv; return dest; } else return null; } /** * Negate this matrix * @return this */ public Matrix negate() { return negate(this); } /** * Negate this matrix and place the result in a destination matrix. * @param dest The destination matrix, or null if a new matrix is to be created * @return the negated matrix */ public Matrix3f negate(Matrix3f dest) { return negate(this, dest); } /** * Negate the source matrix and place the result in the destination matrix. * @param src The source matrix * @param dest The destination matrix, or null if a new matrix is to be created * @return the negated matrix */ public static Matrix3f negate(Matrix3f src, Matrix3f dest) { if (dest == null) dest = new Matrix3f(); dest.m00 = -src.m00; dest.m01 = -src.m01; dest.m02 = -src.m02; dest.m10 = -src.m10; dest.m11 = -src.m11; dest.m12 = -src.m12; dest.m20 = -src.m20; dest.m21 = -src.m21; dest.m22 = -src.m22; return dest; } /** * Set this matrix to be the identity matrix. * @return this */ public Matrix setIdentity() { return setIdentity(this); } /** * Set the matrix to be the identity matrix. * @param m The matrix to be set to the identity * @return m */ public static Matrix3f setIdentity(Matrix3f m) { m.m00 = 1.0f; m.m01 = 0.0f; m.m02 = 0.0f; m.m10 = 0.0f; m.m11 = 1.0f; m.m12 = 0.0f; m.m20 = 0.0f; m.m21 = 0.0f; m.m22 = 1.0f; return m; } /** * Set this matrix to 0. * @return this */ public Matrix setZero() { return setZero(this); } /** * Set the given matrix to 0. * @param m The matrix to be set to 0 * @return m */ public static Matrix3f setZero(Matrix3f m) { m.m00 = 0.0f; m.m01 = 0.0f; m.m02 = 0.0f; m.m10 = 0.0f; m.m11 = 0.0f; m.m12 = 0.0f; m.m20 = 0.0f; m.m21 = 0.0f; m.m22 = 0.0f; return m; } }
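The adjugate construction used in invert above can be cross-checked numerically. A small NumPy sketch that rebuilds the cofactor matrix, transposes it, scales by 1/det, and compares against a general-purpose inverse:

# Verify the adjugate-based 3x3 inverse: inv(A) = adj(A) / det(A),
# where adj(A) is the transposed cofactor matrix.
import numpy as np

A = np.array([[2.0, 1.0, 0.0],
              [0.0, 3.0, 1.0],
              [1.0, 0.0, 1.0]])

minor = lambda i, j: np.delete(np.delete(A, i, axis=0), j, axis=1)
cofactors = np.array([[(-1) ** (i + j) * np.linalg.det(minor(i, j))
                       for j in range(3)] for i in range(3)])
adjugate = cofactors.T
assert np.allclose(adjugate / np.linalg.det(A), np.linalg.inv(A))
print("adjugate inverse matches numpy.linalg.inv")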
/** * Copyright 2023 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "bolt/bolt_utils.h" #include "src/common/log_adapter.h" #include "include/errorcode.h" using mindspore::lite::RET_NOT_SUPPORT; using mindspore::lite::RET_OK; namespace mindspore::kernel::bolt { int ConvertActType(const ActType &lite_act, ActivationMode *bolt_act) { switch (lite_act) { case ActType_No: *bolt_act = ACTIVATION_NULL; break; case ActType_Relu: *bolt_act = ACTIVATION_RELU; break; case ActType_Sigmoid: *bolt_act = ACTIVATION_SIGMOID; break; case ActType_Relu6: *bolt_act = ACTIVATION_RELU6; break; case ActType_Abs: *bolt_act = ACTIVATION_ABS; break; case ActType_Softplus: *bolt_act = ACTIVATION_SOFTPLUS; break; case ActType_Tanh: *bolt_act = ACTIVATION_TANH; break; case ActType_HSwish: *bolt_act = ACTIVATION_H_SWISH; break; case ActType_HSigmoid: *bolt_act = ACTIVATION_H_SIGMOID; break; case ActType_Sign: *bolt_act = ACTIVATION_SIGN; break; case ActType_Swish: *bolt_act = ACTIVATION_SWISH; break; case ActType_Gelu: *bolt_act = ACTIVATION_GELU; break; default: MS_LOG(ERROR) << "Unsupported act type: " << lite_act << " for bolt"; return RET_NOT_SUPPORT; } return RET_OK; } } // namespace mindspore::kernel::bolt
#pragma GCC target("avx") #pragma GCC optimize(3) #pragma GCC optimize("Ofast") #pragma GCC optimize("inline") #pragma GCC optimize("-fgcse") #pragma GCC optimize("-fgcse-lm") #include<bits/stdc++.h> #define rep(i,x,y) for (int i=(x);i<=(y);i++) #define ll long long #define inf 1000000001 using namespace std; int read(){ char ch=getchar();int x=0,op=1; for (;!isdigit(ch);ch=getchar()) if (ch=='-') op=-1; for (;isdigit(ch);ch=getchar()) x=(x<<1)+(x<<3)+ch-'0'; return x*op; } void write(ll a){ if (a<0) putchar('-'),a=-a; if (a>=10) write(a/10);putchar(a%10+'0'); } #define N 100005 int n,m;ll ans[N]; int main(){ n=read(),m=read(); rep (i,1,n){ int l=read()-1,r=read(),last,j; for (j=1;j<=l;j=last+1){ last=min(l/(l/j),r/(r/j));//cerr<<last<<' '; if (r/j-l/j>0) ans[j]++,ans[last+1]--; } if (j<=r) ans[j]++,ans[r+1]--; } rep (i,1,m) ans[i]+=ans[i-1],write(ans[i]),puts(""); return 0; }
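The inner loop above is the standard divisor-block ("harmonic") decomposition: for fixed j, the quotients l/j and r/j stay constant up to last = min(l/(l/j), r/(r/j)), so whole blocks can be processed at once with a difference array. A Python sketch of the block enumeration:

# Enumerate maximal blocks [j, last] on which both l // j and r // j are constant,
# mirroring the inner loop of the C++ solution above.
def divisor_blocks(l, r):
    j = 1
    while j <= l:
        last = min(l // (l // j), r // (r // j))
        yield j, last, l // j, r // j
        j = last + 1

for j, last, lq, rq in divisor_blocks(10, 25):
    print(f"j in [{j}, {last}]: l//j = {lq}, r//j = {rq}")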
/** * For command line interface utilities that are SOAP clients and need to authenticate with * the admin service using credentials from local configuration. * <p> * This class takes -h,--help for displaying usage, and -s,--server for target server hostname. * Subclass can provide additional options. The expected use is similar to the following: * <pre> * MyUtil util = new MyUtil(); * try { * util.setupCommandLineOptons(); * CommandLine cl = util.getCommandLine(args); * if (cl != null) { * if (cl.hasOption(...)) { * util.auth(); * util.doMyThing(); * } else if (cl.hasOption(...)) { * ... * } * } * } catch (ParseException e) { * util.usage(e); * } * * </pre> * * @author kchen * */ public abstract class SoapCLI { // common options public static final String O_AUTHTOKEN = "y"; public static final String O_AUTHTOKENFILE = "Y"; public static final String O_H = "h"; public static final String O_HIDDEN = "hidden"; public static final String O_S = "s"; public static final Option OPT_AUTHTOKEN = new Option(O_AUTHTOKEN, "authtoken", true, "use auth token string (has to be in JSON format) from command line"); public static final Option OPT_AUTHTOKENFILE = new Option(O_AUTHTOKENFILE, "authtokenfile", true, "read auth token (has to be in JSON format) from a file"); private String mUser; private String mPassword; private String mHost; private int mPort; private boolean mAuth; private Options mOptions; private Options mHiddenOptions; private boolean mDisableTargetServerOption; private SoapTransport mTrans = null; private String mServerUrl; protected SoapCLI() throws ServiceException { this(false); } protected SoapCLI(boolean disableTargetServerOption) throws ServiceException { // get admin username from local config mUser = LC.zimbra_ldap_user.value(); // get password from localconfig mPassword = LC.zimbra_ldap_password.value(); // host can be specified mHost = "localhost"; // get admin port number from provisioning com.zimbra.cs.account.Config conf = null; try { conf = Provisioning.getInstance().getConfig(); } catch (ServiceException e) { throw ServiceException.FAILURE("Unable to connect to LDAP directory", e); } mPort = conf.getIntAttr(Provisioning.A_zimbraAdminPort, 0); if (mPort == 0) throw ServiceException.FAILURE("Unable to get admin port number from provisioning", null); mOptions = new Options(); mHiddenOptions = new Options(); mDisableTargetServerOption = disableTargetServerOption; } protected void setServer(String hostname) { mHost = hostname; } /** * Parses the command line arguments. If -h,--help is specified, displays usage and returns null. * @param args the command line arguments * @return * @throws ParseException */ protected CommandLine getCommandLine(String[] args) throws ParseException { CommandLineParser clParser = new GnuParser(); CommandLine cl = null; Options opts = getAllOptions(); try { cl = clParser.parse(opts, args); } catch (ParseException e) { if (helpOptionSpecified(args)) { usage(); return null; } else throw e; } if (cl.hasOption(O_H)) { boolean showHiddenOptions = cl.hasOption(O_HIDDEN); usage(null, showHiddenOptions); return null; } if (!mDisableTargetServerOption && cl.hasOption(O_S)) setServer(cl.getOptionValue(O_S)); return cl; } /** * Returns an <tt>Options</tt> object that combines the standard options * and the hidden ones. 
*/ @SuppressWarnings("unchecked") private Options getAllOptions() { Options newOptions = new Options(); Set<OptionGroup> groups = new HashSet<OptionGroup>(); Options[] optionses = new Options[] { mOptions, mHiddenOptions }; for (Options options : optionses) { for (Option opt : (Collection<Option>) options.getOptions()) { OptionGroup group = options.getOptionGroup(opt); if (group != null) { groups.add(group); } else { newOptions.addOption(opt); } } } for (OptionGroup group : groups) { newOptions.addOptionGroup(group); } return newOptions; } private boolean helpOptionSpecified(String[] args) { return args != null && args.length == 1 && ("-h".equals(args[0]) || "--help".equals(args[0])); } /** * Authenticates using the username and password from the local config. * @throws IOException * @throws com.zimbra.common.soap.SoapFaultException * @throws ServiceException */ protected LmcSession auth() throws SoapFaultException, IOException, ServiceException { URL url = new URL("https", mHost, mPort, AdminConstants.ADMIN_SERVICE_URI); mServerUrl = url.toExternalForm(); SoapTransport trans = getTransport(); mAuth = false; Element authReq = new Element.XMLElement(AdminConstants.AUTH_REQUEST); authReq.addAttribute(AdminConstants.E_NAME, mUser, Element.Disposition.CONTENT); authReq.addAttribute(AdminConstants.E_PASSWORD, mPassword, Element.Disposition.CONTENT); try { Element authResp = trans.invokeWithoutSession(authReq); String authToken = authResp.getAttribute(AdminConstants.E_AUTH_TOKEN); ZAuthToken zat = new ZAuthToken(null, authToken, null); trans.setAuthToken(authToken); mAuth = true; return new LmcSession(zat, null); } catch (UnknownHostException e) { // UnknownHostException's error message is not clear; rethrow with a more descriptive message throw new IOException("Unknown host: " + mHost); } } /** * Authenticates using the provided ZAuthToken * @throws IOException * @throws com.zimbra.common.soap.SoapFaultException * @throws ServiceException */ protected LmcSession auth(ZAuthToken zAuthToken) throws SoapFaultException, IOException, ServiceException { if (zAuthToken == null) return auth(); URL url = new URL("https", mHost, mPort, AdminConstants.ADMIN_SERVICE_URI); mServerUrl = url.toExternalForm(); SoapTransport trans = getTransport(); mAuth = false; Element authReq = new Element.XMLElement(AdminConstants.AUTH_REQUEST); zAuthToken.encodeAuthReq(authReq, true); try { Element authResp = trans.invokeWithoutSession(authReq); ZAuthToken zat = new ZAuthToken(authResp.getElement(AdminConstants.E_AUTH_TOKEN), true); trans.setAuthToken(zat); mAuth = true; return new LmcSession(zat, null); } catch (UnknownHostException e) { // UnknownHostException's error message is not clear; rethrow with a more descriptive message throw new IOException("Unknown host: " + mHost); } } /** * Sets up expected command line options. This class adds -h for help and -s for server. * */ protected void setupCommandLineOptions() { if (!mDisableTargetServerOption) { Option s = new Option(O_S, "server", true, "Mail server hostname. Default is localhost."); mOptions.addOption(s); } mOptions.addOption(O_H, "help", false, "Displays this help message."); mHiddenOptions.addOption(null, O_HIDDEN, false, "Include hidden options in help output"); } /** * Displays usage to stdout. * */ protected void usage() { usage(null); } /** * Displays usage to stdout. 
* @param e parse error */ protected void usage(ParseException e) { usage(e, false); } protected void usage(ParseException e, boolean showHiddenOptions) { if (e != null) { System.err.println("Error parsing command line arguments: " + e.getMessage()); } Options opts = showHiddenOptions ? getAllOptions() : mOptions; PrintWriter pw = new PrintWriter(System.err, true); HelpFormatter formatter = new HelpFormatter(); formatter.printHelp(pw, formatter.getWidth(), getCommandUsage(), null, opts, formatter.getLeftPadding(), formatter.getDescPadding(), null); pw.flush(); String trailer = getTrailer(); if (trailer != null && trailer.length() > 0) { System.err.println(); System.err.println(trailer); } } /** * Returns the command usage. Since most CLI utilities are wrapped into shell script, the name of * the script should be returned. * @return */ protected abstract String getCommandUsage(); /** * Returns the trailer in the usage message. Subclass can add additional notes on the usage. * @return */ protected String getTrailer() { return ""; } /** * Returns whether this command line SOAP client has been authenticated. * @return */ protected boolean isAuthenticated() { return mAuth; } /** * Returns the username. * @return */ protected String getUser() { return mUser; } /** * Returns the target server hostname. * @return */ protected String getServer() { return mHost; } /** * Returns the target server admin port number. * @return */ protected int getPort() { return mPort; } /** * Gets the SOAP transport. * @return null if the SOAP client has not been authenticated. */ protected SoapTransport getTransport() { if (mTrans == null) initTransport(); return mTrans; } private void initTransport() { SoapHttpTransport trans = new SoapHttpTransport(mServerUrl); trans.setRetryCount(1); mTrans = trans; } /** * Set the SOAP transport read timeout * @return null if the SOAP client has not been authenticated. */ public void setTransportTimeout(int newTimeout) { getTransport().setTimeout(newTimeout); } protected String getServerUrl() { return mServerUrl; } /** * Gets the options that has been set up so far. 
* @return */ protected Options getOptions() { return mOptions; } protected Options getHiddenOptions() { return mHiddenOptions; } // helper for options that specify date/time private static final String[] DATETIME_FORMATS = { "yyyy/MM/dd HH:mm:ss", "yyyy/MM/dd HH:mm:ss SSS", "yyyy/MM/dd HH:mm:ss.SSS", "yyyy/MM/dd-HH:mm:ss-SSS", "yyyy/MM/dd-HH:mm:ss", "yyyyMMdd.HHmmss.SSS", "yyyyMMdd.HHmmss", "yyyyMMddHHmmssSSS", "yyyyMMddHHmmss" }; public static final String CANONICAL_DATETIME_FORMAT = DATETIME_FORMATS[0]; public static Date parseDatetime(String str) { for (String formatStr: DATETIME_FORMATS) { SimpleDateFormat fmt = new SimpleDateFormat(formatStr); fmt.setLenient(false); ParsePosition pp = new ParsePosition(0); Date d = fmt.parse(str, pp); if (d != null && pp.getIndex() == str.length()) return d; } return null; } public static String getAllowedDatetimeFormatsHelp() { StringBuilder sb = new StringBuilder(); sb.append("Specify date/time in one of these formats:\n\n"); Date d = new Date(); for (String formatStr: DATETIME_FORMATS) { SimpleDateFormat fmt = new SimpleDateFormat(formatStr); String s = fmt.format(d); sb.append(" ").append(s).append("\n"); } sb.append("\n"); sb.append( "Specify year, month, date, hour, minute, second, and optionally millisecond.\n"); sb.append( "Month/date/hour/minute/second are 0-padded to 2 digits, millisecond to 3 digits.\n"); sb.append( "Hour must be specified in 24-hour format, and time is in local time zone.\n"); return sb.toString(); } public static ZAuthToken getZAuthToken(CommandLine cl) throws ServiceException, ParseException, IOException { if (cl.hasOption(SoapCLI.O_AUTHTOKEN) && cl.hasOption(SoapCLI.O_AUTHTOKENFILE)) { String msg = String.format("cannot specify both %s and %s options", SoapCLI.O_AUTHTOKEN, SoapCLI.O_AUTHTOKENFILE); throw new ParseException(msg); } if (cl.hasOption(SoapCLI.O_AUTHTOKEN)) { return ZAuthToken.fromJSONString(cl.getOptionValue(SoapCLI.O_AUTHTOKEN)); } if (cl.hasOption(SoapCLI.O_AUTHTOKENFILE)) { String authToken = StringUtil.readSingleLineFromFile(cl.getOptionValue(SoapCLI.O_AUTHTOKENFILE)); return ZAuthToken.fromJSONString(authToken); } return null; } }
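The parseDatetime helper above shows a reusable pattern: try a list of formats in order, parse strictly (non-lenient), and accept a candidate only if it consumes the whole input string. A rough Python sketch of the same idea, with the format table abbreviated to a few of the Java entries:

```python
from datetime import datetime

# Abbreviated counterparts of SoapCLI's DATETIME_FORMATS table.
DATETIME_FORMATS = [
    "%Y/%m/%d %H:%M:%S",
    "%Y/%m/%d-%H:%M:%S",
    "%Y%m%d.%H%M%S",
    "%Y%m%d%H%M%S",
]

def parse_datetime(s: str):
    """Return the first full-string match against the format list, else None."""
    for fmt in DATETIME_FORMATS:
        try:
            # strptime is strict and rejects trailing characters, matching the
            # Java code's check that the ParsePosition reached the end.
            return datetime.strptime(s, fmt)
        except ValueError:
            continue
    return None

assert parse_datetime("2024/01/31 08:15:00") is not None
assert parse_datetime("2024/01/31 08:15:00 extra") is None
```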
import api from '@/api';

const fetchProducts = async (query: any): Promise<any> => {
  return api({
    url: "searchProducts",
    method: "post",
    data: query,
    cache: true
  });
}

export const ProductService = {
  fetchProducts
}
def names(self):
    return [self.VALUE_ESTIMATOR, self.VALUE_DATASET]
A New Password- and Position-Based Authenticated Key Exchange

Password-based authenticated key exchange is a popular method for secure authentication and key exchange. With the wide application of unmanned aerial vehicles, position information has also become an important factor in authentication. In this paper, we present a new key exchange protocol that is the first to realize dual authentication of both password and position, and we propose two applicable scenarios for the PPAKE mechanism: one is unmanned aerial vehicle authentication, and the other is authentication in a military base. Adding position authentication improves the reliability of authentication and increases the difficulty of adversarial attacks: any adversary who can listen, tamper, and send messages can only perform an online password-guessing attack at a specified position. Finally, we provide security proofs under the defined model.

Introduction

A key exchange protocol allows two or more parties to negotiate and share session keys over insecure channels in order to establish encrypted communication. To achieve secure communication over open and insecure channels, Diffie and Hellman introduced in 1976 the concept of public key cryptography and the famous Diffie-Hellman key exchange protocol, which establishes a shared session key between two communicating parties. However, the Diffie-Hellman protocol cannot resist man-in-the-middle attacks or provide dual authentication. A large number of authenticated key exchange protocols have been proposed subsequently, as well as corresponding applications. According to their application scenarios and assumptions, authenticated key exchange protocols broadly fall into two categories: the first assumes that each interacting party holds a high-entropy private key that can be used to generate a high-entropy session key; the second assumes that the parties only share a weak password and generate a high-entropy session key through interaction.

Bellovin and Merritt in 1992 first proposed the password-based authenticated key exchange (PAKE) protocol, called the BM scheme. Many improvements on the BM scheme followed, but none of them had a security model. It was not until EUROCRYPT 2000 that Boyko et al. presented the first security model for PAKE. Under the random oracle model, the SPAKE scheme in is an efficient provably secure scheme. Under the standard model, Goldreich and Lindell proposed a solution based on one-way functions and zero knowledge at EUROCRYPT 2001, but neither it nor the subsequent theoretical constructions based on it are practical. Katz et al. proposed the first practical provably secure password-based solution with the help of public reference strings at EUROCRYPT 2001, called the KOY scheme. Gennaro and Lindell extended the KOY scheme to a general construction based on smooth projective hash systems and chosen-ciphertext secure encryption schemes at EUROCRYPT 2003. Xue et al. found that the scheme in , requiring six group elements and a random string, is more efficient than other current schemes in the BPR model under the standard model, and they presented an improved PAKE protocol by replacing the CCA-secure encryption scheme in with a CCA-secure key encapsulation mechanism (KEM). This protocol requires only 5 group elements and 2 short random strings.
And the length of each random string is (1/3) log p bits (equal to 1/6 of the length of a group element on an elliptic curve). In previous proposals, password-based key exchange has to face the challenge of generating high-entropy session keys from low-entropy secrets. Current password-based key exchange protocols are mainly used in server-client scenarios, and the mainstream technique adopts a CCA2-secure encryption scheme and a smooth projective hash function. In many real applications, such as drone control stations and military base communications, position information is also an important type of authentication information. The first position-based authentication protocol was presented by Chandran et al. , where a location can be verified in 3D space using 4 verifiers. The protocol has many application scenarios; e.g., 4 aircraft can verify a controller's position, or military bases can secure communication between each other. Following this work, many schemes have been proposed, such as a blockchain-based positioning scheme , tracking of cryptographic keys and encrypted data using position verification , and position-based encryption .

Motivation

Position and password are both important information in wireless communication. Our basic idea is to combine the PAKE result of with the secure position-based protocol of to obtain a secure key exchange protocol with dual authentication of position and password (called PPAKE). Dual authentication based on password and position improves the reliability of authentication and increases the difficulty of adversarial attacks. To propose a secure PPAKE protocol, we solved the following issues: (1) How do the four verifiers determine the position information of the participant and verify it simultaneously? (2) How does the participant verify the password information of the four verifiers and generate a high-entropy session key at the same time? (3) How do the four verifiers generate the same high-entropy session key while verifying the position and password information of the participant?

In our PPAKE protocol, four verifiers use a common reference string to authenticate the participant and initiate encrypted communication by sharing the password and position information. The PPAKE protocol combines an ElGamal ciphertext, a universal projective hash function, a key encapsulation mechanism, a 4-wise independent hash function, and a pseudo-random generator, and it realizes synchronized authentication of the password and position information. Prior art authenticates participants unilaterally, whereas the present technique uses both the password and the position information to authenticate the joining party and negotiates a common session key in preparation for subsequent private communication. Specifically, the adversary cannot pretend to be the joining party and authenticate from a location inconsistent with its declared location; any position or password forged by the adversary fails authentication. Our PPAKE can be widely applied in many scenarios, for example, (1) a communication base station that needs to verify the position and password of a user before authorizing network access and (2) real-life logistics distribution scenarios that require password and position verification before negotiating important content. Other applications, such as unmanned aerial vehicle authentication and military base authentication, are described in a later section.
From the discussion above, it is very meaningful to present a key exchange protocol with dual authentication of password and position.

Applications of PPAKE

Next, we propose two applicable scenarios for the PPAKE mechanism: one is unmanned aerial vehicle authentication, and the other is authentication in a military base.

Unmanned Aerial Vehicle Authentication. The unmanned aerial vehicle (UAV) is a kind of unmanned aircraft operated by wireless remote control or flight planning. Thanks to a series of advantages, such as low cost, easy operation, fast image acquisition, high ground resolution, independence from a fixed environment, and no risk of onboard casualties in an accidental crash, UAVs have been widely used in map surveying and updating, geological survey, natural disaster monitoring, agricultural remote sensing, and other fields. Control technology of the UAV refers to the technology of remote control, telemetry, tracking, positioning, and information transmission to the UAV. The corresponding technical facilities consist of a data link and a ground control station. The data link realizes data transmission and delivery, tracking, and positioning between the ground control station and the UAV. The ground control station realizes functions such as mission planning, link control, flight control, load control, flight track display, and parameter and image display, as well as recording and distribution. To ensure the authenticity of the transmitted information and of its source, dual authentication and key exchange should be carried out before information is exchanged between the UAV and the ground control station. Our PPAKE adopts dual authentication based on position and password to complete the identity authentication and key exchange between the two parties. When a UAV holding a legal identity and password granted by the ground control station sends the station a request for authentication and information transmission, it also submits its encrypted identity, password, and position information. The ground control station then authenticates each piece of information separately. If the information matches the prestored information, the session key is generated and the message is transmitted; otherwise, the request for authentication and message transmission is aborted.

Military Base. In recent years, several local wars around the world have shown the wide application of information technology in the military field, which has brought comprehensive and profound changes to the pattern of war. With the increasing use of modern communication and computer network technology, the carriers of military information have changed greatly. Hidden information security risks have spread from simple document management in the past to information systems, equipment, sites, and every link of information processing. Modern military communication falls into three categories, namely, wired communication, wireless communication, and network communication, which exist in different forms and carry different degrees of security risk. In wireless communication, a user must obtain appropriate permissions before remotely accessing system resources or transmitting data. Dual identity authentication gives a simple and effective solution to this security problem.
Specifically, PPAKE adopts dual authentication based on password and position to authenticate a wireless user who has registered with the base and obtained an identity ID and password, and to generate a session key. When a user holding an ID and password sends an access and authentication request to the base station, the user submits the encrypted ID, password, and position information, and the base station then authenticates each piece of information separately.

Security Model

In this model, we assume that (1) the clocks of all verifiers are synchronized, and the verifiers and the participants keep the same pace; (2) the protocol has a fixed set of protocol users; (3) messages travel at the speed of radio waves; and (4) each principal can execute the protocol multiple times with different partners.

As described above, the PPAKE protocol consists of two phases, namely, the initialization phase and the execution phase.

Initialization phase: in this phase, public parameters are established, each user's position is given, and the unique identifiers of all the verifiers are given to all protocol users. Each participant shares a password with all verifiers. Each password is chosen uniformly from the set {1, ..., D_n} for some integer D_n depending on n.

Execution phase: in this phase, we define separately how the verifiers, participants, and adversaries execute, following the two security definitions below, given according to .

Position-based authentication: in the execution phase, any verifier and any adversary can send all of the following three types of messages: broadcast messages, directional messages, and private multicast messages. Any participant can send broadcast messages and directional messages. The message types are as follows:

(1) Broadcast messages: a broadcast message travels with equal speed in all directions, in concentric hyperspheres centered at the sender's position P, and arrives at a position P′ after time t (t is the time radio waves take to travel from P to P′)
(2) Directional messages: a directional message travels in a region of concentric hyperspheres centered at the sender's position P and arrives at position P′ after time t (t is the time radio waves take to travel from P to P′)
(3) Private multicast messages: a verifier (or an adversary) talks to other verifiers (or other adversaries) via a private channel

A PPAKE protocol in 3-dimensional space is described as a set of verifiers Ver = {V_1, V_2, ..., V_n} at positions pos_1, pos_2, ..., pos_n, respectively, which take as input a claimed position P′ of a participant at position P and jointly return "accept" after interacting with the honest participant (if P′ = P) and in the absence of any adversarial parties.

Password-based authenticated key exchange: in the execution phase, the adversary is given oracle access to the protocol instances. The oracles are as follows:

(1) Send: this oracle sends message M to instance Π^i_U (instance i of user U is denoted Π^i_U) and outputs the message that Π^i_U sends in response to the adversary
(2) Execute: if Π^i_U and Π^j_{U′} have not yet been used, this oracle executes the protocol between these instances and outputs the resulting transcript to the adversary
(3) Reveal: this oracle outputs the session key sk^i_U to the adversary

Finally, adversary A makes a single query Test to a fresh instance Π^i_U and outputs a bit b′.
In the Test oracle, a random bit b is chosen; if b = 1, the adversary is given sk^i_U, and if b = 0, the adversary is given a session key chosen uniformly from the appropriate space. A succeeds if either b′ = b or, at the end of the experiment, there is an instance Π^i_U that accepts but is not semipartnered with any other instance (instances Π^i_U and Π^j_{U′} are semipartnered if they hold matching session transcripts). Against an adversary making at most Q(n) online attacks, A succeeds with advantage Adv_{A,Π} := 2 · Pr[Succ] − 1 ≤ Q(n)/D_n + ε(n), where Succ is the event that the adversary succeeds and ε is negligible in the defined security parameter. We claim that if a protocol Π satisfies both position-based authentication and password-based authenticated key exchange, then Π is a secure PPAKE protocol.

Description of PPAKE

In the PPAKE protocol, we assume that (1) all participants of the system have a synchronized clock, and all users have access to the public common reference string CRS; (2) all verifiers share the private random number string VRS; (3) computation time is negligible relative to the transmission time of messages; and (4) the Diffie-Hellman problem is hard in a group of prime order p. The main process of our PPAKE protocol is described in Figure 1, and the details are as follows.

Initialization Phase. In this phase, all users share a common reference string CRS = (G, p, g, h, H, H_cr, PRG, d, e) and maintain a common clock. In the CRS, G is a cyclic group of order p, where the length of p is generally greater than 160 bits; g and h are random elements of G; H is a 4-wise independent hash function; H_cr is a collision-resistant hash function; PRG is a pseudo-random generator; and d and e are elements of G. Specifically, d = g^{a_1} h^{b_1} and e = g^{a_2} h^{b_2} are the public keys of the key encapsulation mechanism (KEM), where a_1, a_2, b_1, and b_2 are random numbers generated when the system is established. We assume that all verifiers V_i (i = 1, ..., 4) share a random number string VRS = (K_1, K_2, K_3, K_4, r) through a secure communication channel. Generally, the length of each of K_1, K_2, K_3, and K_4 is greater than 80 bits, and the length of r is greater than 160 bits. Let t_1, ..., t_4 denote the times in which radio waves travel from the verifiers V_1, ..., V_4 to the position of the participant (written P for short).

Execution Phase. The execution phase proceeds from phase 1 to phase 4.

Phase 1. In this phase, all verifiers send authentication information to P; the message sent by the prime verifier differs slightly from those sent by the other, nonprime verifiers. Figure 2 illustrates the computation of the prime verifier V_1, which sends its results, the encrypted password and position authentication information, to P. Figure 3 illustrates the computation of the nonprime verifiers V_i (i = 2, 3, 4), which compute and send position authentication information timed to reach P simultaneously. The computation details are as follows:

(1) V_1 selects r from the VRS and calculates A = g^r and c′ = h^r g^π, where π is the password previously shared between all verifiers and P. Then, V_1 broadcasts (K_1, A‖c′) at time T − t_1, as shown in Figures 1 and 2.
(2) V_2 randomly selects X_1, calculates K_2′ = PRG(X_1, K_1) ⊕ K_2, and broadcasts (X_1, K_2′) at time T − t_2, as shown in Figures 1 and 3.
(3) V_3 randomly selects X_2, calculates K_3′ = PRG(X_2, K_2) ⊕ K_3, and broadcasts (X_2, K_3′) at time T − t_3, as shown in Figures 1 and 3.
(4) V_4 randomly selects X_3, calculates K_4′ = PRG(X_3, K_3) ⊕ K_4, and broadcasts (X_3, K_4′) at time T − t_4, as shown in Figures 1 and 3.

Phase 2. As shown in Figure 4, phase 2 can be divided into three steps, detailed in Figures 5 to 7. The computation details are as follows.

Figure 5 illustrates the computation of the password-based authentication information. P computes μ, the public key of the hash proof function, and the hash value σ from the password-encrypted ElGamal ciphertext. σ is divided into three parts, written τ_p ‖ sk_p ‖ r_p ← σ, where τ_p is used to verify the identity of the verifiers, sk_p is used to generate the session key, and r_p is used to encapsulate the key and blind the password and position information. The specific steps are as follows: P randomly selects λ_1 and λ_2 from Z_q (the value of q is related to the security parameter), computes μ = g^{λ_1} h^{λ_2}, c = c′ g^{−π}, and σ = A^{λ_1} c^{λ_2}, where σ is divided by bit value into three equal pieces τ_p ‖ sk_p ‖ r_p ← σ; it then computes c_kem = (g^{r_p}, h^{r_p}) and k_kem = H(d^t e^{r_p}), where t = H_cr(g^{r_p}, h^{r_p}, A‖c′, V_1, P); and finally, it outputs (μ ‖ c_kem) and k_kem as (2-P-1).

Figure 6 illustrates the computation of the position-based authentication information K_4 from the information received in phase 1. P computes K_{i+1} = PRG(X_i, K_i) ⊕ K_{i+1}′ (i = 1, 2, 3) and outputs K_4 as (2-P-2).

Figure 7 illustrates how the combined password and position authentication information (2-P-3) is computed from (2-P-1) and (2-P-2). P computes δ = k_kem ⊕ (π ‖ K_4) and broadcasts (μ ‖ c_kem ‖ δ) to all verifiers V_i (i = 1, 2, 3, 4).

Phase 3. In this phase, all verifiers V_i (i = 1, 2, 3, 4) verify P's password and position authentication information, calculate the session key, and reply with authentication information to P. As shown in Figure 8, each verifier receives the information from P, calculates the hash value σ, verifies the password, and checks the consistency of the receiving time and location. After all checks pass, the verifiers send the first block of σ back to P. The detailed computation is as follows: when V_i (i = 1, 2, 3, 4) receives (μ ‖ c_kem ‖ δ), it calculates the hash value σ = μ^r and sets τ_V ‖ sk_V ‖ r_V ← σ. Then, V_i verifies c_kem, δ, and the receiving time. Only if c_kem equals (g^{r_V}, h^{r_V}), δ equals H(d^t e^{r_V}) ⊕ (π ‖ K_4) with t = H_cr(g^{r_V}, h^{r_V}, A‖c′, V_1, P), and the receiving time equals T + t_i does V_i send τ_V to P as message (3-V_i); otherwise, V_i aborts phase 3. At the end of this phase, V_i sets the negotiated session key to sk_V.

Phase 4. As shown in Figure 9, P checks whether the authentication message τ_V sent by each V_i equals τ_p. If they are equal, P sets sk_p as the communication key with the verifiers; otherwise, P aborts phase 4.
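The chained masking in phases 1 and 2 is the heart of the position check: each K_{i+1} is recoverable only by someone who holds K_i and hears X_i, so only a device at the claimed position, where all four broadcasts arrive together at time T, can unmask K_4. Below is a minimal Python sketch of just this unmasking chain, under our own assumptions: HMAC-SHA256 stands in for the paper's unspecified PRG, keys are 128 bits, and the radio-timing model is not simulated.

```python
import hashlib
import hmac
import os
import secrets

def prg(seed: bytes, data: bytes, out_len: int = 16) -> bytes:
    # Stand-in PRG(X_i, K_i): the paper only requires some secure PRG, so we
    # use HMAC-SHA256 keyed with X_i, truncated to the key length.
    return hmac.new(seed, data, hashlib.sha256).digest()[:out_len]

def xor(a: bytes, b: bytes) -> bytes:
    return bytes(x ^ y for x, y in zip(a, b))

# Verifier side: V1..V4 share K1..K4 via the VRS; V2..V4 each pick a fresh
# X_i and broadcast (X_i, K'_{i+1}) with K'_{i+1} = PRG(X_i, K_i) XOR K_{i+1}.
K = [os.urandom(16) for _ in range(4)]           # K1..K4 (128-bit here)
X = [secrets.token_bytes(16) for _ in range(3)]  # X1..X3
masked = [xor(prg(X[i], K[i]), K[i + 1]) for i in range(3)]

# Participant side: K1 arrives in the clear from V1; unmask hop by hop.
k = K[0]
for X_i, K_next_masked in zip(X, masked):
    k = xor(prg(X_i, k), K_next_masked)  # K_{i+1} = PRG(X_i, K_i) XOR K'_{i+1}

assert k == K[3]  # P recovers K4 only if every broadcast was heard in order
```

XOR-masking with a fresh PRG output per hop means a verifier's broadcast reveals nothing about K_{i+1} to anyone who misses an earlier key in the chain.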
Security Analysis of PPAKE

Our PPAKE protocol authenticates the participant by both password and position and negotiates a session key for subsequent private communication. In particular, the prime verifier V_1 is responsible for both the password-based authenticated key exchange and the position-based authentication with participant P, while V_2, V_3, and V_4 are mainly responsible for position-based authentication. Our protocol is closely based on the PAKE protocol in . We restate our PPAKE with the position-based authentication part omitted: in phase 1, V_1 sends (A‖c′) to P; in phase 2, P broadcasts (μ ‖ c_kem); in phase 3, every verifier computes the negotiated key sk_V and sends τ_V to P; and finally, in phase 4, P checks the value of τ_V and computes as in . The PAKE protocol assumes that, in phase 3, only V_1 computes the negotiated key; in PPAKE, we assume that V_2, V_3, and V_4 can obtain the value of r from the VRS, so they too can compute the negotiated key. At the end of the protocol, all verifiers and the participant share the same session key, and nobody without r can compute it. Therefore, the security proof of our PPAKE can follow the security proof in .

Xue et al. proved that their PAKE is secure in the BPR model; the proof sketch is as follows. The proof proceeds via a sequence of experiments. Let G_i denote the experiments, and denote the advantage of adversary A in G_i by Adv_{A,G_i}(n) = 2 · Pr[Succ] − 1. Let G_0 be the BPR challenge experiment. The proof is separated into two parts: the first (G_1 to G_5) bounds the advantage gained from Execute queries, and the second (G_6 to G_10) bounds the advantage gained from Send queries. The detailed descriptions of G_1 to G_10 are the same as in Theorem 2 of . Finally, summing up all the gap advantages, we obtain the claimed bound Adv_{A,Π}(n) ≤ Q(n)/D_n + ε(n).

In the following, we analyze the security of the position-based authentication. Completeness follows from the fact that the verifiers can compute K_4 from the stored X_i values, and the participant can also compute K_4, since all the required information arrives at P at time T. We now argue that our PPAKE protocol is secure as position-based authentication. The position-based part of our PPAKE is as follows: in phase 1, V_1 broadcasts (A‖c′, K_1) at time T − t_1, and V_i (i = 2, 3, 4) broadcasts (X_{i−1}, K_i′) at time T − t_i; in phase 2, P calculates K_{i+1} = PRG(X_i, K_i) ⊕ K_{i+1}′ (i = 1, 2, 3) and broadcasts δ, where δ = k_kem ⊕ (π ‖ K_4), k_kem = H(d^t e^{r_p}), and t = H_cr(g^{r_p}, h^{r_p}, A‖c′, V_1, P); in phase 3, all verifiers verify δ and the receiving time, and if the verification passes, then V_i's authentication of P succeeds. The difference between the secure positioning protocol proposed by Chandran et al. and our PPAKE is that, in , P broadcasts K_4 itself instead of δ. In our protocol, to cooperate with the password-based authenticated key exchange, we strengthen the protocol of by encrypting K_4: we compute δ = k_kem ⊕ (π ‖ K_4), where k_kem = H(d^t e^{r_p}) and t = H_cr(g^{r_p}, h^{r_p}, A‖c′, V_1, P). Therefore, our PPAKE protocol satisfies at least the position-based authentication security of . For more details of this proof, please refer to Section 7 in .
From the above analysis, we claim that the proposed protocol is a secure authenticated key exchange that provides both password- and position-based authentication.

Conclusion

In summary, the PPAKE protocol authenticates the participant by both password and position and negotiates a common session key in preparation for subsequent private communication. The proposed protocol resists active adversaries under the standard model. Specifically, an arbitrary adversary who can listen, tamper, and send messages can only perform an online password-guessing attack at a specified position, and any position or password impersonated by the adversary fails authentication.

Data Availability

This is a purely theoretical research paper; therefore, it does not include any experimental data.

Conflicts of Interest

The authors declare that they have no conflicts of interest.
def create_age_weights(self, place, params):
    # Look up the three age-group bands and sampling proportions configured
    # for this place type (parameter lists are indexed by place type).
    param_ind = place.place_type.value - 1
    min_age = [params["age_group1_min_age"][param_ind],
               params["age_group2_min_age"][param_ind],
               params["age_group3_min_age"][param_ind]]
    max_age = [params["age_group1_max_age"][param_ind],
               params["age_group2_max_age"][param_ind],
               params["age_group3_max_age"][param_ind]]
    prop = [params["age_group1_prop"][param_ind],
            params["age_group2_prop"][param_ind],
            params["age_group3_prop"][param_ind]]

    person_list = []
    weights = []
    for person in place.cell.persons:
        # Skip people already associated with a place of this type.
        if place.place_type in person.place_types:
            continue
        if not Parameters.instance().use_ages:
            # Ages disabled: everyone gets the third group's weight.
            person_list.append(person)
            weights.append(prop[2])
        else:
            for i in range(3):
                # Equivalent to min_age[i] <= age < max_age[i] for integer ages.
                if (min_age[i] - 1) < person.age < max_age[i]:
                    person_list.append(person)
                    weights.append(prop[i])
                    break
    return person_list, weights
In a down month for video games, Microsoft’s Xbox 360 continued to buck the trend in May, according to new data out this afternoon. Microsoft sold 270,000 Xbox 360s in the U.S. in May, up 39 percent over the same month last year, according to data compiled by the NPD Group market research firm. The Microsoft console has now held the No. 1 slot, ahead of Sony’s PlayStation 3 and Nintendo’s Wii, for 11 out of the past 12 months in the country. NPD noted that the Xbox 360 has “realized nearly a year and a half of month-over-month unit sales increases.” Microsoft’s Kinect motion sensor has breathed new life into the Xbox 360, resulting in higher sales at a point in the console life cycle that typically brings declines. Even with Microsoft’s progress, the Wii remains the overall leader in cumulative console sales this hardware generation in the U.S. and worldwide, thanks to its huge sales volumes in the initial years following its release. Nintendo last week unveiled plans for a new console, the Wii U, featuring a motion-sensitive controller with an embedded touch screen. It’s expected to be released next year. Overall, total sales of hardware, games and accessories fell 14 percent in May to $743 million in U.S. physical retail stores. NPD blamed a light schedule for new game releases. NPD didn’t release May sales figures for the PS3 or Wii; the research firm leaves it to each company to disclose its data.
use pin_project::pin_project;

#[pin_project]
struct Struct1 {} //~ ERROR may not be used on structs with zero fields

#[pin_project]
struct Struct2(); //~ ERROR may not be used on structs with zero fields

#[pin_project]
struct Struct3; //~ ERROR may not be used on structs with units

#[pin_project]
enum Enum1 {} //~ ERROR may not be used on enums without variants

#[pin_project]
enum Enum2 {
    A = 2, //~ ERROR may not be used on enums with discriminants
}

#[pin_project]
enum Enum3 {
    A, //~ ERROR may not be used on enums that have no field
    B,
}

#[pin_project]
union Union {
    //~^ ERROR may only be used on structs or enums
    x: u8,
}

fn main() {}
Nonintrusive Energy Meter for Nontechnical Losses Identification

We present in this paper a method and apparatus for nonintrusive measurement of active energy in low-voltage ac installations. In the proposed method, the active power is calculated from the voltage and current waveforms and the phase shift between them. No voltage amplitude measurement is required, since the nominal voltage of the ac installation is taken as the actual voltage. This approach bypasses the main disadvantage of capacitive ac voltage probing: low accuracy in amplitude measurements due to variations in the sensor coupling capacitance. The energy meter that implements the technique is composed of a commercial nonintrusive current sensor (clamp-on current transformer) and a contactless capacitive voltage sensor designed to measure the voltage phase and waveform. The voltage sensor includes a shield that isolates it from external electric fields, making the energy meter suitable for applications in polyphase systems. The developed energy meter requires no on-site calibration, no galvanic contact with the phase conductors, and no opening of the electrical circuit, allowing quick, safe, and easy installation on an overhead service drop line. Thanks to these characteristics, it can be used by electricity distribution companies in the preinspection of consumer units suspected of fraud. Experimental results proved that the developed energy meter is insensitive to the characteristics of the cables (width and insulation), to external electric fields, and to variations in the voltage sensor capacitance. The error of the active energy measurement under real conditions in a two-phase installation was 1%.
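The measurement idea reduces to P = V_nominal · I_rms · cos φ, with only the phase φ and the current waveform actually measured. A small Python sketch of that computation on synthetic waveforms follows; the line frequency, nominal voltage, and signal parameters are illustrative values of ours, not figures from the paper.

```python
import numpy as np

f, fs = 60.0, 10_000.0             # line frequency and sample rate (illustrative)
t = np.arange(0, 0.5, 1 / fs)      # half a second of samples (30 full cycles)

V_NOMINAL = 120.0                  # assumed nominal RMS voltage of the installation
i_rms, phi = 8.0, np.radians(25)   # simulated load current and phase lag

# The capacitive probe yields the voltage *waveform* (phase reference only),
# the clamp-on CT yields the calibrated current waveform.
v_wave = np.sin(2 * np.pi * f * t)                       # shape only, amplitude unknown
i_wave = np.sqrt(2) * i_rms * np.sin(2 * np.pi * f * t - phi)

# Estimate the phase shift between the two waveforms by correlating each
# against a complex reference tone at the line frequency.
ref = np.exp(-1j * 2 * np.pi * f * t)
phi_est = np.angle(np.sum(v_wave * ref)) - np.angle(np.sum(i_wave * ref))

i_rms_est = np.sqrt(np.mean(i_wave ** 2))
p_active = V_NOMINAL * i_rms_est * np.cos(phi_est)       # W; energy = P * time
print(f"estimated active power: {p_active:.1f} W")
```

Because the probe's amplitude is discarded, any drift in its coupling capacitance only rescales v_wave and leaves the estimated phase, and hence the computed power, unchanged.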
from typing import Iterator

def iter_volume_data(self) -> Iterator[dict]:
    # Generator over the volume data of each modality that has any.
    for mod in self.iter_modalities():
        if mod.volumes_data:
            yield mod.volumes_data
/**
 * Generates all of the sub-objects and fields for a given class.
 *
 * @param rootNode the JSON class node in the JSON syntax tree.
 * @param rootName the name of the root class to generate.
 * @param codeModelPackage the code model package to generate the class in.
 * @return the number of classes that were generated.
 * @throws Exception if an error occurs.
 */
private int generate(final JsonObject rootNode, final String rootName,
        final JPackage codeModelPackage) throws Exception {
    parseObject(rootNode, rootName, codeModelPackage);
    for (JDefinedClass clazz : definedClasses.values()) {
        SwingUtilities.invokeLater(() -> {
            if (resultTextField != null) {
                resultTextField.setText(R.get("generating", clazz.name()));
            }
        });
        generateFields(clazz, fieldMap.get(clazz), codeModelPackage.owner());
    }
    return definedClasses.size();
}
// GetTi50TestBoard gets a DevBoard for testing in either lab or workstation modes.
// TODO(b/197998755): Move into a precondition.
func GetTi50TestBoard(ctx context.Context, dut *dut.DUT, rpcHint *testing.RPCHint, mode, spiflash string, bufLen int, readTimeout time.Duration) (ti50.DevBoard, *rpc.Client, error) {
	mode = ParseTi50TestMode(ctx, mode)
	testing.ContextLogf(ctx, "Using %q mode for Ti50Test", mode)

	spiflash = ParseTi50TestSpiflash(ctx, spiflash)
	testing.ContextLogf(ctx, "Using spiflash at %q for Ti50Test", spiflash)

	var targets []string
	var err error
	if mode == WorkstationMode {
		targets, err = ti50.ListConnectedUltraDebugTargets(ctx)
	} else if mode == LabMode {
		targets, err = ListRemoteUltraDebugTargets(ctx, dut)
	}
	if len(targets) == 0 {
		return nil, nil, errors.Wrap(err, "could not find any UD targets")
	}
	testing.ContextLogf(ctx, "UD Targets: %v, choosing first one found", targets)
	tty := string(targets[0])

	var board ti50.DevBoard
	var rpcClient *rpc.Client
	if mode == WorkstationMode {
		board = ti50.NewConnectedAndreiboard(tty, bufLen, spiflash, readTimeout)
	} else if mode == LabMode {
		rpcClient, err = rpc.Dial(ctx, dut, rpcHint, "cros")
		if err != nil {
			return nil, nil, errors.Wrap(err, "dialing rpc")
		}
		board = NewRemoteAndreiboard(dut, rpcClient.Conn, tty, bufLen, spiflash, readTimeout)
	}
	return board, rpcClient, nil
}
package dealer

import (
	"bytes"
	"crypto/sha256"
	"encoding/hex"
	"fmt"

	v1 "k8s.io/api/core/v1"

	"github.com/nano-gpu/nano-gpu-scheduler/pkg/utils"
)

const (
	NotNeedGPU = -1
)

// GPUResource ─┬─> GPUs
//              └─> Demand ─> Plan

type Plan struct {
	Demand     Demand
	GPUIndexes []int
	Score      int
}

func NewPlanFromPod(pod *v1.Pod) (*Plan, error) {
	if !utils.IsAssumed(pod) {
		return nil, fmt.Errorf("pod %s/%s is not assumed", pod.Namespace, pod.Name)
	}
	plan := &Plan{
		Demand:     make(Demand, len(pod.Spec.Containers)),
		GPUIndexes: make([]int, len(pod.Spec.Containers)),
		Score:      0,
	}
	for i, c := range pod.Spec.Containers {
		plan.Demand[i] = GPUResource{
			Percent: utils.GetGPUPercentFromContainer(&c),
		}
		idx, err := utils.GetContainerAssignIndex(pod, c.Name)
		if err != nil {
			idx = 0
		}
		plan.GPUIndexes[i] = idx
	}
	return plan, nil
}

type Demand []GPUResource

func NewDemandFromPod(pod *v1.Pod) Demand {
	ans := make(Demand, len(pod.Spec.Containers))
	for i, container := range pod.Spec.Containers {
		ans[i] = GPUResource{
			Percent: utils.GetGPUPercentFromContainer(&container),
		}
	}
	return ans
}

func (d *Demand) String() string {
	buffer := bytes.Buffer{}
	for _, resource := range *d {
		buffer.Write([]byte(resource.String()))
	}
	return buffer.String()
}

func (d *Demand) Hash() string {
	to := func(bs [32]byte) []byte { return bs[0:32] }
	return hex.EncodeToString(to(sha256.Sum256([]byte(d.String()))))[0:8]
}

func (d *Demand) ToSortableGPUs() SortableGPUs {
	sortableGpus := make(SortableGPUs, 0)
	for i, gpu := range *d {
		sortableGpus = append(sortableGpus, &GPUResourceWithIndex{
			GPUResource: &GPUResource{gpu.Percent, gpu.PercentTotal},
			index:       i,
		})
	}
	return sortableGpus
}

type GPUs []*GPUResource

func (g GPUs) Choose(demand Demand, rater Rater) (ans *Plan, err error) {
	ans = &Plan{
		Demand: demand,
	}
	ans.Score = rater.Rate(g, ans)
	ans.GPUIndexes, err = rater.Choose(g, demand)
	return
}

func (g GPUs) Allocate(plan *Plan) error {
	for i := 0; i < len(plan.GPUIndexes); i++ {
		// no gpu needed
		if plan.GPUIndexes[i] < 0 {
			continue
		}
		if !g[plan.GPUIndexes[i]].CanAllocate(plan.Demand[i]) {
			// Restore what was already subtracted before failing: skip
			// containers that need no GPU, and give back each container's
			// own demand (not the failing container's).
			for j := 0; j < i; j++ {
				if plan.GPUIndexes[j] < 0 {
					continue
				}
				g[plan.GPUIndexes[j]].Add(plan.Demand[j])
			}
			return fmt.Errorf("can't apply plan %v on %s", plan, g)
		}
		g[plan.GPUIndexes[i]].Sub(plan.Demand[i])
	}
	return nil
}

func (g GPUs) Release(plan *Plan) error {
	for i := 0; i < len(plan.Demand); i++ {
		if plan.GPUIndexes[i] < 0 {
			continue
		}
		if plan.GPUIndexes[i] >= len(g) {
			return fmt.Errorf("allocate plan's GPU index %d bigger than GPU resource", plan.GPUIndexes[i])
		}
		g[plan.GPUIndexes[i]].Add(plan.Demand[i])
	}
	return nil
}

func (g GPUs) String() string {
	buffer := bytes.Buffer{}
	for _, resource := range g {
		buffer.Write([]byte(resource.String()))
	}
	return buffer.String()
}

type GPUResource struct {
	Percent      int
	PercentTotal int
}

func (g GPUResource) String() string {
	return fmt.Sprintf("(%d)", g.Percent)
}

func (g *GPUResource) Add(resource GPUResource) {
	g.Percent += resource.Percent
}

func (g *GPUResource) Sub(resource GPUResource) {
	g.Percent -= resource.Percent
}

func (g *GPUResource) CanAllocate(resource GPUResource) bool {
	return g.Percent >= resource.Percent
}

// Usage returns the GPU usage of the current node, in [0%, 100%].
func (gpus GPUs) Usage() float64 {
	percentSum, percentUsed := 0, 0
	for _, r := range gpus {
		percentSum += r.PercentTotal
		percentUsed += r.PercentTotal - r.Percent
	}
	return float64(percentUsed) / float64(percentSum)
}

func (gpus GPUs) PercentUsed() int {
	totalPercentUsed := 0
	for _, r := range gpus {
		totalPercentUsed += r.PercentTotal - r.Percent
	}
	return totalPercentUsed
}

func (gpus GPUs) PercentAvailableAndFreeGpuCount() (totalAvailable int, freeGpuCount int) {
	for _, g := range gpus {
		totalAvailable += g.Percent
		if g.Percent == g.PercentTotal {
			freeGpuCount++
		}
	}
	return
}

func (gpus GPUs) UsageVariance() float64 {
	percentUsages := []float64{}
	for _, r := range gpus {
		percentUsages = append(percentUsages, (float64(r.PercentTotal)-float64(r.Percent))/float64(r.PercentTotal))
	}
	return Variance(percentUsages)
}

func (gpus GPUs) ToSortableGPUs() SortableGPUs {
	sortableGpus := make(SortableGPUs, 0)
	for i, gpu := range gpus {
		sortableGpus = append(sortableGpus, &GPUResourceWithIndex{
			GPUResource: &GPUResource{gpu.Percent, gpu.PercentTotal},
			index:       i,
		})
	}
	return sortableGpus
}

type GPUResourceWithIndex struct {
	*GPUResource
	index int
}

type SortableGPUs []*GPUResourceWithIndex

func (g SortableGPUs) Len() int           { return len(g) }
func (g SortableGPUs) Swap(i, j int)      { g[i], g[j] = g[j], g[i] }
func (g SortableGPUs) Less(i, j int) bool { return g[i].Percent < g[j].Percent }
package com.youlai.common.core.base;

public class BaseController<T> {
}
<filename>fxtract/main.cpp<gh_stars>0 // // main.cpp // fxtract // // Created by <NAME> on 30/07/13. // Copyright (c) 2013 <NAME>. All rights reserved. // #include <fstream> #include <iostream> #include <set> #include <vector> #include <string> #include <unistd.h> #include <seqan/seq_io.h> #include <seqan/sequence.h> #include <seqan/find.h> #include "util.h" #include "fileManager.h" #define VERSION "0.2" struct Fx { seqan::CharString id; seqan::CharString seq; seqan::CharString qual; Fx() {}; Fx(seqan::CharString _id, seqan::CharString _seq, seqan::CharString _qual = "") : id(_id), seq(_seq), qual(_qual){} }; struct Options { bool gzip; bool bzip2; bool fasta; bool fastq; bool header; char * pattern_file; Options() : gzip(false), bzip2(false), fasta(false), fastq(false), header(false), pattern_file(NULL) {} }; typedef seqan::Pattern<seqan::String<seqan::CharString>, seqan::WuManber> WuMa; void printSingle(Fx& mate1, std::ostream& out ) { if (seqan::empty(mate1.qual)) { //out<<">"<<mate1.id<<std::endl; //out<<mate1.seq<<std::endl; seqan::writeRecord(out, mate1.id, mate1.seq, seqan::Fasta()); } else { //std::cout<<"@"<<mate1.id<<'\n'<<mate1.seq<<"\n+\n"<<mate1.qual<<std::endl; seqan::writeRecord(out, mate1.id, mate1.seq, mate1.qual, seqan::Fastq()); } } void printPair(Fx& mate1, Fx& mate2, std::ostream& out) { // match in the first read print out pair printSingle(mate1, out); printSingle(mate2, out); } void usage() { std::cout<< "fxtract [-hHv] -f <pattern_file|pattern> <read1.fx> [<read2.fx>]\n"; std::cout<<"\t-H Evaluate patterns in the context of headers (default: sequences)\n"; //std::cout<<"\t-j Force bzip2 formatting\n"; //std::cout<<"\t-q Force fastq formatting\n"; std::cout<<"\t-f <file> File containing patterns, one per line" <<std::endl; std::cout<<"\t-h Print this help"<<std::endl; std::cout<<"\t-V Print version"<<std::endl; exit(1); } int parseOptions(int argc, char * argv[], Options& opts) { int c; while ((c = getopt(argc, argv, "Hhf:zjqV")) != -1 ) { switch (c) { case 'f': opts.pattern_file = optarg; break; case 'z': opts.gzip = true; break; case 'j': opts.bzip2 = true; break; case 'q': opts.fastq = true; break; case 'V': std::cout <<VERSION<<std::endl; exit(1); break; case 'H': opts.header = true; break; case 'h': default: usage(); break; } } return optind; } void tokenizePatternFile(std::istream& in, FileManager& fmanager) { // tokenize a line from the pattern file. The first part will be the pattern and the second // part is the file to write to. 
std::map<seqan::CharString, seqan::CharString> results; std::vector<std::string> fields; std::string line; bool cout_only = false; std::getline(in, line); tokenize(line, fields); if(fields.size() == 1) { cout_only = true; results[fields[0]] = ""; } else { results[fields[0]] = fields[1]; } fields.clear(); while(std::getline(in, line)) { tokenize(line, fields); switch(fields.size()) { case 0: break; case 1: results[fields[0]] = ""; cout_only = true; break; default: if(fields.size() > 1 && cout_only) { throw FileManagerException("All rows in the pattern file must contain the same number of columns"); } if(results.find(fields[0]) != results.end()) { // patterns are the same if(results[fields[0]] != fields[1]) { // warn user if it was supposed to go to a different file std::cerr << "pattern "<< fields[0] << " not unique but different output file requested: " << results[fields[0]] <<" and "<< fields[1] <<std::endl; std::cerr << "output will only go into " <<results[fields[0]]<<std::endl; } } else { results[fields[0]] = fields[1]; } break; } fields.clear(); } fields.clear(); if(cout_only) { std::map<seqan::CharString, seqan::CharString>::iterator it; for(it = results.begin(); it != results.end(); ++it) { fmanager.add(it->first); } } else { std::map<seqan::CharString, seqan::CharString>::iterator it; for(it = results.begin(); it != results.end(); ++it) { fmanager.add(it->first, it->second); } } } int main(int argc, char * argv[]) { seqan::String<seqan::String<char> > pattern_list; FileManager manager; Options opts; int opt_idx = parseOptions(argc, argv, opts); if(opts.pattern_file == NULL) { if( opt_idx >= argc) { std::cout<< "Please provide a pattern (or pattern file) and at least one input file"<<std::endl; usage(); } else if (opt_idx >= argc - 1) { std::cout << "Please provide an input file (or two)" <<std::endl; usage(); } seqan::CharString pattern = argv[opt_idx++]; seqan::CharString rcpattern = pattern; seqan::reverseComplement(rcpattern); manager.add(pattern); seqan::appendValue(pattern_list, pattern); seqan::appendValue(pattern_list, rcpattern); } else { if (opt_idx > argc - 1) { std::cout << "Please provide an input file (or two)"<<std::endl; usage(); } std::ifstream in(opts.pattern_file); try{ tokenizePatternFile(in, manager); } catch(FileManagerException& e) { std::cerr << e.what() <<std::endl; return 1; } fmapping_t::iterator it; for(it = manager.begin(); it != manager.end(); ++it) { //std::cout << "pattern: "<<it->first<<" bound to index: "<<it->second<<std::endl; seqan::appendValue(pattern_list, it->first); } } typedef std::set<seqan::CharString> LookupTable; LookupTable lookup; if (opts.header) { typedef seqan::Iterator<seqan::String<seqan::CharString> >::Type TStringSetIterator; for (TStringSetIterator it = begin(pattern_list); it != end(pattern_list); ++it) { lookup.insert( value(it) ); } } WuMa needle(pattern_list); seqan::SequenceStream read1(argv[opt_idx++]); if (!isGood(read1)) std::cerr << "Could not open read1 file\n"; // Read one record. 
Fx mate1 = Fx(); if (opt_idx < argc) { // we have a mate file seqan::SequenceStream read2(argv[opt_idx]); if (!isGood(read2)) std::cerr << "Could not open read2 file\n"; Fx mate2 = Fx(); while (!atEnd(read1)) { if (atEnd(read2)) { std::cerr<< "files have different number of reads"<<std::endl; break; } if (readRecord(mate1.id, mate1.seq, mate1.qual, read1) != 0) { std::cerr<<"Malformed record"<<std::endl; } if (readRecord(mate2.id, mate2.seq, mate2.qual, read2) != 0) { std::cerr<<"Malformed record"<<std::endl; } if (opts.header) { seqan::StringSet<seqan::CharString> header_parts; seqan::strSplit(header_parts, mate1.id, ' ', false, 1); LookupTable::iterator pos = lookup.find(header_parts[0]); if(pos != lookup.end()) { printPair(mate1, mate2, manager[header_parts[0]]); lookup.erase(pos); if(lookup.empty()) { break; } } else { seqan::clear(header_parts); seqan::strSplit(header_parts, mate2.id, ' ', false, 1); if(lookup.find(header_parts[0]) != lookup.end()) { printPair(mate1, mate2, manager[header_parts[0]]); lookup.erase(pos); if(lookup.empty()) { break; } } } } else { seqan::Finder<seqan::CharString> finder(mate1.seq); if (seqan::find(finder, needle)) { printPair(mate1, mate2, manager[pattern_list[seqan::position(needle)]]); } else { seqan::Finder<seqan::CharString> finder(mate2.seq); //seqan::setHaystack(finder, mate2.seq); if (seqan::find(finder, needle)) { printPair(mate1, mate2, manager[pattern_list[seqan::position(needle)]]); //printPair(mate1, mate2, std::cout); } } } } } else { while (!atEnd(read1)) { if (readRecord(mate1.id, mate1.seq, mate1.qual, read1) != 0) { std::cerr<<"Malformed record"<<std::endl; } if(opts.header) { seqan::StringSet<seqan::CharString> header_parts; seqan::strSplit(header_parts, mate1.id, ' ', false, 1); LookupTable::iterator pos = lookup.find(seqan::value(header_parts, 0)); if(pos != lookup.end()) { printSingle(mate1, manager[seqan::value(header_parts,0)]); lookup.erase(pos); if(lookup.empty()) { break; } } } else { seqan::Finder<seqan::CharString> finder(mate1.seq); if (seqan::find(finder, needle)) { printSingle(mate1, manager[pattern_list[seqan::position(needle)]]); } } } } return 0; }
Ukrainian civic activist Oleksandr Kolchenko is turning 27 on Nov 26, his third birthday in Russian captivity. The need for letters of support and also public attention is now even more urgent with Russia having just prevented real human rights activists from visiting him. Kolchenko, renowned filmmaker Oleg Sentsov and other Ukrainian political prisoners are isolated and therefore in danger, making all letters and appeals an important message to Moscow as well that it is being watched. Kolchenko, Sentsov and two other opponents of Russia’s invasion and occupation of their native Crimea (Gennady Afanasyev and Oleksiy Chirniy), were arrested in May 2014. They were held incommunicado, then taken illegally to Moscow and only allowed to see lawyers after several weeks, almost certainly to hide the signs of torture. It was only then that the FSB finally came out with quite extraordinary charges, claiming that Sentsov had been the mastermind behind a ‘Right Sector terrorist plot’ and that the other three were involved in and planning acts of terrorism. It is likely that the fading marks of torture were the main reason, however it is not impossible that the FSB also needed something to back Russia’s increasingly preposterous allegations about the right-wing Ukrainian nationalist Right Sector movement which came to the fore during Euromaidan. Russian President Vladimir Putin and all government-controlled media had used false claims about a ‘fascist junta’ in Kyiv as an attempt to justify Russia’s invasion of Crimea, and military aggression in Eastern Ukraine. ‘Right Sector’ was particularly demonized and its role seriously exaggerated. The Ukrainian presidential elections on May 25 were a terrible blow to the Kremlin and Russian propaganda. Not only had Ukrainians succeeded in uniting behind one candidate, the elected President Petro Poroshenko, but they also demonstrated how little support the far-right in fact had. The candidates representing Right Sector and the far-right Svoboda party could only muster 2% for both. Russian TV reacted to this initially by lying, and producing a purported ‘real victory’ for the Right Sector candidate, but this could only be a temporary measure. Willy-nilly, the truth would out. 5 days later, the FSB came up with their ‘Right Sector terrorist plot’ in Crimea. Afanasyev and Chirniy had both provided ‘confessions’ under torture (and received shorter sentences). These were then used to justify the FSB’s grandiose claims about the alleged ‘plot’. There was no proof of any plot, nor anything that constituted ‘terrorism’. The charges were especially absurd given that Kolchenko is a committed anarchist with pronounced left-wing views, and it was ludicrous to cast him as a Right Sector ‘plotter’. Sentsov was also seriously miscast, being an internationally recognized film director and a father bringing up two young children, one with special needs. Kolchenko had taken part in one act of protest in April 2014. A Molotov cocktail was thrown at the entrance of an organization which had played a major and dodgy role in supporting Russia’s invasion. Thrown at night when nobody would be hurt. Afanasyev and Chirniy had taken part in two such protests. Sentsov had done nothing at all, and the monstrous 20-year sentence only confirmed his allegations from the beginning of torture and threats that if he didn’t provide the ‘testimony’ they demanded, that he would be made the ringleader and ‘rot in prison’. 
Russia imposed strict secrecy and so the lack of any evidence, while assumed from the outset, was only confirmed from the first day of the trial in Rostov of Kolchenko and Sentsov who had dismissed the absurd charges from the outset. Both men were almost immediately declared political prisoners by the Memorial Human Rights Centre. Afanasyev was also recognized as a political prisoner after he had the courage to stand up in court on August 31, and retract all testimony as given under torture. The court ignored the absurdity of the whole ‘case’ and Afanasyev’s retraction, and sentenced Sentsov to 20 years, Kolchenko to 10. The two men stood arm in arm as the sentence was being read out, singing Ukraine’s national anthem. There have since been cynical moves pretending willingness to consider the men’s extradition. These ran parallel with continued attempts to force Russian citizenship on them. Even Russia’s outgoing Ombudsperson Ella Panfilova acknowledged that the men are Ukrainian and the indictment gives only their Ukrainian citizenship, yet Russia has now claimed that they cannot be released. The reason is a record-breaker in cynicism. The two men, whose persecution for opposition to Russia’s invasion is internationally recognized , are supposed to have ‘automatically’ become Russian because of that invasion. Please write to both Oleksandr (Sasha) and Oleg! Just cut and paste the addresses. If writing in Russian is a problem, there is a ‘crib’ below which you can also just add. Photos or similar would be nice, and please avoid anything political or about their case since that will stop the letters getting through. Maximum weight, by the way, is 100 g. It’s also a good idea to give a return address, since they’ll surely want to reply. Oleksandr Kolchenko Привет и с днём рождения! Желаю Вам крепкого здоровья, мужества и счастья. Надеюсь на скорое освобождение. Мы о Вас помним. (Happy Birthday! Wishing you health, courage and happiness, and that you will soon be released. You are not forgotten. Address: 456612, Челябинская обл., Копейск, ул. Кемеровская, 20, ИК-6, Александру Кольченко, 1989 г.р. Oleg Sentsov Добрый день, Желаю Вам здоровья, мужества и терпения, надеюсь на скорое освобождение. Мы о Вас помним. [Hello, I wish you good health, courage and patience and hope that you will soon be released. You are not forgotten. Address 677004, Республика Саха (Якутия), г. Якутск, ул. Очиченко, 25, ФКУ ИК-1 Сенцову Олегу Геннадьевичу, 1976 г.р. It is not quite clear where Oleksiy Chirniy is at present – his address will be added as soon as it becomes known. PLEASE also write to politicians in your country and ask them to demand that Russia honours its commitments and releases the Ukrainians it is holding hostage.
def rbac_configmap_delete(p_client, ns):
    value = "valueall"
    keyvaluepair = {"testall": value}
    configmap = create_configmap(keyvaluepair, p_client, ns)
    delete_configmap(p_client, configmap, ns, keyvaluepair)
As Twitchy reported early Thursday, a 32-year-old Australian citizen of Afghan descent was arrested after targeting Christmas shoppers with his car. Both the driver and a second man, who was photographing the aftermath of the attack and who also reportedly had three knives on him, were arrested. Police initially said there was no evidence that the incident was terror-related and that the driver had a history of mental health issues.

UPDATE: Police say driver in Melbourne car incident is Australian citizen with history of mental health issues; say no evidence incident is terror-related https://t.co/KA9juJlKgn — Reuters Top News (@Reuters) December 21, 2017

However, later in the day, police said the suspect was motivated by “perceived mistreatment of Muslims” but had no known links to any terrorist groups.

Breaking: Victoria’s Acting Police Commissioner tells @SkyNewsAust the alleged Flinders Street attacker ‘attributed his actions to perceived mistreatment of muslims’ but has no known links to any extremist group pic.twitter.com/axJU7gg3UA — Amy Greenbank (@Amy_Greenbank) December 21, 2017

So the man of Afghan descent who drove his car into a crowd of Christmas shoppers because he was upset about mistreatment of Muslims still doesn’t count as a terrorist.

So the mental lone wolf didn't like how muslims were "mistreated" so decided to run down heaps of people with car. But it's *not* terrorism!?! ok… https://t.co/fQdfwIczT4 — Tweety McTweeterson (@Tweetenhoffer) December 21, 2017

Doesn’t make sense, thought he was an ice addict with mental illness https://t.co/3axLyWbihQ — samuel abbs (@abbs_man) December 21, 2017

Wait. I thought he was just a depressed guy with a drug problem. #religionofpeace #MelbourneAttack https://t.co/gXyc6Syj0N — Betsy Brantner Smith (@sgtbetsysmith) December 21, 2017

Well the tropes didn't last long, did they… https://t.co/PCzkAmDrb6 — Andrew Lockley (@andrewjlockley) December 21, 2017

Just another moderate Muslim driving cars through crowds, completely unrelated to terrorism… this Orwellian explanation is a bit much even for the current environment. https://t.co/9MvHrOSOxV — Jacob Kampen (@jacobkampen) December 21, 2017

Still saying it's not terror related? https://t.co/gZZFVPEvQ4 — Nolan ??? (@Nolo4dolo) December 21, 2017

Doesn't matter if you have "no known links with terrorist organizations" or have mental health issues or smoke curried tortoise shell. If you commit an act of terror, you ARE a terrorist. https://t.co/R1KzIQbdug — Mum on a Mission (@ethannahs_mum) December 21, 2017

Well, that's OK, then. So long as he had no links to any extremist group… https://t.co/gXjLP8iOCP — Eileen Toomey-Wright (@ToomeyWright) December 21, 2017

"…no known links to any extremist group." Besides being Muslim, of course. https://t.co/GHscmKn4eh — TexitMachine (@BrowningMachine) December 21, 2017

The extremist group he belongs to is called Islam. https://t.co/yDiLQIibxf — Winter Darknet Reptile Shapeshifter (@winter_darknet) December 21, 2017

Ah, so it was an Islam thing. You could've knocked me over with a feather to find that out. Really. I'm shocked. Just shock-zzzzzzzzzz https://t.co/GzSk67ez00 — MonBossyMothma-WR (@nowhere_nh) December 21, 2017

Forgive me for my suspicions, and for casting a cold eye on the Australian police analogy, but when a man from Afghanistan rams a car at top speed, into innocent Melbourne pedestrians, 4 days before Christmas, I say, if it looks, swims and quacks like a duck, it could be a Jihad. — Cllr Brian Murphy (@brianmurphycllr) December 21, 2017

Related: ‘Deliberate act’: Christmas shoppers attacked in Melbourne; ‘Up to 19’ injured; Driver is ‘Australian citizen of Afghan descent’
// Copyright 2019 Yunion
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package compute

import (
	"fmt"

	"yunion.io/x/jsonutils"
	"yunion.io/x/pkg/errors"
	"yunion.io/x/pkg/util/regutils"
	"yunion.io/x/pkg/util/secrules"

	"yunion.io/x/onecloud/pkg/apis"
)

type SSecgroupRuleResource struct {
	// Priority; larger numbers mean higher priority
	// minimum: 1
	// maximum: 100
	// required: true
	Priority *int `json:"priority"`

	// Protocol
	// required: true
	//
	// | protocol | name          |
	// | -------- | ------------- |
	// | any      | all protocols |
	// | tcp      | TCP           |
	// | icmp     | ICMP          |
	// | udp      | UDP           |
	// enum: any, tcp, udp, icmp
	Protocol string `json:"protocol"`

	// Port list; empty means any port.
	// Only takes effect when protocol is tcp or udp.
	// Supported formats:
	// | format type          | example |
	// | -------------------- | ------- |
	// | single port          | 22      |
	// | port range           | 100-200 |
	// | non-contiguous ports | 80,443  |
	// required: false
	Ports string `json:"ports"`

	// swagger:ignore
	PortStart int
	// swagger:ignore
	PortEnd int

	// Direction
	// enum: in, out
	// required: true
	Direction string `json:"direction"`

	// IP or CIDR address; ignored when peer_secgroup_id is specified
	// example: 192.168.222.121
	CIDR string `json:"cidr"`

	// Action
	// deny: reject the traffic
	// allow: permit the traffic
	// enum: deny, allow
	// required: true
	Action string `json:"action"`

	// Rule description
	// required: false
	// example: test to create rule
	Description string `json:"description"`

	// Peer security group ID. Mutually exclusive with cidr and takes
	// precedence over it; must not equal the ID of the security group
	// the rule belongs to.
	// required: false
	PeerSecgroupId string `json:"peer_secgroup_id"`
}

type SSecgroupRuleCreateInput struct {
	apis.ResourceBaseCreateInput

	SSecgroupRuleResource

	// swagger:ignore
	Secgroup string `json:"secgroup" yunion-deprecated-by:"secgroup_id"`

	// Security group ID
	// required: true
	SecgroupId string `json:"secgroup_id"`
}

type SSecgroupRuleUpdateInput struct {
	apis.ResourceBaseUpdateInput

	SSecgroupRuleResource
}

func (input *SSecgroupRuleResource) Check() error {
	priority := 1
	if input.Priority != nil {
		priority = *input.Priority
	}
	rule := secrules.SecurityRule{
		Priority:  priority,
		Direction: secrules.TSecurityRuleDirection(input.Direction),
		Action:    secrules.TSecurityRuleAction(input.Action),
		Protocol:  input.Protocol,
		PortStart: input.PortStart,
		PortEnd:   input.PortEnd,
		Ports:     []int{},
	}

	if len(input.Ports) > 0 {
		err := rule.ParsePorts(input.Ports)
		if err != nil {
			return errors.Wrapf(err, "ParsePorts(%s)", input.Ports)
		}
	}

	if len(input.CIDR) > 0 {
		if !regutils.MatchCIDR(input.CIDR) && !regutils.MatchIPAddr(input.CIDR) {
			return fmt.Errorf("invalid ip address: %s", input.CIDR)
		}
	} else {
		input.CIDR = "0.0.0.0/0"
	}

	return rule.ValidateRule()
}

type SSecgroupCreateInput struct {
	apis.SharableVirtualResourceCreateInput

	// Rule list
	// required: false
	Rules []SSecgroupRuleCreateInput `json:"rules"`
}

type SecgroupListInput struct {
	apis.SharableVirtualResourceListInput

	ServerResourceInput
	DBInstanceResourceInput
	ELasticcacheResourceInput

	// equals
	Equals string

	// Order by the number of caches
	// pattern:asc|desc
	OrderByCacheCnt string `json:"order_by_cache_cnt"`

	// Order by the number of hosts associated through caches
	// pattern:asc|desc
	OrderByGuestCnt string `json:"order_by_guest_cnt"`

	// Fuzzy-filter security groups whose rules contain the given IP
	// example: 10.10.2.1
	Ip string `json:"ip"`

	// Exact-match security groups whose rules contain the given ports
	// example: 100-200
	Ports string `json:"ports"`

	// Direction of the rules to filter on (only effective when ip or ports is given) choices: all|in|out
	// default: all
	// example: in
	Direction string `json:"direction"`

	RegionalFilterListInput
	ManagedResourceListInput

	WithCache bool `json:"witch_cache"`
}

type SecurityGroupCacheListInput struct {
	apis.StatusStandaloneResourceListInput
	apis.ExternalizedResourceBaseListInput
	ManagedResourceListInput
	RegionalFilterListInput
	VpcFilterListInput

	SecgroupFilterListInput
}

type SecurityGroupRuleListInput struct {
	apis.ResourceBaseListInput
	SecgroupFilterListInput

	Projects []string `json:"projects"`

	// Filter security group rules by the direction field
	Direction string `json:"direction"`

	// Filter security group rules by the action field
	Action string `json:"action"`

	// Filter security group rules by the protocol field
	Protocol string `json:"protocol"`

	// Filter security group rules by the ports field
	Ports string `json:"ports"`

	// Fuzzy-match security group rules by IP
	Ip string `json:"ip"`
}

type SecgroupResourceInput struct {
	// Filter the list by the associated security group (ID or name)
	SecgroupId string `json:"secgroup_id"`

	// swagger:ignore
	// Deprecated
	// filter by secgroup_id
	Secgroup string `json:"secgroup" yunion-deprecated-by:"secgroup_id"`

	// Fuzzy-match security group rule name
	SecgroupName string `json:"secgroup_name"`
}

type SecgroupFilterListInput struct {
	SecgroupResourceInput

	// Order by security group
	OrderBySecgroup string `json:"order_by_secgroup"`
}

type SecgroupDetails struct {
	apis.SharableVirtualResourceDetails
	SSecurityGroup

	// Number of associated servers
	GuestCnt int `json:"guest_cnt,allowempty"`

	// Number of associated servers with is_system set to true
	SystemGuestCnt int `json:"system_guest_cnt,allowempty"`

	// Number of servers whose admin_secgrp_id is this security group
	AdminGuestCnt int `json:"admin_guest_cnt,allowempty"`

	// Number of security group caches
	CacheCnt int `json:"cache_cnt,allowempty"`

	// Rule details
	Rules []SecgroupRuleDetails `json:"rules"`

	// Ingress rule details
	InRules []SecgroupRuleDetails `json:"in_rules"`

	// Egress rule details
	OutRules []SecgroupRuleDetails `json:"out_rules"`

	CloudCaches []jsonutils.JSONObject `json:"cloud_caches"`
}

type SecurityGroupResourceInfo struct {
	// Security group name
	Secgroup string `json:"secgroup"`
}

type GuestsecgroupListInput struct {
	GuestJointsListInput
	SecgroupFilterListInput
}

type ElasticcachesecgroupListInput struct {
	ElasticcacheJointsListInput
	SecgroupFilterListInput
}

type GuestsecgroupDetails struct {
	GuestJointResourceDetails
	SGuestsecgroup

	// Security group name
	Secgroup string `json:"secgroup"`
}

//type SElasticcachesecgroup struct {
//	SElasticcacheJointsBase
//	SSecurityGroupResourceBase
//}

type ElasticcachesecgroupDetails struct {
	ElasticcacheJointResourceDetails
	SElasticcachesecgroup

	// Security group name
	Secgroup string `json:"secgroup"`
}

type SecgroupMergeInput struct {
	// List of security group IDs
	SecgroupIds []string `json:"secgroup_ids"`

	// swagger:ignore
	// Deprecated
	Secgroups []string `json:"secgroup" yunion-deprecated-by:"secgroup_ids"`
}

type SecurityGroupPurgeInput struct {
}

type SecurityGroupCloneInput struct {
	Name        string
	Description string
}

type SecgroupImportRulesInput struct {
	Rules []SSecgroupRuleCreateInput `json:"rules"`
}

type SecgroupJsonDesc struct {
	Id   string `json:"id"`
	Name string `json:"name"`
}
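The wire formats documented above (port lists like "22", "100-200", or "80,443", and the CIDR fallback) are easiest to see in a concrete call. Below is a minimal, hypothetical sketch of building a rule and validating it with Check(); it assumes only that the compute package above is importable from its usual path, yunion.io/x/onecloud/pkg/apis/compute.

package main

import (
	"fmt"

	"yunion.io/x/onecloud/pkg/apis/compute"
)

func main() {
	priority := 10
	rule := compute.SSecgroupRuleResource{
		Priority:  &priority,
		Protocol:  "tcp",
		Ports:     "80,443", // "22" and "100-200" are also accepted
		Direction: "in",
		Action:    "allow",
		// CIDR left empty on purpose: Check() normalizes it to 0.0.0.0/0
	}

	if err := rule.Check(); err != nil {
		fmt.Println("invalid rule:", err)
		return
	}
	fmt.Println("rule ok, cidr =", rule.CIDR) // prints "rule ok, cidr = 0.0.0.0/0"
}

Note that Check() mutates the receiver when the CIDR is empty, which is why the sketch prints the normalized value after validation.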
/**
 * Take a turn using another strategy.
 *
 * @param board the current state of the game
 *
 * @return the selected column of the board, or -1 if no column was
 *         selected
 */
public int strategy1(Board board) {
    // Placeholder: no selection heuristic is implemented yet, so signal
    // "no column chosen" with the sentinel value -1.
    return -1;
}
Posted by Raine Hutchens on Jul 13, 2012

Privateer Press is best known for its work on the Warmachine and Hordes miniature games. The company has found a way to price its miniatures so that players can enjoy the game without spending outrageous amounts on minis and sets. As an aspiring minis player, I was blown away by some of the minis prices I'd seen throughout the industry, so when I stumbled upon Warmachine I was pleasantly surprised.

Warmachine is a tabletop game in which players create armies to do battle against one another. As a player, you're essentially a warcaster who commands different battalions to victory. Privateer Press supplies the models, but players collect, assemble, and paint them to their liking. Each army is customizable, which makes gameplay different every time you do battle. A lot of players compare Warmachine to Warhammer 40K, but with this game you get a lot more for your money.

The newest release for the game is the Colossals set, and Privateer Press has put out three new previews from it:

- General Ossrum for the Mercs
- The Devastator/Demolisher/Spriggan kit
- The Hyperion colossal for the Retribution

These previews come just in time for Gen Con, where Privateer Press will be holding plenty of events. This year they've got plenty of tournaments lined up for players to participate in and walk away with glory. Here's a listing of events that players can take part in at the convention:

Thursday, 16th

- Rite of Passage Tournament (WM/Hordes qualifier, 10:00): 50 pt SR2012 tournament, SR2012 Appendix rules: 2 lists required, Divide and Conquer, 5-round event, Death Clock. Top 4 players qualify for the WM/Hordes Masters on Saturday.
- Triple Threat Tournament (WM/Hordes qualifier, 23:00): 35 pt SR2012 tournament, Appendix rules: 3 lists required, 4-round event, Death Clock. Top 4 players after 4 rounds qualify for the WM/Hordes Masters on Saturday.

Friday, 17th

- Hardcore Tournament (WM/Hordes qualifier, 9:00): 50 pt SR2012 tournament, 1 list required, 4 rounds, Hardcore timing, Hardcore painting, Close Quarters, assassination scoring, Death Clock. Vanquishers play rounds 5/6; top 4 qualify for the Saturday Masters.
- Last Rites Tournament (WM/Hordes qualifier, 12:00): 50 pt SR2012 tournament, Appendix rules: 2 lists required, 4-round event, Death Clock, Artifice: Kill Box. Top 4 players after 4 rounds qualify for the WM/Hordes Masters on Saturday.
- Who's the Boss? Tournament (WM/Hordes challenge event, 23:00): 35 pt SR2012, Appendix rules: 1 list required, 4-round event, Death Clock. Your warcaster needs a break. Let the wheel of fate take charge and find out who the new boss is as wacky mayhem ensues!

Saturday, 18th

- WM/Hordes Masters Tournament (16 qualified players, 8:00): 75 pt SR2012 tournament, Appendix rules: 3 lists required, 4-round event, Death Clock.
- Blood, Sweat and Tiers Tournament (WM/Hordes challenge event, 9:00): 35 pt SR2012 tournament, Appendix rules: Theme Forces Tier 1 required, basic painting required, 5-round event, Death Clock.
- Death Race! Tournament (WM/Hordes challenge event, 23:00): 25 pt SR2012 tournament, 1 list required, Death Clock, scenario: Gauntlet, assassination scoring.

Sunday, 19th

- Team Tournament (WM/Hordes challenge event, 8:00): 50 pt SR2012 tournament, Appendix rules: 2 lists required, Divide and Conquer, 5-round event, Death Clock. 3-player teams face off each round.

Every day

- Iron Arena

If you're a Warmachine or Hordes player, you won't want to skip out on Gen Con this year. Make sure you pack your best armies and get to the convention center post haste. It's time to gain supremacy!
#include <stdlib.h>

/**
 * bi_shift_left
 * Shift the given integer to the left as if multiplied by the given
 * power of two.
 * @params:
 *  * dest : pointer to big integer -- container for the shifted
 *    version
 *  * source : big integer -- the integer to copy and shift
 *  * shamt : int -- amount to shift by
 */
int bi_shift_left( bi * dest, bi source, int shamt )
{
    int num_bits;
    int num_limbs;
    int i;
    int shamtmod, shamtdiv;
    unsigned long int * data;

    if( shamt < 0 )
    {
        return 1;
    }

    if( shamt == 0 )
    {
        return bi_copy(dest, source);
    }

    /* split the shift into a whole-limb part and an intra-limb part */
    num_bits = bi_bitsize(source);
    shamtmod = shamt % (sizeof(unsigned long int)*8);
    shamtdiv = shamt / (sizeof(unsigned long int)*8);

    /* number of limbs needed to hold the shifted value, rounded up */
    num_limbs = (shamt + num_bits + sizeof(unsigned long int)*8 - 1) / (sizeof(unsigned long int)*8);
    dest->num_limbs = num_limbs;

    /* build the result in a fresh zeroed buffer */
    data = malloc(sizeof(unsigned long int) * num_limbs);
    for( i = 0 ; i < dest->num_limbs ; ++i )
    {
        data[i] = 0;
    }

    /* the low bits of each source limb land shamtdiv limbs higher */
    for( i = 0 ; i < source.num_limbs && shamtdiv+i < dest->num_limbs ; ++i )
    {
        data[shamtdiv+i] = source.data[i] << shamtmod;
    }

    /* bits shifted out of the top of each limb carry into the next one */
    if( shamtmod != 0 )
    {
        for( i = 0 ; i < source.num_limbs && i+shamtdiv+1 < dest->num_limbs ; ++i )
        {
            data[shamtdiv+i+1] = data[shamtdiv+i+1] | (source.data[i] >> (sizeof(unsigned long int)*8 - shamtmod));
        }
    }

    /* trim leading zero limbs, but always keep at least one limb */
    while( dest->num_limbs > 1 && data[dest->num_limbs-1] == 0 )
    {
        --dest->num_limbs;
    }

    dest->sign = source.sign;

    free(dest->data);
    dest->data = data;

    return 1;
}
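To see the limb bookkeeping in action, here is a hypothetical stand-alone harness. The bi struct layout and the bi_copy/bi_bitsize stubs are inferred from the fields and calls used by bi_shift_left above; they are illustrative assumptions, not the library's actual definitions.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Assumed layout of the library's big-integer type, inferred from the
 * fields bi_shift_left uses (num_limbs, sign, data). */
typedef struct
{
    int num_limbs;
    int sign;
    unsigned long int * data;
} bi;

/* Stand-in for the library's bi_copy: duplicate the limb array. */
int bi_copy( bi * dest, bi source )
{
    free(dest->data);
    dest->num_limbs = source.num_limbs;
    dest->sign = source.sign;
    dest->data = malloc(sizeof(unsigned long int) * source.num_limbs);
    memcpy(dest->data, source.data, sizeof(unsigned long int) * source.num_limbs);
    return 1;
}

/* Stand-in for the library's bi_bitsize: position of the highest set bit. */
int bi_bitsize( bi source )
{
    int bits = source.num_limbs * (int)(sizeof(unsigned long int) * 8);
    unsigned long int top = source.data[source.num_limbs - 1];
    unsigned long int mask = 1UL << (sizeof(unsigned long int) * 8 - 1);
    while( mask != 0 && (top & mask) == 0 )
    {
        mask >>= 1;
        bits -= 1;
    }
    return bits;
}

int bi_shift_left( bi * dest, bi source, int shamt ); /* as defined above */

int main( void )
{
    unsigned long int one = 1;
    bi source;
    bi dest;

    source.num_limbs = 1;
    source.sign = 1;
    source.data = &one; /* the value 1 */

    dest.num_limbs = 1;
    dest.sign = 1;
    dest.data = malloc(sizeof(unsigned long int)); /* freed inside bi_shift_left */

    /* Shifting 1 left by 70 crosses a limb boundary on 64-bit longs:
     * the result occupies two limbs, with the top limb equal to
     * 1 << (70 - 64) = 64. */
    bi_shift_left(&dest, source, 70);
    printf("limbs=%d top=%lu\n", dest.num_limbs, dest.data[dest.num_limbs - 1]);

    free(dest.data);
    return 0;
}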
package read_test

import (
	"os"
	"path"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

	"github.com/felamaslen/gmus-backend/pkg/database"
	"github.com/felamaslen/gmus-backend/pkg/read"
	setup "github.com/felamaslen/gmus-backend/pkg/testing"
	"github.com/felamaslen/gmus-backend/pkg/types"
)

var _ = Describe("Scanning directories", func() {
	db := database.GetConnection()

	BeforeEach(func() {
		setup.PrepareDatabaseForTesting()
	})

	Describe("ScanDirectory", func() {
		var results []*types.File

		var testScanDirectory = func() {
			results = nil
			files := read.ScanDirectory(read.TestDirectory)
			// Drain the channel until the producer closes it.
			for result := range files {
				results = append(results, result)
			}
		}

		Context("when the database is empty", func() {
			BeforeEach(testScanDirectory)

			It("should return a channel with all the files in the directory", func() {
				Expect(results).To(HaveLen(2))

				if results[0].RelativePath == read.TestSong.RelativePath {
					Expect(results[0].RelativePath).To(Equal(read.TestSong.RelativePath))
					Expect(results[1].RelativePath).To(Equal(read.TestSongNested.RelativePath))
				} else {
					Expect(results[1].RelativePath).To(Equal(read.TestSong.RelativePath))
					Expect(results[0].RelativePath).To(Equal(read.TestSongNested.RelativePath))
				}
			})
		})

		Context("when the database already contains one of the files", func() {
			BeforeEach(func() {
				info, _ := os.Stat(path.Join(read.TestSong.BasePath, read.TestSong.RelativePath))

				db.MustExec(
					`
					insert into songs (title, artist, album, base_path, relative_path, modified_date)
					values ($1, $2, $3, $4, $5, $6)
					`,
					"old title",
					"old artist",
					"old album",
					read.TestSong.BasePath,
					read.TestSong.RelativePath,
					info.ModTime().Unix(),
				)

				testScanDirectory()
			})

			It("should only return those files which do not exist in the database", func() {
				Expect(results).To(HaveLen(1))
				Expect(results[0].RelativePath).To(Equal(read.TestSongNested.RelativePath))
			})
		})

		Context("when an error previously occurred scanning one of the files", func() {
			BeforeEach(func() {
				db.MustExec(`
					insert into scan_errors (base_path, relative_path, error)
					values ($1, $2, $3)
				`, read.TestSong.BasePath, read.TestSong.RelativePath, "A bad thing happened")

				testScanDirectory()
			})

			It("should only return those files which did not have errors marked against them", func() {
				Expect(results).To(HaveLen(1))
				Expect(results[0].RelativePath).To(Equal(read.TestSongNested.RelativePath))
			})
		})
	})

	Describe("UpsertSongsFromChannel", func() {
		var songs chan *types.Song

		var testScanSongs = func() {
			songs = make(chan *types.Song)

			go func() {
				defer close(songs)

				songs <- &types.Song{
					TrackNumber:  7,
					Title:        "<NAME>",
					Artist:       "The Beatles",
					Album:        "",
					Duration:     431,
					BasePath:     "/path/to",
					RelativePath: "file.ogg",
					ModifiedDate: 8876,
				}

				songs <- &types.Song{
					TrackNumber:  11,
					Title:        "Starman",
					Artist:       "<NAME>",
					Album:        "The Rise and Fall of Ziggy Stardust and the Spiders from Mars",
					Duration:     256,
					BasePath:     "/different/path",
					RelativePath: "otherFile.ogg",
					ModifiedDate: 11883,
				}
			}()

			read.UpsertSongsFromChannel(songs)
		}

		Context("when the songs do not already exist in the database", func() {
			BeforeEach(testScanSongs)

			It("should insert the correct number of songs", func() {
				var count int
				db.Get(&count, "select count(*) from songs")
				Expect(count).To(Equal(2))
			})

			It("should insert both songs", func() {
				var songs []types.Song
				db.Select(&songs, `
					select track_number, title, artist, album, duration, base_path, relative_path, modified_date
					from songs
					order by title
				`)

				Expect(songs[0]).To(Equal(types.Song{
					TrackNumber:  7,
					Title:        "<NAME>",
					Artist:       "The Beatles",
					Album:        "",
					Duration:     431,
					BasePath:     "/path/to",
					RelativePath: "file.ogg",
					ModifiedDate: 8876,
				}))

				Expect(songs[1]).To(Equal(types.Song{
					TrackNumber:  11,
					Title:        "Starman",
					Artist:       "<NAME>",
					Album:        "The Rise and Fall of Ziggy Stardust and the Spiders from Mars",
					Duration:     256,
					BasePath:     "/different/path",
					RelativePath: "otherFile.ogg",
					ModifiedDate: 11883,
				}))
			})
		})

		Context("when there is already a file in the database with the same name", func() {
			BeforeEach(func() {
				db.MustExec(
					`
					insert into songs (title, artist, album, base_path, relative_path, modified_date)
					values ($1, $2, $3, $4, $5, $6)
					`,
					"my title",
					"my artist",
					"my album",
					"/path/to",
					"file.ogg",
					7782,
				)

				testScanSongs()
			})

			It("should not add an additional row for the same file", func() {
				var count int
				db.Get(&count, `
					select count(*) from songs
					where base_path = '/path/to' and relative_path = 'file.ogg'
				`)
				Expect(count).To(Equal(1))
			})

			It("should upsert the existing item", func() {
				var songs []types.Song
				db.Select(&songs, `
					select
						track_number
						,title
						,artist
						,album
						,duration
						,base_path
						,relative_path
						,modified_date
					from songs
					where base_path = '/path/to' and relative_path = 'file.ogg'
				`)

				Expect(songs).To(HaveLen(1))

				var song = songs[0]

				Expect(song.TrackNumber).To(Equal(7))
				Expect(song.Title).To(Equal("<NAME>"))
				Expect(song.Artist).To(Equal("The Beatles"))
				Expect(song.Album).To(Equal(""))
				Expect(song.Duration).To(Equal(431))
				Expect(song.ModifiedDate).To(Equal(int64(8876)))
			})
		})
	})

	Describe("ScanAndInsert", func() {
		It("should recursively scan files from a directory and add them to the database", func() {
			read.ScanAndInsert(read.TestDirectory)

			var songs []types.Song
			err := db.Select(&songs, `
				select title, artist, album, duration, base_path, relative_path
				from songs
			`)

			Expect(err).To(BeNil())
			Expect(songs).To(HaveLen(2))

			Expect(types.Song{
				Title:        read.TestSong.Title,
				Artist:       read.TestSong.Artist,
				Album:        read.TestSong.Album,
				Duration:     read.TestSong.Duration,
				BasePath:     read.TestSong.BasePath,
				RelativePath: read.TestSong.RelativePath,
			}).To(BeElementOf(songs))

			Expect(types.Song{
				Title:        read.TestSongNested.Title,
				Artist:       read.TestSongNested.Artist,
				Album:        read.TestSongNested.Album,
				Duration:     read.TestSongNested.Duration,
				BasePath:     read.TestSongNested.BasePath,
				RelativePath: read.TestSongNested.RelativePath,
			}).To(BeElementOf(songs))
		})
	})
})