Root surface etching at neutral pH promotes periodontal healing. The purpose of the present investigation was to examine whether an etching agent operating at neutral pH (EDTA) can enhance healing compared with a low-pH etching agent (citric acid) in an animal model. Maxillary molars and premolars, in total 32 teeth, in 4 monkeys were divided between test (EDTA or citric acid treatment) and matched control groups. Periodontal surgery on both palatal and buccal roots using the dehiscence model was performed with or without root surface etching. Healing results were evaluated histomorphometrically after 8 weeks. The statistically significant differences between EDTA-treated surfaces (n=15) and control surfaces (n=11) were approximately 10% less failure (gingival recession and periodontal pocket), 10 to 15% more total histological attachment (long junctional epithelium, connective tissue and reparative cementum), approximately 20% less long junctional epithelium and approximately 20% more connective tissue in roots etched with EDTA. The statistically significant differences between citric-acid-treated surfaces (n=14) and control surfaces (n=11) were approximately 10% more connective tissue and 15% less long junctional epithelium in the citric-acid-etched roots. Thus, etching with EDTA appeared to improve healing, avoiding the superficial necrotizing effect of citric acid on exposed periodontal tissues documented in previous studies. Although etching at present is not routinely applied in conventional periodontal therapy, future potential applications of etching at neutral pH may include exposure of the collagenous matrix of dentin for retention of biologically active substances, such as growth factors. Such treatment may be argued to produce a biocompatible surface more conducive to periodontal membrane cell colonization after removal of root-surface-associated smear without compromising the vitality of the surrounding periodontium.
# coding=utf-8
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).

from __future__ import absolute_import, division, print_function, unicode_literals

import logging
import os
import re
from builtins import open, str

from pants.base.exception_sink import ExceptionSink
from pants.util.collections import assert_single_element
from pants.util.contextutil import temporary_dir
from pants.util.dirutil import touch
from pants_test.test_base import TestBase


class TestExceptionSink(TestBase):

  def _gen_sink_subclass(self):
    # Avoid modifying global state by generating a subclass.
    class AnonymousSink(ExceptionSink): pass
    return AnonymousSink

  def test_unset_destination(self):
    self.assertEqual(os.getcwd(), self._gen_sink_subclass().get_destination())

  def test_retrieve_destination(self):
    sink = self._gen_sink_subclass()
    with temporary_dir() as tmpdir:
      sink.set_destination(tmpdir)
      self.assertEqual(tmpdir, sink.get_destination())

  def test_log_exception(self):
    sink = self._gen_sink_subclass()
    pid = os.getpid()
    with temporary_dir() as tmpdir:
      # Check that tmpdir exists, and log an exception into that directory.
      sink.set_destination(tmpdir)
      sink.log_exception('XXX')
      # This should have created two log files, one specific to the current pid.
      self.assertEqual(os.listdir(tmpdir), ['logs'])
      cur_process_error_log_path = os.path.join(tmpdir, 'logs', 'exceptions.{}.log'.format(pid))
      self.assertTrue(os.path.isfile(cur_process_error_log_path))
      shared_error_log_path = os.path.join(tmpdir, 'logs', 'exceptions.log')
      self.assertTrue(os.path.isfile(shared_error_log_path))
      # We only logged a single error, so the files should both contain only that single log entry.
      err_rx = """\
timestamp: ([^\n]+)
args: ([^\n]+)
pid: {pid}
XXX
""".format(pid=re.escape(str(pid)))
      with open(cur_process_error_log_path, 'r') as cur_pid_file:
        self.assertRegexpMatches(cur_pid_file.read(), err_rx)
      with open(shared_error_log_path, 'r') as shared_log_file:
        self.assertRegexpMatches(shared_log_file.read(), err_rx)

  def test_backup_logging_on_fatal_error(self):
    sink = self._gen_sink_subclass()
    with self.captured_logging(level=logging.ERROR) as captured:
      with temporary_dir() as tmpdir:
        exc_log_path = os.path.join(tmpdir, 'logs', 'exceptions.log')
        touch(exc_log_path)
        # Make the exception log file unreadable.
        os.chmod(exc_log_path, 0)
        sink.set_destination(tmpdir)
        sink.log_exception('XXX')
    single_error_logged = str(assert_single_element(captured.errors()))
    expected_rx_str = (
      re.escape("pants.base.exception_sink: Problem logging original exception: [Errno 13] Permission denied: '") +
      '.*' +
      re.escape("/logs/exceptions.log'"))
    self.assertRegexpMatches(single_error_logged, expected_rx_str)
#! /usr/bin/env python
# coding: utf8

# Test code for 4tronix Picon Zero to work with an
# analog Infrared Distance Sensor (e.g. GP2Y0A21).
#
# Currently just prints the signal from an analog pin.
#
#-----------------------------------------------------------------
# GP2Y0A21 info:
# Datasheet: http://www.robot-electronics.co.uk/files/gp2y0a21.pdf
# PiconZero input is 0-5v, 0 - 1023 readings.
# Sensor is 0-3.3v, actually reads ~10 (far) to ~690 at 10cm.
#-----------------------------------------------------------------

import time
import sys
sys.path.insert(1, "../../lib/PiconZero/Python")
import piconzero as pz
import numpy as np
from scipy.optimize import curve_fit

# Ratio between 0-100% indicating reflectivity of the observed surface
# TODO: Change from constant to a variable based off camera input.
REFLECTIVE_RATIO = 100

IR_PIN = 3  # The pin number used to connect the infrared sensor

pz.init()
pz.setInputConfig(IR_PIN, 1)  # Set input pin to analog


def continuousRead():
    try:
        while True:
            ir = pz.readInput(IR_PIN)
            print ir
            time.sleep(1)
    except KeyboardInterrupt:
        print
    finally:
        pz.cleanup()


def read():
    ir = pz.readInput(IR_PIN)
    return ir


def calibrate(speed=1):
    print "Infrared sensor calibration started..."
    print "Initial reading:", pz.readInput(IR_PIN)

    # WIP reflectivity
    # reflectivity = input("Input estimate surface reflectivity (0-100): ")
    # if not 0 <= reflectivity <= 100:
    #     print "Error: Invalid reflectivity input. Cancelling calibration."
    #     return False

    maxDistance = input("Input max distance required in cm: ")
    if not 0 <= maxDistance <= 100:
        print "Error: Invalid maxDistance input. Cancelling calibration."
        return False

    calibrations = np.array([1023], dtype='uint16')  # 675 should be max (3.3v)
    distances = np.array([9], dtype='uint16')        # ... at 0cm away.
    distance = 10
    while distance <= maxDistance:
        distances = np.append(distances, distance)
        print "Place object", distance, "cm away from sensor"
        time.sleep(speed)
        for _ in range(0, 3):
            print "."
            time.sleep(speed)
        ir = pz.readInput(IR_PIN)
        print "Reading for", distance, "cm:", ir
        time.sleep(speed)
        #
        # First reading (-1) is put into calibrations array for 0cm as well:
        # TODO: Explain why?
        #if calibrations.size == 0:
        #    calibrations = np.append(calibrations, ir + 1)
        calibrations = np.append(calibrations, ir)
        distance += 10

    # Clever WIP maths stuff
    #distancesCount = distances.size
    yData = calibrations
    xData = distances

    # New exponential scipy stuff
    # https://docs.scipy.org/doc/scipy-0.19.1/reference/generated/scipy.optimize.curve_fit.html
    # https://stackoverflow.com/questions/15624070/why-does-scipy-optimize-curve-fit-not-fit-to-the-data
    def func(x, a, b, c):
        return a * np.exp(-b * x) + c

    # Fit an exponential
    popt, pcov = curve_fit(func, xData, yData)

    # Old polyfit stuff
    #z = np.poly1d(np.polyfit(x, y, 2))
    ##xNew = np.linspace(x[0], x[-1], distancesCount)
    #xNew = np.arange(1024)  # Create 0-1023 array for reading lookup table
    #yNew = z(xNew).astype(int)

    # Displaying the data
    print "Calibration finished."
print "popt:" print popt print "pcov:" print pcov trialX = np.arange(1024) yExp = func(trialX, *popt) print "yExp:" print yExp ''' print "xNew:" #print xNew print "yNew:" #print yNew print "" print "Fancy graph:" # print estimated distances for the first half (0-511) of the input readings # i = 16 because 1024 / 32 / 2 = 16 i = 32 while i > 0: outputStr = " " # Add a space if row label 2 digits not 3: if i * 32 - 1 < 100: outputStr += " " outputStr += str(i * 32 - 1) + '|' calibrationRow = False j = 0 for k in np.nditer(calibrations): if i * 32 -1 >= k >= (i - 1) * 32 - 1: calibrationRow = True rowDistanceIndex = j j += 1 if calibrationRow: for j in range(0, min(80, yNew[i * 32 - 1]), 2): outputStr += '▯' if j >= 78: outputStr += '+' else: for j in range(0, min(80, yNew[i * 32 - 1]), 2): # Estimated dist outputStr += '▮' if j >= 78: outputStr += '+' outputStr += " " + str(yNew[i * 32 -1]) if calibrationRow: outputStr += " / " + str(y[rowDistanceIndex]) print outputStr i -= 1 print "-in-|10cm|20cm|30cm|40cm|50cm|60cm|70cm|80cm|" ''' # #print "Original data:" #d = np.nditer(x, flags=['f_index']) #for i in range(0, distancesCount): # print distances[i], "cm:", y[i] #print "Calculated data:" #for i in range(0, distancesCount): # print distances[i], "cm:", int(yNew[i]) def cleanup(): pz.cleanup()
‘Paletta goes if Romagnoli arrives’

By Football Italia staff

Gabriel Paletta’s agent confirms the defender will ask to leave if Milan sign Alessio Romagnoli from Roma.

The Rossoneri are believed to be moving closer to securing a deal for the Italian Under-21 international, which would limit the playing time of the former Parma man.

“If Romagnoli arrives, we’d ask to leave,” Martin Guastadisegno told Tuttomercatoweb. “He wants to play, because the national team is his main goal. I think he’s played well in recent games, now there’s the Trofeo TIM where he should play, so we’ll see.

“Things can change very quickly, and he still has three years on his Milan contract.

“At the moment Gabriel is a Milan player, and he wants to stay. After what’s been in the newspapers, they said seven teams are interested.

“There’s not a club out there which is looking for a defender that wouldn’t be interested in Paletta.”

The agent confirmed that Atalanta are one of the teams interested, but warned that his client wants to play at a high level.

“They’re one of the teams who have called us, the Coach [Edoardo Reja] was friendly and expressed a strong desire to sign Gabriel.

“Naturally, he wants to play for a team of Milan’s level if he leaves. After arriving [in January] he played 14 games out of 17, he’s not a reserve.

“We’re calm, we’re under no pressure to choose one way or the other.

“The priority is [to stay in] Italy, as always for Gabriel. He feels Italian, and wants to wear the Azzurri shirt.

“Of course, if a foreign team which can challenge for trophies called us then we’d evaluate their offer.”
import copy

# For each test case, replace every '?' in s so that the pattern "abacaba"
# occurs exactly once as a substring; print the result or "NO" if impossible.
for test in range(int(input())):
    n = int(input())
    s = input()

    # Count the occurrences of the pattern already present.
    tt = 0
    for i in range(len(s) - 6):
        if s[i:i + 7] == "abacaba":
            tt += 1

    s = list(s)

    if tt > 1:
        print("NO")
    elif tt == 1:
        # Exactly one occurrence already: fill the remaining '?' with a letter
        # that cannot create another occurrence.
        for j in range(len(s)):
            if s[j] == "?":
                s[j] = "z"
        print("YES")
        print(''.join(s))
    else:
        # No occurrence yet: try to place the pattern at each position where
        # the existing characters (or '?') allow it.
        pattern = "abacaba"
        counter = 0
        for i in range(len(s) - 6):
            if all(s[i + k] in (pattern[k], "?") for k in range(7)):
                temp = copy.deepcopy(s)
                for k in range(7):
                    s[i + k] = pattern[k]
                for j in range(len(s)):
                    if s[j] == "?":
                        s[j] = "z"
                candidate = ''.join(s)
                # Verify the placement did not accidentally create a second
                # occurrence overlapping existing letters.
                tt = 0
                for j in range(len(candidate) - 6):
                    if candidate[j:j + 7] == "abacaba":
                        tt += 1
                if tt == 1:
                    print("YES")
                    print(candidate)
                    counter = 1
                    break
                else:
                    s = copy.deepcopy(temp)
        if counter == 0:
            print("NO")
import { useQuery } from "react-query";
import { listMeasures } from "utils/api/requestMethods";
import { CoreSetAbbr } from "utils/types/types";

interface GetMeasures {
  state: string;
  year: string;
  coreSet: string;
}

const getMeasures = async ({ state, year, coreSet }: GetMeasures) => {
  return await listMeasures({
    state,
    year,
    coreSet,
  });
};

export const useGetMeasures = (
  state: string,
  year: string,
  coreSetId: CoreSetAbbr
) => {
  // Include coreSetId in the query key so measures for different core sets
  // are cached separately instead of colliding on the same cache entry.
  return useQuery(["coreSets", state, year, coreSetId], () =>
    getMeasures({
      state: state,
      year: year,
      coreSet: coreSetId,
    })
  );
};
import org.eclipse.gef.commands.CommandStack;
import org.eclipse.swt.widgets.Composite;
import org.eclipse.ui.IEditorPart;
import org.eclipse.ui.PlatformUI;
import org.eclipse.ui.views.properties.tabbed.TabbedPropertySheetWidgetFactory;
// Talend-internal imports (Note, HorizontalTabFactory, AbstractMultiPageTalendEditor)
// are omitted here; their package paths depend on the surrounding repository.

/**
 * yzhang class global comment. Detailed comment.
 */
public abstract class AbstractNotePropertyComposite {

    protected Note note;

    private HorizontalTabFactory tabFactory;

    private IEditorPart multiPageTalendEditor;

    public static final int STANDARD_LABEL_WIDTH = 85;

    public abstract Composite getComposite();

    /**
     * yzhang AbstractNotePropertyComposite constructor comment.
     */
    public AbstractNotePropertyComposite(Composite parent, Note note, HorizontalTabFactory tabFactory) {
        multiPageTalendEditor = PlatformUI.getWorkbench().getActiveWorkbenchWindow().getActivePage().getActiveEditor();
        this.note = note;
        this.tabFactory = tabFactory;
        createControl(parent);
    }

    public abstract void createControl(Composite parent);

    public TabbedPropertySheetWidgetFactory getWidgetFactory() {
        return this.tabFactory.getWidgetFactory();
    }

    protected CommandStack getCommandStack() {
        if (multiPageTalendEditor instanceof AbstractMultiPageTalendEditor) {
            return (CommandStack) ((AbstractMultiPageTalendEditor) multiPageTalendEditor).getTalendEditor().getAdapter(
                    CommandStack.class);
        }
        return null;
    }
}
class Roll_Combo:
    """
    Two die roll combination for games that require dice to play.
    """
    sum: int
    is_doubles: bool

    def __init__(self, val1: int, val2: int = None):
        """
        Initializes a Roll_Combo object by summing the rolled values.

        @param val1 : value of the first die's currently rolled value
        @param val2 : value of the second die's currently rolled value
        """
        if val2 is None:
            self.is_doubles = False
            self.sum = val1
        else:
            self.sum = val1 + val2
            self.is_doubles = (val1 == val2)

    @property
    def properties(self):
        """
        Returns the sum of the roll as well as whether it was doubles.

        :return _properties : calculated values for the dice roll
        :rtype dict
        """
        # Copy so callers can't mutate the instance through the returned dict.
        _properties = dict(self.__dict__)
        return _properties
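A short usage sketch for the class above; the die values are arbitrary examples.

# Roll two dice and inspect the combination.
roll = Roll_Combo(4, 4)
print(roll.sum)          # 8
print(roll.is_doubles)   # True
print(roll.properties)   # {'sum': 8, 'is_doubles': True}

# Single-die roll: doubles are impossible by definition.
single = Roll_Combo(5)
print(single.properties)  # {'is_doubles': False, 'sum': 5}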
// Given the list of features, if a specific user-requested feature is not essential to pipeline uptime
// and may be a cause of the error, respond it back as high-priority so Pipeline can restart without it
func (dcp *DcpNozzle) prioritizeReturnErrorByFeatures(requested mcc.UprFeatures, responded mcc.UprFeatures) error {
	if requested.CompressionType != responded.CompressionType {
		if requested.CompressionType == base.CompressionTypeNone && responded.CompressionType == base.CompressionTypeSnappy {
			// Warnf formats directly; no need to wrap the arguments in fmt.Sprintf.
			dcp.Logger().Warnf("%v did not request compression, but DCP responded with compression type %v\n",
				dcp.Id(), base.CompressionTypeStrings[responded.CompressionType])
			return base.ErrorCompressionDcpInvalidHandshake
		} else if requested.CompressionType == base.CompressionTypeSnappy && responded.CompressionType == base.CompressionTypeNone {
			dcp.Logger().Warnf("%v requested compression type %v, but DCP responded with compression type %v\n",
				dcp.Id(), base.CompressionTypeStrings[requested.CompressionType], base.CompressionTypeStrings[responded.CompressionType])
			return base.ErrorCompressionNotSupported
		}
	}
	return nil
}
#!/usr/bin/env python3

# SPDX-License-Identifier: MPL-2.0
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright 1997 - July 2008 CWI, August 2008 - 2023 MonetDB B.V.

import sys
import getopt
from pymonetdb import mapi


def main():
    hostname = 'localhost'
    port = '50000'
    username = 'monetdb'
    password = 'monetdb'
    language = 'sql'
    database = ''
    encoding = None
    opts, args = getopt.getopt(sys.argv[1:], '',
                               ['host=', 'port=', 'user=', 'passwd=',
                                'language=', 'database=', 'encoding='])
    for o, a in opts:
        if o == '--host':
            hostname = a
        elif o == '--port':
            port = a
        elif o == '--user':
            username = a
        elif o == '--passwd':
            password = a
        elif o == '--language':
            language = a
        elif o == '--database':
            database = a
        elif o == '--encoding':
            encoding = a
    if encoding is None:
        import locale
        encoding = locale.getlocale()[1]
        if encoding is None:
            encoding = locale.getdefaultlocale()[1]
    s = mapi.Server()
    s.connect(hostname=hostname, port=int(port), username=username,
              password=password, language=language, database=database)
    print("#mclient (python) connected to %s:%d as %s" % (hostname, int(port), username))
    fi = sys.stdin
    prompt = '%s>' % language
    # In Python 3, sys.stdout expects str, not bytes, and the interpreter
    # applies the locale encoding itself, so the manual encode/decode
    # round-trips of the Python 2 version are no longer needed; the
    # --encoding option is parsed above for compatibility only.
    sys.stdout.write(prompt)
    sys.stdout.flush()
    line = fi.readline()
    while line and line != "\q\n":
        res = s.cmd('s' + line)
        print(res)
        sys.stdout.write(prompt)
        sys.stdout.flush()
        line = fi.readline()
    s.disconnect()


if __name__ == "__main__":
    main()
# -*- coding: utf-8 -*-

"""
Overview
========

Defines the types and structures returned from the PythonKC Meetup.com API
client.

Types
=====

MeetupEvent
-----------
id
    Meetup.com ID for the event.
name
    Name of the event.
description
    Description of the event. May include markup.
time
    Date & time (``datetime.datetime``) that the event starts at.
status
    Status of the event, e.g., ``upcoming``.
venue
    A ``MeetupVenue`` describing the location of the event.
yes_rsvp_count
    The number of "yes" RSVPs.
maybe_rsvp_count
    The number of "maybe" RSVPs.
event_url
    URL of the Meetup event page.
photo_url
    URL of the event photo.
attendees
    List of ``MeetupMember`` that attended this event if it was in the past.
photos
    List of ``MeetupPhoto`` from the event if it was in the past.

MeetupVenue
-----------
id
    Meetup.com ID for the venue.
name
    Name of the venue.
address_1
    Address of the venue, line 1.
address_2
    Address of the venue, line 2.
address_3
    Address of the venue, line 3.
city
    City of the venue.
state
    State or region of the venue.
zip
    Postal-code of the venue.
country
    Country of the venue.
lat
    Geographical latitude.
lon
    Geographical longitude.

MeetupMember
------------
id
    Meetup.com ID for the member.
name
    Full name (first & last) of the member.
photo
    A ``MeetupPhoto`` containing URLs to this member's photo resources.

MeetupPhoto
-----------
id
    Meetup.com ID for the photo.
url
    URL of the photo resource.
highres_url
    URL of the high-resolution version of the photo.
thumb_url
    URL of the thumbnail version of the photo.
"""

from collections import namedtuple


MeetupEvent = namedtuple('MeetupEvent',
                         ['id', 'name', 'description', 'time', 'status',
                          'venue', 'yes_rsvp_count', 'maybe_rsvp_count',
                          'event_url', 'photo_url', 'attendees', 'photos'])

MeetupVenue = namedtuple('MeetupVenue',
                         ['id', 'name', 'address_1', 'address_2', 'address_3',
                          'city', 'state', 'zip', 'country', 'lat', 'lon'])

MeetupMember = namedtuple('MeetupMember', ['id', 'name', 'photo'])

MeetupPhoto = namedtuple('MeetupPhoto', ['id', 'url', 'highres_url', 'thumb_url'])
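A brief construction example for these namedtuples; every value below is a made-up placeholder for illustration, not data from the Meetup.com API.

from datetime import datetime

photo = MeetupPhoto(id=1, url='http://example.com/p.jpg',
                    highres_url='http://example.com/p_hr.jpg',
                    thumb_url='http://example.com/p_t.jpg')
member = MeetupMember(id=42, name='Ada Lovelace', photo=photo)
venue = MeetupVenue(id=7, name='Example Hall', address_1='1 Main St',
                    address_2=None, address_3=None, city='Kansas City',
                    state='MO', zip='64105', country='us',
                    lat=39.1, lon=-94.58)
event = MeetupEvent(id='abc123', name='Monthly Meetup', description='<p>Talks</p>',
                    time=datetime(2013, 5, 14, 18, 30), status='upcoming',
                    venue=venue, yes_rsvp_count=25, maybe_rsvp_count=3,
                    event_url='http://www.meetup.com/example/', photo_url=photo.url,
                    attendees=[member], photos=[photo])
print(event.venue.city)  # Kansas City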
#! /usr/bin/env python3
# coding=utf-8

from Crypto.Hash import MD5
from Crypto.Cipher import AES
from Crypto import Random
import base64


def get_md5(data):
    md5 = MD5.new()
    md5.update(data.encode())
    return md5.hexdigest()


def get_aeskey():
    # 16 random bytes (AES.block_size) used as the AES-128 key, base64-encoded.
    aeskey = Random.new().read(AES.block_size)
    return base64.b64encode(aeskey)


iv = Random.new().read(AES.block_size)
length = 16


def utf8len(s):
    if isinstance(s, bytes):
        return len(s)
    return len(s.encode('utf-8'))


# Pad the final block PKCS#7-style:
# e.g. append one \x01, five \x05, or a full block of \x10.
pad = lambda s: s + (length - utf8len(s) % length) * chr(length - utf8len(s) % length)
unpad = lambda s: s[0:-ord(chr(s[-1]))]


def encrypt(msg, key):
    key = base64.b64decode(key)
    cbc_cipher = AES.new(key, AES.MODE_CBC, IV=iv)
    # The padded string must be encoded to bytes before encryption.
    cipher_text = iv + cbc_cipher.encrypt(pad(msg).encode('utf-8'))
    return base64.b64encode(cipher_text)


def decrypt(msg, key):
    key = base64.b64decode(key)
    cipher_text = base64.b64decode(msg)
    # The IV was prepended to the ciphertext by encrypt().
    cbc_decipher = AES.new(key, AES.MODE_CBC, IV=cipher_text[:length])
    decrypt_text = cbc_decipher.decrypt(cipher_text[length:])
    return unpad(decrypt_text)


if __name__ == "__main__":
    msg = '123'
    print(utf8len(u'a'))
    # encrypt()/decrypt() expect the base64-encoded key, so pass it through
    # as returned by get_aeskey() rather than decoding it twice.
    key = get_aeskey()
    encrypt_text = encrypt(msg, key)
    decrypt_text = decrypt(encrypt_text, key)
    print(len(encrypt_text))
    print(decrypt_text.decode('utf-8'))
def _pick_certificate_cb(self, connection):
    server_name = connection.get_servername()
    try:
        key, cert = self.certs[server_name]
    except KeyError:
        logger.debug("Server name (%s) not recognized, dropping SSL",
                     server_name)
        return
    new_context = SSL.Context(self.method)
    new_context.set_options(SSL.OP_NO_SSLv2)
    new_context.set_options(SSL.OP_NO_SSLv3)
    new_context.use_privatekey(key)
    new_context.use_certificate(cert)
    connection.set_context(new_context)
def pad(sequences, sos=None, eos=None, pad_token='<pad>', pad_left=True, reverse=False):
    # Attach start/end-of-sequence markers; when the sequences are reversed,
    # the markers are swapped so they still mark the logical start and end.
    if reverse:
        if eos is not None:
            sequences = [[eos] + seq for seq in sequences]
        if sos is not None:
            sequences = [seq + [sos] for seq in sequences]
    else:
        if sos is not None:
            sequences = [[sos] + seq for seq in sequences]
        if eos is not None:
            sequences = [seq + [eos] for seq in sequences]
    # Pad every sequence to the longest length in the batch (minimum 5).
    max_len = max(5, max(len(seq) for seq in sequences))
    if pad_left:
        return [[pad_token] * (max_len - len(seq)) + seq for seq in sequences]
    return [seq + [pad_token] * (max_len - len(seq)) for seq in sequences]
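A quick usage sketch of pad(); the token strings are arbitrary examples.

batch = [['the', 'cat'], ['a', 'dog', 'barked', 'loudly', 'today', 'again']]
padded = pad(batch, sos='<s>', eos='</s>', pad_left=True)
for seq in padded:
    print(seq)
# ['<pad>', '<pad>', '<pad>', '<pad>', '<s>', 'the', 'cat', '</s>']
# ['<s>', 'a', 'dog', 'barked', 'loudly', 'today', 'again', '</s>']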
#include <bits/stdc++.h>
using namespace std;

#define endl "\n"
#define pb push_back
#define mp make_pair
#define loop(i, start, end) for(auto i = start; i < end; i++)
#define TEST int T; cin >> T; while(T--)
#define print(var) cout << var << "\n"
#define u unsigned
// Defining ONLINE_JUDGE keeps the freopen() block below disabled;
// remove this define to read from input.txt when testing locally.
#define ONLINE_JUDGE true
#define INF LLONG_MAX
#define inf INT_MAX
#define mod 1000000007

typedef long long int ll;
typedef vector<int> vi;

int main() {
    ios_base::sync_with_stdio(false);
    cin.tie(NULL);
    cout.tie(NULL);

#ifndef ONLINE_JUDGE
    freopen("input.txt", "r", stdin);
    freopen("output.txt", "w", stdout);
    freopen("error.txt", "w", stderr);
#endif

    TEST {
        int n, k;
        cin >> n >> k;
        int arr[n], mn = inf;
        loop(i, 0, n) {
            cin >> arr[i];
            mn = min(mn, arr[i]);
        }
        // Candidate answer: minimum + k; valid only if every element is
        // within k of it.
        int ans = mn + k;
        int flag = 0;
        loop(i, 0, n) {
            if (abs(ans - arr[i]) > k)
                flag = 1;
        }
        if (flag)
            print(-1);
        else
            print(ans);
    }
}
use super::property::PropertyValue;
use crate::generator::Generator;
use alloc::string::String;
use alloc::vec::Vec;

#[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub struct ClipReference {
    clip_id: u32,
}

impl ClipReference {
    pub fn new(clip_id: u32) -> Self {
        ClipReference { clip_id }
    }

    pub fn clip_id(self) -> u32 {
        self.clip_id
    }
}

pub struct ClipPropertyValue {
    pub value: PropertyValue,
    pub is_overridden: bool,
    pub targeted_by: Option<ClipReference>,
}

pub struct ActiveClip {
    pub name: String,
    pub reference: ClipReference,
    pub track_index: usize,
    pub clip_index: usize,
    pub local_time: u32,
    pub properties: Vec<Vec<ClipPropertyValue>>,
}

pub trait ActiveClipMap {
    fn active_clips(&self) -> &[ActiveClip];
    fn active_clips_mut(&mut self) -> &mut [ActiveClip];
    fn get_clip_index(&self, reference: ClipReference) -> Option<usize>;
}

pub trait GeneratorClipMap {
    fn try_get_clip(&self, reference: ClipReference) -> Option<&dyn Generator>;
    fn try_get_clip_mut(&mut self, reference: ClipReference) -> Option<&mut dyn Generator>;

    fn get_clip(&self, reference: ClipReference) -> &dyn Generator {
        self.try_get_clip(reference).unwrap()
    }

    fn get_clip_mut(&mut self, reference: ClipReference) -> &mut dyn Generator {
        self.try_get_clip_mut(reference).unwrap()
    }
}
# -*- coding: UTF-8 -*-
"""
@author: hhyo
@license: Apache Licence
@file: query_privileges.py
@time: 2019/03/24
"""
import logging
import datetime
import re
import traceback

import simplejson as json
from django.contrib.auth.decorators import permission_required
from django.db import transaction
from django.db.models import Q
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
from django_q.tasks import async_task

from common.config import SysConfig
from common.utils.const import WorkflowDict
from common.utils.extend_json_encoder import ExtendJSONEncoder
from sql.engines.goinception import GoInceptionEngine
from sql.models import QueryPrivilegesApply, QueryPrivileges, Instance, ResourceGroup
from sql.notify import notify_for_audit
from sql.utils.resource_group import user_groups, user_instances
from sql.utils.workflow_audit import Audit
from sql.utils.sql_utils import extract_tables

logger = logging.getLogger('default')

__author__ = 'hhyo'


# TODO Move the syntax parsing and checks inside privilege validation into each engine
def query_priv_check(user, instance, db_name, sql_content, limit_num):
    """
    Query privilege check
    :param user:
    :param instance:
    :param db_name:
    :param sql_content:
    :param limit_num:
    :return:
    """
    result = {'status': 0, 'msg': 'ok', 'data': {'priv_check': True, 'limit_num': 0}}
    # Users with can_query_all_instance are treated as admins; only fetch the limit value.
    # Superusers already hold full privileges and need no special handling.
    if user.has_perm('sql.query_all_instances'):
        priv_limit = int(SysConfig().get('admin_query_limit', 5000))
        result['data']['limit_num'] = min(priv_limit, limit_num) if limit_num else priv_limit
        return result
    # Users with can_query_resource_group_instance are treated as resource group admins
    # and may query every instance within their resource groups.
    if user.has_perm('sql.query_resource_group_instance'):
        if user_instances(user, tag_codes=['can_read']).filter(pk=instance.pk).exists():
            priv_limit = int(SysConfig().get('admin_query_limit', 5000))
            result['data']['limit_num'] = min(priv_limit, limit_num) if limit_num else priv_limit
            return result
    # Skip privilege checks for explain and show create statements.
    if re.match(r"^explain|^show\s+create", sql_content, re.I):
        return result
    # Table-level privilege checks apply to MySQL only.
    if instance.db_type == 'mysql':
        try:
            table_ref = _table_ref(sql_content, instance, db_name)
            # Looping over tables may cost performance, but a single query only
            # touches a limited number of databases and tables.
            for table in table_ref:
                # Fail the check when the user holds neither database nor table privileges.
                if not _db_priv(user, instance, table['schema']) and \
                        not _tb_priv(user, instance, db_name, table['name']):
                    result['status'] = 1
                    result['msg'] = f"You have no query privilege on table {table['schema']}.{table['name']}! Please apply via query privilege management first"
                    return result
            # Take the smallest limit across the involved databases/tables,
            # compare it with the value passed from the frontend, and keep the minimum.
            for table in table_ref:
                priv_limit = _priv_limit(user, instance, db_name=table['schema'], tb_name=table['name'])
                limit_num = min(priv_limit, limit_num) if limit_num else priv_limit
            result['data']['limit_num'] = limit_num
        except Exception as msg:
            logger.error(f"Unable to validate query privileges, {instance.instance_name}, {sql_content}, {traceback.format_exc()}")
            result['status'] = 1
            result['msg'] = f"Unable to validate query privileges, please contact an administrator. Error: {msg}"
    # Other instance types only get database-level checks.
    else:
        # Collect the databases the query touches; redis and mssql are special-cased
        # to validate only the currently selected database.
        if instance.db_type in ['redis', 'mssql']:
            dbs = [db_name]
        else:
            dbs = [i['schema'].strip('`') for i in extract_tables(sql_content) if i['schema'] is not None]
            dbs.append(db_name)
        # Deduplicate and sort the databases.
        dbs = list(set(dbs))
        dbs.sort()
        # Validate database privileges; return immediately when one is missing.
        for db_name in dbs:
            if not _db_priv(user, instance, db_name):
                result['status'] = 1
                result['msg'] = f"You have no query privilege on database {db_name}! Please apply via query privilege management first"
                return result
        # With privileges on every database, take the smallest limit value.
        for db_name in dbs:
            priv_limit = _priv_limit(user, instance, db_name=db_name)
            limit_num = min(priv_limit, limit_num) if limit_num else priv_limit
        result['data']['limit_num'] = limit_num
    return result


@permission_required('sql.menu_queryapplylist', raise_exception=True)
def query_priv_apply_list(request):
    """
    Fetch the list of query privilege applications
    :param request:
    :return:
    """
    user = request.user
    limit = int(request.POST.get('limit', 0))
    offset = int(request.POST.get('offset', 0))
    limit = offset + limit
    search = request.POST.get('search', '')

    query_privs = QueryPrivilegesApply.objects.all()
    # Filter by the search term; fuzzy-matches title and user.
    if search:
        query_privs = query_privs.filter(Q(title__icontains=search) | Q(user_display__icontains=search))
    # Administrators see everything.
    if user.is_superuser:
        query_privs = query_privs
    # Users with audit privileges see all tickets within their groups.
    elif user.has_perm('sql.query_review'):
        # Fetch the resource groups the user belongs to.
        group_list = user_groups(user)
        group_ids = [group.group_id for group in group_list]
        query_privs = query_privs.filter(group_id__in=group_ids)
    # Everyone else sees only their own tickets.
    else:
        query_privs = query_privs.filter(user_name=user.username)

    count = query_privs.count()
    lists = query_privs.order_by('-apply_id')[offset:limit].values(
        'apply_id', 'title', 'instance__instance_name', 'db_list', 'priv_type',
        'table_list', 'limit_num', 'valid_date', 'user_display', 'status',
        'create_time', 'group_name'
    )

    # Serialize the QuerySet.
    rows = [row for row in lists]
    result = {"total": count, "rows": rows}
    # Return the query result.
    return HttpResponse(json.dumps(result, cls=ExtendJSONEncoder, bigint_as_string=True),
                        content_type='application/json')


@permission_required('sql.query_applypriv', raise_exception=True)
def query_priv_apply(request):
    """
    Apply for query privileges
    :param request:
    :return:
    """
    title = request.POST['title']
    instance_name = request.POST.get('instance_name')
    group_name = request.POST.get('group_name')
    group_id = ResourceGroup.objects.get(group_name=group_name).group_id
    priv_type = request.POST.get('priv_type')
    db_name = request.POST.get('db_name')
    db_list = request.POST.getlist('db_list[]')
    table_list = request.POST.getlist('table_list[]')
    valid_date = request.POST.get('valid_date')
    limit_num = request.POST.get('limit_num')

    # Fetch user info.
    user = request.user

    # Server-side parameter validation.
    result = {'status': 0, 'msg': 'ok', 'data': []}
    if int(priv_type) == 1:
        if not (title and instance_name and db_list and valid_date and limit_num):
            result['status'] = 1
            result['msg'] = 'Please fill in all fields'
            return HttpResponse(json.dumps(result), content_type='application/json')
    elif int(priv_type) == 2:
        if not (title and instance_name and db_name and valid_date and table_list and limit_num):
            result['status'] = 1
            result['msg'] = 'Please fill in all fields'
            return HttpResponse(json.dumps(result), content_type='application/json')
    try:
        user_instances(request.user, tag_codes=['can_read']).get(instance_name=instance_name)
    except Instance.DoesNotExist:
        result['status'] = 1
        result['msg'] = 'Your group is not associated with this instance!'
        return HttpResponse(json.dumps(result), content_type='application/json')

    # Database privileges
    ins = Instance.objects.get(instance_name=instance_name)
    if int(priv_type) == 1:
        # Check whether the applicant already holds query privileges on the databases.
        for db_name in db_list:
            if _db_priv(user, ins, db_name):
                result['status'] = 1
                result['msg'] = f'You already hold privileges on database {db_name} of instance {instance_name}; duplicate applications are not allowed'
                return HttpResponse(json.dumps(result), content_type='application/json')
    # Table privileges
    elif int(priv_type) == 2:
        # First check for database-level privileges.
        if _db_priv(user, ins, db_name):
            result['status'] = 1
            result['msg'] = f'You already hold full privileges on database {db_name} of instance {instance_name}; duplicate applications are not allowed'
            return HttpResponse(json.dumps(result), content_type='application/json')
        # Check whether the applicant already holds query privileges on the tables.
        for tb_name in table_list:
            if _tb_priv(user, ins, db_name, tb_name):
                result['status'] = 1
                result['msg'] = f'You already hold query privileges on table {db_name}.{tb_name} of instance {instance_name}; duplicate applications are not allowed'
                return HttpResponse(json.dumps(result), content_type='application/json')

    # Use a transaction to keep the data consistent.
    try:
        with transaction.atomic():
            # Save the application to the database.
            applyinfo = QueryPrivilegesApply(
                title=title,
                group_id=group_id,
                group_name=group_name,
                audit_auth_groups=Audit.settings(group_id, WorkflowDict.workflow_type['query']),
                user_name=user.username,
                user_display=user.display,
                instance=ins,
                priv_type=int(priv_type),
                valid_date=valid_date,
                status=WorkflowDict.workflow_status['audit_wait'],
                limit_num=limit_num
            )
            if int(priv_type) == 1:
                applyinfo.db_list = ','.join(db_list)
                applyinfo.table_list = ''
            elif int(priv_type) == 2:
                applyinfo.db_list = db_name
                applyinfo.table_list = ','.join(table_list)
            applyinfo.save()
            apply_id = applyinfo.apply_id

            # Insert the audit record via the workflow API; query privilege
            # applications use workflow_type=1.
            audit_result = Audit.add(WorkflowDict.workflow_type['query'], apply_id)
            if audit_result['status'] == 0:
                # Update the audit status on the business table and decide
                # whether to insert privilege records.
                _query_apply_audit_call_back(apply_id, audit_result['data']['workflow_status'])
    except Exception as msg:
        logger.error(traceback.format_exc())
        result['status'] = 1
        result['msg'] = str(msg)
    else:
        result = audit_result
        # Notification
        audit_id = Audit.detail_by_workflow_id(workflow_id=apply_id,
                                               workflow_type=WorkflowDict.workflow_type['query']).audit_id
        async_task(notify_for_audit, audit_id=audit_id, timeout=60)
    return HttpResponse(json.dumps(result), content_type='application/json')


@permission_required('sql.menu_queryapplylist', raise_exception=True)
def user_query_priv(request):
    """
    Manage the user's query privileges
    :param request:
    :return:
    """
    user = request.user
    user_display = request.POST.get('user_display', 'all')
    limit = int(request.POST.get('limit'))
    offset = int(request.POST.get('offset'))
    limit = offset + limit
    search = request.POST.get('search', '')

    user_query_privs = QueryPrivileges.objects.filter(is_deleted=0,
                                                      valid_date__gte=datetime.datetime.now())
    # Filter by the search term; fuzzy-matches user, database and table.
    if search:
        user_query_privs = user_query_privs.filter(Q(user_display__icontains=search) |
                                                   Q(db_name__icontains=search) |
                                                   Q(table_name__icontains=search))
    # Filter by user.
    if user_display != 'all':
        user_query_privs = user_query_privs.filter(user_display=user_display)
    # Administrators see everything.
    if user.is_superuser:
        user_query_privs = user_query_privs
    # Users with management privileges see all tickets within their groups.
    elif user.has_perm('sql.query_mgtpriv'):
        # Fetch the resource groups the user belongs to.
        group_list = user_groups(user)
        group_ids = [group.group_id for group in group_list]
        user_query_privs = user_query_privs.filter(instance__queryprivilegesapply__group_id__in=group_ids)
    # Everyone else sees only their own tickets.
    else:
        user_query_privs = user_query_privs.filter(user_name=user.username)

    privileges_count = user_query_privs.distinct().count()
    privileges_list = user_query_privs.distinct().order_by('-privilege_id')[offset:limit].values(
        'privilege_id', 'user_display', 'instance__instance_name',
        'db_name', 'priv_type', 'table_name', 'limit_num', 'valid_date'
    )

    # Serialize the QuerySet.
    rows = [row for row in privileges_list]
    result = {"total": privileges_count, "rows": rows}
    # Return the query result.
    return HttpResponse(json.dumps(result, cls=ExtendJSONEncoder, bigint_as_string=True),
                        content_type='application/json')


@permission_required('sql.query_mgtpriv', raise_exception=True)
def query_priv_modify(request):
    """
    Modify privilege records
    :param request:
    :return:
    """
    privilege_id = request.POST.get('privilege_id')
    type = request.POST.get('type')
    result = {'status': 0, 'msg': 'ok', 'data': []}

    # type=1 deletes a privilege, type=2 modifies it.
    try:
        privilege = QueryPrivileges.objects.get(privilege_id=int(privilege_id))
    except QueryPrivileges.DoesNotExist:
        result['msg'] = 'The privilege to operate on does not exist'
        result['status'] = 1
        return HttpResponse(json.dumps(result), content_type='application/json')

    if int(type) == 1:
        # Delete the privilege.
        privilege.is_deleted = 1
        privilege.save(update_fields=['is_deleted'])
        return HttpResponse(json.dumps(result), content_type='application/json')
    elif int(type) == 2:
        # Modify the privilege.
        valid_date = request.POST.get('valid_date')
        limit_num = request.POST.get('limit_num')
        privilege.valid_date = valid_date
        privilege.limit_num = limit_num
        privilege.save(update_fields=['valid_date', 'limit_num'])
        return HttpResponse(json.dumps(result), content_type='application/json')


@permission_required('sql.query_review', raise_exception=True)
def query_priv_audit(request):
    """
    Audit query privilege applications
    :param request:
    :return:
    """
    # Fetch user info.
    user = request.user
    apply_id = int(request.POST['apply_id'])
    audit_status = int(request.POST['audit_status'])
    audit_remark = request.POST.get('audit_remark')

    if audit_remark is None:
        audit_remark = ''

    if Audit.can_review(request.user, apply_id, 1) is False:
        context = {'errMsg': 'You are not authorized to operate on this ticket!'}
        return render(request, 'error.html', context)

    # Use a transaction to keep the data consistent.
    try:
        with transaction.atomic():
            audit_id = Audit.detail_by_workflow_id(workflow_id=apply_id,
                                                   workflow_type=WorkflowDict.workflow_type['query']).audit_id

            # Audit via the workflow API.
            audit_result = Audit.audit(audit_id, audit_status, user.username, audit_remark)

            # Update the audit status on the business table according to the result.
            audit_detail = Audit.detail(audit_id)
            if audit_detail.workflow_type == WorkflowDict.workflow_type['query']:
                # Update the status and insert the privilege records.
                _query_apply_audit_call_back(audit_detail.workflow_id, audit_result['data']['workflow_status'])
    except Exception as msg:
        logger.error(traceback.format_exc())
        context = {'errMsg': msg}
        return render(request, 'error.html', context)
    else:
        # Notification
        async_task(notify_for_audit, audit_id=audit_id, audit_remark=audit_remark, timeout=60)

    return HttpResponseRedirect(reverse('sql:queryapplydetail', args=(apply_id,)))


def _table_ref(sql_content, instance, db_name):
    """
    Parse the syntax tree to get the tables a statement touches, used to scope query privileges.
    :param sql_content:
    :param instance:
    :param db_name:
    :return:
    """
    engine = GoInceptionEngine()
    query_tree = engine.query_print(instance=instance, db_name=db_name, sql=sql_content).get('query_tree')
    return engine.get_table_ref(json.loads(query_tree), db_name=db_name)


def _db_priv(user, instance, db_name):
    """
    Check whether the user holds privileges on the given database.
    :param user: user object
    :param instance: instance object
    :param db_name: database name
    :return: the privilege's limit_num when it exists, otherwise False
    TODO Unify the return type as int; non-existence returns 0 (note that 0 == False in Python)
    """
    # Fetch the user's database privileges.
    user_privileges = QueryPrivileges.objects.filter(user_name=user.username,
                                                     instance=instance,
                                                     db_name=str(db_name),
                                                     valid_date__gte=datetime.datetime.now(),
                                                     is_deleted=0,
                                                     priv_type=1)
    if user.is_superuser:
        return int(SysConfig().get('admin_query_limit', 5000))
    else:
        if user_privileges.exists():
            return user_privileges.first().limit_num
        return False


def _tb_priv(user, instance, db_name, tb_name):
    """
    Check whether the user holds privileges on the given table.
    :param user: user object
    :param instance: instance object
    :param db_name: database name
    :param tb_name: table name
    :return: the privilege's limit_num when it exists, otherwise False
    """
    # Fetch the user's table privileges.
    user_privileges = QueryPrivileges.objects.filter(user_name=user.username,
                                                     instance=instance,
                                                     db_name=str(db_name),
                                                     table_name=str(tb_name),
                                                     valid_date__gte=datetime.datetime.now(),
                                                     is_deleted=0,
                                                     priv_type=2)
    if user.is_superuser:
        return int(SysConfig().get('admin_query_limit', 5000))
    else:
        if user_privileges.exists():
            return user_privileges.first().limit_num
        return False


def _priv_limit(user, instance, db_name, tb_name=None):
    """
    Get the smallest limit among the user's query privileges, used to cap result sets.
    :param db_name:
    :param tb_name: optional; when omitted, return the database-level limit
    :return:
    """
    # Fetch the database/table limit values.
    db_limit_num = _db_priv(user, instance, db_name)
    if tb_name:
        tb_limit_num = _tb_priv(user, instance, db_name, tb_name)
    else:
        tb_limit_num = None
    # Return the smaller value.
    if db_limit_num and tb_limit_num:
        return min(db_limit_num, tb_limit_num)
    elif db_limit_num:
        return db_limit_num
    elif tb_limit_num:
        return tb_limit_num
    else:
        raise RuntimeError('The user holds no valid privileges!')


def _query_apply_audit_call_back(apply_id, workflow_status):
    """
    Workflow audit callback for query privilege applications.
    :param apply_id: application id
    :param workflow_status: audit result
    :return:
    """
    # Update the status on the business table.
    apply_info = QueryPrivilegesApply.objects.get(apply_id=apply_id)
    apply_info.status = workflow_status
    apply_info.save()
    # On approval, bulk-insert the privilege records to reduce overhead.
    if workflow_status == WorkflowDict.workflow_status['audit_success']:
        apply_queryset = QueryPrivilegesApply.objects.get(apply_id=apply_id)
        # Database privileges
        if apply_queryset.priv_type == 1:
            insert_list = [QueryPrivileges(
                user_name=apply_queryset.user_name,
                user_display=apply_queryset.user_display,
                instance=apply_queryset.instance,
                db_name=db_name,
                table_name=apply_queryset.table_list,
                valid_date=apply_queryset.valid_date,
                limit_num=apply_queryset.limit_num,
                priv_type=apply_queryset.priv_type) for db_name in apply_queryset.db_list.split(',')]
        # Table privileges
        elif apply_queryset.priv_type == 2:
            insert_list = [QueryPrivileges(
                user_name=apply_queryset.user_name,
                user_display=apply_queryset.user_display,
                instance=apply_queryset.instance,
                db_name=apply_queryset.db_list,
                table_name=table_name,
                valid_date=apply_queryset.valid_date,
                limit_num=apply_queryset.limit_num,
                priv_type=apply_queryset.priv_type) for table_name in apply_queryset.table_list.split(',')]
        QueryPrivileges.objects.bulk_create(insert_list)
export const BLACK = {
  100: '#F5F5F5', 200: '#EEEEEE', 300: '#E0E0E0', 400: '#BDBDBD',
  500: '#9E9E9E', 600: '#757575', 700: '#616161', 800: '#424242',
  900: '#212121',
};

export const GREY = {
  50: '#FAFAFA', 100: '#F5F5F5', 200: '#EEEEEE', 300: '#E0E0E0',
  400: '#BDBDBD', 500: '#9E9E9E', 600: '#757575', 700: '#616161',
  800: '#424242', 900: '#212121',
};

export const BLUE_GREY = {
  50: '#ECEFF1', 100: '#CFD8DC', 200: '#B0BEC5', 300: '#90A4AE',
  400: '#78909C', 500: '#607D8B', 600: '#546E7A', 700: '#455A64',
  800: '#37474F', 900: '#263238',
};

export const RED = {
  50: '#FFEBEE', 100: '#FFCDD2', 200: '#EF9A9A', 300: '#E57373',
  400: '#EF5350', 500: '#F44336', 600: '#E53935', 700: '#D32F2F',
  800: '#C62828', 900: '#B71C1C',
  A100: '#FF8A80', A200: '#FF5252', A400: '#FF1744', A700: '#D50000',
};

export const PINK = {
  50: '#FCE4EC', 100: '#F8BBD0', 200: '#F48FB1', 300: '#F06292',
  400: '#EC407A', 500: '#E91E63', 600: '#D81B60', 700: '#C2185B',
  800: '#AD1457', 900: '#880E4F',
  A100: '#FF80AB', A200: '#FF4081', A400: '#F50057', A700: '#C51162',
};

export const VIOLET = {
  100: '#E1BEE7', 200: '#CE93D8', 300: '#BA68C8', 400: '#AB47BC',
  500: '#9C27B0', 600: '#8E24AA', 700: '#7B1FA2', 800: '#6A1B9A',
  900: '#4A148C',
  A700: '#AA00FF',
};

export const PURPLE = {
  25: '#f1edf7', 50: '#EDE7F6', 100: '#D1C4E9', 200: '#B39DDB',
  300: '#9575CD', 400: '#7E57C2', 500: '#673AB7', 600: '#5E35B1',
  700: '#512DA8', 800: '#4527A0', 900: '#311B92', 1000: '#190b5b',
  1100: '#0f0442', 1200: '#0e0333', 1300: '#090126',
  A100: '#B388FF', A200: '#7C4DFF', A400: '#651FFF', A700: '#6200EA',
};

export const INDIGO = {
  50: '#E8EAF6', 100: '#C5CAE9', 200: '#9FA8DA', 300: '#7986CB',
  400: '#5C6BC0', 500: '#3F51B5', 600: '#3949AB', 700: '#303F9F',
  800: '#283593', 900: '#1A237E', 1000: '#11165e', 2000: '#0b0f47',
  3000: '#070b36',
  A100: '#8C9EFF', A200: '#536DFE', A400: '#3D5AFE', A700: '#304FFE',
};

export const BLUE = {
  50: '#E3F2FD', 100: '#BBDEFB', 200: '#90CAF9', 300: '#64B5F6',
  400: '#42A5F5', 500: '#2196F3', 600: '#1E88E5', 700: '#1976D2',
  800: '#1565C0', 900: '#0D47A1', 1000: '#093270', 1100: '#031f4a',
  1200: '#001029',
  A100: '#82B1FF', A200: '#448AFF', A400: '#2979FF', A700: '#2962FF',
};

export const GREEN = {
  50: '#E8F5E9', 100: '#C8E6C9', 200: '#A5D6A7', 300: '#81C784',
  400: '#66BB6A', 500: '#4CAF50', 600: '#43A047', 700: '#388E3C',
  800: '#2E7D32', 900: '#1B5E20',
  A100: '#B9F6CA', A200: '#69F0AE', A400: '#00E676', A700: '#00C853',
};

export const LIGHT_GREEN = {
  50: '#F1F8E9', 100: '#DCEDC8', 200: '#C5E1A5', 300: '#AED581',
  400: '#9CCC65', 500: '#8BC34A', 600: '#7CB342', 700: '#689F38',
  800: '#558B2F', 900: '#33691E',
  A100: '#CCFF90', A200: '#B2FF59', A400: '#76FF03', A700: '#64DD17',
};

export const YELLOW = {
  50: '#FFFDE7', 100: '#FFF9C4', 200: '#FFF59D', 300: '#FFF176',
  400: '#FFEE58', 500: '#FFEB3B', 600: '#FDD835', 700: '#FBC02D',
  800: '#F9A825', 900: '#F57F17',
  A100: '#FFFF8D', A200: '#FFFF00', A400: '#FFEA00', A700: '#FFD600',
};

export const AMBER = {
  50: '#FFF8E1', 100: '#FFECB3', 200: '#FFE082', 300: '#FFD54F',
  400: '#FFCA28', 500: '#FFC107', 600: '#FFB300', 700: '#FFA000',
  800: '#FF8F00', 900: '#FF6F00',
  A100: '#FFE57F', A200: '#FFD740', A400: '#FFC400', A700: '#FFAB00',
};

export const ORANGE = {
  50: '#FFF3E0', 100: '#FFE0B2', 200: '#FFCC80', 300: '#FFB74D',
  400: '#FFA726', 500: '#FF9800', 600: '#FB8C00', 700: '#F57C00',
  800: '#EF6C00', 900: '#E65100', 1000: '#a63a00',
  A100: '#FFD180', A200: '#FFAB40', A400: '#FF9100', A700: '#FF6D00',
};
package com.skanderj.gingerbread3.util;

import java.awt.Color;
import java.io.File;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Random;

/**
 * Helper class for basically anything that doesn't fit somewhere else.
 *
 * @author Skander
 *
 */
public final class Utilities {

	public static final String NULL_STRING = null, EMPTY_STRING = new StringBuilder("").toString();

	private static final Random random = new Random();

	private Utilities() {
		return;
	}

	public static OperatingSystem getOperatingSystem() {
		final String osName = System.getProperty("os.name");
		if (osName.toLowerCase().contains("win")) {
			return OperatingSystem.WINDOWS;
		} else if (osName.toLowerCase().contains("unix")) {
			return OperatingSystem.UNIX;
		} else if (osName.toLowerCase().contains("linux")) {
			return OperatingSystem.LINUX;
		} else if (osName.toLowerCase().contains("mac")) {
			return OperatingSystem.MACOS;
		} else {
			return OperatingSystem.OTHER;
		}
	}

	public static void disableHiDPI() {
		System.setProperty("sun.java2d.uiScale", "1.0");
	}

	public static int framesToMS(int frames, double refreshRate) {
		// 1000ms = refreshRate frames, so x ms = frames
		int ms = (int) (1000 * frames / refreshRate);
		return ms;
	}

	/**
	 * Self explanatory.
	 */
	@SafeVarargs
	public static final <T> T[] createArray(final T... ts) {
		return ts;
	}

	/**
	 * Self explanatory.
	 */
	public static boolean fileExistsInDirectory(final String directoryPath, final String fileName) {
		final File directory = new File(directoryPath);
		if (directory.exists() && directory.isDirectory()) {
			for (final File fileIn : directory.listFiles()) {
				if (fileIn.isFile()) {
					if (fileIn.getName().equals(fileName)) {
						return true;
					}
				}
			}
		}
		return false;
	}

	/**
	 * Self explanatory.
	 */
	public static String fileNameCompatibleDateString() {
		// "yyyy" (calendar year), not "YYYY" (week year), to avoid wrong
		// years around the new-year boundary.
		return new SimpleDateFormat("MM-dd-yyyy_hh-mm-ss").format(new Date());
	}

	/**
	 * Returns a random integer between a and b - included. Why doesn't Java have
	 * this????
	 */
	public static int randomInteger(final int a, final int b) {
		final int min = Math.min(a, b);
		final int max = Math.max(a, b);
		// Draw uniformly from [min, max] inclusive.
		return Utilities.random.nextInt((max - min) + 1) + min;
	}

	/**
	 * Self explanatory.
	 */
	public static double randomDouble(final double a, final double b) {
		final double d = Utilities.random.nextDouble();
		return a + ((b - a) * d);
	}

	/**
	 * Self explanatory.
	 */
	public static Color buildAgainst(final Color color, final int alpha) {
		return new Color(color.getRed(), color.getGreen(), color.getBlue(), alpha);
	}

	/**
	 * Returns a random color. If useAlpha, color will have a random transparency.
	 */
	public static Color randomColor(final boolean useAlpha) {
		return new Color(Utilities.random.nextInt(255), Utilities.random.nextInt(255), Utilities.random.nextInt(255),
				useAlpha ? Utilities.random.nextInt(255) : 255);
	}

	/**
	 * I invite you to read the p5js documentation for this beautiful function.
	 */
	public static double map(final double value, final double valueMin, final double valueMax, final double targetMin,
			final double targetMax, final boolean withinBounds) {
		final double newval = (((value - valueMin) / (valueMax - valueMin)) * (targetMax - targetMin)) + targetMin;
		if (!withinBounds) {
			return newval;
		}
		if (targetMin < targetMax) {
			return Utilities.constraint(newval, targetMin, targetMax);
		} else {
			return Utilities.constraint(newval, targetMax, targetMin);
		}
	}

	/**
	 * Used for {@link Utilities#map(double, double, double, double, double, boolean)}
	 */
	private static double constraint(final double value, final double minimum, final double maximum) {
		return Math.max(Math.min(value, maximum), minimum);
	}
}
// Handles running appropriate error callbacks.
void OnError(const ShillClientHelper::ErrorCallback& error_callback,
             dbus::ErrorResponse* response) {
  std::string error_name;
  std::string error_message;
  if (response) {
    dbus::MessageReader reader(response);
    error_name = response->GetErrorName();
    reader.PopString(&error_message);
  }
  error_callback.Run(error_name, error_message);
}
package kg.apc.jmeter.vizualizers;

import org.apache.jmeter.reporters.ResultCollector;
import org.apache.jmeter.samplers.SampleSaveConfiguration;
import org.apache.jorphan.logging.LoggingManager;
import org.apache.log.Logger;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

/**
 *
 * @author undera
 */
public class CorrectedResultCollector extends ResultCollector {

    private static final Logger log = LoggingManager.getLoggerForClass();

    public static final String INCLUDE_SAMPLE_LABELS = "include_sample_labels";
    public static final String EXCLUDE_SAMPLE_LABELS = "exclude_sample_labels";
    private static final String COMMA = ",";

    @Override
    public void testStarted() {
        setupSaving();
        super.testStarted();
    }

    @Override
    public void testStarted(String host) {
        setupSaving();
        super.testStarted(host);
    }

    private void setupSaving() {
        SampleSaveConfiguration conf = getSaveConfig();
        // please, save the threads... it's so important, but disabled by default
        conf.setThreadCounts(true);
    }

    public List<String> getList(String prop) {
        String s = getPropertyAsString(prop);
        if (s.isEmpty()) {
            return new ArrayList<String>(0);
        } else {
            return Arrays.asList(s.split(COMMA));
        }
    }

    public void setExcludeLabels(String excludeLabels) {
        setProperty(EXCLUDE_SAMPLE_LABELS, excludeLabels);
    }

    public void setIncludeLabels(String labels) {
        setProperty(CorrectedResultCollector.INCLUDE_SAMPLE_LABELS, labels);
    }
}
/**
 * This method checks the validity of the location format
 *
 * @param latitude
 *            it must be a double
 * @param longitude
 *            it must be a double
 * @return true if they are both doubles, false otherwise
 */
public boolean correctFormatLocation(String latitude, String longitude) {
    try {
        Double.parseDouble(latitude);
        Double.parseDouble(longitude);
        return true;
    } catch (Exception e) {
        return false;
    }
}
A row has broken out over advice given to police in England and Wales telling them not to stop and search people only because they smell of cannabis.

It was first given to police last year and was reiterated by an Inspectorate of Constabulary report on Tuesday. The advice says officers should look at other factors, such as behaviour, as well. But some officers, including the chief constable of Merseyside Police, said they disagreed. The College of Policing said it plans to review the guidance.

Police officers can use stop-and-search powers if they have "reasonable grounds" to suspect someone is carrying items such as drugs, weapons or stolen property. Last year, they were given new guidance by the College of Policing that the smell of cannabis on its own would not normally justify stopping and searching someone or their vehicle. But the Inspectorate of Constabulary said many officers were unaware of the guidance and it is now urging forces to encourage officers not to rely on a smell alone.

However, Chief Constable Andy Cooke, of Merseyside Police, said he would not be giving that advice to his teams. He tweeted: "I disagree. The guidance in my view is wrong and the law does not preclude it. Smell of cannabis is sufficient to stop search and I will continue to encourage my officers to use it particularly on those criminals who are engaged in serious and organised crime."

Matt Locke, of Northumbria Police, described the guidance as "inconsistent", adding that it was "a bit of a dog's dinner".

Another police officer, from North Yorkshire Police, tweeted: "If I smell cannabis on someone or coming from a vehicle then I'll conduct a search. I don't think there's a cop in this land that wouldn't. Recently not only has that led to me seizing quantities of cannabis, but also arresting drivers showing with it in their system."

"Sorry but even as a trainer I'm confused 😐 'strong smell of cannabis' would give me reasonable grounds to suspect a person is in possession of cannabis?! I can't see how a 'strong smell' would be from clothing alone? A faint smell is a bit different!" — Josh (@Josh_B456) December 12, 2017

Mike Cunningham, HM Inspector of Constabulary, responded to questions on social media about the guidance by saying the smell of cannabis "can be reasonable grounds" to search but it will be "for the officer to explain". He added that the advice "encourages multiple grounds" to merit a stop and search.

The row came after the Inspectorate of Constabulary analysed more than 8,500 stop and search records and found almost 600 were conducted solely because police could smell cannabis. Searches based on other grounds, such as the suspect's behaviour, result in more arrests, the report said.

Analysis: By Danny Shaw, BBC home affairs correspondent

At the heart of this row is an important question: are too many people being needlessly stopped and searched for drugs? The Inspectorate report drops a heavy hint that they are. It says police carried out 3,698 searches, 43% of the sample, because officers believed a suspect had drugs on them for their own use, even though drug possession offences may not be "priority crimes". The watchdog is concerned about this, firstly, because drug possession searches are not necessarily the best use of police time; and secondly, because they appear to affect ethnic minority groups disproportionately. That's one of the key reasons why the Inspectorate has reinforced the College of Policing guidance on stop and searches, including the advice about smelling cannabis, even though it's caused a stink.

"No one likes a stink! We've plans to review the guidance on whether the smell of cannabis on its own is sufficient grounds for a stop search. We encourage multiple grounds for a stop search but officers can, and do, use their judgement. https://t.co/FzLCXtVEFi" — College of Policing (@CollegeofPolice) December 12, 2017

The report said it was "troubling" that black people were eight times more likely to be stopped than white people. At the same time, black people were less likely to have illegal substances found on them than white people. The National Police Chiefs' Council said it was looking at why young black men were disproportionately stopped. The NPCC said stop-and-search powers were important "with rising knife and gun crime", as well as being a deterrent for people considering carrying out acid attacks.
import numpy as np
from rdkit import Chem
from rdkit.Chem import rdMolDescriptors as rd
from ersilia import ErsiliaModel
from tqdm import tqdm

RADIUS = 3
NBITS = 2048
DTYPE = np.int8


def clip_sparse(vect, nbits):
    l = [0] * nbits
    for i, v in vect.GetNonzeroElements().items():
        l[i] = v if v < 255 else 255
    return l


class _Fingerprinter(object):
    def __init__(self):
        self.nbits = NBITS
        self.radius = RADIUS

    def calc(self, mol):
        v = rd.GetHashedMorganFingerprint(mol, radius=self.radius, nBits=self.nbits)
        return clip_sparse(v, self.nbits)


class Fingerprinter(object):
    def __init__(self):
        self.fingerprinter = _Fingerprinter()

    def calculate(self, smiles_list):
        X = np.zeros((len(smiles_list), NBITS), np.uint8)
        for i, smi in tqdm(enumerate(smiles_list)):
            mol = Chem.MolFromSmiles(smi)
            if mol is None:
                continue
            X[i, :] = self.fingerprinter.calc(mol)
        return X


class Embedder(object):
    def __init__(self):
        self.dim = 5000
        self.model = "grover-embedding"

    def calculate(self, smiles_list, output_h5=None):
        if output_h5 is None:
            with ErsiliaModel(self.model) as mdl:
                X = mdl.api(api_name=None, input=smiles_list, output="numpy")
            return X
        else:
            with ErsiliaModel(self.model) as mdl:
                mdl.api(api_name=None, input=smiles_list, output=output_h5)
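A short usage sketch for Fingerprinter, assuming rdkit, numpy and tqdm are installed; the SMILES strings are arbitrary examples, not data from this project.

smiles = ["CCO", "c1ccccc1", "not-a-smiles"]  # invalid entries stay all-zero rows
fp = Fingerprinter()
X = fp.calculate(smiles)
print(X.shape)     # (3, 2048)
print(X[2].sum())  # 0 -> the unparseable SMILES produced an empty row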
def sensitivity(c, n):
    # Compute the sensitivity of node n in circuit c: how many of its
    # startpoints (primary inputs) can individually flip its value.
    sp = c.startpoints(n)
    if n in sp:
        return 1
    # Start from the maximum possible sensitivity and walk downward until a
    # SAT assignment witnesses that value on the transformed circuit's
    # sen_out_* outputs.
    sen = len(sp)
    s = sensitivity_transform(c, n)
    vs = int_to_bin(sen, clog2(len(sp)), True)
    while not sat(s, {f"sen_out_{i}": v for i, v in enumerate(vs)}):
        sen -= 1
        vs = int_to_bin(sen, clog2(len(sp)), True)
    return sen
Digitizing China: The political economy of China’s digital switchover

documentation and in the public perception, news professionalism itself is a dynamic contingency, not a static doctrine. In fact, the academic dialogue about advocacy journalism and citizen journalism exemplifies how news professionalism can be compatible with the social concerns of marginalized groups. Such discussion could provide a strong justification for activists and even journalists to modify and challenge the sacred myths of objectivity and balanced reporting in the process of news gathering and presentation. Coupled with the rapid proliferation of social media and online communication, these alternative attributes of news professionalism are gaining a foothold in both social and journalistic practices.

In Hong Kong, we have witnessed a series of social movements engineered by young activists in recent years: protests, rallies, and other sorts of resistance to cross-border infrastructure projects; the rejection of national education; class boycotts for constitutional reform; the Umbrella Revolution in 2014, which aimed to push China to offer Hong Kong a genuine local election to select the chief executive (i.e., the governing head of Hong Kong); and the localist movement, which defies the Chinese identification of Hong Kong people. Young activists and politicians have been on the frontline in assuming the leadership of these social movements. At the time of writing this book review, dozens of young candidates were running for seats in the Legislative Council of Hong Kong. Such active involvement in social change and the political establishment should also influence the media representation and cultural imagination of Hong Kong’s youth.

In a nutshell, Cheung’s book serves as a solid foundation for scholars and students who engage in academic dialogue about the media’s representation of youth, the media’s politics, and the social formation of Hong Kong. Its intellectual breadth, which encompasses cultural studies, journalism, and media discourses, provides the basis for academic dialogue about the evolution of media power before and after the handover of Hong Kong. This book is also a key reference for academic studies that offer serious reviews of the cultural resistance to a capitalist media market.
Seattle mayor Ed Murray threw a bean bag at a cornhole board. A busker picked tunes on his guitar. A chef from local restaurant Soul Kitchen plated Southern food. Seattle marked the transfer of some of its main public parks to private management in grand fashion. As part of a string of efforts to "clean up" downtown, Murray announced Thursday that the city would turn over management of Westlake and Occidental parks to the Downtown Seattle Association (DSA) as part of a trial contract lasting one year. Support for the partnership ranges from enthusiastic to unsure to downright opposed. It’s a model used in other American cities, including New York and San Francisco, but not yet tested in Seattle. From this point on, the onus of managing two of Seattle’s most used parks will fall mostly on the DSA, with help from Seattle Parks and Recreation, the Seattle Parks Foundation, Alliance for Pioneer Square and Friends of Waterfront Seattle. The city’s investment will be minimal – a dollar to every ten spent by the DSA, adding up to an estimated $60,000 in taxpayer funding. DSA will take over Westlake immediately, while Occidental will follow later this summer. The aforementioned programming would, in theory, attract more people to the parks and encourage them to stick around, replacing negative activity such as drug dealing with positive activity like, say, cornhole. “By bringing in as much positive energy,” said DSA President Jon Scholes, “…it’s harder to operate as a drug dealer.” This question of cleaning up “public disorder” – as the city calls low-level offenses like drug-use and theft – has been gaining momentum lately. Last December, the Seattle Police Department introduced its Neighborhood Response Team, a patrol force focused specifically on things like urination and general unpleasantness. The Law Enforcement Assisted Diversion (LEAD) program, which aims to rehabilitate rather than arrest low-level offenders, functions parallel to that squad. Quickly thereafter came the “9 and a half block strategy,” a new approach to increase law enforcement and to identify drug dealers in that radius around Pike Place Market and Westlake Park. That program kicked off with a bang when the SPD and the FBI announced they had arrested nearly 100 suspects after months of undercover work. Timed with the city’s other efforts was the move to ban smoking in all public parks. Murray and other city officials have said the smoking ban is not related to this new partnership or other efforts to deal with public disorder. “The smoking ban comes from a different place,” said Murray. That said, the current legislation to ban smoking got its start when representatives from the DSA, the same organization taking over management, sent a letter to the city urging it to act. Additionally, the ordinance as written calls out Westlake and Occidental parks (as well as Victor Steinbrueck Park) as places that will receive extra attention. On at least one occasion, the smoking ban was discussed in a meeting about public disorder. So while it may come from a different place, there is certainly overlap. What does a privately managed public park look like? Bryant Park in Manhattan is held up as the ideal (recent slashing incident aside). 
A late-seventies report on the park stated: “If you went out and hired the dope dealers, you couldn’t get a more villainous crew to show the urgency of the [present Bryant Park] situation.” But, with the help of the Rockefellers, a private body formed and hired Daniel Biederman to essentially fix the park. He spearheaded an effort to fix paths, create new entrances, host events and add landscaping. Now Bryant Park is teeming. Seattle has brought on Biederman’s firm to work his magic on Westlake and Occidental. “Programming is critical in city spaces that are not scenic wonders,” Biederman wrote in a statement. “The idea is to spread throughout the park this activity.” Also, thanks to 9 ½ blocks, there will be more SPD officers around the clock. Former Councilmember and mayoral candidate Peter Steinbrueck, who now heads Steinbrueck Urban Strategies, said the approach could be very successful, if done right. “I think there are good examples of well managed private/public spaces,” he said, pointing to Bryant Park as well as Union Square Park in San Francisco. Private entities like DSA, he said, are, in a lot of ways, better suited to manage, clean and enforce rules than the city. But, said Steinbrueck, the partnership needs to be well thought out and well vetted. One concern, which he doesn’t think is imminent, is that a private body could use the public space to generate revenue. “Over-programming and over-commercializing a public park in my opinion is not a good thing.” Still, he sees potential. Real Change Director Tim Harris, on the other hand, is not convinced. “These are public spaces and to me it’s very problematic,” he said. “In their view they’re going to make it as comfortable as they can for shoppers and not for people they feel don’t belong. “There’s nothing that needs to be revitalized about these parks. I see everyone sharing space in Occidental Park and everyone seems to pretty much get along. I’m not buying this line that parks need to be revitalized.” This critique of the revitalization efforts – that they unfairly target the homeless – has been consistent, especially from Harris. While he said he supports and has worked with the DSA in their efforts to clean up drug use, he feels like homeless advocacy organizations like Real Change have been left out of recent discussions. “This is the first I’ve heard about this,” he said. Anticipating such criticism, Joshua Curtis, vice president of the Metropolitan Improvement District within the DSA, said, “Let me be clear: These parks will be open to everyone.” He argued that revitalized parks lead to more pathways into social services. “Engaged and active spaces benefit both the poor and the affluent.” Additionally, DSA President Scholes said DSA did hold a roundtable at Plymouth Housing to brief homeless advocates. For Scholes, changing how parks are managed is a response to public demand. “There’s clearly a hunger,” he said. As this partnership is a pilot program, it’s as yet unclear how far that hunger will take the DSA.
// ExampleClient_GetPKI example using GetPKI()
//
// See more examples in /examples/
func ExampleClient_GetPKI() {
	client, err := newTestClient()
	if err != nil {
		fmt.Printf("error loading client: %s", err.Error())
		return
	}

	mockGetPKI(http.StatusOK)

	var pki *PKI
	pki, err = client.GetPKI(testServerURL+"id/{alias}@{domain.tld}", testAlias, testDomain)
	if err != nil {
		fmt.Printf("error getting pki: %s", err.Error())
		return
	}

	fmt.Printf("found %s handle with pubkey: %s", pki.Handle, pki.PubKey)
}
// Add the Word Count column. private String[] metaSegmentHeader(String[] header) { String[] newheader = Arrays.copyOf(header, header.length + 1); newheader[newheader.length - 1] = "Word Count"; return newheader; }
def _report_hypervisor_resource_view(self, resources):
    """Log the hypervisor's view of free resources from the `resources` dict."""
    free_ram_mb = resources['memory_mb'] - resources['memory_mb_used']
    free_disk_gb = resources['local_gb'] - resources['local_gb_used']

    vcpus = resources['vcpus']
    if vcpus:
        free_vcpus = vcpus - resources['vcpus_used']
        LOG.debug("Hypervisor: free VCPUs: %s", free_vcpus)
    else:
        free_vcpus = 'unknown'
        LOG.debug("Hypervisor: VCPU information unavailable")

    # Bind pci_devices before the summary log below, so the name exists
    # even when no assignable devices are reported.
    pci_devices = resources.get('pci_passthrough_devices')
    if pci_devices:
        LOG.debug("Hypervisor: assignable PCI devices: %s", pci_devices)

    LOG.debug("Hypervisor/Node resource view: "
              "name=%(node)s "
              "free_ram=%(free_ram)sMB "
              "free_disk=%(free_disk)sGB "
              "free_vcpus=%(free_vcpus)s "
              "pci_devices=%(pci_devices)s",
              {'node': self.nodename,
               'free_ram': free_ram_mb,
               'free_disk': free_disk_gb,
               'free_vcpus': free_vcpus,
               'pci_devices': pci_devices})
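# A standalone sketch of the same arithmetic with an illustrative resources
# dict (field names taken from the method above; the numbers are made up):
import logging

logging.basicConfig(level=logging.DEBUG)
LOG = logging.getLogger("resource_view")

resources = {
    'memory_mb': 16384, 'memory_mb_used': 4096,
    'local_gb': 200, 'local_gb_used': 50,
    'vcpus': 8, 'vcpus_used': 3,
    'pci_passthrough_devices': None,
}

free_ram_mb = resources['memory_mb'] - resources['memory_mb_used']    # 12288
free_disk_gb = resources['local_gb'] - resources['local_gb_used']     # 150
vcpus = resources['vcpus']
free_vcpus = vcpus - resources['vcpus_used'] if vcpus else 'unknown'  # 5
LOG.debug("free_ram=%sMB free_disk=%sGB free_vcpus=%s pci_devices=%s",
          free_ram_mb, free_disk_gb, free_vcpus,
          resources.get('pci_passthrough_devices'))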
/// Create a new Sound.
///
/// Returns an `Err` if the number of channels doesn't match the output number of channels.
/// If the output number of channels is 1, or the number of channels of `source` is 1,
/// `source` will automatically be wrapped in a [`ChannelConverter`].
///
/// If the `sample_rate` of `source` doesn't match the output `sample_rate`, `source` will
/// be wrapped in a [`SampleRateConverter`].
pub fn new_sound<T: SoundSource + Send + 'static>(
    &self,
    source: T,
) -> Result<Sound, &'static str> {
    let sound: Box<dyn SoundSource + Send> = if source.sample_rate() != self.sample_rate {
        if source.channels() == self.channels {
            Box::new(SampleRateConverter::new(source, self.sample_rate))
        } else if self.channels == 1 || source.channels() == 1 {
            // One side is mono, so the channel count can be converted.
            Box::new(ChannelConverter::new(
                SampleRateConverter::new(source, self.sample_rate),
                self.channels,
            ))
        } else {
            return Err("Number of channels do not match the output, and neither are 1");
        }
    } else if source.channels() == self.channels {
        Box::new(source)
    } else if self.channels == 1 || source.channels() == 1 {
        Box::new(ChannelConverter::new(source, self.channels))
    } else {
        return Err("Number of channels do not match the output, and neither are 1");
    };
    let id = self.mixer.lock().unwrap().add_sound(sound);
    Ok(Sound {
        mixer: self.mixer.clone(),
        id,
    })
}
ST. LOUIS - The Blues held tryouts for the 2012-13 Blue Crew fan entertainment team on Saturday at Scottrade Center. For more than four hours, interested candidates participated in activities that tested their ability to pump up a crowd, think on their feet through improvisation and more. Candidates also went through a series of interviews with a panel of judges that consisted of Blues staff members. Participants were judged based on energy, charisma, confidence, stage presence and overall first impression. Nearly 40 candidates took part in the off-ice tryouts; some will move on to on-ice tryouts on Sunday. “The tryouts have been so much fun. I love it, because you meet all the girls,” said Krista Pucci, who currently works as a receptionist and studies Biblical Counseling and Psychology. “I’ve been watching the Blues since I was three years old, and it would mean a lot to me (to be selected). I’ve missed cheerleading since high school, and I love sports and I love promotional work, so all of that ties into this.” For the past month, the Blues have been accepting online applications. The team is looking for energetic, charismatic and outgoing individuals for the 2012-13 season. In addition to cleaning the ice and entertaining fans at games, this year’s Blue Crew will likely play a bigger role in the St. Louis community. Andrew McClure, an anesthesiologist assistant at St. Louis University Hospital, said he tried out because he’s looking for a fun and relaxing job to balance the stress of working in an operating room. Ashley Borre, a waitress and figure skater for nearly 20 years, is interested in part-time work while attending grad school for nursing. “Auditions have gone really well. There’s been a lot of energy, and a lot of creativity,” said Scott Casey, a three-year Blue Crew veteran who is trying out again this year. “It’s been great to just be part of the Blues organization and the history that’s in St. Louis. To be part of the team and the Blue Crew and bring the entertainment to the fans…all that is great.” Some candidates will participate in on-ice skating tryouts at St. Louis Mills on Sunday. Added Casey, “It’s a great atmosphere and it’s just fun to be part of the group. The Blue Crew team itself is just a great team to be part of. It’s a big family, and it’s a joy to look forward to.”
/*
 * ======================================================================
 * Copyright (c) Microsoft Open Technologies, Inc. All rights reserved.
 * Licensed under the MIT License.
 * See LICENSE.md in the project root for license information.
 * ======================================================================
 */

#ifndef _RMS_LIB_ICONSENTCALLBACK_H_
#define _RMS_LIB_ICONSENTCALLBACK_H_

#include <memory>
#include <vector>

#include "IConsent.h"
#include "ModernAPIExport.h"

namespace rmscore {
namespace modernapi {
using ConsentList = std::vector<std::shared_ptr<IConsent> >;

/// <summary>
/// Interface for displaying consents. This callback is provided by the app
/// developer so the app knows when and which consent notifications to
/// display to the user.
/// </summary>
class IConsentCallback {
public:
  /// <summary>
  /// Apps should implement this method and return the reviewed consents
  /// </summary>
  /// <param name="consents">List of consents</param>
  /// <returns>The list of consents after user review</returns>
  virtual ConsentList Consents(ConsentList& consents) = 0;
};
} // namespace modernapi
} // namespace rmscore

#endif // _RMS_LIB_ICONSENTCALLBACK_H_
#include <stdio.h>

/* Read pairs of integers until EOF and print their greatest common divisor,
 * found by trial division up to the smaller of the two. (Euclid's algorithm
 * would be faster, but this is O(min(a, b)) and fine for small inputs.) */
int main(void){
    long a, b, i;
    while(scanf("%ld %ld", &a, &b) != EOF){
        if(a > b){ /* ensure a <= b so the loop bound is the smaller value */
            long tmp = a;
            a = b;
            b = tmp;
        }
        long max = 1;
        for(i = 2; i <= a; i++) {
            if(a % i == 0 && b % i == 0)
                max = i;
        }
        printf("%ld\n", max);
    }
    return 0;
}
/** * @brief Constructs a new device buffer of `size` uninitialized bytes * * @throws rmm::bad_alloc If allocation fails. * * @param size Size in bytes to allocate in device memory. * @param stream CUDA stream on which memory may be allocated if the memory * resource supports streams. * @param mr Memory resource to use for the device memory allocation. */ explicit device_buffer(std::size_t size, cuda_stream_view stream, mr::device_memory_resource* mr = mr::get_current_device_resource()) : _stream{stream}, _mr{mr} { allocate_async(size); }
#include "xy_align.h"

using namespace frc;

void XYalign::test(){
    std::shared_ptr<NetworkTable> table = nt::NetworkTableInstance::GetDefault().GetTable("limelight");
    degree = ahrs->GetAngle();
    double vertical_offset = table->GetNumber("ty", 0.0);
    double drive_adjustment = vertical_offset;
    // Joystick axes are doubles in [-1, 1]; reading them into ints would
    // truncate nearly every value to 0.
    double xp = joy0->GetRawAxis(4);
    double yp = joy0->GetRawAxis(1);
    // NOTE: as a local, this resets on every call, so the "tx" snapshot and
    // ZeroYaw() below run each time a target is visible; it most likely
    // should be a member variable so the target is latched once.
    bool target_set = false;
    horizontal_offset = table->GetNumber("tx", 0.0);
    std::cout << "Math1: " << horizontal_offset - degree << std::endl;
    talon_drive_right_enc->Set(ControlMode::PercentOutput, .3 * (yp + xp));
    talon_drive_left_enc->Set(ControlMode::PercentOutput, .3 * (xp - yp));
    if (table->GetNumber("tv", 0.0)){
        if (target_set == false){
            target_offset_angle_horizontal = table->GetNumber("tx", 0.0);
            target_set = true;
            ahrs->ZeroYaw();
        }
        if (horizontal_offset - degree < 0.1 and horizontal_offset - degree > -0.1) {
            Align = true;
            std::cout << "Aligned" << std::endl;
            //talon_drive_right_enc->Set(ControlMode::PercentOutput, .3 * (yp+xp));
            //talon_drive_left_enc->Set(ControlMode::PercentOutput, .3 * (xp-yp));
        } else if (horizontal_offset - degree > 0.20 /*and Align == false*/) {
            //left_drive = HorizontalOffset / 45;
            std::cout << "Going left" << std::endl;
            left_drive = (horizontal_offset - degree) * -0.05;
            right_drive = 0;
        } else if (horizontal_offset - degree < -.20 /*and Align == false*/){
            //right_drive = HorizontalOffset / 45;
            std::cout << "Going right" << std::endl;
            right_drive = (horizontal_offset - degree) * -0.05;
            left_drive = 0;
        } else {
            right_drive = 0;
            left_drive = 0;
        }
    }
    if (joy0->GetRawButton(2) == 1){
        Align = false;
    }
    if (joy0->GetRawButton(1) == 1){
        target_set = false;
    }
    std::cout << horizontal_offset << std::endl;
    std::cout << "Right: " << right_drive << std::endl;
    std::cout << "Left: " << left_drive << std::endl;
    if (joy0->GetRawButton(3) == 1){
        talon_drive_right_enc->Set(ControlMode::PercentOutput, (.15 * -right_drive));
        talon_drive_left_enc->Set(ControlMode::PercentOutput, (-.15 * -left_drive));
    }
    std::cout << drive_adjustment << std::endl;
    std::cout << "Target: " << target_set << std::endl;
}
/** REST web services for creating and modifying related entities. */ @Path(ApiPaths.API_RESOURCE + "/" + ApiPaths.RELATED_ENTITIES) @Api(value = SwaggerInterface.TAG_RESOURCES, authorizations = {@Authorization(value = SwaggerInterface.BASIC_AUTH), @Authorization(value = SwaggerInterface.API_KEY_AUTH)}) public class PutRelatedEntities { /** Logger for this class. */ private Logger logger = LoggerFactory.getLogger( MethodHandles.lookup().lookupClass()); /** Create a new related entity. * @param request The HTTP request. * @param uriInfo The UriInfo of the request. * @param profile The caller's security profile. * @param newRelatedEntity The related entity to be created. * @return The new related entity, in either XML or JSON format. */ @Consumes({MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON}) @Produces({MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON}) @Pac4JSecurity @POST @ApiOperation(value = "Create a new related entity.", code = HttpStatus.SC_CREATED, responseHeaders = { @ResponseHeader(name = "Location", description = "URL of the newly-created " + "related entity", response = URL.class) }, response = RelatedEntity.class) @ApiResponses(value = { @ApiResponse(code = HttpStatus.SC_BAD_REQUEST, message = "Invalid input", response = ErrorResult.class), @ApiResponse(code = HttpStatus.SC_UNAUTHORIZED, message = "Not authenticated", response = ErrorResult.class, responseHeaders = { @ResponseHeader(name = "WWW-Authenticate", response = String.class) }), @ApiResponse(code = HttpStatus.SC_FORBIDDEN, message = "Not authenticated, or not authorized", response = ErrorResult.class), @ApiResponse(code = HttpStatus.SC_CONFLICT, message = "Duplicate related entity. See response " + "constraintViolations for conflicting entities.", response = ErrorResult.class) }) public Response createRelatedEntity( @Context final HttpServletRequest request, @Context final UriInfo uriInfo, @ApiParam(hidden = true) @Pac4JProfile final CommonProfile profile, @ApiParam(value = "The related entity to be added.", required = true) @NotNull(message = "The related entity must not be null") @CheckRelatedEntity final RelatedEntity newRelatedEntity) { logger.debug("called createRelatedEntity"); if (!AuthUtils.ownerIsAuthorizedByOrganisationOrUsername(profile, newRelatedEntity.getOwner())) { Logging.logRequest(false, request, uriInfo, profile, Analytics.EVENT_CREATE_RELATED_ENTITY, Analytics.FAILURE_REASON, "authorization", Analytics.TITLE_FIELD, newRelatedEntity.getTitle(), Analytics.OWNER_FIELD, newRelatedEntity.getOwner()); return ResponseUtils.generateForbiddenResponseForOwner(); } String username = profile.getUsername(); // All of the mappers we will need. RelatedEntityRegistrySchemaMapper reToDbMapper = RelatedEntityRegistrySchemaMapper.INSTANCE; RelatedEntityIdentifierRegistrySchemaMapper reiToDbMapper = RelatedEntityIdentifierRegistrySchemaMapper.INSTANCE; RelatedEntityDbSchemaMapper reFromDbMapper = RelatedEntityDbSchemaMapper.INSTANCE; RelatedEntityIdentifierDbSchemaMapper reiFromDbMapper = RelatedEntityIdentifierDbSchemaMapper.INSTANCE; au.org.ands.vocabs.registry.db.entity.RelatedEntity newDbRelatedEntity = reToDbMapper.sourceToTarget(newRelatedEntity); // Check for duplicates. // NB: we don't need to check for duplicate identifiers _within_ the // request body; that is handled by @CheckRelatedEntity. List<ValidationError> validationErrors = new ArrayList<>(); // Check REs with matching top-level properties. 
        for (au.org.ands.vocabs.registry.db.entity.RelatedEntity re
                : RelatedEntityDAO.getMatchingRelatedEntities(
                        newDbRelatedEntity)) {
            ValidationError ve = new ValidationError();
            ve.setMessage("Related entity with matching details; path "
                    + "has the ID of the related entity");
            ve.setPath(re.getRelatedEntityId().toString());
            validationErrors.add(ve);
        }
        // Check for existing related entities with a matching identifier.
        for (RelatedEntityIdentifier rei
                : newRelatedEntity.getRelatedEntityIdentifier()) {
            for (au.org.ands.vocabs.registry.db.entity.RelatedEntity re
                    : RelatedEntityDAO.
                    getRelatedEntitiesWithMatchingIdentifier(
                            newRelatedEntity.getOwner(),
                            rei.getIdentifierType(),
                            rei.getIdentifierValue())) {
                ValidationError ve = new ValidationError();
                ve.setMessage("Related entity with matching identifier; path "
                        + "has the ID of the related entity");
                ve.setPath(re.getRelatedEntityId().toString());
                validationErrors.add(ve);
            }
        }
        if (!validationErrors.isEmpty()) {
            logger.info("Attempt to create a new related entity or identifier "
                    + "that would be a duplicate of an existing one");
            Logging.logRequest(false, request, uriInfo, profile,
                    Analytics.EVENT_CREATE_RELATED_ENTITY,
                    Analytics.FAILURE_REASON, "duplicate",
                    Analytics.TITLE_FIELD, newRelatedEntity.getTitle(),
                    Analytics.OWNER_FIELD, newRelatedEntity.getOwner());
            ErrorResult errorResult =
                    new ErrorResult("Duplicate related entity or identifier");
            errorResult.setConstraintViolation(validationErrors);
            return Response.status(Status.CONFLICT).entity(errorResult).build();
        }

        // No notion of drafts for related entities:
        // always use current date/time as start date.
        LocalDateTime now = TemporalUtils.nowUTC();
        newDbRelatedEntity.setStartDate(now);
        newDbRelatedEntity.setEndDate(
                TemporalConstants.CURRENTLY_VALID_END_DATE);
        newDbRelatedEntity.setModifiedBy(username);
        // Persist the related entity.
        RelatedEntityDAO.saveRelatedEntityWithId(newDbRelatedEntity);

        // Now translate back into registry schema.
        RelatedEntity reReturned = reFromDbMapper.sourceToTarget(
                newDbRelatedEntity);
        List<RelatedEntityIdentifier> reiReturned =
                reReturned.getRelatedEntityIdentifier();

        // Extract and persist the identifiers.
        for (RelatedEntityIdentifier rei
                : newRelatedEntity.getRelatedEntityIdentifier()) {
            au.org.ands.vocabs.registry.db.entity.RelatedEntityIdentifier
                    newDbREI = reiToDbMapper.sourceToTarget(rei);
            // Must manually point it back to the parent entity.
            newDbREI.setRelatedEntityId(
                    newDbRelatedEntity.getRelatedEntityId());
            newDbREI.setStartDate(now);
            newDbREI.setEndDate(TemporalConstants.CURRENTLY_VALID_END_DATE);
            newDbREI.setModifiedBy(username);
            // Persist the identifier.
            RelatedEntityIdentifierDAO.saveRelatedEntityIdentifierWithId(
                    newDbREI);
            reiReturned.add(reiFromDbMapper.sourceToTarget(newDbREI));
        }

        // This is a registry event; log it.
        RegistryEvent re = new RegistryEvent();
        re.setElementType(RegistryEventElementType.RELATED_ENTITIES);
        re.setElementId(newDbRelatedEntity.getRelatedEntityId());
        // Related entities have no draft state, so `now` is the correct
        // event date.
        re.setEventDate(now);
        re.setEventType(RegistryEventEventType.CREATED);
        re.setEventUser(profile.getUsername());
        // To be done: put something sensible in the details.
        re.setEventDetails("");
        RegistryEventDAO.saveRegistryEvent(re);

        // Analytics logging.
Logging.logRequest(true, request, uriInfo, profile, Analytics.EVENT_CREATE_RELATED_ENTITY, Analytics.ID_FIELD, newDbRelatedEntity.getRelatedEntityId(), Analytics.TITLE_FIELD, newDbRelatedEntity.getTitle(), Analytics.OWNER_FIELD, newDbRelatedEntity.getOwner()); return Response.created(EntityPaths.getURIOfEntity(newDbRelatedEntity)). entity(reReturned).build(); } /** Update a related entity. * @param request The HTTP request. * @param uriInfo The UriInfo of the request. * @param profile The caller's security profile. * @param relatedEntityId The ID of the related entity to be updated. * @param updatedRelatedEntity The new value of the related entity. * @return The updated related entity, in either XML or JSON format. */ @SuppressWarnings("checkstyle:MethodLength") @Path(ApiPaths.RELATED_ENTITY_ID) @Consumes({MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON}) @Produces({MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON}) @Pac4JSecurity @PUT @ApiOperation(value = "Update a related entity.", response = RelatedEntity.class) @ApiResponses(value = { @ApiResponse(code = HttpStatus.SC_BAD_REQUEST, message = "Invalid input", response = ErrorResult.class), @ApiResponse(code = HttpStatus.SC_UNAUTHORIZED, message = "Not authenticated", response = ErrorResult.class, responseHeaders = { @ResponseHeader(name = "WWW-Authenticate", response = String.class) }), @ApiResponse(code = HttpStatus.SC_FORBIDDEN, message = "Not authenticated, or not authorized", response = ErrorResult.class), @ApiResponse(code = HttpStatus.SC_CONFLICT, message = "Duplicate related entity. See response " + "constraintViolations for conflicting entities.", response = ErrorResult.class), @ApiResponse(code = HttpStatus.SC_INTERNAL_SERVER_ERROR, message = "Internal server error", response = ErrorResult.class) }) public Response updateRelatedEntity( @Context final HttpServletRequest request, @Context final UriInfo uriInfo, @ApiParam(hidden = true) @Pac4JProfile final CommonProfile profile, @ApiParam(value = "The ID of the related entity to be updated.") @PathParam("relatedEntityId") final Integer relatedEntityId, @ApiParam(value = "The new value of the related entity.", required = true) @NotNull(message = "The related entity must not be null.") @CheckRelatedEntity(mode = ValidationMode.UPDATE) final RelatedEntity updatedRelatedEntity) { logger.debug("called updateRelatedEntity"); // Keep track of whether we actually do any database changes. // We only add a registry event if we changed something. boolean reWasUpdated = false; // We manage our own transaction. Because we do merges, it is // at least polite to include the initial queries within the same // transaction. // TAKE NOTE: All DAO methods used below are the variants that take an // EntityManager as the first parameter. // (Historical note: I moved the beginning of the // transaction up front, and introduced the variant DAO methods that // take an EntityManager parameter, in order to try to fix an exception // that was occurring during database updates. I subsequently realised // that the exception's underlying cause was completely unrelated: // there was an error in RelatedEntityIdentifierRegistrySchemaMapper // to do with the mapping of ids. Having done the work, I decided // to leave it.) 
EntityManager em = null; EntityTransaction txn = null; try { em = DBContext.getEntityManager(); txn = em.getTransaction(); txn.begin(); au.org.ands.vocabs.registry.db.entity.RelatedEntity existingDbRelatedEntity = RelatedEntityDAO.getCurrentRelatedEntityByRelatedEntityId( em, relatedEntityId); if (existingDbRelatedEntity == null) { // Possible future work: distinguish between: // (a) never existed: HTTP status code 404 // (b) there is historical data: HTTP status code 410 Logging.logRequest(false, request, uriInfo, profile, Analytics.EVENT_UPDATE_RELATED_ENTITY, Analytics.FAILURE_REASON, "No related entity with that id", Analytics.ID_FIELD, relatedEntityId, Analytics.TITLE_FIELD, updatedRelatedEntity.getTitle(), Analytics.OWNER_FIELD, updatedRelatedEntity.getOwner()); return Response.status(Status.NOT_FOUND).entity( new ErrorResult("No related entity with that id")). build(); } if (!AuthUtils.ownerIsAuthorizedByOrganisationOrUsername(profile, existingDbRelatedEntity.getOwner())) { Logging.logRequest(false, request, uriInfo, profile, Analytics.EVENT_UPDATE_RELATED_ENTITY, Analytics.FAILURE_REASON, "authorization", Analytics.ID_FIELD, relatedEntityId, Analytics.TITLE_FIELD, updatedRelatedEntity.getTitle(), Analytics.OWNER_FIELD, updatedRelatedEntity.getOwner()); return ResponseUtils.generateForbiddenResponseForOwner(); } List<ValidationError> validationErrors = new ArrayList<>(); // Check that the ID has not been altered. if (updatedRelatedEntity.getId().intValue() != relatedEntityId) { ValidationError ve = new ValidationError(); ve.setMessage("Related entity ID in request body does not " + "match the value of the path parameter"); ve.setPath("id"); validationErrors.add(ve); } if (!existingDbRelatedEntity.getOwner().equals( updatedRelatedEntity.getOwner())) { // Owner has changed. Need to check that the user still has // permission to be the owner. if (!AuthUtils.ownerIsAuthorizedByOrganisationOrUsername( profile, updatedRelatedEntity.getOwner())) { Logging.logRequest(false, request, uriInfo, profile, Analytics.EVENT_UPDATE_RELATED_ENTITY, Analytics.FAILURE_REASON, "authorization", Analytics.ID_FIELD, relatedEntityId, Analytics.TITLE_FIELD, updatedRelatedEntity.getTitle(), Analytics.OWNER_FIELD, updatedRelatedEntity.getOwner()); return ResponseUtils.generateForbiddenResponseForOwner(); } } String username = profile.getUsername(); // All of the mappers we will need. RelatedEntityRegistrySchemaMapper reToDbMapper = RelatedEntityRegistrySchemaMapper.INSTANCE; RelatedEntityIdentifierRegistrySchemaMapper reiToDbMapper = RelatedEntityIdentifierRegistrySchemaMapper.INSTANCE; RelatedEntityDbSchemaMapper reFromDbMapper = RelatedEntityDbSchemaMapper.INSTANCE; RelatedEntityIdentifierDbSchemaMapper reiFromDbMapper = RelatedEntityIdentifierDbSchemaMapper.INSTANCE; au.org.ands.vocabs.registry.db.entity.RelatedEntity updatedDbRelatedEntity = reToDbMapper.sourceToTarget( updatedRelatedEntity); // Did the top-level data stay the same? boolean sameTopLevel = ComparisonUtils.isEqualRelatedEntity( existingDbRelatedEntity, updatedDbRelatedEntity); // Check for duplicates. // NB: we don't need to check for duplicate identifiers _within_ the // request body; that is handled by @CheckRelatedEntity. // Check REs with matching top-level properties. 
for (au.org.ands.vocabs.registry.db.entity.RelatedEntity re : RelatedEntityDAO.getMatchingRelatedEntities( em, updatedDbRelatedEntity)) { // The following conditional is a difference from // createRelatedEntity(): since this RE is already in the // database, it has to be eliminated from the matching process! if (!re.getRelatedEntityId().equals(relatedEntityId)) { ValidationError ve = new ValidationError(); ve.setMessage("Related entity with matching details; path " + "has the ID of the related entity"); ve.setPath(re.getRelatedEntityId().toString()); validationErrors.add(ve); } } // Check for existing related entities with a matching identifier. for (RelatedEntityIdentifier rei : updatedRelatedEntity.getRelatedEntityIdentifier()) { for (au.org.ands.vocabs.registry.db.entity.RelatedEntity re : RelatedEntityDAO. getRelatedEntitiesWithMatchingIdentifier(em, updatedRelatedEntity.getOwner(), rei.getIdentifierType(), rei.getIdentifierValue())) { // The following conditional is a difference from // createRelatedEntity(): since this RE is already in the // database, it has to be eliminated from the matching // process! if (!re.getRelatedEntityId().equals(relatedEntityId)) { ValidationError ve = new ValidationError(); ve.setMessage("Related entity with matching " + "identifier; path has the ID of the " + "related entity"); ve.setPath(re.getRelatedEntityId().toString()); validationErrors.add(ve); } } } // Analyse the identifiers. // First, get the existing identifiers and put them into a Map. List<au.org.ands.vocabs.registry.db.entity.RelatedEntityIdentifier> existingDbREIs = RelatedEntityIdentifierDAO. getCurrentRelatedEntityIdentifierListForRelatedEntity(em, relatedEntityId); Map<Integer, au.org.ands.vocabs.registry.db.entity. RelatedEntityIdentifier> existingDbREIMap = new HashMap<>(); // deletedIdentifiers will store the IDs of the identifiers // that the client is requesting to be deleted. It is a set that // is initialized with the IDs of the existing identifiers. // Then, IDs will be _removed_ from the set as we subsequently // process the list of identifiers included in the request body. Set<Integer> deletedIdentifiers = new HashSet<>(); for (au.org.ands.vocabs.registry.db.entity. RelatedEntityIdentifier rei : existingDbREIs) { existingDbREIMap.put(rei.getRelatedEntityIdentifierId(), rei); deletedIdentifiers.add(rei.getRelatedEntityIdentifierId()); } // Now, go through the identifiers in the request body and work out // which ones are being added, updated, and deleted, and which // are unmodified. // See also the definition and priming of deletedIdentifiers // just above. List<RelatedEntityIdentifier> newIdentifiers = new ArrayList<>(); List<au.org.ands.vocabs.registry.db.entity.RelatedEntityIdentifier> updatedIdentifiers = new ArrayList<>(); List<RelatedEntityIdentifier> unmodifiedIdentifiers = new ArrayList<>(); for (RelatedEntityIdentifier rei : updatedRelatedEntity.getRelatedEntityIdentifier()) { Integer reiID = rei.getId(); // Is it new, or does it have an ID we know about? if (reiID == null) { // New. newIdentifiers.add(rei); } else { if (!existingDbREIMap.containsKey(reiID)) { ValidationError ve = new ValidationError(); ve.setMessage("Related entity identifier ID " + "specified, but does not correspond with " + "an identifier of the related entity; path " + "has the ID of the related entity " + "identifier"); ve.setPath(reiID.toString()); validationErrors.add(ve); continue; } // We do know about this one, and we know that it is // not being deleted. 
deletedIdentifiers.remove(reiID); // Now check if there has been a modification. au.org.ands.vocabs.registry.db.entity. RelatedEntityIdentifier reiDb = reiToDbMapper.sourceToTarget(rei); if (ComparisonUtils.isEqualRelatedEntityIdentifier( existingDbREIMap.get(reiID), reiDb)) { unmodifiedIdentifiers.add(rei); } else { updatedIdentifiers.add(reiDb); } } } if (!validationErrors.isEmpty()) { Logging.logRequest(false, request, uriInfo, profile, Analytics.EVENT_UPDATE_RELATED_ENTITY, Analytics.FAILURE_REASON, "validation", Analytics.ID_FIELD, relatedEntityId, Analytics.TITLE_FIELD, updatedDbRelatedEntity.getTitle(), Analytics.OWNER_FIELD, updatedDbRelatedEntity.getOwner()); ErrorResult errorResult = new ErrorResult("Because of validation errors, won't " + "update related entity"); errorResult.setConstraintViolation(validationErrors); return Response.status(Status.CONFLICT).entity(errorResult). build(); } // Timestamp to use for start/end date values corresponding // to this event. LocalDateTime now = TemporalUtils.nowUTC(); // reReturned (and reiReturned, defined below) are used to represent // the value that will be returned to the caller. RelatedEntity reReturned = null; // Persist the RelatedEntity, if it has changed. // No notion of drafts for related entities: // always use current date/time as start date. if (!sameTopLevel) { existingDbRelatedEntity.setEndDate(now); RelatedEntityDAO.updateRelatedEntity(em, existingDbRelatedEntity); updatedDbRelatedEntity.setStartDate(now); updatedDbRelatedEntity.setEndDate( TemporalConstants.CURRENTLY_VALID_END_DATE); updatedDbRelatedEntity.setModifiedBy(username); // Persist the related entity. RelatedEntityDAO.saveRelatedEntity(em, updatedDbRelatedEntity); reWasUpdated = true; } reReturned = reFromDbMapper.sourceToTarget( updatedDbRelatedEntity); List<RelatedEntityIdentifier> reiReturned = reReturned.getRelatedEntityIdentifier(); // Persist changes to the identifiers, in the sequence: // deleted, modified, added. // First, deleted identifiers. for (Integer reiID : deletedIdentifiers) { au.org.ands.vocabs.registry.db.entity.RelatedEntityIdentifier dbREI = existingDbREIMap.get(reiID); dbREI.setEndDate(now); // Hmm/oops, can't do the following. It seems the database // schema doesn't support recording the identity of // the user who deleted this entity within the table // itself: it must be recorded somewhere else (e.g., // as the registry event). // dbREI.setModifiedBy(username); // Persist the deleted identifier. RelatedEntityIdentifierDAO.updateRelatedEntityIdentifier(em, dbREI); reWasUpdated = true; } // Second, modified identifiers. for (au.org.ands.vocabs.registry.db.entity.RelatedEntityIdentifier newDbREI : updatedIdentifiers) { Integer dbReiID = newDbREI.getRelatedEntityIdentifierId(); au.org.ands.vocabs.registry.db.entity.RelatedEntityIdentifier oldDbREI = existingDbREIMap.get(dbReiID); oldDbREI.setEndDate(now); RelatedEntityIdentifierDAO.updateRelatedEntityIdentifier(em, oldDbREI); logger.info("rei id = " + newDbREI.getId()); newDbREI.setRelatedEntityIdentifierId(dbReiID); // Must manually point it back to the parent entity. newDbREI.setRelatedEntityId( updatedDbRelatedEntity.getRelatedEntityId()); newDbREI.setStartDate(now); newDbREI.setEndDate(TemporalConstants.CURRENTLY_VALID_END_DATE); newDbREI.setModifiedBy(username); // Persist the identifier. RelatedEntityIdentifierDAO.saveRelatedEntityIdentifier(em, newDbREI); reWasUpdated = true; reiReturned.add(reiFromDbMapper.sourceToTarget(newDbREI)); } // Third, added identifiers. 
            for (RelatedEntityIdentifier rei : newIdentifiers) {
                au.org.ands.vocabs.registry.db.entity.RelatedEntityIdentifier
                        newDbREI = reiToDbMapper.sourceToTarget(rei);
                // Must manually point it back to the parent entity.
                newDbREI.setRelatedEntityId(
                        updatedDbRelatedEntity.getRelatedEntityId());
                newDbREI.setStartDate(now);
                newDbREI.setEndDate(TemporalConstants.CURRENTLY_VALID_END_DATE);
                newDbREI.setModifiedBy(username);
                // Persist the identifier.
                RelatedEntityIdentifierDAO.saveRelatedEntityIdentifierWithId(
                        em, newDbREI);
                reWasUpdated = true;
                reiReturned.add(reiFromDbMapper.sourceToTarget(newDbREI));
            }
            // And while we're here, copy over the unmodified identifiers
            // into the value that will be returned to the caller.
            for (RelatedEntityIdentifier rei : unmodifiedIdentifiers) {
                reiReturned.add(rei);
            }

            // Only commit, with the addition of a registry event,
            // if we actually changed the database.
            if (reWasUpdated) {
                // This is a registry event; log it.
                RegistryEvent re = new RegistryEvent();
                re.setElementType(RegistryEventElementType.RELATED_ENTITIES);
                re.setElementId(updatedDbRelatedEntity.getRelatedEntityId());
                // Related entities have no draft state, so `now` is the
                // correct event date.
                re.setEventDate(now);
                re.setEventType(RegistryEventEventType.UPDATED);
                re.setEventUser(profile.getUsername());
                // To be done: put something sensible in the details.
                re.setEventDetails("");
                RegistryEventDAO.saveRegistryEvent(em, re);
                // And now, commit all of the above changes.
                txn.commit();
            } else {
                // No database changes were made, so roll back.
                txn.rollback();
            }
            // If we have reached this point, we have success.
            // Analytics logging.
            Logging.logRequest(true, request, uriInfo, profile,
                    Analytics.EVENT_UPDATE_RELATED_ENTITY,
                    Analytics.ID_FIELD, relatedEntityId,
                    Analytics.TITLE_FIELD, updatedDbRelatedEntity.getTitle(),
                    Analytics.OWNER_FIELD, updatedDbRelatedEntity.getOwner(),
                    Analytics.WAS_MODIFIED_FIELD, reWasUpdated);

            // Check https://intranet.ands.org.au/display/PROJ/
            // Vocabulary+Solr+documents+and+queries
            // (and the EntityIndexer class!)
            // to see how REs are involved in Solr indexing.
            // It seems: only publisher titles are indexed.
            // Therefore, re-index of a vocabulary is needed when and only when:
            // (a) this RE is a publisher of the vocabulary,
            // AND (b) the RE's title has changed.
            // So now, see if/what we need to re-index.
            if (!existingDbRelatedEntity.getTitle().equals(
                    updatedDbRelatedEntity.getTitle())) {
                // The title has changed. Find all uses of this as a publisher.
                MultivaluedMap<Vocabulary, RelatedEntityRelation> dbRVs =
                        VocabularyDAO.getCurrentVocabulariesForRelatedEntity(
                                relatedEntityId);
                for (Map.Entry<Vocabulary, List<RelatedEntityRelation>>
                        mapElement : dbRVs.entrySet()) {
                    if (mapElement.getValue().contains(
                            RelatedEntityRelation.PUBLISHED_BY)) {
                        Integer vocabularyId =
                                mapElement.getKey().getVocabularyId();
                        logger.info("RE with ID " + relatedEntityId
                                + " was updated; re-indexing vocabulary "
                                + "with ID: " + vocabularyId);
                        EntityIndexer.indexVocabulary(vocabularyId);
                    }
                }
            }
            return Response.ok().entity(reReturned).build();
        } catch (Throwable t) {
            if (txn != null && txn.isActive()) {
                try {
                    logger.error("Exception during transaction; rolling back",
                            t);
                    txn.rollback();
                } catch (Exception e) {
                    logger.error("Rollback failure!", e);
                }
            } else {
                logger.error("Exception, either during rollback, or "
                        + "outside active transaction", t);
            }
            // Don't throw, but fall through so that the user sees
            // an error message.
            // throw t;
        } finally {
            if (em != null) {
                em.close();
            }
        }
        // If we fell through to here: ouch. 
Logging.logRequest(false, request, uriInfo, profile, Analytics.EVENT_UPDATE_RELATED_ENTITY, Analytics.FAILURE_REASON, "internal error", Analytics.ID_FIELD, relatedEntityId, Analytics.TITLE_FIELD, updatedRelatedEntity.getTitle(), Analytics.OWNER_FIELD, updatedRelatedEntity.getOwner()); return ErrorResultUtils.internalServerError(); } }
Exploring the relationship between head anatomy and cochlear implant stability in children Abstract In our experience, surgical outcomes in children have been excellent, with a low complication rate. Our aim in this study was to better understand which aspects of our current surgical technique have been successful, with a view to retaining those that are beneficial as we proceed with implantation of future devices. Because the receiver–stimulator and overlying skin flap may be more vulnerable to damage in children than in adults, we concentrated on issues related to the positioning and security of this part of the implant on the head. Three specific areas of vulnerability were explored in separate experiments. In Experiment 1, we determined the effect of the position of the device on the ability of a child to roll their head without allowing contact between the device and a supporting surface. The ‘freeroll’ angle was determined for devices positioned conventionally (the back position) and for those in which the device is placed in a more anterior position (the up position). In Experiment 2, we studied the retentive capacity of the child's pericranium and measured the displacement force required to dislodge an implant from the bed if retained by the calvarium only. In Experiment 3, we compared the skull curvature of children in whom the device was placed in the back versus the up position. These results inform us as to how best to proceed with implantation in children using future devices that have thinner and wider receiver–stimulators.
No one had even asked the question. No need to. Ottawa Senators head coach Dave Cameron walked up to the microphone and stated the obvious: "Yes, I am concerned about our record at home." And so he should be, as Wednesday marked the final game of a four-game homestand for the Senators and, so far this young season, they had yet to raise their sticks in victory at Canadian Tire Centre. It was not even Halloween, and some of the fans calling in to the talk shows were already calling for fresh bodies, both behind the bench and on the ice. Opportunity, however, presented itself on a day when the vestiges of Hurricane Patricia swept through the national capital. The Calgary Flames, a team that had lost seven of its first nine games, was in town to face the Senators. A one-anthem game with one certain outcome: given that the NHL put an end to ties with overtime and the shootout, there would have to be a winner. No one could possibly have foreseen how wild it would be. Ottawa won 5-4 in a shootout, thereby claiming, finally, the team's first victory at home this season. Emotions are irrational in sport, but in the modern NHL there is, curiously, some argument to be made for early panic. There is today so much parity in the game – whether caused by the salary cap or stifling coaching, take your pick – that failure to move with the pack can quickly prove disastrous. As Globe and Mail colleague James Mirtle pointed out recently, seven teams failed to get to 10 points by Halloween of last year and only one of them, the Winnipeg Jets, made the playoffs. In the case of the Ottawa Senators, their surprising step into last spring's playoffs came courtesy of a freak situation that is unlikely ever again to occur. Starting goaltender Craig Anderson was injured and replaced with backup Robin Lehner, who then suffered a concussion in a collision with teammate Clarke MacArthur. This forced the Senators to turn to an untested minor-leaguer, Andrew Hammond, who made his NHL debut at the rather ripe age of 27. The "Hamburglar" then went 20-1-2 down the stretch as the Senators moved from despair to meeting the Montreal Canadiens in the opening round of the Stanley Cup playoffs. The fallout of all that was that Lehner, the supposed heir to the net, was traded to Buffalo and Hammond given a three-year, $4.05-million contract. Calgary also surprised last spring when the Flames scrambled their way to 97 points and a playoff position. They upset the Vancouver Canucks in the first round but fell to the powerful Anaheim Ducks in the second round. It was a valiant performance and expectations for 2015-16 could hardly have been higher when the season began. When a team has but four points after nine games … well, reaching that 95-100-point level required to reach the playoffs becomes increasingly unlikely, though not impossible if change comes quickly and lasts. The solution is obvious, says Flames head coach Bob Hartley. "You need goaltending. You need goals. You need production from everybody." "We're in a bit of a funk right now," Calgary general manager Brad Treliving told TSN 1200 radio. "We're trying to move beyond the rain clouds." For a long period this night no one seemed capable of moving beyond the storm, not in the parking lot, not on the ice. 
The Flames were barely smouldering in the opening period, but so superior to the listless, error-prone Senators it seemed unfair that they had no lead. The two players who are expected to be the stars – Ottawa forward Bobby Ryan, with his new seven-year, $50.75-million contract, and Calgary defenceman Dougie Hamilton, traded from the Bruins for a costly first-round draft pick and two second-round picks – have struggled mightily so far. Ryan had but a single goal, and that into an empty net. Hamilton came into the game a disappointing minus-11. But all that soon meant nothing as a dreary game suddenly became a real game, even a wild game. It was the much-criticized Hamilton who scored first, moving up ice early in the first period and firing a hard wrist shot to the glove side of Anderson. Ottawa finally scored late in the second when Zack Smith lost the puck in his own end, regained it and skated untouched down the ice to score on a similar play against Calgary goaltender Jonas Hiller. Then, with 13.5 seconds left in the period, Bobby Ryan finally scored a real goal, shovelling a Kyle Turris pass in past Hiller from the side of the net. The Flames then opened the third period with Joe Colborne scoring on a broken play, then went ahead 3-2 on a weak goal by Kris Russell that Anderson misjudged. It seemed that, indeed, Ottawa would set a team record they wanted no part of – five straight losses at home to open the season. But then, as if they finally awoke, Kyle Turris scored on a scramble and little Jean-Gabriel Pageau put the Senators in front with a shot that somehow leaked through a crowd. It could not hold, though, as Sam Bennett tied the game again on a Calgary power play. In the end it was settled, rather appropriately, in a shootout. Ottawa, the team with five goals, won; Calgary, the team with four goals, lost. As Cameron had said earlier in the day, "It's not rocket science."
// app/src/main/java/com/enhancedsociety/firefly/SignatureActivity.java
package com.enhancedsociety.firefly;

import android.Manifest;
import android.bluetooth.BluetoothAdapter;
import android.bluetooth.le.AdvertiseCallback;
import android.bluetooth.le.AdvertiseSettings;
import android.content.pm.PackageManager;
import android.os.Bundle;
import android.support.v4.app.ActivityCompat;
import android.support.v4.content.ContextCompat;
import android.support.v7.app.AppCompatActivity;
import android.util.Log;
import android.view.SurfaceView;
import android.view.View;
import android.widget.Button;
import android.widget.TextView;

import org.web3j.crypto.Hash;
import org.web3j.crypto.Keys;
import org.web3j.crypto.Sign;
import org.web3j.utils.Numeric;

import java.math.BigInteger;
import java.security.SignatureException;

import github.nisrulz.qreader.QRDataListener;
import github.nisrulz.qreader.QREader;

public class SignatureActivity extends AppCompatActivity {
    public static final String A_TEST_MESSAGE = "A test message";
    // Assumes the signer always produces recovery id 28; a more robust
    // implementation would try both 27 and 28.
    public static final byte SIG_V = (byte) 28;
    private static final int MY_PERMISSIONS_REQUEST_CAMERA = 124;
    private static final String TAG = "coiso";
    private SurfaceView mySurfaceView;
    private QREader QRReader;
    private String sig_r = "";
    private String sig_s = "";
    private String eth_address;
    private String original_message;

    public static String verifySig(String msg, String r_hex, String s_hex) {
        byte[] r = Numeric.hexStringToByteArray(r_hex);
        byte[] s = Numeric.hexStringToByteArray(s_hex);
        Sign.SignatureData sigData = new Sign.SignatureData(SIG_V, r, s);
        // EIP-191 "personal message" prefix. Note: msg.length() counts UTF-16
        // chars, not bytes, so this is only correct for ASCII messages.
        String prefixed_msg = "\u0019Ethereum Signed Message:\n" + msg.length() + msg;
        byte[] msg_hashed_b = Hash.sha3(prefixed_msg.getBytes());
        // Log.wtf(TAG, "Message hash " + Numeric.toHexString(msg_hashed_b));
        // Log.wtf(TAG, "Sig R " + r_hex);
        // Log.wtf(TAG, "Sig S " + s_hex);
        // Log.wtf(TAG, "Sig V " + Numeric.toHexString(new byte[]{SIG_V}));
        try {
            // Recover the public key from the signature and derive the address.
            BigInteger pubkey = Sign.signedMessageToKey(prefixed_msg.getBytes(), sigData);
            String address = Keys.getAddress(pubkey);
            return "0x" + address;
        } catch (SignatureException e) {
            Log.w(TAG, e.getLocalizedMessage());
            return "";
        }
    }

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        eth_address = getIntent().getStringExtra("eth_address");
        original_message = getIntent().getStringExtra("original_message");
        BluetoothAdapter.getDefaultAdapter().getBluetoothLeAdvertiser().stopAdvertising(new AdvertiseCallback() {
            @Override
            public void onStartSuccess(AdvertiseSettings settingsInEffect) {
                super.onStartSuccess(settingsInEffect);
                Log.wtf(TAG, "stopAdvertising.onStartSuccess");
            }

            @Override
            public void onStartFailure(int errorCode) {
                super.onStartFailure(errorCode);
                Log.wtf(TAG, "stopAdvertising.onStartFailure=" + errorCode);
            }
        });
    }

    private void setupQRReader() {
        if (mySurfaceView == null) {
            if (ContextCompat.checkSelfPermission(this, Manifest.permission.CAMERA)
                    != PackageManager.PERMISSION_GRANTED) {
                ActivityCompat.requestPermissions(this,
                        new String[]{Manifest.permission.CAMERA},
                        MY_PERMISSIONS_REQUEST_CAMERA);
            } else {
                setContentView(R.layout.activity_sig);
                mySurfaceView = findViewById(R.id.camera_view);
                QRReader = new QREader.Builder(this, mySurfaceView, new QRDataListener() {
                    @Override
                    public void onDetected(final String data) {
                        // Log.wtf(TAG, "sig_r " + sig_r.isEmpty() + " sig_s " + sig_s.isEmpty() + " data " + data);
                        // The signature arrives split across two QR codes,
                        // one for R and one for S.
                        if (data.startsWith("SIG:R/")) {
                            String parsed_data = data.substring(6);
                            if (!sig_r.equals(parsed_data)) {
                                sig_r =
parsed_data; checkSigIsRead(); } } else if (data.startsWith("SIG:S/")) { String parsed_data = data.substring(6); if (!sig_s.equals(parsed_data)) { sig_s = parsed_data; checkSigIsRead(); } } } }).facing(QREader.BACK_CAM) .enableAutofocus(true) .height(mySurfaceView.getHeight()) .width(mySurfaceView.getWidth()) .build(); } } else { // Log.wtf(TAG, "surfaceview is not null"); } } public void checkSigIsRead() { if (sig_r.isEmpty() || sig_s.isEmpty()) { return; } final String verification_addr = verifySig(original_message, sig_r, sig_s); final TextView label = findViewById(R.id.textLabel); final TextView text = findViewById(R.id.textView); text.post(new Runnable() { @Override public void run() { Log.wtf(TAG, "VERIFICATION ADDRESS " + verification_addr); Log.wtf(TAG, "SIGNING ADDRESS " + eth_address); label.setText("signature is"); text.setText(verification_addr.equals(eth_address) ? "CORRECT" : "WRONG"); final Button btn = findViewById(R.id.button3); btn.post(new Runnable() { @Override public void run() { btn.setVisibility(View.VISIBLE); } }); btn.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { finishAffinity(); } }); } }); } @Override protected void onResume() { super.onResume(); // Log.wtf(TAG, "onResume"); setupQRReader(); if (QRReader != null) { QRReader.initAndStart(mySurfaceView); } } @Override public void onRequestPermissionsResult(int requestCode, String permissions[], int[] grantResults) { // Log.wtf(TAG, "onRequestPermissionsResult"); switch (requestCode) { case MY_PERMISSIONS_REQUEST_CAMERA: { if (grantResults.length > 0 && grantResults[0] == PackageManager.PERMISSION_GRANTED) { setupQRReader(); } else { finish(); } return; } default: { // Log.wtf(TAG, "response for unknown request " + requestCode + ", ignoring"); finish(); } } } @Override protected void onPause() { super.onPause(); // Log.wtf(TAG, "onPause"); if (QRReader != null) { QRReader.releaseAndCleanup(); } } }
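# A cross-language sketch of the verification flow in Python, assuming the
# third-party eth-account package (an assumption, not part of the app above):
# the message gets the "\x19Ethereum Signed Message:\n<len>" prefix, is
# hashed, and the signer's address is recovered from (v, r, s).
from eth_account import Account
from eth_account.messages import encode_defunct


def verify_sig(msg, r_hex, s_hex, v=28):
    signable = encode_defunct(text=msg)  # applies the EIP-191 prefix
    return Account.recover_message(
        signable, vrs=(v, int(r_hex, 16), int(s_hex, 16))
    )


# Hypothetical usage: compare the recovered address with the expected one,
# case-insensitively (recover_message returns a checksummed address).
# recovered = verify_sig("A test message", r_hex, s_hex)
# print(recovered.lower() == expected_address.lower())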
# -*- coding: utf8 -*-
# license: WTFPL version 2, or whatever is closest to "no license" and "public domain" (like Unlicense or CC0)
r"""
Package ``pyparse.parser'' with parser infrastructure and parser modules.

@see https://en.wikipedia.org/wiki/Context-free_grammar


Disambiguate
============

A grammar is ambiguous if at least one input can be represented by multiple syntax trees.
By adopting disambiguation rules the grammar can stop being ambiguous.
Examples:
* leftmost derivation or rightmost derivation
* rule order
* rewrite syntax tree


Leftmost derivation
-------------------

Always derive/rewrite the first non-terminal on the left.

Example top-down leftmost derivation:
```python
rule1 = "S = '1'"
rule2 = "S = 'a'"
rule3 = "S = S '+' S"
S = object()

input = "1+1+a"
output = S # start, incomplete
output = (S, '+', S,) # [rule3], incomplete
output = ((S, '+', S,), '+', S,) # [rule3,?], incomplete
output = ((('1',), '+', S,), '+', S,) # [rule1,?,?], incomplete
output = ((('1',), '+', ('1',)), '+', S,) # [rule1,?], incomplete
output = ((('1',), '+', ('1',)), '+', ('a',),) # [rule2], accept
```


Rightmost derivation
--------------------

Always derive/rewrite the first non-terminal on the right.

Example top-down rightmost derivation:
```python
rule1 = "S = '1'"
rule2 = "S = 'a'"
rule3 = "S = S '+' S"
S = object()

input = "1+1+a"
output = S # start, incomplete
output = (S, '+', S,) # [rule3], incomplete
output = (S, '+', (S, '+', S,),) # [?,rule3], incomplete
output = (S, '+', (S, '+', ('a',),),) # [?,?,rule2], incomplete
output = (S, '+', (('1',), '+', ('a',),),) # [?,rule1], incomplete
output = (('1',), '+', (('1',), '+', ('a',),),) # [rule1], accept
```


Rule order
----------

Assign a rule order, sort the syntax trees with the rules order, and choose the first syntax tree.
A simple example is the definition order of the rules.

Example with all top-down paths and all orders:
```python
rule1 = "S = '1'"
rule2 = "S = 'a'"
rule3 = "S = S '+' S"

input = "1+1+a"

# order: [rule1,...] or [rule2,rule1,...]
# [rule3][rule1,?][rule3][rule1,?][rule2], accept
# [rule3][rule1,?][rule3][?,rule2][rule1]
output = (('1',), '+', (('1',), '+', ('a',),),)

# order: [?,...] or [rule2,?,...]
# [rule3][?,rule3][rule1,?,?][rule1,?][rule2]
# [rule3][?,rule3][rule1,?,?][?,rule2][rule1]
# [rule3][?,rule3][?,rule1,?][rule1,?][rule2]
# [rule3][?,rule3][?,rule1,?][?,rule2][rule1]
# [rule3][?,rule3][?,?,rule2][rule1,?][rule1]
# [rule3][?,rule3][?,?,rule2][?,rule1][rule1], accept
output = (('1',), '+', (('1',), '+', ('a',),),)
# [rule3][?,rule2][rule3][rule1,?][rule1]
# [rule3][?,rule2][rule3][?,rule1][rule1]
output = ((('1',), '+', ('1',),), '+', ('a',),)

# order: [rule3,rule1,...] or [rule2,rule3,rule1,?] or [rule3,rule2,rule1,?]
# [rule3][rule3,?][rule1,?,?][rule1,?][rule2], accept
# [rule3][rule3,?][rule1,?,?][?,rule2][rule1]
output = ((('1',), '+', ('1',),), '+', ('a',),)

# order: [rule3,?,...] or [rule2,rule3,?,rule1] or [rule3,rule2,?,rule1]
# [rule3][rule3,?][?,rule1,?][rule1,?][rule2]
# [rule3][rule3,?][?,rule1,?][?,rule2][rule1]
# [rule3][rule3,?][?,?,rule2][rule1,?][rule1]
# [rule3][rule3,?][?,?,rule2][?,rule1][rule1], accept
output = ((('1',), '+', ('1',),), '+', ('a',),)
```


Rewrite syntax tree
-------------------

Rewrite the syntax tree according to formulas.
Example top-down with rewrite formulas:
```python
rule1 = "S = '1'"
rule2 = "S = 'a'"
rule3 = "S = S '+' S"
S = object()

def rewrite(syntax_tree):
    # (from, to) rewrite pairs; '+' has the associative property
    _formulas = [
        ((S, '+', (S, '+', S,),), (S, '+', S, '+', S,)),
        (((S, '+', S,), '+', S,), (S, '+', S, '+', S,)),
    ]
    _done = False
    while not _done:
        _done = True
        for _from, _to in _formulas:
            try:
                _index = syntax_tree.index(_from)
                syntax_tree.replace(_index, _from, _to)
                _done = False
            except:
                pass

input = "1+1+a"
output = S # start, incomplete
output = (S, '+', S,) # [rule3], incomplete
# intermediate steps omitted because they depend on the implementation
output = (S, '+', S, '+', S,) # [rule3,?] or [?,rule3] with rewrite, incomplete
# intermediate steps omitted because they depend on the implementation
output = (('1',), '+', ('1',), '+', ('a',),) # [rule1,rule1,rule2], accept
```


Future
======

TODO LL(n) parser
    top-down parser (goal to tokens?)
    Left-to-right (input order?)
    Leftmost derivation (always derive/rewrite the non-terminal on the left?)
    n (needs to know n characters/codepoints/tokens to work?)
TODO parser for EBNF, ABNF, and other grammar languages
TODO parser for matroska
TODO parser for python (try making a sandbox)
TODO decorator to generate a parser class from a text grammar?
TODO decorator to generate a rule/token function from a text grammar?
TODO auto-change grammar to fit requirements? (need better understanding of grammar properties)
TODO use a single syntax tree representation (normalize parsers)
"""

from __builtin__ import AssertionError
from __builtin__ import False
from __builtin__ import IndexError
from __builtin__ import True
from __builtin__ import ValueError
from __builtin__ import enumerate
from __builtin__ import exit
from __builtin__ import getattr
from __builtin__ import int
from __builtin__ import isinstance
from __builtin__ import len
from __builtin__ import object
from __builtin__ import repr
from __builtin__ import str
from __builtin__ import type
from copy import deepcopy

from pyparse.util import println

try:
    assert False # assert must work
    println('the assert statement does not work, disable optimizations to fix this')
    exit(1)
except AssertionError:
    pass


class ParserSkeleton(object):
    r"""
    Infrastructure for derived parsers.
    It does not parse by itself.

    TODO several states/paths at the same time?
    """

    class State:
        def __init__(self, data):
            self.data = data

        def __repr__(self):
            _s = ["ParserSkeleton.State(", repr(self.data), ")"]
            return ''.join(_s)
    # ssalc State

    def __init__(self, data=''):
        if not isinstance(data, str):
            raise ValueError, type(data) # expecting str, TODO other types
        object.__init__(self)
        self.data = data
        self.state = self.State(data) # TODO strview or similar in the state
        self.stack = []

    def __getitem__(self, index, *args, **kwargs):
        """ Gets data or ``None''. """
        try:
            return self.state.data[index]
        except IndexError:
            return None # implicit None

    def __getslice__(self, start, stop, *args, **kwargs):
        """ Gets data or ``None''. """
        try:
            return self.state.data[start:stop]
        except IndexError:
            return None # implicit None

    def __enter__(self, *args, **kwargs):
        r"""
        Save parser state.
        """
        if not isinstance(self, ParserSkeleton):
            raise RuntimeError, type(self) # expecting ParserSkeleton
        self.stack += [self.state]
        self.state = deepcopy(self.state)

    def __exit__(self, exc_type, exc_value, traceback):
        r"""
        Keep or revert parser state.
""" if not isinstance(self, ParserSkeleton): raise RuntimeError, type(self) # expecting ParserSkeleton if not len(self.stack) > 0: raise RuntimeError, len(self.stack) # expecting a non-empty stack _state = self.stack.pop(-1) if exc_type is None and exc_value is None and traceback is None: # all ok, forget previous state pass else: # has exception, revert to previous position and propagate exception self.state = _state def consume(self, n, *args, **kwargs): r""" Consumes state data. """ if not isinstance(n, int): raise ValueError, n # expecting int if not n >= 0: raise ValueError, n # expecting >= 0 _consumed = self.state.data[:n] self.state.data = self.state.data[n:] return _consumed def starts_with(self, s): r""" TODO """ if not isinstance(s, str): raise ValueError, (s,) # expecting str for _i, _c in enumerate(s): if self[_i] != _c: return False # does not match return True # matches def maybe(self, func, *args, **kwargs): r""" Call ``func'' with the provided arguments. Return ``None'' on ´´AssertionError''. """ if not isinstance(self, ParserSkeleton): raise RuntimeError, type(self) # expecting ParserSkeleton if not getattr(self, func.__name__) == func: raise ValueError, repr(func) # must belong to self try: with self: # revert state on error return func(*args, **kwargs) except AssertionError: return None # ssalc ParserSkeleton __all__ = [] __builtins__ = {} # enter restricted mode
/**
 * Copyright 2017-2021, Voxel51, Inc.
 */

import { LABEL_LISTS, LABEL_TAGS_CLASSES } from "../constants";
import { BaseState } from "../state";
import { Overlay } from "./base";
import {
  ClassificationsOverlay,
  TemporalDetectionOverlay,
} from "./classifications";
import DetectionOverlay, { getDetectionPoints } from "./detection";
import KeypointOverlay, { getKeypointPoints } from "./keypoint";
import PolylineOverlay, { getPolylinePoints } from "./polyline";
import SegmentationOverlay, { getSegmentationPoints } from "./segmentation";

const fromLabel = (overlayType) => (field, label) => [
  new overlayType(field, label),
];

const fromLabelList = (overlayType, list_key) => (field, labels) =>
  labels[list_key].map((label) => new overlayType(field, label));

export { ClassificationsOverlay };

export const FROM_FO = {
  Detection: fromLabel(DetectionOverlay),
  Detections: fromLabelList(DetectionOverlay, "detections"),
  Keypoint: fromLabel(KeypointOverlay),
  Keypoints: fromLabelList(KeypointOverlay, "keypoints"),
  Polyline: fromLabel(PolylineOverlay),
  Polylines: fromLabelList(PolylineOverlay, "polylines"),
  Segmentation: fromLabel(SegmentationOverlay),
};

export const POINTS_FROM_FO = {
  Detection: (label) => getDetectionPoints([label]),
  Detections: (label) => getDetectionPoints(label.detections),
  Keypoint: (label) => getKeypointPoints([label]),
  Keypoints: (label) => getKeypointPoints(label.keypoints),
  Polyline: (label) => getPolylinePoints([label]),
  Polylines: (label) => getPolylinePoints(label.polylines),
  Segmentation: (label) => getSegmentationPoints([label]),
};

export const loadOverlays = <State extends BaseState>(
  sample: {
    [key: string]: any;
  },
  video = false
): Overlay<State>[] => {
  const classifications = [];
  let overlays = [];
  for (const field in sample) {
    const label = sample[field];
    if (!label) {
      continue;
    }

    if (label._cls in FROM_FO) {
      const labelOverlays = FROM_FO[label._cls](field, label);
      overlays = [...overlays, ...labelOverlays];
    } else if (LABEL_TAGS_CLASSES.includes(label._cls)) {
      classifications.push([
        field,
        label._cls in LABEL_LISTS ? label[LABEL_LISTS[label._cls]] : [label],
      ]);
    }
  }

  if (classifications.length > 0) {
    const overlay = video
      ? new TemporalDetectionOverlay(classifications)
      : new ClassificationsOverlay(classifications);
    overlays.push(overlay);
  }

  return overlays;
};
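A hypothetical call sketch follows; the field name and label payload are illustrative, not part of the library.

```typescript
const sample = {
  predictions: {
    _cls: "Detections",
    detections: [
      { _cls: "Detection", label: "cat", bounding_box: [0.1, 0.1, 0.4, 0.4] },
    ],
  },
};

// One DetectionOverlay per entry in `detections`; a ClassificationsOverlay
// (or TemporalDetectionOverlay when video is true) is appended only if any
// label-tag classes were collected.
const overlays = loadOverlays<BaseState>(sample);
```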
package binary import ( "io" "github.com/pkg/errors" "github.com/cs3238-tsuzu/go-wasmi/types" "github.com/cs3238-tsuzu/go-wasmi/util/binrw" ) const ( magicNumber uint32 = 0x00 | 0x61<<8 | 0x73<<16 | 0x6d<<24 versionNumber uint32 = 0x01 ) // ErrInvalidFormat is an error occurred when data contain invalid format var ErrInvalidFormat = errors.New("invalid format") // ParseBinaryFormat parses wasm binary and returns sections func ParseBinaryFormat(r io.Reader) ([]types.Section, error) { magic, err := binrw.ReadLEUint32(r) if err != nil { return nil, errors.WithStack(err) } if magic != magicNumber { return nil, ErrInvalidFormat } version, err := binrw.ReadLEUint32(r) if err != nil { return nil, errors.WithStack(err) } if version != versionNumber { return nil, ErrInvalidFormat } sections := make([]types.Section, 0, 16) for { section, err := UnmarshalSection(r) if errors.Cause(err) == io.EOF { break } else if err != nil { return nil, err } sections = append(sections, section) } return sections, nil }
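A hypothetical usage sketch of ParseBinaryFormat; the import path is assumed from the package name and error handling is abbreviated.

```go
package main

import (
	"fmt"
	"os"

	"github.com/cs3238-tsuzu/go-wasmi/binary"
)

func main() {
	// Open a wasm module; the file name is illustrative.
	f, err := os.Open("module.wasm")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Sections are read until EOF; magic and version are validated first.
	sections, err := binary.ParseBinaryFormat(f)
	if err != nil {
		panic(err)
	}
	fmt.Printf("parsed %d sections\n", len(sections))
}
```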
Revolver Rani

Hindi (U/A)

Director: Sai Kabir

Cast: Kangna Ranaut, Vir Das, Piyush Mishra, Zakir Hussain, Kumud Mishra, Pankaj Saraswat

Any film set in the hinterlands of Uttar Pradesh is bound to be an action-revenge saga. So what sets an Ishqiya apart from a Bullett Raja? It's the story. And it's not just what it is, but also how it is told.

Revolver Rani starts off as a good story, and soon adds layers and multi-shaded characters that promise a hallmark film. It also adds emotions that are native to rural India, and in the process brings out the human element in an otherwise near-legendary Alka Singh (Kangna Ranaut, not entirely her usual talented self), whose ever-scheming uncle (Piyush Mishra) wants to see her on the political throne. A behenji and her mentor, anyone?

Well, Alka isn't exactly a behenji. Married once, she killed her husband over his affair. She resents people in general and her political rivals (Zakir Hussain, with Kumud Mishra and Pankaj Saraswat in tow) in particular. The only person who has access to her inner self is Rohan Kapoor (Vir Das), who has manipulated Alka to fund his tinseltown dreams.

Rani is well on her way to recovering her ministerial post when fate intervenes in the form of the best or worst news a woman can hear, depending on her marital status. As a result, her uncle's plans go awry. Then on, every move is met with a counter-move, and every action is twisted to give birth to a political issue.

So, where does it all go wrong? The story grips, but only in parts. Kangna manages to breathe some soul into her role, and her character development should win some kudos for debutant director Sai Kabir. Vir Das manages to go beyond his squinty expression a few times, but not enough to draw applause. Even Mishra seems to have nothing new to offer. Hussain is still far from his dreaded avatar in Sarkar, and Saraswat and Kumud Mishra are underutilised.

However, there are certain scenes that do work, bringing out tension, emotion and just the right amount of drama. Then again, they are few and far between. And the other scenes seem somewhat template-like. Sanjeev Srivastava's music is good, but lacks recall value.

Revolver Rani could have been a good film, had it offered more new stuff. Instead, we see more of what we have already seen, and seen done better, especially in films by Tigmanshu Dhulia, one of the people funding the current venture.
/** * Extends the given keys by prefixing them with {@link #namePrefix} and * returns the new list. */ private List<byte[]> extendKeys(List<byte[]> keys) { List<byte[]> extendedKeys = new ArrayList<byte[]>(keys.size()); for (byte[] key : keys) { extendedKeys.add(extendKey(key)); } return extendedKeys; }
/**
 * Processes intents sent via startService().
 *
 * @param intent The supplied intent
 * @param flags Additional data about the request
 * @param startId A unique integer for specific requests to start
 * @return Value indicating how the system should handle the service
 */
@RequiresApi(api = Build.VERSION_CODES.N)
@Override
public int onStartCommand(Intent intent, int flags, int startId) {
    /* Check if the intent is present */
    if (intent != null) {
        /* Get the action and make sure it is not null */
        final String action = intent.getAction();
        assert action != null;
        Log.i(TAG, "onStartCommand(): " + action);

        /* Parse and log all provided extras */
        Bundle extras = intent.getExtras();
        if (extras != null) {
            for (String key : extras.keySet()) {
                Object value = extras.get(key);
                if (value != null) {
                    Log.d(TAG, String.format("onStartCommand(): k:[%s] v:[%s] (%s)",
                            key, value, value.getClass().getName()));
                } else {
                    Log.e(TAG, String.format("onStartCommand(): k:[%s] v:[null]!", key));
                }
            }

            String str_config = extras.getString(Keys.CONFIG);
            if (str_config != null) {
                try {
                    config = new JSONObject(str_config);
                } catch (JSONException e) {
                    Log.e(TAG, String.format("onStartCommand(): Unable to create JSON config with error %s!", e));
                }
            }
            if (config == null) Log.e(TAG, "config is null");
        }

        handleActions(action, config);
    } else {
        Log.d(TAG, "onStartCommand(): null intent");
    }

    IntentFilter filter = new IntentFilter();
    filter.addAction(ch.ethz.exot.intents.ExOTApps.Actions.START);
    filter.addAction(ch.ethz.exot.intents.ExOTApps.Actions.STOP);
    filter.addAction(ch.ethz.exot.intents.ExOTApps.Actions.STATUS);
    registerReceiver(receiver, filter);

    publishStatus(getObjectStatus());
    return START_NOT_STICKY;
}
def read(self, docxfile, populator): self.populator = populator self.docxDocumentReader = DocxDocumentReader(docxfile) for table in self.docxDocumentReader.getTables(): rows = self.docxDocumentReader.getTableRows(table) process = False for row in rows: cells = self.docxDocumentReader.getTextOfRowCells(row) if cells and cells[0].strip().startswith('*') and \ populator.start_table([c.replace('*', '') for c in cells]): process = True elif process: populator.add(cells) populator.eof() populator.eof()
package report

import (
	util "github.com/TerrexTech/go-commonutils/commonutil"
	"github.com/TerrexTech/uuuid"
	"github.com/mongodb/mongo-go-driver/bson"
	"github.com/mongodb/mongo-go-driver/bson/objectid"
	"github.com/pkg/errors"
)

type FlashSaleSoldItem struct {
	ID          objectid.ObjectID `bson:"_id,omitempty" json:"_id,omitempty"`
	FlashID     uuuid.UUID        `bson:"flashID,omitempty" json:"flashID,omitempty"`
	ItemID      uuuid.UUID        `bson:"itemID,omitempty" json:"itemID,omitempty"`
	SaleID      uuuid.UUID        `bson:"saleID,omitempty" json:"saleID,omitempty"`
	SKU         string            `bson:"sku,omitempty" json:"sku,omitempty"`
	Name        string            `bson:"name,omitempty" json:"name,omitempty"`
	Lot         string            `bson:"lot,omitempty" json:"lot,omitempty"`
	Weight      float64           `bson:"weight,omitempty" json:"weight,omitempty"`
	TotalWeight float64           `bson:"totalWeight,omitempty" json:"totalWeight,omitempty"`
	Timestamp   int64             `bson:"timestamp,omitempty" json:"timestamp,omitempty"`
}

type SoldItemParams struct {
	Timestamp *Comparator `json:"timestamp,omitempty"`
}

func (s FlashSaleSoldItem) MarshalBSON() ([]byte, error) {
	si := map[string]interface{}{
		"flashID":     s.FlashID.String(),
		"itemID":      s.ItemID.String(),
		"saleID":      s.SaleID.String(),
		"lot":         s.Lot,
		"name":        s.Name,
		"sku":         s.SKU,
		"weight":      s.Weight,
		"timestamp":   s.Timestamp,
		"totalWeight": s.TotalWeight,
	}

	if s.ID != objectid.NilObjectID {
		si["_id"] = s.ID
	}
	return bson.Marshal(si)
}

// UnmarshalBSON uses a pointer receiver so the decoded fields are visible to
// the caller; with a value receiver the assignments would be silently lost.
func (s *FlashSaleSoldItem) UnmarshalBSON(in []byte) error {
	m := make(map[string]interface{})
	err := bson.Unmarshal(in, &m)
	if err != nil {
		err = errors.Wrap(err, "Unmarshal Error")
		return err
	}

	err = s.unmarshalFromMap(m)
	return err
}

func (s *FlashSaleSoldItem) unmarshalFromMap(m map[string]interface{}) error {
	var err error
	var assertOK bool

	if m["_id"] != nil {
		s.ID, assertOK = m["_id"].(objectid.ObjectID)
		if !assertOK {
			s.ID, err = objectid.FromHex(m["_id"].(string))
			if err != nil {
				err = errors.Wrap(err, "Error while asserting ObjectID")
				return err
			}
		}
	}

	if m["flashID"] != nil {
		s.FlashID, err = uuuid.FromString(m["flashID"].(string))
		if err != nil {
			err = errors.Wrap(err, "Error while asserting FlashID")
			return err
		}
	}

	if m["itemID"] != nil {
		s.ItemID, err = uuuid.FromString(m["itemID"].(string))
		if err != nil {
			err = errors.Wrap(err, "Error while asserting ItemID")
			return err
		}
	}

	if m["saleID"] != nil {
		s.SaleID, err = uuuid.FromString(m["saleID"].(string))
		if err != nil {
			err = errors.Wrap(err, "Error while asserting SaleID")
			return err
		}
	}

	if m["lot"] != nil {
		s.Lot, assertOK = m["lot"].(string)
		if !assertOK {
			return errors.New("Error while asserting Lot")
		}
	}

	if m["name"] != nil {
		s.Name, assertOK = m["name"].(string)
		if !assertOK {
			return errors.New("Error while asserting Name")
		}
	}

	if m["sku"] != nil {
		s.SKU, assertOK = m["sku"].(string)
		if !assertOK {
			return errors.New("Error while asserting Sku")
		}
	}

	if m["weight"] != nil {
		s.Weight, err = util.AssertFloat64(m["weight"])
		if err != nil {
			err = errors.Wrap(err, "Error while asserting Weight")
			return err
		}
	}

	if m["timestamp"] != nil {
		s.Timestamp, err = util.AssertInt64(m["timestamp"])
		if err != nil {
			err = errors.Wrap(err, "Error while asserting Timestamp")
			return err
		}
	}

	if m["totalWeight"] != nil {
		s.TotalWeight, err = util.AssertFloat64(m["totalWeight"])
		if err != nil {
			err = errors.Wrap(err, "Error while asserting TotalWeight")
			return err
		}
	}

	return nil
}
/**
 * Copyright 2005-2015 The Kuali Foundation
 *
 * Licensed under the Educational Community License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.opensource.org/licenses/ecl2.php
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.kuali.rice.kew.xml.export;

import org.jdom.Element;
import org.kuali.rice.core.api.exception.RiceRuntimeException;
import org.kuali.rice.core.api.impex.ExportDataSet;
import org.kuali.rice.core.api.util.xml.XmlRenderer;
import org.kuali.rice.core.framework.impex.xml.XmlExporter;
import org.kuali.rice.kew.export.KewExportDataSet;
import org.kuali.rice.kew.rule.RuleBaseValues;
import org.kuali.rice.kew.rule.RuleDelegationBo;
import org.kuali.rice.kew.rule.RuleResponsibilityBo;
import org.kuali.rice.kew.service.KEWServiceLocator;
import org.kuali.rice.kim.api.group.Group;
import org.kuali.rice.kim.api.identity.principal.Principal;

import java.util.Iterator;

import static org.kuali.rice.core.api.impex.xml.XmlConstants.*;

/**
 * Exports rules to XML.
 *
 * @see RuleBaseValues
 *
 * @author Kuali Rice Team (<EMAIL>)
 */
public class RuleDelegationXmlExporter implements XmlExporter {

    protected final org.apache.log4j.Logger LOG = org.apache.log4j.Logger.getLogger(getClass());

    private XmlRenderer renderer = new XmlRenderer(RULE_NAMESPACE);
    private RuleXmlExporter ruleExporter = new RuleXmlExporter(RULE_NAMESPACE);

    @Override
    public boolean supportPrettyPrint() {
        return true;
    }

    public Element export(ExportDataSet exportDataSet) {
        KewExportDataSet dataSet = KewExportDataSet.fromExportDataSet(exportDataSet);
        if (!dataSet.getRuleDelegations().isEmpty()) {
            Element rootElement = renderer.renderElement(null, RULE_DELEGATIONS);
            rootElement.setAttribute(SCHEMA_LOCATION_ATTR, RULE_SCHEMA_LOCATION, SCHEMA_NAMESPACE);
            for (Iterator iterator = dataSet.getRuleDelegations().iterator(); iterator.hasNext();) {
                RuleDelegationBo ruleDelegation = (RuleDelegationBo) iterator.next();
                exportRuleDelegation(rootElement, ruleDelegation);
            }
            return rootElement;
        }
        return null;
    }

    private void exportRuleDelegation(Element parent, RuleDelegationBo ruleDelegation) {
        Element ruleDelegationElement = renderer.renderElement(parent, RULE_DELEGATION);
        exportParentResponsibility(ruleDelegationElement, ruleDelegation);
        renderer.renderTextElement(ruleDelegationElement, DELEGATION_TYPE, ruleDelegation.getDelegationType().getCode());
        ruleExporter.exportRule(ruleDelegationElement, ruleDelegation.getDelegationRule());
    }

    private void exportParentResponsibility(Element parent, RuleDelegationBo delegation) {
        Element parentResponsibilityElement = renderer.renderElement(parent, PARENT_RESPONSIBILITY);
        RuleResponsibilityBo ruleResponsibility = KEWServiceLocator.getRuleService().findRuleResponsibility(delegation.getResponsibilityId());
        renderer.renderTextElement(parentResponsibilityElement, PARENT_RULE_NAME, ruleResponsibility.getRuleBaseValues().getName());
        if (ruleResponsibility.isUsingPrincipal()) {
            Principal principal = ruleResponsibility.getPrincipal();
            renderer.renderTextElement(parentResponsibilityElement, PRINCIPAL_NAME,
                    principal.getPrincipalName());
        } else if (ruleResponsibility.isUsingGroup()) {
            Group group = ruleResponsibility.getGroup();
            Element groupElement = renderer.renderElement(parentResponsibilityElement, GROUP_NAME);
            groupElement.setText(group.getName());
            groupElement.setAttribute(NAMESPACE, group.getNamespaceCode());
        } else if (ruleResponsibility.isUsingRole()) {
            renderer.renderTextElement(parentResponsibilityElement, ROLE, ruleResponsibility.getRuleResponsibilityName());
        } else {
            throw new RiceRuntimeException("Encountered a rule responsibility when exporting with an invalid type of '"
                    + ruleResponsibility.getRuleResponsibilityType() + "'");
        }
    }
}
package unimelb.bitbox.messages; import unimelb.bitbox.peers.Peer; import unimelb.bitbox.server.PeerServer; import unimelb.bitbox.util.fs.FileDescriptor; import java.io.IOException; /** * FILE_MODIFY_RESPONSE message. * * @author <NAME> */ public class FileModifyResponse extends Response { private static final String SUCCESS = "file loader ready"; private final FileDescriptor fd; public FileModifyResponse(FileDescriptor fileDescriptor, Peer peer) { super("MODIFY:" + fileDescriptor, peer); fd = fileDescriptor; document.append("command", MessageType.FILE_MODIFY_RESPONSE); document.join(fileDescriptor.toJSON()); } @Override void onSent() { String reply = SUCCESS; try { if (!PeerServer.fsManager().isSafePathName(fd.pathName)) { reply = "unsafe pathname given"; } else if (PeerServer.fsManager().fileMatches(fd)) { reply = "file already exists with matching content"; } else if (!PeerServer.fsManager().fileExists(fd)) { reply = "file does not exist"; } else { PeerServer.fsManager().modifyFileLoader(fd); } } catch (IOException e) { reply = "error modifying file: " + e.getMessage(); } boolean successful = reply.equals(SUCCESS); document.append("message", reply); document.append("status", successful); if (successful) { PeerServer.rwManager().addFile(peer, fd); } } }
//----------------------------------------------------------------------------- // Purpose: Set the avatar by C_BasePlayer pointer //----------------------------------------------------------------------------- void CAvatarImagePanel::SetPlayer( C_BasePlayer *pPlayer, EAvatarSize avatarSize ) { if ( pPlayer ) { int iIndex = pPlayer->entindex(); SetPlayer(iIndex, avatarSize); } else m_pImage->ClearAvatarSteamID(); }
#pragma once #include "as/Pos.h" #include "as/Rng.h" #include "as/Token_Listing.h" namespace as { // This is token representation struct Tkn { enum KIND { #define TOKEN(k, s) KIND_##k TOKEN_LISTING #undef TOKEN }; inline static const char* NAMES[] = { #define TOKEN(k, s) s TOKEN_LISTING #undef TOKEN }; KIND kind; const char* str; Rng rng; Pos pos; inline operator bool() const { return kind != KIND_NONE; } }; inline static bool is_numeric_constant(Tkn::KIND k) { return k == Tkn::KIND_INTEGER || k == Tkn::KIND_FLOAT; } inline static bool is_reg(Tkn::KIND k) { return (k == Tkn::KIND_KEYWORD_R0 || k == Tkn::KIND_KEYWORD_R1 || k == Tkn::KIND_KEYWORD_R2 || k == Tkn::KIND_KEYWORD_R3 || k == Tkn::KIND_KEYWORD_R4 || k == Tkn::KIND_KEYWORD_R5 || k == Tkn::KIND_KEYWORD_R6 || k == Tkn::KIND_KEYWORD_R7 || k == Tkn::KIND_KEYWORD_IP || k == Tkn::KIND_KEYWORD_SP); } inline static bool is_ctype(Tkn::KIND k) { return (k == Tkn::KIND_KEYWORD_VOID || k == Tkn::KIND_KEYWORD_CINT8 || k == Tkn::KIND_KEYWORD_CINT16 || k == Tkn::KIND_KEYWORD_CINT32 || k == Tkn::KIND_KEYWORD_CINT64 || k == Tkn::KIND_KEYWORD_CUINT8 || k == Tkn::KIND_KEYWORD_CUINT16 || k == Tkn::KIND_KEYWORD_CUINT32 || k == Tkn::KIND_KEYWORD_CUINT64 || k == Tkn::KIND_KEYWORD_CFLOAT32 || k == Tkn::KIND_KEYWORD_CFLOAT64 || k == Tkn::KIND_KEYWORD_CPTR); } inline static bool is_mov(Tkn::KIND k) { return (k == Tkn::KIND_KEYWORD_I8_MOV || k == Tkn::KIND_KEYWORD_I16_MOV || k == Tkn::KIND_KEYWORD_I32_MOV || k == Tkn::KIND_KEYWORD_I64_MOV || k == Tkn::KIND_KEYWORD_U8_MOV || k == Tkn::KIND_KEYWORD_U16_MOV || k == Tkn::KIND_KEYWORD_U32_MOV || k == Tkn::KIND_KEYWORD_U64_MOV); } inline static bool is_arithmetic(Tkn::KIND k) { return (k == Tkn::KIND_KEYWORD_I8_ADD || k == Tkn::KIND_KEYWORD_I16_ADD || k == Tkn::KIND_KEYWORD_I32_ADD || k == Tkn::KIND_KEYWORD_I64_ADD || k == Tkn::KIND_KEYWORD_U8_ADD || k == Tkn::KIND_KEYWORD_U16_ADD || k == Tkn::KIND_KEYWORD_U32_ADD || k == Tkn::KIND_KEYWORD_U64_ADD || k == Tkn::KIND_KEYWORD_I8_SUB || k == Tkn::KIND_KEYWORD_I16_SUB || k == Tkn::KIND_KEYWORD_I32_SUB || k == Tkn::KIND_KEYWORD_I64_SUB || k == Tkn::KIND_KEYWORD_U8_SUB || k == Tkn::KIND_KEYWORD_U16_SUB || k == Tkn::KIND_KEYWORD_U32_SUB || k == Tkn::KIND_KEYWORD_U64_SUB || k == Tkn::KIND_KEYWORD_I8_MUL || k == Tkn::KIND_KEYWORD_I16_MUL || k == Tkn::KIND_KEYWORD_I32_MUL || k == Tkn::KIND_KEYWORD_I64_MUL || k == Tkn::KIND_KEYWORD_U8_MUL || k == Tkn::KIND_KEYWORD_U16_MUL || k == Tkn::KIND_KEYWORD_U32_MUL || k == Tkn::KIND_KEYWORD_U64_MUL || k == Tkn::KIND_KEYWORD_I8_DIV || k == Tkn::KIND_KEYWORD_I16_DIV || k == Tkn::KIND_KEYWORD_I32_DIV || k == Tkn::KIND_KEYWORD_I64_DIV || k == Tkn::KIND_KEYWORD_U8_DIV || k == Tkn::KIND_KEYWORD_U16_DIV || k == Tkn::KIND_KEYWORD_U32_DIV || k == Tkn::KIND_KEYWORD_U64_DIV); } inline static bool is_cond_jump(Tkn::KIND k) { return (k == Tkn::KIND_KEYWORD_I8_JE || k == Tkn::KIND_KEYWORD_I16_JE || k == Tkn::KIND_KEYWORD_I32_JE || k == Tkn::KIND_KEYWORD_I64_JE || k == Tkn::KIND_KEYWORD_U8_JE || k == Tkn::KIND_KEYWORD_U16_JE || k == Tkn::KIND_KEYWORD_U32_JE || k == Tkn::KIND_KEYWORD_U64_JE || k == Tkn::KIND_KEYWORD_I8_JNE || k == Tkn::KIND_KEYWORD_I16_JNE || k == Tkn::KIND_KEYWORD_I32_JNE || k == Tkn::KIND_KEYWORD_I64_JNE || k == Tkn::KIND_KEYWORD_U8_JNE || k == Tkn::KIND_KEYWORD_U16_JNE || k == Tkn::KIND_KEYWORD_U32_JNE || k == Tkn::KIND_KEYWORD_U64_JNE || k == Tkn::KIND_KEYWORD_I8_JL || k == Tkn::KIND_KEYWORD_I16_JL || k == Tkn::KIND_KEYWORD_I32_JL || k == Tkn::KIND_KEYWORD_I64_JL || k == Tkn::KIND_KEYWORD_U8_JL || k == Tkn::KIND_KEYWORD_U16_JL || k 
== Tkn::KIND_KEYWORD_U32_JL || k == Tkn::KIND_KEYWORD_U64_JL || k == Tkn::KIND_KEYWORD_I8_JLE || k == Tkn::KIND_KEYWORD_I16_JLE || k == Tkn::KIND_KEYWORD_I32_JLE || k == Tkn::KIND_KEYWORD_I64_JLE || k == Tkn::KIND_KEYWORD_U8_JLE || k == Tkn::KIND_KEYWORD_U16_JLE || k == Tkn::KIND_KEYWORD_U32_JLE || k == Tkn::KIND_KEYWORD_U64_JLE || k == Tkn::KIND_KEYWORD_I8_JG || k == Tkn::KIND_KEYWORD_I16_JG || k == Tkn::KIND_KEYWORD_I32_JG || k == Tkn::KIND_KEYWORD_I64_JG || k == Tkn::KIND_KEYWORD_U8_JG || k == Tkn::KIND_KEYWORD_U16_JG || k == Tkn::KIND_KEYWORD_U32_JG || k == Tkn::KIND_KEYWORD_U64_JG || k == Tkn::KIND_KEYWORD_I8_JGE || k == Tkn::KIND_KEYWORD_I16_JGE || k == Tkn::KIND_KEYWORD_I32_JGE || k == Tkn::KIND_KEYWORD_I64_JGE || k == Tkn::KIND_KEYWORD_U8_JGE || k == Tkn::KIND_KEYWORD_U16_JGE || k == Tkn::KIND_KEYWORD_U32_JGE || k == Tkn::KIND_KEYWORD_U64_JGE); } inline static bool is_push_pop(Tkn::KIND k) { return (k == Tkn::KIND_KEYWORD_PUSH || k == Tkn::KIND_KEYWORD_POP); } inline static bool is_cmp(Tkn::KIND k) { return (k == Tkn::KIND_KEYWORD_I8_CMP || k == Tkn::KIND_KEYWORD_I16_CMP || k == Tkn::KIND_KEYWORD_I32_CMP || k == Tkn::KIND_KEYWORD_I64_CMP || k == Tkn::KIND_KEYWORD_U8_CMP || k == Tkn::KIND_KEYWORD_U16_CMP || k == Tkn::KIND_KEYWORD_U32_CMP || k == Tkn::KIND_KEYWORD_U64_CMP); } inline static bool is_pure_jump(Tkn::KIND k) { return (k == Tkn::KIND_KEYWORD_JE || k == Tkn::KIND_KEYWORD_JNE || k == Tkn::KIND_KEYWORD_JL || k == Tkn::KIND_KEYWORD_JLE || k == Tkn::KIND_KEYWORD_JG || k == Tkn::KIND_KEYWORD_JGE || k == Tkn::KIND_KEYWORD_JMP); } } #undef TOKEN_LISTING
/*
 * Copyright (C) 2014 <NAME> <<EMAIL>>
 * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Concepts and requirements for exchanging time values between libuavcan, media
 * layer implementations, and applications.
 */
/** @file
 * This header specifies the concepts used by libuavcan when handling time scalars and vectors.
 * Applications may optionally choose to extend these concepts for their own use but shall always
 * use them, as documented, when exchanging data with libuavcan.
 *
 * <h3>Signed Integer Assumptions</h3>
 * Please note that libuavcan makes some assumptions that signed integers are represented as
 * two's complement by a machine. You may experience undefined behaviour if your architecture
 * does not use two's complement integers.
 */
#ifndef LIBUAVCAN_TIME_HPP_INCLUDED
#define LIBUAVCAN_TIME_HPP_INCLUDED

#include "libuavcan/libuavcan.hpp"
#include "libuavcan/util/math.hpp"

namespace libuavcan
{
/**
 * The default signed integer type used in libuavcan for signed microseconds (e.g. all duration types).
 */
using DefaultMicrosecondSignedType = std::int64_t;

/**
 * The default unsigned integer type used in libuavcan for unsigned microseconds (e.g. all time types).
 */
using DefaultMicrosecondUnsignedType = std::uint64_t;

/**
 * @namespace duration
 * Contains concepts and types that implement these concepts for time vector values.
 */
namespace duration
{
/**
 * Protected base class for duration values. This provides a common implementation for
 * various duration datatypes and enforces two concepts:
 *
 *  -# duration math is saturating – MAX_DURATION + 1 == MAX_DURATION
 *  -# durations are signed integers – By default 8-byte integers, but USecT can be redefined by
 *     a specialization.
 *
 * @tparam Type  The type of the derived class. This must be an empty type.
 *               All storage will be provided by this base class.
 * @tparam USecT The datatype returned when retrieving durations from
 *               realizations of this base class. This type must be signed.
 */
template <typename Type, typename USecT = libuavcan::DefaultMicrosecondSignedType>
class Base
{
    USecT usec_; /**< Internal storage of the duration value in microseconds. */

protected:
    /**
     * Non-virtual destructor.
     */
    ~Base() = default;

    Base()
        : usec_(0)
    {
        static_assert(sizeof(Type) == sizeof(USecT),
                      "The duration abstraction must be the same size as the underlying duration type.");
        static_assert(std::is_signed<USecT>::value, "The microsecond type must be signed for durations.");
    }

    Base(const Base& rhs)
        : usec_(rhs.usec_)
    {}

    /**
     * Move constructor takes value from rhs and
     * resets rhs to 0.
     */
    Base(Base&& rhs)
        : usec_(rhs.usec_)
    {
        rhs.usec_ = 0;
    }

public:
    /**
     * The underlying datatype for microsecond values. This must be signed for duration types.
     */
    using MicrosecondType = USecT;

    /**
     * The specialized type of this base duration type.
     */
    using DurationType = Type;

    /**
     * Get the largest possible number of microseconds this type can store.
     */
    static Type getMaximum()
    {
        return fromMicrosecond(std::numeric_limits<USecT>::max());
    }

    /**
     * Construct an instance of Type from a microsecond value.
     */
    static Type fromMicrosecond(USecT us)
    {
        Type d;
        d.usec_ = us;
        return d;
    }

    /**
     * Obtain the underlying microsecond value without conversion.
     */
    USecT toMicrosecond() const
    {
        return usec_;
    }

    /**
     * Get the absolute value of the duration as a duration type.
*/ Type getAbs() const { return Type::fromMicrosecond(std::abs(usec_)); } Base& operator=(Base&& rhs) { usec_ = rhs.usec_; rhs.usec_ = 0; return *this; } Base& operator=(const Base& rhs) { usec_ = rhs.usec_; return *this; } bool operator==(const Type& r) const { return usec_ == r.usec_; } bool operator!=(const Type& r) const { return !operator==(r); } bool operator<(const Type& r) const { return usec_ < r.usec_; } bool operator>(const Type& r) const { return usec_ > r.usec_; } bool operator<=(const Type& r) const { return usec_ <= r.usec_; } bool operator>=(const Type& r) const { return usec_ >= r.usec_; } Type operator+(const Type& r) const { return fromMicrosecond(util::saturating_add(usec_, r.usec_)); } Type operator-(const Type& r) const { return fromMicrosecond(util::saturating_sub(usec_, r.usec_)); } Type operator-() const { if (usec_ == std::numeric_limits<USecT>::min()) { return fromMicrosecond(std::numeric_limits<USecT>::max()); } else { return fromMicrosecond(-usec_); } } Type& operator+=(const Type& r) { *this = *this + r; return *static_cast<Type*>(this); } Type& operator-=(const Type& r) { *this = *this - r; return *static_cast<Type*>(this); } }; /** * A monotonic duration used by libuavcan. */ class LIBUAVCAN_EXPORT Monotonic : public Base<Monotonic> {}; } // namespace duration /** * @namespace time * Contains concepts and types that implement these concepts for time scalar values. */ namespace time { /** * Protected base class for time values. * * @tparam Type The type of the derived class. This must be an empty type. * All storage will be provided by this base class. * @tparam DType The type of duration used for this time type. Time is concrete and duration * is relative. * @tparam USecT The datatype returned when retrieving time from * realizations of this base class. This type must be unsigned. */ template <typename Type, typename DType, typename USecT = DefaultMicrosecondUnsignedType> class Base { USecT usec_; protected: ~Base() {} Base() : usec_(0) { static_assert(sizeof(Type) == sizeof(USecT), "The time abstraction must be the same size as the underlying time type."); static_assert(!std::is_signed<USecT>::value, "Microsecond type must be unsigned for time."); // Note that this also, somewhat, enforces that the duration type supports the duration "concept". // It won't be until C++20 that this type can truly enforce this requirement. If you must re-implement // the concept then remember that Duration math is saturating. It's much safer to just derive // your duration from libuavcan::time::Base. static_assert(sizeof(USecT) == sizeof(typename DType::MicrosecondType), "Microsecond Type must be the same size as the duration type."); } Base(const Base& rhs) : usec_(rhs.usec_) {} /** * Move constructor takes value from rhs and * resets rhs to 0. */ Base(Base&& rhs) : usec_(rhs.usec_) { rhs.usec_ = 0; } public: /** * The underlying datatype for microsecond values. This must be unsigned for time types. */ using MicrosecondType = USecT; /** * The specialized type of this base time type. */ using DurationType = DType; /** * Get the largest possible number of microseconds this type can store. */ static Type getMaximum() { return fromMicrosecond(std::numeric_limits<USecT>::max()); } /** * Construct an instance of Type from a microsecond value. */ static Type fromMicrosecond(USecT us) { Type t; t.usec_ = us; return t; } /** * Obtain the underlying microsecond value without conversion. 
*/ USecT toMicrosecond() const { return usec_; } Base& operator=(Base&& rhs) { usec_ = rhs.usec_; rhs.usec_ = 0; return *this; } Base& operator=(const Base& rhs) { usec_ = rhs.usec_; return *this; } bool operator==(const Type& r) const { return usec_ == r.usec_; } bool operator!=(const Type& r) const { return !operator==(r); } bool operator<(const Type& r) const { return usec_ < r.usec_; } bool operator>(const Type& r) const { return usec_ > r.usec_; } bool operator<=(const Type& r) const { return usec_ <= r.usec_; } bool operator>=(const Type& r) const { return usec_ >= r.usec_; } Type operator+(const DType& r) const { return fromMicrosecond(libuavcan::util::saturating_add(usec_, r.toMicrosecond())); } Type operator-(const DType& r) const { return fromMicrosecond(libuavcan::util::saturating_sub(usec_, r.toMicrosecond())); } Type& operator+=(const DType& r) { *this = *this + r; return *static_cast<Type*>(this); } Type& operator-=(const DType& r) { *this = *this - r; return *static_cast<Type*>(this); } }; /** * A monotonic time value used by libuavcan. */ class LIBUAVCAN_EXPORT Monotonic : public Base<Monotonic, duration::Monotonic> {}; } // namespace time } // namespace libuavcan #endif // LIBUAVCAN_TIME_HPP_INCLUDED
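A minimal usage sketch of the saturating-arithmetic contract documented above. The include path is assumed from the header guard; the program itself is illustrative, not part of the library.

```cpp
#include "libuavcan/time.hpp"
#include <cassert>

int main()
{
    using libuavcan::duration::Monotonic;

    const auto max = Monotonic::getMaximum();
    const auto one = Monotonic::fromMicrosecond(1);

    // Saturating: MAX_DURATION + 1 == MAX_DURATION.
    assert(max + one == max);

    // Time scalars advance by duration vectors, also saturating.
    auto now = libuavcan::time::Monotonic::fromMicrosecond(100);
    now += libuavcan::duration::Monotonic::fromMicrosecond(50);
    assert(now.toMicrosecond() == 150);
    return 0;
}
```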
An Analysis of the Deaths Reported by Hurricane Maria: A Mini Review

The purpose of this mini review is to analyze the controversies surrounding the official death toll of Hurricane Maria (HM), driven by the excess-mortality estimates produced by academics and investigative journalists. The review critiques the aforementioned analyses and articles in order to clarify their figures, which all present different numbers of victims. Three publications (Kishore et al., 2018; Santos-Lozada et al., 2018; GWU, 2018) reported different numbers of victims in Puerto Rico (PR) in the aftermath of HM, which struck on September 20, 2017, and the reported death toll has varied ever since. According to the PR government, the official number of deaths is 64 (CPI, 2017), while Kishore et al. (2018) put the figure at 4,645 and George Washington University reported 2,975 deaths. This article analyzes why these sources disagree on the number of dead and the possible reasons for the discrepancies.
// Auxiliary: Handle precalculated gamut check. The retrieval of context may be a little
// bit slow, but this function is not critical.
static
void TransformOnePixelWithGamutCheck(cmsContext ContextID, _cmsTRANSFORM* p,
                                     const cmsUInt16Number wIn[],
                                     cmsUInt16Number wOut[])
{
    cmsUInt16Number wOutOfGamut;
    _cmsTRANSFORMCORE *core = p->core;

    core->GamutCheck->Eval16Fn(ContextID, wIn, &wOutOfGamut, core->GamutCheck->Data);
    if (wOutOfGamut >= 1) {

        cmsUInt32Number i;
        cmsUInt32Number n = core->Lut->OutputChannels;
        _cmsAlarmCodesChunkType* ContextAlarmCodes = (_cmsAlarmCodesChunkType*) _cmsContextGetClientChunk(ContextID, AlarmCodesContext);

        for (i=0; i < n; i++) {

            wOut[i] = ContextAlarmCodes->AlarmCodes[i];
        }
    }
    else
        core->Lut->Eval16Fn(ContextID, wIn, wOut, core->Lut->Data);
}
def resolve_hostname(addr): if ip_math.is_valid_ip(addr): try: name, _, _ = socket.gethostbyaddr(addr) return name except socket.gaierror: pass except socket.herror: pass except socket.timeout: pass return None else: raise ValueError("Invalid ip address.")
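A small usage sketch of resolve_hostname; the addresses and the resolved name are illustrative.

```python
for addr in ("8.8.8.8", "192.0.2.1"):
    name = resolve_hostname(addr)
    # Fall back to the raw address when no PTR record exists.
    print(name or addr)  # e.g. "dns.google", then "192.0.2.1"
```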
/*
 * Copyright 2015-2021 <NAME> <<EMAIL>>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * - Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the
 * distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * FOUNDATION OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef ALLOCATED_STRING_HXX
#define ALLOCATED_STRING_HXX

#include "StringPointer.hxx"

#include <algorithm>
#include <cstddef>
#include <string_view>
#include <utility>

/**
 * A string pointer whose memory is managed by this class.
 *
 * Unlike std::string, this object can hold a "nullptr" special value.
 */
template<typename T>
class BasicAllocatedString {
public:
	using value_type = typename StringPointer<T>::value_type;
	using reference = typename StringPointer<T>::reference;
	using const_reference = typename StringPointer<T>::const_reference;
	using pointer = typename StringPointer<T>::pointer;
	using const_pointer = typename StringPointer<T>::const_pointer;
	using string_view = std::basic_string_view<T>;
	using size_type = std::size_t;

	static constexpr value_type SENTINEL = '\0';

private:
	pointer value = nullptr;

	explicit BasicAllocatedString(pointer _value) noexcept
		:value(_value) {}

public:
	BasicAllocatedString() noexcept = default;

	BasicAllocatedString(std::nullptr_t n) noexcept
		:value(n) {}

	explicit BasicAllocatedString(string_view src)
		:value(Duplicate(src)) {}

	explicit BasicAllocatedString(const_pointer src)
		:value(Duplicate(src)) {}

	/**
	 * Concatenate several strings.
	 */
	BasicAllocatedString(std::initializer_list<string_view> src)
		:value(new value_type[TotalSize(src) + 1])
	{
		auto *p = value;
		for (const auto i : src)
			p = std::copy(i.begin(), i.end(), p);
		*p = SENTINEL;
	}

	BasicAllocatedString(const BasicAllocatedString &src) noexcept
		:BasicAllocatedString(Duplicate(src.value)) {}

	BasicAllocatedString(BasicAllocatedString &&src) noexcept
		:value(src.Steal()) {}

	~BasicAllocatedString() noexcept {
		delete[] value;
	}

	static BasicAllocatedString Donate(pointer value) noexcept {
		return BasicAllocatedString(value);
	}

	static BasicAllocatedString Empty() {
		auto p = new value_type[1];
		p[0] = SENTINEL;
		return Donate(p);
	}

	BasicAllocatedString &operator=(BasicAllocatedString &&src) noexcept {
		std::swap(value, src.value);
		return *this;
	}

	BasicAllocatedString &operator=(string_view src) noexcept {
		delete[] std::exchange(value, nullptr);
		value = Duplicate(src);
		return *this;
	}

	BasicAllocatedString &operator=(const_pointer src) noexcept {
		delete[] std::exchange(value, nullptr);
		value = src != nullptr ? Duplicate(src) : nullptr;
		return *this;
	}

	constexpr bool operator==(std::nullptr_t) const noexcept {
		return value == nullptr;
	}

	constexpr bool operator!=(std::nullptr_t) const noexcept {
		return value != nullptr;
	}

	operator string_view() const noexcept {
		return value != nullptr
			? string_view(value)
			: string_view();
	}

	constexpr const_pointer c_str() const noexcept {
		return value;
	}

	bool empty() const noexcept {
		return *value == SENTINEL;
	}

	constexpr pointer data() const noexcept {
		return value;
	}

	reference operator[](size_type i) noexcept {
		return value[i];
	}

	const_reference operator[](size_type i) const noexcept {
		return value[i];
	}

	pointer Steal() noexcept {
		return std::exchange(value, nullptr);
	}

private:
	static pointer Duplicate(string_view src) {
		auto p = new value_type[src.size() + 1];
		*std::copy_n(src.data(), src.size(), p) = SENTINEL;
		return p;
	}

	static pointer Duplicate(const_pointer src) {
		return src != nullptr
			? Duplicate(string_view(src))
			: nullptr;
	}

	static constexpr std::size_t TotalSize(std::initializer_list<string_view> src) noexcept {
		std::size_t size = 0;
		for (std::string_view i : src)
			size += i.size();
		return size;
	}
};

class AllocatedString : public BasicAllocatedString<char> {
public:
	using BasicAllocatedString::BasicAllocatedString;

	AllocatedString() noexcept = default;
	AllocatedString(BasicAllocatedString<value_type> &&src) noexcept
		:BasicAllocatedString(std::move(src)) {}

	using BasicAllocatedString::operator=;
};

#endif
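A hypothetical usage sketch of AllocatedString; the include path is assumed from the filename and the values are illustrative.

```cpp
#include "util/AllocatedString.hxx"
#include <cassert>
#include <string_view>

int main()
{
	AllocatedString s; // holds the "nullptr" special value, unlike std::string
	assert(s == nullptr);

	// The initializer-list constructor concatenates its pieces.
	s = AllocatedString{"foo", "/", "bar"};
	assert(std::string_view{s} == "foo/bar");

	char *raw = s.Steal(); // ownership transferred to the caller
	assert(s == nullptr);
	delete[] raw;
	return 0;
}
```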
package com.doublefx.blockchain.example.virtualcoin.collector;

import com.doublefx.blockchain.example.common.AccountHolder;
import com.doublefx.blockchain.example.virtualcoin.Transaction;

import java.math.BigDecimal;

import static java.math.BigDecimal.ZERO;
import static java.math.BigDecimal.valueOf;

public class BalanceCollector {
    private static final long INITIAL_ACCOUNT_BALANCE = 100;

    private final AccountHolder accountHolder;
    private BigDecimal totalReceived = valueOf(INITIAL_ACCOUNT_BALANCE);
    private BigDecimal totalSent = ZERO;

    public BalanceCollector(AccountHolder accountHolder) {
        this.accountHolder = accountHolder;
    }

    public void accept(Transaction transaction) {
        final var amount = transaction.amount();

        if (accountHolder.equals(transaction.from())) {
            totalSent = totalSent.add(amount);
        }

        if (accountHolder.equals(transaction.to())) {
            totalReceived = totalReceived.add(amount);
        }
    }

    public void combine(BalanceCollector other) {
        totalSent = totalSent.add(other.totalSent);
        totalReceived = totalReceived.add(other.totalReceived);
    }

    public BigDecimal getBalance() {
        return totalReceived.subtract(totalSent);
    }
}
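The accept/combine pair is shaped for java.util.stream's three-argument collect. A hypothetical usage sketch follows (`transactions` and `accountHolder` are assumed to exist). A sequential stream is used deliberately: each partial collector seeds its own INITIAL_ACCOUNT_BALANCE, so combining parallel partials would double-count the opening balance.

```java
// Sequential stream: the combiner is never invoked, so the initial
// balance is counted exactly once.
BigDecimal balance = transactions.stream()
        .collect(() -> new BalanceCollector(accountHolder),
                BalanceCollector::accept,
                BalanceCollector::combine)
        .getBalance();
```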
// Validates event modifiers applicability for shortcut modifiers. // Examples: // CapsLock shouldn't affect matching of Ctrl+C. // Modifier with Shift should match both LeftShift and RightShift fn are_modifiers_applicable( shortcut_modifiers: Option<ui_input2::Modifiers>, event_modifiers: Option<ui_input2::Modifiers>, ) -> bool { match (shortcut_modifiers, event_modifiers) { (Some(shortcut_modifiers), Some(event_modifiers)) => { let masks = [ vec![ ui_input2::Modifiers::Shift, ui_input2::Modifiers::LeftShift | ui_input2::Modifiers::RightShift, ], vec![ ui_input2::Modifiers::Alt, ui_input2::Modifiers::LeftAlt | ui_input2::Modifiers::RightAlt, ], vec![ ui_input2::Modifiers::Meta, ui_input2::Modifiers::LeftMeta | ui_input2::Modifiers::RightMeta, ], vec![ ui_input2::Modifiers::Control, ui_input2::Modifiers::LeftControl | ui_input2::Modifiers::RightControl, ], // TODO: locks affecting shortcuts? ]; masks.iter().all(|variations| { // if shortcut has modifiers from the variation, event should have the same. variations.iter().all(|&mask| { !shortcut_modifiers.intersects(mask) || shortcut_modifiers & mask == event_modifiers & mask }) }) } (None, None) => true, _ => false, } }
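A hypothetical test sketch for the behavior described in the comments above. It assumes the `ui_input2::Modifiers` bitflags combine with `|` as used in the function, and that an input pipeline sets both the generic and the sided flag (e.g. Shift | LeftShift).

```rust
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn shift_shortcut_matches_either_side() {
        let shortcut = Some(ui_input2::Modifiers::Shift);
        let left = Some(ui_input2::Modifiers::Shift | ui_input2::Modifiers::LeftShift);
        let right = Some(ui_input2::Modifiers::Shift | ui_input2::Modifiers::RightShift);
        // A generic Shift shortcut matches both sided variants.
        assert!(are_modifiers_applicable(shortcut, left));
        assert!(are_modifiers_applicable(shortcut, right));
        // No modifiers on either side is a trivial match.
        assert!(are_modifiers_applicable(None, None));
    }
}
```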
/**
 * Performs a deep copy of the specified cell, handling the cell format
 *
 * @param cell the cell to copy
 */
private WritableCell deepCopyCell(Cell cell)
{
  WritableCell c = shallowCopyCell(cell);

  if (c == null)
  {
    return c;
  }

  if (c instanceof ReadFormulaRecord)
  {
    ReadFormulaRecord rfr = (ReadFormulaRecord) c;
    boolean crossSheetReference = !rfr.handleImportedCellReferences
      (fromSheet.getWorkbook(),
       fromSheet.getWorkbook(),
       workbookSettings);

    if (crossSheetReference)
    {
      try
      {
        logger.warn("Formula " + rfr.getFormula() +
                    " in cell " +
                    CellReferenceHelper.getCellReference(cell.getColumn(), cell.getRow()) +
                    " cannot be imported because it references another" +
                    " sheet from the source workbook");
      }
      catch (FormulaException e)
      {
        logger.warn("Formula in cell " +
                    CellReferenceHelper.getCellReference(cell.getColumn(), cell.getRow()) +
                    " cannot be imported: " + e.getMessage());
      }

      c = new Formula(cell.getColumn(), cell.getRow(), "\"ERROR\"");
    }
  }

  CellFormat cf = c.getCellFormat();
  int index = ((XFRecord) cf).getXFIndex();
  WritableCellFormat wcf = (WritableCellFormat) xfRecords.get(new Integer(index));

  if (wcf == null)
  {
    wcf = copyCellFormat(cf);
  }

  c.setCellFormat(wcf);
  return c;
}
/** * Clears the table by putting all the blocks in block bank */ public void resetTable() { playArea = new ArrayList<Block>(); blockBank = new ArrayList<Block>(); inPlay = new ArrayList<Block>(); int tempWidth = width - bankX; int tempLength = length - bankY; int tempX, tempY; for (int i = 0; i < numBlocks; i++) { tempX = bankX + rand.nextInt(tempWidth); tempY = bankY + rand.nextInt(tempLength); while (!checkFree(tempX, tempY)) { tempX = bankX + rand.nextInt(tempWidth); tempY = bankY + rand.nextInt(tempLength); } blockBank.add(new Block(tempX, tempY, 1 + rand.nextInt(6), i)); } }
Following last week's tragic death of Indianapolis 500 champion Dan Wheldon, more than a dozen of his fellow drivers met with IndyCar officials Monday to talk about safety issues. As correspondent Cynthia Bowers reports, the drivers want to see significant changes in their sport.

With 34 cars jammed together at 225 miles per hour, there was little margin for error at the Oct. 16 race in Las Vegas. Just 11 laps in, Wheldon was caught up and killed in a fiery crash.

This past Sunday, racing colleagues, fans and family remembered the two-time Indy 500 winner. And the drivers followed up Monday, meeting at the Indianapolis Motor Speedway to talk about the accident and how to prevent more.

"How we react to this is critical and I'm very encouraged by what I saw today," IndyCar driver Dario Franchitti said.

Drivers shrugged off criticism of race series executives who ran the race at a track many said was too small for so many racers driving pedal to the metal.

"Finger pointing is not gonna do any good here at all," Franchitti said.

High-profile racing deaths have historically proven to be catalysts for change. The death of Formula One driver Ayrton Senna in 1994 led to significant design changes to the car; no one has died in a Formula One race since. After iconic NASCAR driver Dale Earnhardt's death at Daytona in 2001, the league mandated softer crash walls and head and neck protection for drivers. There hasn't been a stock car race death in more than a decade.

Ironically, Wheldon was test driving a new Indy car experts believe could save lives -- by being harder to control. They say the 2012 model will help weed out less talented drivers and cut down on overcrowded race tracks.

One thing drivers know: At 60 or at 200 plus miles per hour, you are only as safe as the driver next to you.
def ALL_VARIANTS(): return make_variants(SUPPORTED_PYTHON_VERS, SUPPORTED_BUILD_TYPES, SUPPORTED_MPI_TYPES, SUPPORTED_CUDA_VERS)
def finalize(self, data): invalid = np.all(self.x[:, : self.sensor.n_chans] <= -1.5, axis=-1) for v in ALL_TARGETS: data[v].data[invalid] = np.nan variables = [target for target in ALL_TARGETS if target in data.variables] for var in variables: data[var + "_true"] = self.data[var] data["latitude"] = self.data["latitude"] data["longitude"] = self.data["longitude"] if "earth_incidence_angle" in self.data.variables: data["earth_incidence_angle"] = self.data["earth_incidence_angle"] return data
/*
 * Copyright (C) 2018 <NAME>
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cassert>
#include <cmath>
#include <vector>

extern "C" {
#include <Source/Common.h>
#include <Source/Data.h>
#include <Source/OpenType.h>
}

#include "OpenType/Builder.h"
#include "OpenType/Writer.h"
#include "MiscTester.h"

using namespace std;
using namespace SheenFigure::Tester;
using namespace SheenFigure::Tester::OpenType;

MiscTester::MiscTester()
{
}

void MiscTester::testDevicePixels()
{
    Builder builder;

    /* Test the first format. */
    {
        DeviceTable *device = &builder.createDevice({11, 22}, {
            -2, 1, -1, 0, 1, 0, -2, -1, 0, 1, -1, -2
        });

        Writer writer;
        writer.write(device);

        Data data = writer.data();

        /* Test with out of range values. */
        assert(GetDevicePixels(data, 10) == 0);
        assert(GetDevicePixels(data, 23) == 0);

        /* Test with inside the range values. */
        assert(GetDevicePixels(data, 11) == -2);
        assert(GetDevicePixels(data, 12) == 1);
        assert(GetDevicePixels(data, 13) == -1);
        assert(GetDevicePixels(data, 14) == 0);
        assert(GetDevicePixels(data, 15) == 1);
        assert(GetDevicePixels(data, 16) == 0);
        assert(GetDevicePixels(data, 17) == -2);
        assert(GetDevicePixels(data, 18) == -1);
        assert(GetDevicePixels(data, 19) == 0);
        assert(GetDevicePixels(data, 20) == 1);
        assert(GetDevicePixels(data, 21) == -1);
        assert(GetDevicePixels(data, 22) == -2);
    }

    /* Test the second format. */
    {
        DeviceTable *device = &builder.createDevice({11, 16}, {
            -8, 0, 7, 1, -1, -7
        });

        Writer writer;
        writer.write(device);

        Data data = writer.data();

        /* Test with out of range values. */
        assert(GetDevicePixels(data, 10) == 0);
        assert(GetDevicePixels(data, 17) == 0);

        /* Test with inside the range values. */
        assert(GetDevicePixels(data, 11) == -8);
        assert(GetDevicePixels(data, 12) == 0);
        assert(GetDevicePixels(data, 13) == 7);
        assert(GetDevicePixels(data, 14) == 1);
        assert(GetDevicePixels(data, 15) == -1);
        assert(GetDevicePixels(data, 16) == -7);
    }

    /* Test the third format. */
    {
        DeviceTable *device = &builder.createDevice({11, 13}, {
            -128, 0, 127
        });

        Writer writer;
        writer.write(device);

        Data data = writer.data();

        /* Test with out of range values. */
        assert(GetDevicePixels(data, 10) == 0);
        assert(GetDevicePixels(data, 14) == 0);

        /* Test with inside the range values.
*/ assert(GetDevicePixels(data, 11) == -128); assert(GetDevicePixels(data, 12) == 0); assert(GetDevicePixels(data, 13) == 127); } } void MiscTester::testRegionListScalar() { Builder builder; VariationRegionList &regionList = builder.createRegionList({ { axis_coords { 0.1f, 0.0f, 0.2f } }, { axis_coords { 0.1f, 0.2f, 0.0f } }, { axis_coords { -0.1f, -0.2f, 0.1f } }, { axis_coords { -0.1f, 0.0f, 0.1f } }, { axis_coords { 0.5f, 1.0f, 1.5f } }, { axis_coords { 0.0f, 0.5f, 1.0f }, axis_coords { 1.25f, 1.50f, 1.75f } }, }); vector<Int16> coords = { toF2DOT14(1.0f), toF2DOT14(0.4f), toF2DOT14(1.6f), toF2DOT14(0.75f), toF2DOT14(1.25f), toF2DOT14(0.25f), toF2DOT14(1.625f), }; Writer writer; writer.write(&regionList); Data table = writer.data(); /* Test with coordinates resolving to one. */ assert(CalculateScalarForRegion(table, 0, NULL, 0) == 1.0); assert(CalculateScalarForRegion(table, 1, NULL, 0) == 1.0); assert(CalculateScalarForRegion(table, 2, NULL, 0) == 1.0); assert(CalculateScalarForRegion(table, 3, NULL, 0) == 1.0); assert(CalculateScalarForRegion(table, 4, &coords[0], 1) == 1.0); /* Test with coordinates resolving to zero. */ assert(CalculateScalarForRegion(table, 4, &coords[1], 1) == 0.0); assert(CalculateScalarForRegion(table, 4, &coords[2], 1) == 0.0); /* Test with dividable coordinates. */ assert(abs(CalculateScalarForRegion(table, 4, &coords[3], 1) - 0.5) < F2DOT14_EPSILON); assert(abs(CalculateScalarForRegion(table, 4, &coords[4], 1) - 0.5) < F2DOT14_EPSILON); /* Test with multiple axes. */ assert(abs(CalculateScalarForRegion(table, 5, &coords[5], 2) - 0.25) < F2DOT14_EPSILON); } void MiscTester::testVariationPixels() { Builder builder; Writer sw; VariationRegionList &regionList = builder.createRegionList({ { axis_coords { 0.5f, 1.0f, 1.5f } }, { axis_coords { 0.5f, 0.0f, 1.0f } } }); vector<ItemVariationDataSubtable> varData = { builder.createVariationData({ 0 }, { {{ -260 }, { }} }), builder.createVariationData({ 0 }, { {{ }, { 20 }} }), builder.createVariationData({ 0, 1 }, { {{ 640 }, { -80 }} }), }; ItemVariationStoreTable &varStore = builder.createVariationStore(regionList, { varData.data(), varData.size() }); sw.write(&varStore); Data storeTable = sw.data(); Int16 coord = toF2DOT14(0.75f); /* Test with i16 delta only. */ { VariationIndexTable &varIndex = builder.createVariationIndex(0, 0); Writer writer; writer.write(&varIndex); assert(GetVariationPixels(writer.data(), storeTable, &coord, 1) == -130); } /* Test with i8 delta only. */ { VariationIndexTable &varIndex = builder.createVariationIndex(1, 0); Writer writer; writer.write(&varIndex); assert(GetVariationPixels(writer.data(), storeTable, &coord, 1) == 10); } /* Test with both i16 and i8 delta. */ { VariationIndexTable &varIndex = builder.createVariationIndex(2, 0); Writer writer; writer.write(&varIndex); assert(GetVariationPixels(writer.data(), storeTable, &coord, 1) == 240); } } static bool checkLookupIndex(Data featureTable, UInt16 lookupIndex) { if (Feature_LookupCount(featureTable) != 0) { return Feature_LookupListIndex(featureTable, 0) == lookupIndex; } return false; } void MiscTester::testFeatureSubst() { Builder builder; vector<ConditionTable> conditions = { builder.createCondition(0, { 1.05f, 1.95f}), builder.createCondition(1, {-1.15f, 0.85f}), builder.createCondition(2, {-1.75f, 1.25f}), }; /* Test with empty condition set. 
*/ { FeatureVariationsTable &featureVariations = builder.createFeatureVariations({ {builder.createConditionSet({nullptr, 0}), builder.createFeatureSubst({ {1, builder.createFeature({ 11 })}, })} }); Writer writer; writer.write(&featureVariations); Data data = writer.data(); Data featureSubst = SearchFeatureSubstitutionTable(data, NULL, 0); Data altFeature = SearchAlternateFeatureTable(featureSubst, 1); assert(checkLookupIndex(altFeature, 11)); } /* Test the order of condition set execution. */ { FeatureVariationsTable &featureVariations = builder.createFeatureVariations({ {builder.createConditionSet({&conditions[0], 1}), builder.createFeatureSubst({ {1, builder.createFeature({ 11 })}, })}, {builder.createConditionSet({&conditions[1], 1}), builder.createFeatureSubst({ {2, builder.createFeature({ 12 })}, })}, {builder.createConditionSet({&conditions[2], 1}), builder.createFeatureSubst({ {3, builder.createFeature({ 13 })}, })}, }); Writer writer; writer.write(&featureVariations); Data data = writer.data(); /* Test with first matching condition set. */ { vector<Int16> coords = { toF2DOT14(1.50f), toF2DOT14(0.0f), toF2DOT14(0.0f) }; Data featureSubst = SearchFeatureSubstitutionTable(data, coords.data(), 3); Data altFeature = SearchAlternateFeatureTable(featureSubst, 1); assert(checkLookupIndex(altFeature, 11)); } /* Test with middle matching condition set. */ { vector<Int16> coords = { toF2DOT14(0.0f), toF2DOT14(-0.50f), toF2DOT14(0.0f) }; Data featureSubst = SearchFeatureSubstitutionTable(data, coords.data(), 3); Data altFeature = SearchAlternateFeatureTable(featureSubst, 2); assert(checkLookupIndex(altFeature, 12)); } /* Test with last matching condition set. */ { vector<Int16> coords = { toF2DOT14(0.0f), toF2DOT14(1.0f), toF2DOT14(-0.25f) }; Data featureSubst = SearchFeatureSubstitutionTable(data, coords.data(), 3); Data altFeature = SearchAlternateFeatureTable(featureSubst, 3); assert(checkLookupIndex(altFeature, 13)); } } /* Test with multiple conditions in a single condition set. */ { FeatureVariationsTable &featureVariations = builder.createFeatureVariations({ {builder.createConditionSet({&conditions[0], 3}), builder.createFeatureSubst({ {1, builder.createFeature({ 11 })}, })} }); Writer writer; writer.write(&featureVariations); Data data = writer.data(); vector<Int16> coords = { toF2DOT14(1.50f), toF2DOT14(-0.50f), toF2DOT14(-0.25f) }; Data featureSubst = SearchFeatureSubstitutionTable(data, coords.data(), 3); Data altFeature = SearchAlternateFeatureTable(featureSubst, 1); assert(checkLookupIndex(altFeature, 11)); } } void MiscTester::test() { testDevicePixels(); testRegionListScalar(); testVariationPixels(); testFeatureSubst(); }
/**
 * Compute and set execution order for all components.
 */
private void createExecutionOrder() {
    final Map<RdfObjects.Entity, Set<RdfObjects.Entity>> deps = new HashMap<>();
    for (Map.Entry<RdfObjects.Entity, Set<RdfObjects.Entity>> entry
            : dependencies.entrySet()) {
        final Set<RdfObjects.Entity> entities = new HashSet<>();
        entities.addAll(entry.getValue());
        deps.put(entry.getKey(), entities);
    }
    // Kahn-style topological sort: repeatedly number the components whose
    // remaining dependencies are all satisfied.
    Integer executionOrder = 0;
    while (!deps.isEmpty()) {
        final List<RdfObjects.Entity> toRemove = new ArrayList<>(16);
        for (Map.Entry<RdfObjects.Entity, Set<RdfObjects.Entity>> entry
                : deps.entrySet()) {
            if (entry.getValue().isEmpty()) {
                toRemove.add(entry.getKey());
                entry.getKey().add(vf.createIRI(
                        "http://linkedpipes.com/ontology/executionOrder"),
                        vf.createLiteral(++executionOrder));
            }
        }
        toRemove.forEach((item) -> {
            deps.remove(item);
        });
        deps.entrySet().forEach((entry) -> {
            entry.getValue().removeAll(toRemove);
        });
        if (toRemove.isEmpty()) {
            throw new RuntimeException("Cycle detected.");
        }
    }
}
def fit(self, X): self.col_bin_edges_ = {} self.hist_counts_ = {} self.hist_edges_ = {} self.col_mapping_ = {} self.col_mapping_counts_ = {} self.col_n_bins_ = {} self.col_names_ = [] self.col_types_ = [] self.has_fitted_ = False schema = autogen_schema( X, feature_names=self.feature_names, feature_types=self.feature_types ) for col_idx in range(X.shape[1]): col_name = list(schema.keys())[col_idx] self.col_names_.append(col_name) col_info = schema[col_name] assert col_info["column_number"] == col_idx col_data = X[:, col_idx] self.col_types_.append(col_info["type"]) if col_info["type"] == "continuous": col_data = col_data.astype(float) uniq_vals = set(col_data[~np.isnan(col_data)]) if len(uniq_vals) < self.max_bins: bins = list(sorted(uniq_vals)) else: if self.binning == "uniform": bins = self.max_bins elif self.binning == "quantile": bins = np.unique( np.quantile( col_data, q=np.linspace(0, 1, self.max_bins + 1) ) ) else: raise ValueError("Unknown binning: '{}'.".format(self.binning)) _, bin_edges = np.histogram(col_data, bins=bins) hist_counts, hist_edges = np.histogram(col_data, bins="doane") self.col_bin_edges_[col_idx] = bin_edges self.hist_edges_[col_idx] = hist_edges self.hist_counts_[col_idx] = hist_counts self.col_n_bins_[col_idx] = len(bin_edges) elif col_info["type"] == "ordinal": mapping = {val: indx for indx, val in enumerate(col_info["order"])} self.col_mapping_[col_idx] = mapping self.col_n_bins_[col_idx] = len(col_info["order"]) elif col_info["type"] == "categorical": uniq_vals, counts = np.unique(col_data, return_counts=True) non_nan_index = ~np.isnan(counts) uniq_vals = uniq_vals[non_nan_index] counts = counts[non_nan_index] mapping = {val: indx for indx, val in enumerate(uniq_vals)} self.col_mapping_counts_[col_idx] = counts self.col_mapping_[col_idx] = mapping self.col_n_bins_[col_idx] = len(uniq_vals) self.has_fitted_ = True return self
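A standalone sketch of the two continuous-binning strategies chosen in fit above; the data is illustrative and the snippet is not part of the class.

```python
import numpy as np

col_data = np.random.exponential(size=1000)
max_bins = 8

# "uniform": equal-width bins over the data range.
uniform_counts, uniform_edges = np.histogram(col_data, bins=max_bins)

# "quantile": equal-population bins; np.unique guards against duplicate
# edges when the data is heavily skewed.
quantile_edges = np.unique(np.quantile(col_data, q=np.linspace(0, 1, max_bins + 1)))
quantile_counts, _ = np.histogram(col_data, bins=quantile_edges)

print(uniform_counts)   # counts pile up in the first few bins
print(quantile_counts)  # roughly 1000 / 8 per bin
```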
def _read_content(self, url, file_name, branch): api = self.bisector.api try: return api.m.gitiles.download_file( repository_url=url, file_path=file_name, branch=branch, step_test_data=lambda: api._test_data['download_deps'].get( self.commit_hash, '')) except TypeError: err = 'Could not read content for %s/%s/%s' % (url, file_name, branch) api.m.step.active_result.presentation.status = api.m.step.WARNING api.m.step.active_result.presentation.logs['Gitiles Warning'] = [err] return None
import React from "react";
import fetch from "isomorphic-unfetch";

// Use a global to save the user, so we don't have to fetch it again after page navigations
let userState;

const User = React.createContext({ user: null, loading: false });

export const fetchUser = async () => {
  if (userState !== undefined) {
    return userState;
  }

  const res = await fetch("/api/me");
  userState = res.ok ? await res.json() : null;
  return userState;
};

export const UserProvider = ({ value, children }) => {
  const { user } = value;

  // If the user was fetched in SSR add it to userState so we don't fetch it again
  React.useEffect(() => {
    if (!userState && user) {
      userState = user;
    }
  }, []);

  return <User.Provider value={value}>{children}</User.Provider>;
};

export const useUser = () => React.useContext(User);

export const useFetchUser = () => {
  const [data, setUser] = React.useState({
    user: userState || null,
    loading: userState === undefined,
  });

  React.useEffect(() => {
    if (userState !== undefined) {
      return;
    }

    let isMounted = true;

    fetchUser().then((user) => {
      // Only set the user if the component is still mounted
      if (isMounted) {
        setUser({ user, loading: false });
      }
    });

    return () => {
      isMounted = false;
    };
    // userState is a module-level variable, not reactive state, so it is not
    // a valid hook dependency; run this effect once on mount instead.
  }, []);

  return data;
};
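A usage sketch for the hook above in a Next.js-style page component (the component, the ../lib/user import path, and the user.name/user.email fields are illustrative assumptions):

// pages/profile.jsx
import React from "react";
import { useFetchUser } from "../lib/user";

export default function Profile() {
  const { user, loading } = useFetchUser();

  if (loading) return <p>Loading...</p>;
  if (!user) return <p>Please log in.</p>;

  return (
    <div>
      <h1>{user.name}</h1>
      <p>{user.email}</p>
    </div>
  );
}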
/** * @return a symbolic representation of this operator. */ public String toSymbol() { switch(this) { case EQUALS: return "="; case NOT_EQUALS: return "!="; case LESS_THAN: return "<"; case LESS_THAN_EQUALS: return "<="; case GREATER_THAN: return ">"; case GREATER_THAN_EQUALS: return ">="; } throw new IllegalStateException("Unreachable code."); }
use crate::jobc;
use crate::shell;

pub fn run(sh: &shell::Shell) -> i32 {
    if sh.jobs.is_empty() {
        return 0;
    }
    for (_i, job) in sh.jobs.iter() {
        jobc::print_job(job);
    }
    return 0;
}
# For each test case, find the largest number <= c whose remainder
# on division by a equals b (assumes 0 <= b < a).
t = int(input())
for _ in range(t):
    a, b, c = map(int, input().split())
    x = c % a
    if x >= b:
        # c is already past a value with remainder b in this block of size a.
        print(c - (x - b))
    else:
        # Step back into the previous block of size a.
        print(c - (x + a - b))
Pneumonia After Hematopoietic Stem Cell Transplantation

Pneumonia is the main cause of morbidity and mortality after hematopoietic stem cell transplantation. Two thirds of pneumonias observed after both autologous and allogeneic stem cell transplantations are of infectious origin, and coinfections are frequent. One third is due to noninfectious processes, such as alveolar hemorrhage, alveolar proteinosis, or alloimmune pulmonary complications such as bronchiolitis obliterans or idiopathic interstitial pneumonitis. Most of these noninfectious complications may require treatment with corticosteroids, which may be deleterious in infection. On the other hand, these complications either mimic or may be complicated with infections. Therefore, a precise diagnosis of pneumonia is of crucial importance to decide on the optimal treatment. CT scan is the best procedure for imaging of the lung. Although several indirect biomarkers, such as serum or plasma galactomannan or (1-3) β-D-glucan, can help in the etiological diagnosis, only direct invasive investigations provide the best chance to identify the cause(s) of pneumonia. Bronchoalveolar lavage (BAL) under fiberoptic bronchoscopy is the procedure of choice to identify the cause of pulmonary infection. It is safe and reproducible, and its diagnostic yield is around 50 % if the BAL fluid is processed at the laboratory according to a prespecified protocol established between the transplanter, the infectious diseases specialist, the pneumologist, and the laboratory, allowing the identification of the most likely hypotheses. Transbronchial biopsy does not provide significant additional information to BAL in most cases and more often complicates with bleeding and pneumothorax. In case of a noncontributory BAL, the decision to proceed to a second BAL, a transthoracic biopsy, or a surgical biopsy should be cautiously weighed in a multidisciplinary approach in regard to the benefits and risks of invasive procedures versus empirical treatment.

Pneumonia is the most common infection after transplantation, and the infection with the highest mortality. Roughly two thirds of the pneumonias observed after HSCT are of infectious origin, and infection should therefore be the leading hypothesis guiding the investigations. While infection-related mortality has decreased after HSCT over time, it is not certain that the incidence of pneumonia has decreased in parallel. Up to 30 % of the patients may develop pulmonary symptoms within the first 100 days after allogeneic HSCT. Even in T-cell-depleted allogeneic HSCT, where the incidence of pneumonia seems to be low, the occurrence of pneumonia significantly impacts on survival. The rates of bacterial, viral, and polymicrobial pneumonia do not seem to differ during the first 3 months after transplant between allogeneic and autologous HSCT recipients, while the rate of invasive fungal disease (IFD) is much higher after allogeneic HSCT, due to a more severe and prolonged immune defect which also favors late infectious complications. Factors enhancing the risk of infectious pneumonia are many and include donor and recipient serologies; previous pneumonia, which may warrant secondary prophylaxis; graft source; choice of donor and conditioning; graft-versus-host disease (GVHD); and also environmental factors. One of the main concerns in the evaluation of pneumonia is to distinguish infectious from noninfectious pneumonia, since many noninfectious causes may mimic infection. Additionally, pulmonary coinfections are frequent.
This means that the results of indirect markers, even though extremely useful in practice, should be interpreted cautiously, as they may identify only part of the responsible pathogens. Only a direct investigation of the lung as provided by bronchoalveolar lavage (BAL), combined with the use of well-chosen indirect markers, gives the best chance of identifying the several possible causes of pneumonia. This chapter focuses on the factors that make the lungs particularly susceptible to infections after HSCT, the main specific features of the clinical and imaging presentation of pulmonary infections, and the principles of diagnosis and management.

Altered Pulmonary Defense After HSCT

The lungs of HSCT candidates may have been exposed to toxic insults from their underlying diseases, prior infection, and prior chemotherapy and irradiation, which may compromise normal surveillance barriers. Conditioning before transplant and subsequent immunosuppressive therapy and infection all may impair native defenses and increase the risk for pulmonary infection. The ciliated and squamous epithelium, from nasopharynx to distal bronchioles, is the first line of defense. Significant impairment of the ciliary epithelium has been reported even years after transplant. The respective role of viral or mycoplasma infection or of GVHD or radiation in this finding cannot be precisely determined. However, these abnormalities were found in 17 of 20 long-term allogeneic HSCT survivors and are probably underestimated in routine practice. Alveolar macrophages act as phagocytes and secrete cytokines and chemokines, providing a next level of defense. Their functions may be altered by immunosuppressive agents and viral infection. During prolonged neutropenic phases, the number of alveolar macrophages decreases, and this could favor infection from pathogens which are normally phagocytosed at the alveolar level. Additionally, after allogeneic HSCT, the recipient alveolar macrophages are progressively replaced by cells of donor origin, and this may partly explain the numeric and functional impairment of the alveolar macrophage population during the first months after transplant.

Evolution of the Problem

The occurrence of infectious pneumonia relates to the interrelationship of infectious exposure or reactivation, the condition of the lungs, and the degree of immunosuppression. The changes in many transplant procedures, including various prophylaxes, and the availability of new diagnostic tools over the last decade should have changed the incidence of pneumonia after HSCT. However, there is no clear data to support this hypothesis, and one may consider that these changes have resulted more in a change in the timing and causes of pneumonia than in incidence or mortality. The increasing use of reduced intensity conditioning (RIC) regimens has significantly decreased the formerly high rate of early bacterial pneumonias. However, concomitantly, multidrug-resistant (MDR) bacteria have become a global concern in most hematology wards. The use of RICs has also changed the kinetics of many complications, delaying the onset of GVHD and the subsequent infections. Preemptive and prophylactic strategies against CMV infection have also considerably reduced the incidence of CMV pneumonia, which nowadays affects less than 6 % of the patients. However, pneumonia due to respiratory viruses has become common.
New antifungal agents have improved therapeutic options for Aspergillus infection, but non-Aspergillus molds, especially mucormycoses, are being seen with increasing frequency. Finally, despite significant progress, the morbidity and mortality of pneumonia after HSCT remain among the highest of any transplant. The timing of infectious pneumonia follows the timing of other infections according to the type of transplant and the occurrence and severity of GVHD, which is the main factor prolonging the infectious risk after the neutropenic phase. HSCT recipients are at risk for both nosocomial and community infections according to the phase of transplant. These environmental risks cannot always be prevented, unlike the reactivation risks, which must be evaluated before transplant.

Main Causes of Infectious Pneumonia After HSCT

Although changes in the transplant procedures have impacted on the infectious complications and their timing (see Chap. X), infectious pneumonia after HSCT occurs in predictable risk periods. After allogeneic transplant, early bacterial pneumonia mainly complicates myeloablative transplant, while opportunistic fungal and viral infections may affect the patient irrespective of the type of conditioning. After autologous transplant, most pneumonias occur during the neutropenic phase, especially in myeloma patients, and few of them are of fungal origin.

Bacterial Pneumonia

Bacterial pneumonias occurring during the initial neutropenia are caused by pathogens common to all neutropenic patients or to those with comparable mucositis in the ward. The clinician should also consider the possibility of streptococcal pneumonia or ARDS related to streptococcal sepsis. These infections are particularly due to Streptococcus viridans and have been correlated with the presence of mucositis, the use of prophylactic quinolones, and the administration of high doses of cytarabine (see Chap. 20). The approach to bacterial pneumonias early after transplantation is similar to that in other neutropenic hosts, and it should include coverage for Pseudomonas species and possibly MDR organisms in case of previous colonization or infection. Most patients are maintained on indwelling intravenous catheters throughout this period, and seeding of the lungs from bacteremia continues to be a potential risk. After recovery from neutropenia, allogeneic transplant recipients continue to be at risk for any nosocomial infections as long as they stay in the hospital (see Figure 16-1).

Bacterial infections occurring in the late posttransplantation period may be favored by persistent immunoglobulin deficiency, which increases the risk of pneumonia caused by encapsulated bacteria. Invasive pneumococcal infection occurs significantly more often after allogeneic than after autologous transplantations, especially in case of chronic GVHD, and may be rapidly fatal. In a prospective study from the European Blood and Marrow Transplantation Group, no pneumonia developed in seven cases of invasive infection observed before day 100, whereas it was seen in 18 of 44 (41 %) cases observed after day 100, and half of the fatal cases of late infection were associated with pneumonia. Early immunization with the 13-valent conjugate vaccine, completed by the 23-valent polysaccharide later, or a fourth dose of the conjugate vaccine in case of GVHD, could reduce the incidence of pneumococcal infection over time (see Chap. 48). Similarly, H. influenzae may cause pneumonia and sinus infection, usually past the third month after transplantation. Immunization with a conjugate vaccine against type b is recommended from 6 months after transplant.

Figure 16-1. This 56-year-old patient received an allogeneic HSCT from an unrelated donor for acute myeloid leukemia. He was a smoker and suffered from chronic bronchitis before transplant. He was rehospitalized at 7 months after transplant for severe chronic GVHD and was treated with steroids. He developed febrile pneumonia after 9 days of hospitalization. The lung CT scan showed ground-glass, patchy infiltrates of the left lower lobe. The bronchoalveolar lavage was positive for coronavirus, and the culture of protected aspiration (10³ CFUs/mL) and the culture of the lavage fluid (10⁴ CFUs/mL) were both positive for Klebsiella pneumoniae.

Pneumonias from intracellular pathogens are rarely reported, but they may recur in previously exposed patients. Pneumonia due to Legionella species has occasionally been reported in the setting of outbreaks, most often as a nosocomial infection. The radiologic findings may be variable; they may mimic fungal nodules, and they may not be apparent at the onset of high fever and pleuritic pain. Invasive nocardiosis, reported in 0.3-1.7 % after allogeneic transplant, mainly occurs in patients who are not receiving TMP-SMX and is often difficult to differentiate from fungal pneumonia. Mycobacterial infections due to M. tuberculosis, Mycobacterium avium-intracellulare complex, or other species are rarely reported. Generally, they are diagnosed at 2-18 months after transplantation, but they may develop early when prior infection has occurred (see Figure 16-2).

Fungal Pneumonia (Including Pneumocystis Pneumonia)

Fungal pneumonia: Aspergillus is the most worrisome cause of IFD after allogeneic HSCT. It reportedly occurs after 0-20 % of transplantations; the most common site is the lung, and GVHD is the main risk factor (see Chap. X). A first peak of incidence occurs during the neutropenic period after myeloablative conditioning regimens, particularly in patients with leukemia. The second incidence peak is generally seen later, in patients with acute GVHD and receiving corticosteroids. The availability of antifungal azoles for anti-aspergillus prophylaxis has significantly reduced the incidence. However, the mortality of aspergillosis remained close to 50 % in recent series. This infection must be considered in any case of fever, particularly one occurring in a patient on broad-spectrum antibiotics, or of any pneumonia, whether of new onset or a previously diagnosed condition that does not resolve with appropriate therapy (see Figure 16-3). A negative bronchoscopy result, even when combined with testing of galactomannan in the BAL fluid, does not diminish the suspicion for this pathogen. Without secondary prophylaxis, possibly combined with surgical removal of the main lesions, the risk of relapse of prior Aspergillus infection after HSCT has been estimated around 20 %. In addition to being found in the lung parenchyma, Aspergillus may be isolated in the tracheobronchial tree, where it may be responsible for significant airway obstruction. White, adherent plaques may be seen on bronchoscopy, particularly in the setting of chronic GVHD and steroid use. This infection must be differentiated from worsening bronchiolitis, so that inappropriate and dangerous increases in immunosuppression can be avoided.
Pneumonia due to Candida species is rarely reported, partly because no firm criteria exist for differentiating invasive infection from colonization based on bronchoscopy without biopsy. The lungs may be involved in any systemic Candida species infection. Pneumonias due to endemic fungi, such as Histoplasma or Coccidioides species, particularly in North America, must be considered in these patients, as should the emerging fungi, including Trichosporon, Alternaria, and Fusarium. Special attention should be paid to the possibility of Mucorales after allogeneic HSCT (see Chap. 39). Its mortality rate is between 50 and 80 %. Mucormycosis shares common risk factors with aspergillosis but usually occurs later, and often after voriconazole administration, although the role of selection pressure is debated. There is no available indirect marker of mucormycosis except PCR tests currently under evaluation. The classical presentation of mucormycosis after transplant mostly mimics aspergillosis, but galactomannan is negative (see Figure 16-4). Differentiating mucor from aspergillus infection is, however, of great importance due to the different therapeutic implications. As long as there is a doubt between the two infections, the patient must be treated with liposomal amphotericin B.

Pneumocystis jirovecii Pneumonia (PjP)

Historically, the incidence of PjP in patients not receiving prophylaxis in the 1980s was found to be 16 % during the first 6 months after transplant. This incidence has dramatically decreased to between 1 and 2.5 % with the use of trimethoprim-sulfamethoxazole (TMP-SMX) prophylaxis, but the mortality of established PjP remains around 50-70 %. However, in patients receiving dapsone prophylaxis, an incidence of 7.2 % was reported after allogeneic HSCT. PjP usually manifests with fever, nonproductive cough, dyspnea, and diffuse interstitial pneumonitis. In HSCT recipients, the presentation of PjP may be extremely abrupt, and the patient may quickly deteriorate and require intensive care unit (ICU) admission. Rarely, the disease may present with an isolated low-grade fever and a normal chest X-ray at the beginning. In such cases, if the cause of fever is not rapidly found, a CT scan will show pulmonary ground-glass lesions and prompt a BAL. The elevation of LDH is of little help. Most patients present with nodular infiltrates or another pattern of diffuse interstitial pneumonia. Pleural effusion and pneumothorax are uncommon. Most cases occur between 3 and 24 months after transplant, in patients with acute or chronic GVHD or in relapse of the underlying disease. Most are receiving steroids, especially at a phase of tapering off or after recent withdrawal, and do not receive, or are not compliant with, TMP-SMX prophylaxis. Whether a low CD4 count is a main risk factor for developing PjP after HSCT is unknown. P. jirovecii is not cultivable in vitro.

Figure 16-2. This 37-year-old woman received an allogeneic HSCT from her HLA-identical brother for poor-risk acute myeloid leukemia. She had a past history of pulmonary tuberculosis 10 years ago but was intolerant to secondary prophylaxis. Three months after transplant, while she was well with no GVHD, she developed an insidious fever. Chest X-ray was normal. The lung CT scan showed diffuse micronodular infiltrates and a sub-parietal nodule of 1.5 cm in diameter in the upper left lobe. The bronchoalveolar lavage was positive for M. tuberculosis in culture.
It may be identified by microscopic detection, direct or indirect immunofluorescence (IF), or nucleic acid tests (NAT). Several stainings may be used for the microscopic detection of trophic forms and cysts in any respiratory sample, such as Giemsa to identify trophic forms and toluidine blue O or calcofluor white to detect cysts, without significant difference in their diagnostic performance. IF has a better sensitivity than conventional stainings. The combination of one classical staining and IF allows the detection of both cystic and trophic forms. PCR is the most sensitive diagnostic assay to identify pneumocystis, although no study defines a clear cutoff of positivity. HSCT recipients, like other non-HIV-infected patients, are known to be infected with a low burden of cysts. As the pneumocystis burden decreases along a gradient from the lower to the upper respiratory airways, this probably explains the difficulties in identifying P. jirovecii in induced sputum or other upper respiratory samples with conventional techniques in non-HIV-infected patients. Therefore, BAL fluid is the preferred specimen for the diagnosis of PjP in HSCT recipients. Another argument for BAL is that half of the PjP cases in non-HIV-infected patients are associated with coinfections, especially with bacteria, CMV, and Aspergillus spp., which require identification and treatment. In case a BAL cannot be done, upper respiratory tract (URT) specimens, like induced sputum, oral washings, nasal swabs, or nasopharyngeal aspirates, can be used, but with a lower expected diagnostic value than with BAL.

Figure 16-3. This young patient, 20 years old, received an allogeneic HSCT from an unrelated donor for acute lymphoblastic leukemia in second remission. He got severe acute GVHD and was not compliant with anti-mold azole prophylaxis. He developed an acute right chest pain with fever. (a) Both X-ray and CT scan showed a macronodular and isolated lesion of the right lower lobe. Serum galactomannan assay was negative. (b) The bronchoalveolar lavage smears showed hyphae characteristic of aspergillus (Gomori-Grocott stain). The culture of BAL fluid grew Aspergillus fumigatus.

Figure 16-4. This 28-year-old patient had received an allogeneic HSCT for acute lymphoblastic leukemia from an unrelated donor. He got severe cutaneous and gut GVHD and was treated with steroids. At 4 months after transplant, while still on 0.7 mg/kg of prednisone, he developed a nodular lesion of the right lower lobe. A galactomannan test was positive in serum. He refused fibroscopy and was treated for aspergillus infection with voriconazole. He then did not attend the consultations for 1 month and came back with bilateral thoracic pains and fever. The CT scan showed bilateral pleural effusion and a voluminous round, necrotic lesion surrounded by an area of consolidation in the right lower lobe. Rhizopus grew from the BAL fluid.

(1-3) β-D-glucan is a major cell wall component of P. jirovecii. Two meta-analyses have shown the excellent sensitivity of the serum assay, but due to its panfungal nature and the frequency of other IFD after HSCT, it can be only a screening tool for PjP. On the other hand, its use in BAL fluid is not recommended, due to poor sensitivity and reproducibility. The recent guidelines of the fifth European Conference on Infections in Leukemia propose a practical algorithm for the diagnosis of PjP in non-HIV-infected patients, based on the examination of BAL fluid with IF and qPCR.
The positivity or negativity of both techniques establishes the presence or absence of PjP. When IF is positive and qPCR negative, this should reflect a technical problem, mainly with the qPCR. When qPCR is the only positive assay, although no quantitative cutoff can be uniformly proposed, a high fungal burden favors a diagnosis of PjP. The concomitant positivity of serum (1-3) β-D-glucan is an additional argument favoring PjP. When BAL is not possible because the patient is too hypoxemic or refuses the procedure, serum (1-3) β-D-glucan can be helpful in conjunction with URT samples. When the clinical suspicion of PjP is high and the BAL cannot be done immediately, an empirical treatment with TMP-SMX should be started as soon as possible, since it will not impair the diagnostic yield of investigative procedures for at least several days. TMP-SMX at the dose of 15-20 mg/kg of TMP plus 75-100 mg/kg of SMX, by oral or preferably IV route, is the first choice for treatment, even in patients who were supposed to take TMP-SMX prophylaxis, as the presence of dihydropteroate synthase mutations does not significantly affect the treatment efficacy. The addition of steroids for the more hypoxemic patients (PaO2 while breathing room air <70 mmHg), although well established in HIV-infected patients, is debated in others. PjP prophylaxis is strongly recommended from engraftment for at least 6 months after allogeneic HSCT, and longer as long as any immunosuppressive drugs are administered, and for at least 3-6 months after autologous HSCT. No large prospective series compare the respective prophylactic efficacy of TMP-SMX with alternatives in HSCT recipients. However, strong arguments from both prospective studies in the acquired immunodeficiency syndrome and retrospective HSCT series suggest that TMP-SMX is the best prophylactic regimen, any alternative to TMP-SMX (dapsone, atovaquone, or pentamidine) being inferior.

Viral Pneumonia

During the neutropenic phase of transplant, the incidence of herpes simplex virus (HSV) reactivation and disease, including pneumonia, has fallen sharply with the wide use of prophylactic acyclovir or valaciclovir. Until the beginning of the 1990s, CMV was the most significant pathogen for pneumonia after allogeneic transplant, affecting 15 % of the recipients. Preemptive and prophylactic strategies have greatly decreased its incidence, currently in the range of 1-5 %. It is generally a febrile disease in which the radiographic patterns are primarily interstitial but sometimes alveolar. Coinfections are frequent. The optimal approach to identify the virus in the lungs is the combination of IF and rapid culture of BAL fluid. The identification of CMV through PCR on BAL fluid has been shown to have limited correlation with the development of CMV pneumonia and is therefore not considered a criterion for CMV pneumonia (see Chap. 24). Therefore, as most laboratories abandon IF assays in favor of more automated qPCR techniques, a careful examination of the BAL smears by an experienced cytologist is important to detect the cytological hallmarks of CMV pneumonia, knowing that the identification of the characteristic inclusions in alveolar cells is a sign of advanced infection. Other herpesviruses, including varicella-zoster virus, EBV, and human herpesvirus 6 (HHV-6), have been reported as causes of pneumonia in HSCT recipients. High levels of HHV-6 DNA have been found in the lung tissue of patients with idiopathic or CMV interstitial pneumonitis.
However, the clinical significance of this finding, and the need for specific therapy, remain unclear. Pneumonia caused by respiratory viruses has become a main concern in HSCT recipients, and the list of implicated viruses regularly enlarges. The main risk factors for death are early onset after transplant, neutropenia, lymphopenia, GVHD, steroid administration, and older age. Recently, an immunodeficiency scoring system has been proposed to predict poor outcomes and better identify patients infected by respiratory syncytial virus who should benefit the most from antiviral therapy. The incidence is lower after autologous than after allogeneic transplant. Identification by NAT in respiratory samples is the recommended technique and may be performed on nasopharyngeal or throat swabs, bronchial aspiration, or BAL fluid with multiplex assays. Diagnosing these patients early has several benefits: some of these infections may be efficiently treated (e.g., oseltamivir in influenza infection or ribavirin for respiratory syncytial virus); all of them imply isolation and barrier measures to prevent transmission to other patients or staff; and respiratory viral infections early after allogeneic transplant predict the development of alloimmune lung syndromes, including bronchiolitis obliterans and idiopathic interstitial pneumonia. When respiratory viruses are detected before transplant, delaying the transplant should be considered. Measles pneumonia has rarely been reported after HSCT but may be an expected event in the setting of outbreaks and may occur without a rash. Adenovirus pneumonia is a very rare but potentially life-threatening event occurring in the setting either of disseminated adenovirus infection or of usually upper and then lower respiratory tract infections (see Chap. 33); it occurs more frequently in children than in adults and in unrelated transplants or after T-cell depletion.

Other Causes

Reports of pulmonary toxoplasmosis are rare; it is usually seen in the setting of disseminated infection resulting from reactivation, during the first year after transplantation, in seropositive recipients not receiving TMP-SMX. The pattern is usually a diffuse interstitial disease, and neurologic symptoms may be absent. Toxoplasmosis may be identified in BAL fluid and blood by IF and qPCR. A prospective screening by qPCR in the patients at risk may allow a preemptive therapy.

Differential Diagnosis to Infectious Pneumonia: The Main Noninfectious Processes Affecting the Lungs After HSCT

The lung is the site of numerous noninfectious injuries, causing one third of pulmonary infiltrates after HSCT. These need to be considered because they may require specific treatments. Pulmonary edema, pulmonary embolism, and acute respiratory distress syndrome may occur at any time, but more often during the early phase of transplant, without any special presentation in transplant recipients, and will not be detailed here. Other noninfectious processes affecting the lung deserve specific consideration as they are either frequent or specifically observed in HSCT recipients. These noninfectious processes may be associated with infections, increasing the difficulty of proposing an optimal treatment. Their correct identification is, however, of crucial importance, since steroids may be indicated in several noninfectious processes while they will be deleterious in most infections. The probability of their occurrence may vary by time after transplantation and type of transplant.
Alveolar hemorrhage (AH) is a frequent noninfectious process affecting the lung after any HSCT, with an incidence rate of 6-41 %. AH is diagnosed on the basis of either a bloody aspect of the BAL fluid, usually transient, or the presence of ≥20 % of siderophages among alveolar macrophages (see Figure 16-7). AH after HSCT may be an autonomous process favored by thrombocytopenia, other coagulation disorders, or renal failure and by any rupture of the alveolar-capillary barrier such as in pulmonary edema, but it may also be associated with infections, like aspergillus or CMV, in two thirds of the cases. Neither the clinical presentation nor imaging is specific of infectious or noninfectious forms.

Secondary alveolar proteinosis (AP) is rare, occurring mostly during prolonged neutropenia. It is the result of a complex process, probably combining pneumocyte II stimulation and quantitative and functional defects of the alveolar macrophages. This results in an impaired clearance of pulmonary surfactant and the accumulation of a lipoproteinaceous periodic acid-Schiff (PAS)-positive material in the alveolar space (see Figure 16-8). It usually mimics an insidious pulmonary edema. The diagnosis may be suspected on the sticky aspect of the BAL fluid and then by difficulties in counting the cells. The usual stainings do not identify AP. The cytologist must be aware of this possibility and examine the alveolar material with PAS or Sudan Black staining. Secondary AP rarely complicates with severe respiratory failure. When it occurs during neutropenia, it usually improves at neutrophil recovery. However, as for AH, some cases are associated with infections.

Pulmonary veno-occlusive disease is a very rare event after HSCT. It mainly manifests by pulmonary arterial hypertension, but with a normal pulmonary artery occlusion pressure. The diagnosis is extremely difficult. By analogy with liver veno-occlusive disease, it is hypothesized that it is due to chemotherapy and/or radiation toxicity on the small vessels.

The engraftment syndrome may be observed during neutrophil recovery, at a median onset of 16 days after transplant, and usually associates ≥2 of the following criteria: fever, skin rash, weight gain due to capillary leakage, and respiratory failure without other identified cause. It is hypothesized that degranulation of upcoming neutrophils could induce lung injury. Engraftment syndrome is associated with a large dose of mononuclear cells infused, the use of G-CSF or GM-CSF, early neutrophil recovery, non-myeloablative conditioning, the use of amphotericin B therapy, and autologous rather than allogeneic transplant. An incidence of up to 48 % has been reported in children after allogeneic myeloablative transplant, one fourth of them suffering from pulmonary symptoms. As severe cases may require steroids, it is important to quickly rule out an infection.

Idiopathic (noninfectious) interstitial pneumonia is a complication reported in most allogeneic HSCT studies, with a high mortality rate. This diagnosis implies having ruled out at least the main infections classically presenting as diffuse interstitial pneumonia, especially viral pneumonia and PjP, as well as cardiac dysfunction and fluid overload. In myeloablative transplant, it has been associated with leukemia or myelodysplastic syndrome, severe acute and chronic GVHD, high-dose total body irradiation, and older age.
In allogeneic HSCT, its incidence has been reduced from 8.4 % after myeloablative to 2.2 % after non-myeloablative conditioning. A recent study showed that, among 69 HSCT recipients who had developed an idiopathic pulmonary syndrome between 1992 and 2006 in Seattle, a retrospective microbiological screening of BAL material for 3 bacteria, 25 viruses searched with NAT, and galactomannan identified one pathogen in 56.5 % of the patients (mainly HHV-6, rhinovirus, CMV, and aspergillus), and this finding was associated with an increased mortality at day 100. This confirms that the rate of "idiopathic" pneumonia depends strongly on how extensively infection is sought.

Bronchiolitis obliterans (BO, or obliterative bronchiolitis) is an important factor contributing to death, usually from 6 months after HSCT. Reported only after allogeneic HSCT, the condition has been related to older age, unrelated donor, total body irradiation, decreases in serum immunoglobulin G, and chronic GVHD, with a frequency of 3-10 % in patients with chronic GVHD who survive 120 days. It seems to be prevented by T-cell depletion of the graft. BO usually occurs insidiously, with cough, dyspnea, and wheezing, but may complicate with fever and mimic bronchopulmonary infection. Its hallmark is airway obstruction. The lung CT scan shows hyperinflation and bronchiectasis, with a mosaic pattern. BAL and other endoscopic samples are of limited value, as they just aim to rule out infection. As no noncontributory BAL can definitely rule out infection, it is preferable to perform two consecutive BALs at 1-2 weeks' interval to increase the chance of not missing any pathogen. BO is often associated with sinusitis and complicated by infections, especially those caused by Haemophilus influenzae, S. pneumoniae, Aspergillus species, and respiratory viruses. Despite immunosuppressive therapy, the prognosis is poor.

Alveolar or nodular infiltrates may be seen in the setting of allogeneic HSCT as a result of bronchiolitis obliterans organizing pneumonia (BOOP), also called cryptogenic organizing pneumonia. BOOP is much less common than BO and is also considered a manifestation of GVHD, but it has also been reported after autologous HSCT. It occurs earlier than BO, usually in the first 3 months following transplant. The CT scan shows nodular opacities and patchy consolidations. Pulmonary function tests show a restrictive defect. A histologic diagnosis is strongly recommended, because BOOP may mimic infection but can be reversible with corticosteroid therapy.

Malignant lung lesions may be seen after HSCT, due either to a primary or secondary cancer, a localized relapse of the hematologic malignancy (see Figure 16-9), or EBV lymphoproliferative diseases (see Chap. X).

Principles of Management

Management of pneumonia after HSCT requires a high degree of suspicion and the early use of diagnostic procedures. The increasing availability of indirect markers of infection tends to decrease the early use of BAL. However, BAL remains the easiest and safest procedure to identify both infectious and noninfectious causes of pneumonia. More invasive diagnostic procedures, such as transbronchial or lung biopsy, need to be reserved for situations in which BAL is noncontributory, while weighing the risk of increased morbidity.

Clinical Approach to Pneumonia

A systematic approach to pneumonia in any HSCT recipient should include consideration of the following: history, clinical presentation, and imaging.
History

Knowledge of a patient's exposure, travel, environmental risks, and previous documented infections, the hospital epidemiology, and the pretransplant donor and recipient serologies, particularly with regard to CMV and toxoplasmosis, are essential. A history of recurrent MDR bacterial infection may require special consideration in choosing antibiotics. Evaluation of the patient's compliance with anti-infective prophylaxis, especially TMP-SMX, may be essential in evaluating the risk of PjP. Whether the patient is neutropenic, lymphopenic, or hypogammaglobulinemic at presentation may be important to list the main infectious hypotheses.

Clinical Presentation

Symptoms and signs of pneumonia may or may not be typical of a known infectious cause. However, none is very specific. As in all immunosuppressed patients, few findings may be present, so any symptom must be carefully and quickly evaluated, with the consideration that any infection can rapidly progress. Fever, cough, or sputum production may be absent. Hypoxemia may be the sole finding, and even if the X-ray is normal, in case a chest CT scan cannot be obtained quickly, a bronchoscopic evaluation should be considered. The presence of any such symptom may, however, reflect a noninfectious etiology. Acute thoracic pain, with or without hemoptysis, may indicate embolic disease but may also denote Aspergillus infection. Pneumothorax may reveal, or complicate, PjP, mycobacterial or Aspergillus infection, or fibrosis. The rapid onset of pneumonia is mainly consistent with bacterial pneumonia, PjP, pulmonary edema or hemorrhage, or thromboembolism, but this may also occur with viral infections in immunosuppressed patients. A subacute onset is more suggestive of IFD, although IFD may also present abruptly.

Imaging

Posttransplantation pneumonia may be focal, multifocal, diffuse and interstitial, alveolar, or mixed. Every effort must be made to quickly obtain chest X-rays of optimal quality and/or a high-resolution chest CT scan when easily available. X-rays in supine position are rarely helpful. Additionally, most X-ray patterns are nonspecific, and many patients have mixed types of infiltrates. When an X-ray appears negative or shows only minimal changes, there is good evidence that a chest CT may reveal abnormalities. CT scan has the best negative predictive value to rule out pneumonia and may show lung lesions up to 5 days before the chest X-ray. CT may additionally provide localization of the lesions, guide invasive procedures, and give information on their proximity to pulmonary vessels. This information is also important to evaluate the risk of hemoptysis in aspergillosis. CT may also detect small pleural effusions. Some CT findings may suggest the presence of particular infections. For example, the halo sign (a macronodule ≥1 cm in diameter surrounded by a perimeter of ground-glass opacity) is very evocative of early aspergillosis during neutropenia, but may also be seen in other infections (e.g., legionella, mycobacterial infection, mucormycosis, or viral infections). Similarly, the reversed halo sign or "atoll sign" (a focal ground-glass attenuation surrounded by a ring of consolidation) has been shown to be often due to mucormycosis in hematology patients, but may also be observed in other infections, including aspergillosis. Ground-glass opacities are very nonspecific and consistent with any infectious and many noninfectious processes, such as pulmonary edema or hemorrhage.
However, even with more characteristic lesions, such as the air crescent sign, which is rare after HSCT but very evocative of mold infection, a CT scan does not replace the need for identification of the pathogen for diagnosis. Magnetic resonance imaging (MRI) usually does not provide more information than CT, except in the detection of lung abscesses. The usefulness of PET scan is limited for the diagnosis of acute pneumonia, but may be better in nodular, subacute lesions, to identify extrapulmonary lesions, or to follow the treatment efficacy. Any workup using imaging should be completed rapidly, and it should lead quickly to a diagnostic procedure or, in most cases, to an empiric approach considering the most likely hypotheses.

Diagnostic Investigation

Blood cultures should be performed routinely, but they are of limited value in diagnosing pneumonia except when the pathogen has a high propensity for the blood, such as Streptococcus pneumoniae, or in neutropenia. Special culture media are required when Nocardia or atypical mycobacteria are suspected. The blood should also be quickly sampled for CMV antigenemia or quantitative real-time PCR (qPCR) in patients at risk. The microbial documentation of any other site of infection, such as skin biopsy or cerebrospinal fluid, may be useful.

Blood biomarkers for the diagnosis of IFD include the detection of galactomannan by an enzyme-linked immunosorbent assay and of (1-3) β-D-glucan by a colorimetric assay. (1-3) β-D-glucan is a panfungal marker, while galactomannan is mainly associated with aspergillosis, although it may be positive in other mold infections, e.g., fusariosis. A meta-analysis of 27 studies showed that the galactomannan test has a sensitivity of 0.71 and a specificity of 0.89 for proven invasive aspergillosis. The assay seems to be more useful for the prospective screening of neutropenic patients than for diagnosing pneumonia, and also more useful in neutropenic than in non-neutropenic patients. The cutoff of positivity usually recommended is an index ≥0.5 in plasma or serum. In an autopsy-based study, the sensitivity and specificity of the serum (1-3) β-D-glucan test for the detection of IFD were 95.1 % and 85.7 %, respectively. The serum (1-3) β-D-glucan test is also very useful in the indirect diagnosis of PjP. Fungal NAT have also been widely investigated in HSCT recipients, but no consensus on their use in clinical practice currently exists. At this time, no noninvasive test exists that can replace the specificity of direct pulmonary investigation.

Although sputum may be analyzed to yield organisms colonizing the oropharynx, the clinical relevance of the results is not evidence based in the setting of HSCT. A positive culture may be valuable when agents that do not normally inhabit the oropharynx are isolated, especially Legionella, mycobacteria, and some fungi, or to document MDR colonization, which may guide an empirical antibacterial treatment. In HSCT recipients with pneumonia, a positive sputum culture may be highly suspicious for pulmonary aspergillosis. Similarly, the presence of M. tuberculosis in the sputum may be considered the cause of the pneumonia when clinical and radiologic signs support this etiology. This assertion is to be considered with more caution for nontuberculous mycobacteria. Nasopharyngeal aspirates or washings are useful to detect respiratory viruses in patients with URT infection.
However, the correlation with the cause of the concomitant pneumonia is only presumptive, as coinfections are frequent.

The standard for diagnosing pulmonary infection after HSCT is bronchoscopic sampling with BAL (Table 16-1). Lavage is safe, minimally invasive, and reproducible. Its overall diagnostic yield is comparable to that of lung biopsy, but with more infectious diagnoses and far fewer complications. The clinician who consults with a pulmonary specialist for BAL should consider platelet transfusions if the patient is thrombocytopenic and should alert the microbiology laboratories to ensure that all potential organisms are sought. Oxygen saturation or arterial pressure should be assessed before the procedure. Fever, transient hypoxemia, and worsening of chest X-rays may be expected in as many as one half of patients during the few hours following the procedure. When the patient is hypoxemic (PaO2 < 70 mmHg, spontaneously or with O2 supplementation) or tachypneic before BAL, he usually benefits from noninvasive ventilation immediately after the procedure. The overall diagnostic yield of BAL in infectious pneumonia occurring in hematologic patients varies between 27 and 55 %, depending on many parameters, such as the following:

- The localization of the pulmonary lesions: whether they are accessible by BAL or not.

- Whether the patient is neutropenic. The yield of the procedure is usually lower in neutropenic than in non-neutropenic patients.

- The type of the causal infection: for example, the diagnostic yield of BAL with conventional mycological techniques (without galactomannan tested in the BAL fluid) for aspergillus pneumonia is usually lower than 50 %, while it is higher than 90 % in PjP or CMV pneumonia, for which one rarely needs a lung biopsy.

- The laboratory exams performed on the bronchoscopic samples. The laboratory protocol should be established in advance in a multidisciplinary approach, according to the expected infectious and noninfectious causes of pneumonia, possibly adapted to the seasons for respiratory viruses.

- The criteria used to define specific entities. For example, it is generally believed that the presence of candida in a BAL fluid or bronchial aspiration does not necessarily mean a candida pneumonia, while the presence of aspergillus in an HSCT recipient does. However, for some causes of pneumonia, there is as yet no consensus definition. The increasing availability of NAT for many pathogens should not, in many instances, replace more classical techniques, until the classical techniques are shown to be no longer useful in diagnosing a given infection.

- The delay between presentation and BAL and the number and duration of previous antibiotics before performing BAL. The diagnostic yield of BAL has been shown to be better when it is performed early after the onset of pulmonary symptoms. In a series of 297 HSCT patients who underwent a BAL, the diagnostic yield of the procedure was 56.8 % in patients with symptoms for less than 24 h versus 32.8 % in the others. In another study, the diagnostic yield was 73 % in patients who underwent BAL within 4 days of presentation and 31 % thereafter. This may be due to the effect of previous anti-infectives on the probability of identifying a pathogen, but also to the fact that lung inflammatory lesions may persist some time after the infection is controlled, so that delayed BAL may be performed in patients with a favorable outcome but persistent imaging abnormalities and clinical signs. Therefore, it is recommended to do a BAL as soon as possible.

- Finally, although pneumonia is less frequent after autologous than after allogeneic HSCT, the diagnostic yield of BAL has been reported to be lower in pneumonia occurring after autologous rather than after allogeneic HSCT.

However, despite these variabilities, BAL, when well tolerated and correctly processed at the laboratory, represents the best diagnostic strategy with a minimum of complications. It should also be noted that cytologic examination of BAL fluid will also document alveolar hemorrhage or alveolar proteinosis. A routine BAL protocol for HSCT recipients should include at least total and differential cell counts on cytocentrifuge preparations using May-Grünwald-Giemsa stains, as well as cytologic examination on cell pellets obtained by centrifugation and cytocentrifugation that are stained with the May-Grünwald-Giemsa stain, the Papanicolaou stain for viruses, and the Gomori-Grocott method for P. jirovecii and fungi (Table 16-1). Other stains are necessary to identify alveolar proteinosis (PAS), mycobacteria (Ziehl), and siderophages (Perls' Prussian blue). A sample of fluid should be sent for bacteriologic and fungal cultures and viral tests. Galactomannan detection may be done in BAL fluid, especially in neutropenic patients with aspergillosis, but with a higher cutoff (≥1) than in serum. Aspiration and BAL fluids should be examined for Legionella pneumophila by culture and possibly NAT, and for Nocardia and mycobacteria. Due to the better sensitivity of qPCR over conventional stainings and IF assays, some laboratories already use qPCR exclusively. The viruses of interest in HSCT patients are the viruses of the herpes family, adenoviruses, and respiratory viruses (i.e., respiratory syncytial virus, influenza and parainfluenza viruses, rhinoviruses, metapneumoviruses, coronaviruses, enteroviruses, and bocavirus), which should be sought particularly in the setting of known exposures and during seasonal outbreaks.

Table 16-1 (note): Transbronchial biopsy is essential for noninfectious processes and less contributive than BAL for infectious pneumonia. However, it is usually not proposed in the initial investigation of pneumonia, due to its possible complications (pneumothorax, bleeding). BCYE buffered charcoal yeast extract; AFB acid-fast bacillus; IF immunofluorescence; PCR polymerase chain reaction.

A protected bacteriologic sample (PBS), done by a protected brush specimen or a plugged telescoping catheter, should be processed by quantitative culture techniques. Although determined from mechanically ventilated patients, the minimal threshold bacterial concentration usually required to consider the isolated pathogen as the cause of the pneumonia is 10³ colony-forming units (CFUs)/mL for PBS and 10⁴ to 10⁵ CFUs/mL in the BAL fluid.

Due to the increased risk it carries of bleeding and pneumothorax, transbronchial biopsy is not routine in acute pneumonia occurring in patients with HSCT and should not be proposed with the first bronchoscopy and BAL. Also, it does not add significant information to a concomitant BAL in most cases. In cases in which bronchoscopy is noncontributory, one should consider performing a second BAL and/or a transbronchial biopsy or, better, a transthoracic needle aspiration when the lesion(s) is nodular and subpleural.
After HSCT, focal lesions that develop or persist despite antibiotics are mostly of fungal origin. Successful fine needle aspiration, guided by either ultrasound or CT, has been reported, with a complication rate around 15 %, and is useful for documenting IFD when other procedures have failed. The final decision between lung biopsy, through open or video-assisted thoracoscopy, and empirical treatment to cover the most likely organisms should be made by the transplant physician and the lung specialist after weighing the risks of surgery, of empirical treatment, and of failure to reach a diagnosis, together with the etiologies most likely at that time after transplantation. Lung biopsy is more helpful when the clinical course is prolonged and the pattern is nodular or cavitary.

Starting Treatment and Reevaluation of Efficacy

Because any pneumonia that occurs after HSCT may be life threatening, empirical antibiotics against the likely organisms must be started immediately. The best approach is to conduct bronchoscopic investigation with BAL as soon as possible; this should not, however, delay the initiation of treatment, especially when acute (likely bacterial) pneumonia is present or the patient is neutropenic. Consideration should be given to the likelihood of fungus in patients with prolonged neutropenia and in those with GVHD on steroid therapy. Some empirical treatments may render subsequent testing negative, especially that for bacteria and viruses, yet they may be warranted. Other empirical treatments will not affect the chance of isolating the pathogen for at least several days after the empirical treatment is begun (e.g., TMP-SMX for P. jirovecii, antifungal agents for aspergillosis). Daily clinical reevaluation should be performed, especially when no diagnosis is initially established and the patient does not improve. The use of noninvasive markers, when initially positive, is mostly useful to assess the treatment efficacy:

- Patients with initial positive blood cultures should be sampled for blood culture controls daily until negative.

- It has been shown in aspergillus infection with an initial positive serum or plasma galactomannan test that the quantitative evolution of the test correlates with the prognosis from as early as the first week of therapy.

Serial follow-up X-rays or, preferably, lung CT scans should be repeated according to the type and severity of the pneumonia. However, some infections, although evolving favorably, may be associated with a long persistence of image abnormalities, which may take several months to decrease or disappear. In the absence of new lesions, this should not be per se a reason to reinvestigate the patient if the clinical outcome is favorable. In aspergillosis, it has been shown that a transient increase in the volume of the fungal lesions on CT scan may occur at the time of neutropenia recovery without signifying treatment failure. New investigations should be rapidly undertaken when the pneumonia does not respond to empirical treatment. Even when the cause of the pneumonia has been established, the occurrence of new infiltrates should be regarded as suspicious for treatment failure or new infections, as the association or succession of several causes of pneumonia is not uncommon in this setting. When a BAL has initially been done on accessible lesions, a second one should not be considered before most of the laboratory results are back, except if the BAL was performed in poor conditions or in case of new lesions.
Usually, a delay of 1 week between a first noncontributory BAL and a second BAL is a minimum. If the initial lesion is peripheral and nodular and the BAL was noncontributory, a transthoracic fine needle biopsy should be considered. If the lesion is subacute or chronic and there is no response to targeted or empirical treatment, surgical biopsy may be contemplated for chronic nodular lesions.

Place of Intensive Care and Ventilatory Support

Pneumonia is the cause of ICU transfer in roughly one third of the cases, in both allogeneic and autologous HSCT recipients. Although the prognosis of HSCT patients transferred to the ICU has slightly improved over time, the decision to transfer remains difficult in terms of the emotional burden for the patient, family, and caregivers. The use of predictive scores, such as the sepsis-related organ failure assessment (SOFA), assessed at ICU transfer in HSCT recipients is debated. Patients with acute respiratory failure benefit from ICU support and can be investigated by BAL, knowing that BAL does not increase the need for mechanical ventilation. The prognosis of ICU support is usually better in autologous than in allogeneic HSCT recipients, and those with severe acute GVHD and under corticosteroids usually do not clearly benefit from ICU support. Guidelines should be adapted to new data, but, in general, the clinician should consider the individual's chance of survival and of return to an acceptable life before transferring the patient to an ICU. The patient and the family should be provided with reasonable estimations of prognosis before transfer; in addition, the likelihood of continuing life support should be considered regularly during the course of treatment. Patients who respond to noninvasive mechanical ventilation have a better prognosis than those who require invasive mechanical ventilation.

Summary

Pneumonia is a principal determinant of posttransplantation survival. Because of the predictable timing of some infections after most types of transplantations, some prophylactic regimens have been instituted, with far-reaching benefits. However, any change in the transplant procedure, conditioning, or immunosuppressive regimen may affect the incidence and cause of infectious pneumonia. Additionally, new pathogens are emerging, and familiar pathogens are becoming more resistant. A high level of suspicion when pneumonia occurs in a transplant recipient, and vigilance in diagnosing and treating it, will continue to be required to prevent an increase in mortality from pneumonia. The development of indirect diagnostic procedures is essential in the evaluation of pneumonia, but their clinical pertinence must be established in large prospective studies, and, until now, they do not replace direct investigation of the lung, mainly by BAL.
use criterion::{criterion_group, criterion_main, Criterion};
use tokio::runtime::Runtime;

// Fetch the hash of the block at `height`, then the block itself.
async fn fetch_block(rest: &bitcoin_rest::Context, height: u32) {
    let blockhash = rest.blockhashbyheight(height).await.unwrap();
    // Await the request; dropping the future without awaiting it would
    // mean the block is never actually fetched.
    let _block = rest.block(&blockhash).await;
}

fn bench(c: &mut Criterion) {
    let rt = Runtime::new().unwrap();
    let rest = bitcoin_rest::new(bitcoin_rest::DEFAULT_ENDPOINT);

    c.bench_function("Fetch block at height 1", |b| b.iter(|| {
        rt.block_on(async {
            fetch_block(&rest, 1).await;
        });
    }));

    c.bench_function("Fetch block at height 500000", |b| b.iter(|| {
        rt.block_on(async {
            fetch_block(&rest, 500_000).await;
        });
    }));
}

criterion_group!(benches, bench);
criterion_main!(benches);
/**
 * Handles all movement of the chassis.
 */
public class MovementManager extends FeatureManager {
	public DcMotor frontLeft;
	public DcMotor frontRight;
	public DcMotor backLeft;
	public DcMotor backRight;

	private PointNd currentLocation;
	private TrigCache cache;
	private ElapsedTime timer;
	private double lastRecordTime;
	boolean driveStarted;
	public int iters = 0;

	private static float speed = 1.0f;
	private int avg;
	private LinearOpMode opMode;

	/**
	 * Create a MovementManager with four motors.
	 * @param fl Front Left motor
	 * @param fr Front Right motor
	 * @param br Back Right motor
	 * @param bl Back Left motor
	 */
	public MovementManager(DcMotor fl, DcMotor fr, DcMotor br, DcMotor bl) {
		this.frontLeft = fl;
		this.frontRight = fr;
		this.backRight = br;
		this.backLeft = bl;
		this.cache = new TrigCache();
		this.currentLocation = new PointNd(0f, 0f, 0f);
		this.timer = new ElapsedTime();
		this.lastRecordTime = timer.milliseconds();
	}

	public MovementManager(DcMotor fl, DcMotor fr, DcMotor br, DcMotor bl, LinearOpMode _opMode) {
		this.frontLeft = fl;
		this.frontRight = fr;
		this.backRight = br;
		this.backLeft = bl;
		this.cache = new TrigCache();
		this.currentLocation = new PointNd(0f, 0f, 0f);
		this.timer = new ElapsedTime();
		this.lastRecordTime = timer.milliseconds();
		this.opMode = _opMode;
	}

	public MovementManager() {
	}

	/**
	 * Set raw motor powers. Likely not needed to be used publicly.
	 * @param fl Front left motor power
	 * @param fr Front right motor power
	 * @param br Back right motor power
	 * @param bl Back left motor power
	 */
	public void driveRaw(float fl, float fr, float br, float bl) {
		frontLeft.setPower(fl * speed);
		backRight.setPower(br * speed); // Austin will define "speed" later
		frontRight.setPower(fr * speed);
		backLeft.setPower(bl * speed);
	}

	/**
	 * Drives based on inputs.
	 * @param horizontalPower Horizontal input
	 * @param verticalPower Vertical input
	 * @param rotationalPower Rotational input
	 */
	public void driveOmni(float horizontalPower, float verticalPower, float rotationalPower) {
		float[] sum = PaulMath.omniCalc(verticalPower, horizontalPower, rotationalPower);

		// Record the current position by integrating the commanded powers
		// over the time since the last update, then advance the odometry clock.
		double timeSinceLastRecordTime = timer.milliseconds() - lastRecordTime;
		lastRecordTime = timer.milliseconds();

		float diffHor = horizontalPower * (float) timeSinceLastRecordTime;
		float diffVer = verticalPower * (float) timeSinceLastRecordTime;
		float diffRot = rotationalPower * (float) timeSinceLastRecordTime;

		currentLocation.transform(new PointNd(diffHor, diffVer, diffRot));

		/* makes it go vroom */
		driveRaw(sum[0], sum[1], sum[2], sum[3]);
	}

	/**
	 * Alias for driveOmni() to make code prettier.
	 * @param powers Array with 3 items: horizontal power, vertical power, rotational power.
	 * @see #driveOmni(float, float, float)
	 */
	public void driveOmni(float[] powers) {
		this.driveOmni(powers[0], powers[1], powers[2]);
	}

	/**
	 * @param move The direction to move in.
	 */
	public void driveOmni(MovementOrder move) {
		this.driveOmni(move.getHor(), move.getVer(), move.getRot());
	}

	/**
	 * Slows down the robot via a rational function (the recorded power
	 * divided by an increasing counter). Runs only if all inputs are 0.
	 * @param horizontalPower Horizontal input
	 * @param verticalPower Vertical input
	 * @param rotationalPower Rotational input
	 */
	public void moveDriftingRational(float horizontalPower, float verticalPower, float rotationalPower) {
		float stopFl = (float) frontLeft.getPower();
		float stopFr = (float) frontRight.getPower();
		float stopBl = (float) backLeft.getPower();
		float stopBr = (float) backRight.getPower();
		int counter = 1;

		if (horizontalPower == 0 && verticalPower == 0 && rotationalPower == 0) {
			// Keep shrinking the powers until every motor is effectively stopped.
			while (Math.abs(frontLeft.getPower()) > 0.0001
					|| Math.abs(frontRight.getPower()) > 0.0001
					|| Math.abs(backLeft.getPower()) > 0.0001
					|| Math.abs(backRight.getPower()) > 0.0001) {
				frontLeft.setPower(stopFl / counter);
				frontRight.setPower(stopFr / counter);
				backLeft.setPower(stopBl / counter);
				backRight.setPower(stopBr / counter);
				counter++;
			}
			frontLeft.setPower(0);
			frontRight.setPower(0);
			backLeft.setPower(0);
			backRight.setPower(0);
		}
	}

	/**
	 * Activates if all inputs are 0: records the motor values, then decreases
	 * them with an exponential function.
	 * @param horizontalPower Horizontal input
	 * @param verticalPower Vertical input
	 * @param rotationalPower Rotational input
	 */
	public void moveDriftingExponential(float horizontalPower, float verticalPower, float rotationalPower) {
		float stopFl = (float) frontLeft.getPower();
		float stopFr = (float) frontRight.getPower();
		float stopBl = (float) backLeft.getPower();
		float stopBr = (float) backRight.getPower();
		int counter = 1;

		if (horizontalPower == 0 && verticalPower == 0 && rotationalPower == 0) {
			while (Math.abs(frontLeft.getPower()) > 0.0001
					|| Math.abs(frontRight.getPower()) > 0.0001
					|| Math.abs(backLeft.getPower()) > 0.0001
					|| Math.abs(backRight.getPower()) > 0.0001) {
				// pow() on a negative base would flip sign every iteration,
				// so decay the magnitude and reapply the original sign.
				frontLeft.setPower(Math.signum(stopFl) * Math.pow(Math.abs(stopFl), counter));
				frontRight.setPower(Math.signum(stopFr) * Math.pow(Math.abs(stopFr), counter));
				backLeft.setPower(Math.signum(stopBl) * Math.pow(Math.abs(stopBl), counter));
				backRight.setPower(Math.signum(stopBr) * Math.pow(Math.abs(stopBr), counter));
				counter++;
			}
			frontLeft.setPower(0);
			frontRight.setPower(0);
			backLeft.setPower(0);
			backRight.setPower(0);
		}
	}

	/**
	 * Builds a one-entry map from the current time to the computed motor powers.
	 * @return a map where the key is the time (in ms, as a string) and the value is an array of motor powers
	 * @param horizontalPower Horizontal input
	 * @param verticalPower Vertical input
	 * @param rotationalPower Rotational input
	 */
	public HashMap<String, float[]> powersHashMap(float horizontalPower, float verticalPower, float rotationalPower) {
		// Note: the argument order here differs from driveOmni(), which calls
		// omniCalc(vertical, horizontal, rotational).
		float[] sum = PaulMath.omniCalc(horizontalPower, verticalPower, rotationalPower);
		HashMap<String, float[]> powersHashMap = new HashMap<String, float[]>();
		powersHashMap.put(timer.milliseconds() + "", sum);
		return powersHashMap;
	}

	/**
	 * Looks up the sum array from 100 milliseconds ago, averages it with the
	 * current sum array, and applies the averaged sums to the motor powers.
	 * @param horizontalPower Horizontal input
	 * @param verticalPower Vertical input
	 * @param rotationalPower Rotational input
	 */
	public void moveDriftingAverage(float horizontalPower, float verticalPower, float rotationalPower) {
		HashMap<String, float[]> powersHashMap = powersHashMap(horizontalPower, verticalPower, rotationalPower);
		float[] currentSum = PaulMath.omniCalc(horizontalPower, verticalPower, rotationalPower);
		// The map is keyed by an exact millisecond timestamp, so a lookup
		// 100 ms in the past will usually miss; fall back to the current
		// powers to avoid a NullPointerException.
		float[] pastSum = powersHashMap.get(timer.milliseconds() - 100 + "");
		if (pastSum == null) {
			pastSum = currentSum;
		}
		for (int i = 0; i < 4; i++) {
			currentSum[i] = (currentSum[i] + pastSum[i]) / 2;
		}
		driveRaw(currentSum[0], currentSum[1], currentSum[2], currentSum[3]);
	}

	public void resetEncoders(DcMotor motor) {
		motor.setMode(DcMotor.RunMode.STOP_AND_RESET_ENCODER);
	}

	public void resetEncoderMode(DcMotor motor) {
		// motor.setMode(DcMotor.RunMode.STOP_AND_RESET_ENCODER);
		motor.setMode(DcMotor.RunMode.RUN_WITHOUT_ENCODER);
	}

	public void resetAllEncoders() {
		this.resetEncoders(frontLeft);
		this.resetEncoders(frontRight);
		this.resetEncoders(backLeft);
		this.resetEncoders(backRight);
	}

	public void setAllEncoderModes(DcMotor.RunMode mode) {
		frontLeft.setMode(mode);
		frontRight.setMode(mode);
		backLeft.setMode(mode);
		backRight.setMode(mode);
	}

	public void resetAllEncoderModes() {
		this.resetEncoderMode(frontLeft);
		this.resetEncoderMode(frontRight);
		this.resetEncoderMode(backLeft);
		this.resetEncoderMode(backRight);
	}

	private static float speedAuto = 0.5f;

	public void driveAuto(float fl, float fr, float br, float bl) {
		frontLeft.setPower(fl * speedAuto);
		backRight.setPower(br * speedAuto);
		frontRight.setPower(fr * speedAuto);
		backLeft.setPower(bl * speedAuto);
	}

	// Note: these two helpers intentionally cross-assign (the "backLeft"
	// helper drives backRight and vice versa), presumably to match how the
	// motors are wired on the robot.
	public void backLeftSetTargetPosition(int position) {
		backRight.setTargetPosition(position);
	}

	public void backRightSetTargetPosition(int position) {
		backLeft.setTargetPosition(position);
	}

	public boolean driveVertical(float power, float rotation, LinearOpMode logger) {
		logger.telemetry.addData("mvm encoder drive state init", "0");
		logger.telemetry.addData("mvm encoder drive state drive", "0");
		logger.telemetry.addData("mvm encoder drive state stop", "0");

		if (!driveStarted) {
			this.resetAllEncoders();
			// Cast after multiplying so fractional rotations are not
			// truncated to whole rotations first.
			frontLeft.setTargetPosition((int) (rotation * TICK_PER_ROT));
			frontRight.setTargetPosition(-(int) (rotation * TICK_PER_ROT));
			backRightSetTargetPosition(-(int) (rotation * TICK_PER_ROT));
			backLeftSetTargetPosition((int) (rotation * TICK_PER_ROT));
			this.resetAllEncoderModes();
			driveStarted = true;
			logger.telemetry.addData("mvm encoder drive state init", "init" + (System.currentTimeMillis() / 100000));
		} else if (Math.abs(frontLeft.getCurrentPosition()) < Math.abs(frontLeft.getTargetPosition())
				|| Math.abs(frontRight.getCurrentPosition()) < Math.abs(frontRight.getTargetPosition())
				|| Math.abs(backRight.getCurrentPosition()) < Math.abs(backRight.getTargetPosition())
				|| Math.abs(backLeft.getCurrentPosition()) < Math.abs(backLeft.getTargetPosition())) {
			this.driveAuto(power, power, power, power);
			logger.telemetry.addData("mvm encoder drive state drive", "drive" + (System.currentTimeMillis() / 100000));
			// Waiting for the motors to finish
		} else {
			driveAuto(0f, 0f, 0f, 0f);
			logger.telemetry.addData("mvm encoder drive state stop", "stop" + (System.currentTimeMillis() / 100000));
			driveStarted = false;
			return false;
		}
		return true;
	}

	public boolean driveHorizontal(float power, float rotation, BLEncoder logger) {
		logger.telemetry.addData("mvm encoder drive state init", "0");
		logger.telemetry.addData("mvm encoder drive state drive", "0");
		logger.telemetry.addData("mvm encoder drive state stop", "0");

		if (!driveStarted) {
			this.resetAllEncoders();
			frontLeft.setTargetPosition((int) (rotation * TICK_PER_ROT));
			frontRight.setTargetPosition((int) (rotation * TICK_PER_ROT));
			backRightSetTargetPosition(-(int) (rotation * TICK_PER_ROT));
			backLeftSetTargetPosition(-(int) (rotation * TICK_PER_ROT));
			frontLeft.setDirection(DcMotor.Direction.FORWARD);
			frontRight.setDirection(DcMotor.Direction.FORWARD);
			backRight.setDirection(DcMotor.Direction.REVERSE);
			backLeft.setDirection(DcMotor.Direction.REVERSE);
			this.resetAllEncoderModes();
			driveStarted = true;
			logger.telemetry.addData("mvm encoder drive state init", "init" + (System.currentTimeMillis() / 100000));
		} else if (Math.abs(frontLeft.getCurrentPosition()) < Math.abs(frontLeft.getTargetPosition())
				|| Math.abs(frontRight.getCurrentPosition()) < Math.abs(frontRight.getTargetPosition())
				|| Math.abs(backRight.getCurrentPosition()) < Math.abs(backRight.getTargetPosition())
				|| Math.abs(backLeft.getCurrentPosition()) < Math.abs(backLeft.getTargetPosition())) {
			this.driveAuto(power, power, power, power);
			logger.telemetry.addData("mvm encoder drive state drive", "drive" + (System.currentTimeMillis() / 100000));
			// Waiting for the motors to finish
		} else {
			driveAuto(0f, 0f, 0f, 0f);
			logger.telemetry.addData("mvm encoder drive state stop", "stop" + (System.currentTimeMillis() / 100000));
			driveStarted = false;
			return false;
		}
		return true;
	}

	public void driveWhileHorizontal(float power, float rotation, LinearOpMode logger) {
		logger.telemetry.addData("mvm encoder drive state init", "0");
		logger.telemetry.addData("mvm encoder drive state drive", "0");
		logger.telemetry.addData("mvm encoder drive state stop", "0");

		this.resetAllEncoders();
		frontLeft.setTargetPosition((int) (rotation * TICK_PER_ROT));
		frontRight.setTargetPosition((int) (rotation * TICK_PER_ROT));
		backRightSetTargetPosition(-(int) (rotation * TICK_PER_ROT));
		backLeftSetTargetPosition(-(int) (rotation * TICK_PER_ROT));
		this.resetAllEncoderModes();
		driveStarted = true;
		// logger.telemetry.addData("mvm encoder drive state init", "init" + (System.currentTimeMillis() / 100000));

		while (Math.abs(frontLeft.getCurrentPosition()) < Math.abs(frontLeft.getTargetPosition())
				&& Math.abs(frontRight.getCurrentPosition()) < Math.abs(frontRight.getTargetPosition())
				&& Math.abs(backRight.getCurrentPosition()) < Math.abs(backRight.getTargetPosition())
				&& Math.abs(backLeft.getCurrentPosition()) < Math.abs(backLeft.getTargetPosition())) {
			this.driveAuto(power, power, -power, -power);
			iters++;
			// logger.telemetry.addData("mvm encoder drive state drive", "drive" + (System.currentTimeMillis() / 100000));
			// Waiting for the motors to finish
		}
		driveAuto(0f, 0f, 0f, 0f);
		// logger.telemetry.addData("mvm encoder drive state stop", "stop" + (System.currentTimeMillis() / 100000));
		driveStarted = false;
	}

	public void driveWhileVertical(float power, float rotation, LinearOpMode logger) {
		logger.telemetry.addData("mvm encoder drive state init", "0");
		logger.telemetry.addData("mvm encoder drive state drive", "0");
		logger.telemetry.addData("mvm encoder drive state stop", "0");

		this.resetAllEncoders();
		frontLeft.setTargetPosition((int) (rotation * TICK_PER_ROT));
		frontRight.setTargetPosition(-(int) (rotation * TICK_PER_ROT));
		backRightSetTargetPosition(-(int) (rotation * TICK_PER_ROT));
		backLeftSetTargetPosition((int) (rotation * TICK_PER_ROT));
		this.resetAllEncoderModes();
		driveStarted = true;
		logger.telemetry.addData("mvm encoder drive state init", "init" + (System.currentTimeMillis() / 100000));

		while (Math.abs(frontLeft.getCurrentPosition()) < Math.abs(frontLeft.getTargetPosition())
				&& Math.abs(frontRight.getCurrentPosition()) < Math.abs(frontRight.getTargetPosition())
				&& Math.abs(backRight.getCurrentPosition()) < Math.abs(backRight.getTargetPosition())
				&& Math.abs(backLeft.getCurrentPosition()) < Math.abs(backLeft.getTargetPosition())) {
			this.driveAuto(power, -power, power, -power);
			logger.telemetry.addData("mvm encoder drive state drive", "drive" + (System.currentTimeMillis() / 100000));
			// Waiting for the motors to finish
		}
		driveAuto(0f, 0f, 0f, 0f);
		logger.telemetry.addData("mvm encoder drive state stop", "stop" + (System.currentTimeMillis() / 100000));
		driveStarted = false;
	}

	public void driveWhileVerticalPid(float power, float rotation, OpMode logger) {
		logger.telemetry.addData("mvm encoder drive state init", "0");
		logger.telemetry.addData("mvm encoder drive state drive", "0");
		logger.telemetry.addData("mvm encoder drive state stop", "0");

		this.resetAllEncoders();
		frontLeft.setTargetPosition((int) (rotation * TICK_PER_ROT));
		frontRight.setTargetPosition((int) (rotation * TICK_PER_ROT));
		backRightSetTargetPosition((int) (rotation * TICK_PER_ROT));
		backLeftSetTargetPosition((int) (rotation * TICK_PER_ROT));
		this.resetAllEncoderModes();
		driveStarted = true;
		logger.telemetry.addData("mvm encoder drive state init", "init" + (System.currentTimeMillis() / 100000));

		while (Math.abs(frontLeft.getCurrentPosition()) < Math.abs(frontLeft.getTargetPosition())
				&& Math.abs(frontRight.getCurrentPosition()) < Math.abs(frontRight.getTargetPosition())
				&& Math.abs(backRight.getCurrentPosition()) < Math.abs(backRight.getTargetPosition())
				&& Math.abs(backLeft.getCurrentPosition()) < Math.abs(backLeft.getTargetPosition())) {
			// Proportional correction toward the average encoder count keeps
			// the four wheels in step; SPEED and P are tuning constants,
			// presumably defined in FeatureManager like TICK_PER_ROT.
			avg = (frontLeft.getCurrentPosition() + frontRight.getCurrentPosition()
					+ backRight.getCurrentPosition() + backLeft.getCurrentPosition()) / 4;
			frontLeft.setPower(SPEED + (avg - frontLeft.getCurrentPosition()) * P);
			frontRight.setPower(-(SPEED + (avg - frontRight.getCurrentPosition()) * P));
			backRight.setPower(SPEED + (avg - backRight.getCurrentPosition()) * P);
			backLeft.setPower(-(SPEED + (avg - backLeft.getCurrentPosition()) * P));
			logger.telemetry.addData("mvm encoder drive state drive", "drive" + (System.currentTimeMillis() / 100000));
			// Waiting for the motors to finish
		}
		driveAuto(0f, 0f, 0f, 0f);
		logger.telemetry.addData("mvm encoder drive state stop", "stop" + (System.currentTimeMillis() / 100000));
		driveStarted = false;
	}

	public void driveWhileHorizontalPid(float power, float rotation, OpMode logger) {
		logger.telemetry.addData("mvm encoder drive state init", "0");
		logger.telemetry.addData("mvm encoder drive state drive", "0");
		logger.telemetry.addData("mvm encoder drive state stop", "0");

		this.resetAllEncoders();
		frontLeft.setTargetPosition((int) (rotation * TICK_PER_ROT));
		frontRight.setTargetPosition((int) (rotation * TICK_PER_ROT));
		backRightSetTargetPosition((int) (rotation * TICK_PER_ROT));
		backLeftSetTargetPosition((int) (rotation * TICK_PER_ROT));
		this.resetAllEncoderModes();
		driveStarted = true;
		logger.telemetry.addData("mvm encoder drive state init", "init" + (System.currentTimeMillis() / 100000));

		while (Math.abs(frontLeft.getCurrentPosition()) < Math.abs(frontLeft.getTargetPosition())
				&& Math.abs(frontRight.getCurrentPosition()) < Math.abs(frontRight.getTargetPosition())
				&& Math.abs(backRight.getCurrentPosition()) < Math.abs(backRight.getTargetPosition())
				&& Math.abs(backLeft.getCurrentPosition()) < Math.abs(backLeft.getTargetPosition())) {
			avg = (frontLeft.getCurrentPosition() + frontRight.getCurrentPosition()
					+ backRight.getCurrentPosition() + backLeft.getCurrentPosition()) / 4;
			frontLeft.setPower(SPEED + (avg - frontLeft.getCurrentPosition()) * P);
			frontRight.setPower(SPEED + (avg - frontRight.getCurrentPosition()) * P);
			backRight.setPower(-(SPEED + (avg - backRight.getCurrentPosition()) * P));
			backLeft.setPower(-(SPEED + (avg - backLeft.getCurrentPosition()) * P));
			logger.telemetry.addData("mvm encoder drive state drive", "drive" + (System.currentTimeMillis() / 100000));
			// Waiting for the motors to finish
		}
		driveAuto(0f, 0f, 0f, 0f);
		logger.telemetry.addData("mvm encoder drive state stop", "stop" + (System.currentTimeMillis() / 100000));
		driveStarted = false;
	}

	public void resetBool() {
		driveStarted = false;
	}

	ElapsedTime moveTimer;

	public void encoderDrive(double speed, double fl, double fr, double bl, double br,
			double timeoutS, LinearOpMode opMode) {
		int newFrontLeftTarget;
		int newFrontRightTarget;
		int newBackRightTarget;
		int newBackLeftTarget;

		// Ensure that the opmode is still active
		if (opMode.opModeIsActive()) {
			// Determine each motor's new target from its own current position
			// and its own rotation argument, and pass it to the motor controller.
			newFrontLeftTarget = frontLeft.getCurrentPosition() + (int) (fl * TICK_PER_ROT);
			newFrontRightTarget = frontRight.getCurrentPosition() + (int) (fr * TICK_PER_ROT);
			newBackLeftTarget = backLeft.getCurrentPosition() + (int) (bl * TICK_PER_ROT);
			newBackRightTarget = backRight.getCurrentPosition() + (int) (br * TICK_PER_ROT);

			frontLeft.setTargetPosition(newFrontLeftTarget);
			frontRight.setTargetPosition(newFrontRightTarget);
			backLeft.setTargetPosition(newBackLeftTarget);
			backRight.setTargetPosition(newBackRightTarget);

			// Turn on RUN_TO_POSITION so isBusy() tracks the targets set above.
			frontLeft.setMode(DcMotor.RunMode.RUN_TO_POSITION);
			frontRight.setMode(DcMotor.RunMode.RUN_TO_POSITION);
			backRight.setMode(DcMotor.RunMode.RUN_TO_POSITION);
			backLeft.setMode(DcMotor.RunMode.RUN_TO_POSITION);

			// reset the timeout time and start motion.
			moveTimer = new ElapsedTime();
			frontLeft.setPower(Math.abs(speed));
			frontRight.setPower(Math.abs(speed));
			backLeft.setPower(Math.abs(speed));
			backRight.setPower(Math.abs(speed));

			// keep looping while we are still active, and there is time left, and all motors are running.
			// Note: We use (isBusy() && isBusy()) in the loop test, which means that when EITHER motor hits
			// its target position, the motion will stop. This is "safer" in the event that the robot will
			// always end the motion as soon as possible.
			// However, if you require that BOTH motors have finished their moves before the robot continues
			// onto the next step, use (isBusy() || isBusy()) in the loop test.
			// timeoutS is in seconds, so compare against seconds(), not milliseconds().
			while (opMode.opModeIsActive()
					&& (moveTimer.seconds() < timeoutS)
					&& (frontLeft.isBusy() && frontRight.isBusy() && backRight.isBusy() && backLeft.isBusy())) {
				// Display it for the driver.
				opMode.telemetry.addData("Path1", "Running to %7d :%7d :%7d :%7d",
						newFrontLeftTarget, newFrontRightTarget, newBackLeftTarget, newBackRightTarget);
				opMode.telemetry.addData("Path2", "Running at %7d :%7d :%7d :%7d",
						frontLeft.getCurrentPosition(), frontRight.getCurrentPosition(),
						backLeft.getCurrentPosition(), backRight.getCurrentPosition());
				opMode.telemetry.update();
			}

			// Stop all motion;
			frontLeft.setPower(0);
			frontRight.setPower(0);
			backLeft.setPower(0);
			backRight.setPower(0);

			// Turn off RUN_TO_POSITION
			frontLeft.setMode(DcMotor.RunMode.RUN_USING_ENCODER);
			frontRight.setMode(DcMotor.RunMode.RUN_USING_ENCODER);
			backRight.setMode(DcMotor.RunMode.RUN_USING_ENCODER);
			backLeft.setMode(DcMotor.RunMode.RUN_USING_ENCODER);

			// sleep(250);   // optional pause after each move
		}
	}

	public void setSpeed(float speed) {
		MovementManager.speed = speed; // `speed` is static, shared by all instances
	}

	public float getSpeed() {
		return speed;
	}

	public float getIterations() {
		return iters;
	}
}
Unnecessarily strict rules for employing paid duty police officers are costing Toronto taxpayers as much as $2 million each year, a city audit has found. The official findings won’t be released for weeks, but a draft copy obtained by the Star recommends reviewing some “debatable” permit criteria, particularly for road work.

“When construction takes place close to a signalized intersection, there are certainly situations where a paid duty officer would be needed to direct traffic,” the report says. “However, there are also situations where the use of warning signs, barriers and other devices … would be sufficient.”

The auditor’s findings mirror those of a December 2009 Star investigation that found private companies, taxpayers and community groups were forced to waste millions of dollars hiring paid duty officers for jobs that could be done by crossing guards or even pylons. “If you want to find ‘gravy,’ look there,” former city budget chief Shelley Carroll has said of the paid duty program.

After the Star stories appeared, the Toronto Police Services Board asked the city auditor to review the program. The auditor’s report also found that, “compared with other large police services, Toronto’s yearly paid duty hours and costs are disproportionately high.”

In 2009, Toronto police worked 40,919 paid duty assignments, earning $65 an hour — nearly twice the rate of a regular constable. In York, officers earn $57 an hour, Ottawa $58, Montreal $42 (time and a half their hourly rate), and Peel $64. That year, the Toronto police paid-duty program took in $29 million. Of that amount, the city’s own divisions, agencies, boards, commissions and corporations were responsible for $7.8 million, just over one-quarter of the total. City of Toronto entities, including transportation services, Toronto Water, the TTC and Toronto Hydro, accounted for 9 per cent of all paid-duty assignments. The Ontario government was responsible for 12 per cent. Construction companies (19 per cent) and utility companies (18 per cent) were the two largest clients in 2009.

In one of the audit’s more contentious findings, it notes there is insufficient oversight of internal paid duty policies. Working excessive paid duty shifts can interfere with an officer’s regular duties. Officers are not permitted to work more than 12 hours in a 24-hour period, and paid duty shifts are not permitted to overlap with regular shifts. While the auditor found the vast majority of officers followed the rules, the review uncovered some egregious violations. In one case, an officer was twice late for court because of overlapping assignments. The individual was paid for a court appearance as well as the paid duty shift. In another, an officer worked 19 hours of paid duty in a 24-hour period.

The report recommends to Chief Bill Blair that he strengthen internal policies, oversight and discipline. Police association president Mike McCormack said the service already disciplines officers found to be violating paid duty policies. As for the city’s regulations, he said, “That’s up to them.”

The auditor’s report will probably reach city council by spring, just as councillors will be grappling with a projected $774 million budget shortfall.
At a time when Toronto is cutting bus routes and closing libraries, the city can’t afford not to overhaul the program, said Councillor Adam Vaughan, a former police board member. “I don’t think the city has ever had the courage to look at it, because it would mean literally taking money off the table for police officers.” Some, Vaughan added, have come to rely on paid duty as part of their salary. “Some officers take advantage of it and work very hard and they’re doing it by the book, (but) if there are alternative ways that can provide safety, we need to do it.”

Contacted Thursday, auditor-general Jeffrey Griffiths refused to comment on the report, noting it was only a draft and that things could still change.

Paid duty by the numbers

Figures from a draft report of the auditor’s findings on the paid duty police program:
- $29 million: Income from police paid-duty fees in 2009
- 27%: Percentage of those fees paid by the city’s own agencies, boards, commissions and corporations
- 56%: Percentage of all paid duty assignments devoted to traffic control, which the auditor found may be excessive
- $2 million: How much the city could save by making criteria for permits more effective
- 40,919: The number of paid duty assignments Toronto officers worked in 2009
- $65: The hourly rate Toronto officers make on paid duty, nearly double a typical constable’s rate
(Image caption: Mahmoud Abbas made the announcement at a meeting in the West Bank city of Ramallah)

The Palestinian Authority's unity government will resign, President Mahmoud Abbas has said. He told his Fatah faction that the cabinet had to be dissolved because the rival Hamas movement would not allow it to operate in Gaza, which it dominates. But a Hamas spokesman said it rejected any unilateral dissolution.

The technocratic cabinet, comprising 17 independent ministers, was sworn in a year ago to try to end a long-running rift between Fatah and Hamas. The two factions had governed separately since Hamas, which won parliamentary elections in 2006, ousted Fatah from Gaza in 2007, leaving the PA governing just parts of the West Bank.

'No consultation'

Although Fatah and Hamas formally backed the unity government, deep divisions remained, resulting in political paralysis, the BBC's Yolande Knell reports.

(Image caption: Hamas dominates the Gaza Strip, where its own security forces operate)

Fatah accuses Hamas of trying to create an independent Islamic state in Gaza; Hamas provokes Fatah by saying it avoids new elections as it fears losing them, our correspondent adds. Israel has insisted it will not deal with a government backed by Hamas, which is sworn to its destruction.

On Wednesday night, President Abbas told members of Fatah's Revolutionary Council that the government would have to be dissolved within 24 hours "because Hamas didn't let it work in Gaza". However, cabinet spokesman Ihab Bseiso said he was not aware of such a decision. "We had a meeting today and we didn't discuss this issue," he told the AFP news agency.

Hamas also expressed surprise at the president's announcement. "Hamas rejects any one-sided change in the government without the agreement of all parties," spokesman Sami Abu Zuhri told AFP. "No-one told us anything about any decision to change and no one consulted with us about any change in the unity government. Fatah acted on its own in all regards."
// incrementHartCommErrorsCounter increments the error counter by error name.
// When the error is a set of communications-error summary flags, each set
// flag is counted separately.
func incrementHartCommErrorsCounter(err error) {
	if e, ok := err.(status.CommunicationsErrorSummaryFlags); ok {
		for i := 0; i < 8; i++ {
			mask := status.CommunicationsErrorSummaryFlags(1 << i)
			if e.HasFlag(mask) {
				hartCommErrorsCounter.WithLabelValues(mask.Error()).Inc()
			}
		}
		return
	}
	hartCommErrorsCounter.WithLabelValues(err.Error()).Inc()
}
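The bit-mask decomposition above can be illustrated in isolation. The following is a minimal, self-contained sketch; the `commFlags` type, its methods, and the printed output are hypothetical stand-ins for `status.CommunicationsErrorSummaryFlags` and the Prometheus counter, not the real API:

package main

import "fmt"

// commFlags mimics a bit-flag error summary: each set bit is one error.
type commFlags uint8

// HasFlag reports whether the given single-bit mask is set.
func (f commFlags) HasFlag(mask commFlags) bool { return f&mask != 0 }

// Error returns a per-bit label; the real type would map bits to names.
func (f commFlags) Error() string {
	return fmt.Sprintf("flag-bit-%08b", uint8(f))
}

func main() {
	e := commFlags(0b00000101) // two error bits set

	// Decompose the summary into individual flags, as the counter code does.
	for i := 0; i < 8; i++ {
		mask := commFlags(1 << i)
		if e.HasFlag(mask) {
			fmt.Println("would increment counter for:", mask.Error())
		}
	}
}

Counting each flag under its own label keeps the metric cardinality bounded (at most eight labels) while still distinguishing which communication errors actually occurred.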
/**
 * Executes the config command using the provided options.
 *
 * @param cli the command-line interface the command runs in (provides the GeoGit instance and console)
 * @see org.geogit.cli.AbstractCommand#runInternal(org.geogit.cli.GeogitCLI)
 */
@Override
public void runInternal(GeogitCLI cli) throws Exception {
    GeoGIT geogit = cli.getGeogit();
    if (null == geogit) {
        geogit = cli.newGeoGIT();
    }

    try {
        String name = null;
        String value = null;
        if (nameValuePair != null && !nameValuePair.isEmpty()) {
            name = nameValuePair.get(0);
            value = buildValueString();
        }

        ConfigAction action = resolveConfigAction();

        if (action == ConfigAction.CONFIG_NO_ACTION) {
            printUsage();
            throw new CommandFailedException();
        }

        if (global && local) {
            printUsage();
            throw new CommandFailedException();
        }

        ConfigScope scope = ConfigScope.DEFAULT;
        if (global) {
            scope = ConfigScope.GLOBAL;
        } else if (local) {
            scope = ConfigScope.LOCAL;
        }

        final Optional<Map<String, String>> commandResult = geogit.command(ConfigOp.class)
                .setScope(scope).setAction(action).setName(name).setValue(value).call();

        if (commandResult.isPresent()) {
            switch (action) {
            case CONFIG_GET: {
                cli.getConsole().println(commandResult.get().get(name));
                break;
            }
            case CONFIG_LIST: {
                Iterator<Map.Entry<String, String>> it = commandResult.get().entrySet()
                        .iterator();
                while (it.hasNext()) {
                    Map.Entry<String, String> pairs = it.next();
                    cli.getConsole().println(pairs.getKey() + "=" + pairs.getValue());
                }
                break;
            }
            default:
                break;
            }
        }
    } catch (ConfigException e) {
        // Map the ConfigOp status codes onto user-facing exceptions.
        switch (e.statusCode) {
        case INVALID_LOCATION:
            throw new IllegalStateException("The config location is invalid", e);
        case CANNOT_WRITE:
            throw new IllegalStateException("Cannot write to the config", e);
        case SECTION_OR_NAME_NOT_PROVIDED:
            throw new IllegalArgumentException("No section or name was provided", e);
        case SECTION_OR_KEY_INVALID:
            throw new IllegalArgumentException("The section or key is invalid", e);
        case OPTION_DOES_NOT_EXIST:
            throw new IllegalArgumentException("Tried to unset an option that does not exist", e);
        case MULTIPLE_OPTIONS_MATCH:
            throw new IllegalArgumentException(
                    "Tried to unset/set an option for which multiple lines match", e);
        case INVALID_REGEXP:
            throw new IllegalArgumentException("Tried to use an invalid regexp", e);
        case USERHOME_NOT_SET:
            throw new IllegalArgumentException(
                    "Used --global option without $HOME being properly set", e);
        case TOO_MANY_ACTIONS:
            throw new IllegalArgumentException("Tried to use more than one action at a time", e);
        case MISSING_SECTION:
            throw new IllegalArgumentException(
                    "Could not find a section with the name provided", e);
        case TOO_MANY_ARGS:
            throw new IllegalArgumentException("Too many arguments provided.", e);
        }
    }
}
#include <bits/stdc++.h>
using namespace std;

int main() {
    long long n;
    while (cin >> n) {
        long long r[200];
        for (long long i = 1; i <= n; i++)
            cin >> r[i];

        long long cost = 0;
        bool finite = true;

        for (long long i = 1; i <= n - 1; i++) {
            // Adjacent types 2 and 3 (in either order) make the answer infinite.
            if ((r[i] == 2 && r[i + 1] == 3) || (r[i] == 3 && r[i + 1] == 2))
                finite = false;

            // Otherwise accumulate the fixed transition costs.
            if (r[i] == 1 && r[i + 1] == 2 && i - 1 >= 1 && r[i - 1] == 3) { cost += 2; continue; }
            if (r[i] == 1 && r[i + 1] == 2) { cost += 3; continue; }
            if (r[i] == 1 && r[i + 1] == 3) { cost += 4; continue; }
            if (r[i] == 2 && r[i + 1] == 1) { cost += 3; continue; }
            if (r[i] == 3 && r[i + 1] == 1) { cost += 4; continue; }
        }

        if (!finite) {
            cout << "Infinite" << endl;
        } else {
            cout << "Finite" << endl;
            cout << cost << endl;
        }
    }
    return 0;
}
Identity politics is coming in for a kicking at present, with many of the boots predictably attached to the feet of those who oppose the focus by the left on the struggles of minorities, women and the LGBT community. Identity politics is thus a convenient scapegoat for the mercurial rise of the populist right.

Its critics are apt to play a game of victim blaming. A woman who speaks up too forcefully against sexism, or a black man who decides the best way to fight back against police violence is to protest, are apparently overreaching themselves and should pipe down. We would apparently never be in this mess had they known their place.

Yet to paraphrase Orwell, there are problems with mainstream identity politics even if it is a bellicose section of the white and male right which happens to be pointing it out. The problem is not of course that which is usually identified by its critics: some people really do need diversity and feminism 'rubbed in their faces' – that thing frightened commentators seem to fear the most. No, the issue is the class-blind form that contemporary identity politics seems to take. For many of the liberals who espouse it, identity politics has become about tuning up elites and making them 'representative' rather than abolishing them altogether.

This was in many respects an entirely predictable outcome. From Che Guevara Zippo lighters to the 'pink pound', capitalism is incredibly good at co-opting things which start off as rebellions against it. A company with a highly paid boardroom that is diverse and 'representative' is certainly an improvement on the white, male and stale state of affairs which went before it. But it is hardly a threat to the status quo.

A new report from the Resolution Foundation on agency workers should make clear to those on the left that the elitist form which identity politics commonly takes is not enough. The report itself presents damning evidence that many of us are being shafted by the balance of employment power in Britain today. A full-time agency worker gets on average £430 a year less than a permanent member of staff for doing the same job. This is particularly troubling when the number of agency workers is set to reach one million by 2020. As well as taking a hit on pay, agency workers lose out on benefits like sick pay and parental leave and are easier for capricious bosses to sack.

With Theresa May's government apparently intent on turning Brexit Britain into a North Atlantic free market paradise, and with a Labour Party flagging badly in the polls, it seems inevitable that this race to the bottom in terms of employment practice will continue. Freedom, as interpreted by the Brexiteers, seems to mean little more than the freedom for someone else to toil away in precarious jobs for poverty wages with few rights.

Yet something else in the report stands out in terms of just who is being hit the hardest by the rise in agency work. Clue: it isn't white men. Women accounted for 85 per cent of the growth in temporary agency workers, while ethnic minorities were three times more likely to be agency workers than their white counterparts. This ought to bury the ubiquitous notion of a uniquely beleaguered 'white working class' man. Working class men are certainly suffering because of the forces aligned against their class. However, the same forces are being brought to bear even more fiercely on other groups.

The affluent liberal politics which comes cloaked in a snobbish disdain for the working class is a dead end.
But so is a politics of a working class defined exclusively by its masculinity and whiteness. As with old economistic strands of left-wing thought, identity politics is infuriating when it becomes overly deterministic. Wallet size, skin colour, gender and sexuality influence rather than determine a person's ideas about the world. But a far bigger problem is the tokenistic and elitist form which identity politics usually seems to take. Beyond opposition to racism, an Indian businessman from Manchester who heads up a chain of low-paying warehouses does not share the same interests as a black teenager from Peckham who toils away in them. In fact, this sort of class-blind identity politics, which talks knowingly of a homogenous 'minority community' and could not comprehend why working class women failed to back Hillary Clinton, does a disservice to many of the people it claims to champion. Those who are genuinely interested in the fate of the working class cannot restrict their concern to a white and male subsection of it. But nor can the purveyors of identity politics assume that equality for women and ethnic minorities means little more than a diverse FTSE 100. As today's damning report makes clear, many of those who must confront racism and sexism in their everyday lives are also the prisoners of their class. James Bloodworth is former editor of Left Foot Forward, one of the UK's top political blogs, and the author of The Myth of Meritocracy.
/*
 * Test-and-set primitive: atomically stores `value` into `*lock` and
 * returns the previous contents of `*lock` (semantics inferred from the
 * name and signature; no definition accompanies this prototype).
 */
unsigned int atomic_testandset(unsigned int value, unsigned int *lock);
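Since only the prototype is given, the following is a minimal sketch of one plausible implementation using the GCC/Clang `__atomic` builtins, plus a trivial spinlock built on top of it. The exchange-based semantics are an assumption inferred from the name; the real function may differ:

/* Plausible implementation sketch: atomically swap `value` into *lock
 * and return the previous contents. Assumed semantics, not the original. */
unsigned int atomic_testandset(unsigned int value, unsigned int *lock)
{
    return __atomic_exchange_n(lock, value, __ATOMIC_SEQ_CST);
}

/* Example use: a minimal spinlock. */
void spin_lock(unsigned int *lock)
{
    while (atomic_testandset(1, lock) != 0)
        ; /* busy-wait until the previous value was 0 (unlocked) */
}

void spin_unlock(unsigned int *lock)
{
    __atomic_store_n(lock, 0, __ATOMIC_SEQ_CST);
}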
package com.ra4king.gameservers.jwords.server.users;

import java.io.EOFException;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;

import com.ra4king.gameservers.jwords.server.JWordsServer;

public class Users {
	private ArrayList<User> users = new ArrayList<User>();

	public Users() {
		ArrayList<User> u = new ArrayList<User>();

		// available() is unreliable for detecting the end of an object
		// stream, so read until EOFException instead.
		try(ObjectInputStream in = new ObjectInputStream(new FileInputStream(JWordsServer.codeBase + "users.dat"))) {
			while(true)
				u.add((User)in.readObject());
		}
		catch(EOFException eof) {
			// reached the end of users.dat
		}
		catch(Exception exc) {
			exc.printStackTrace();
		}

		for(User user : u)
			users.add(user);

		Collections.sort(users);
	}

	/*
	private ArrayList<Integer> splitIDs(String input) {
		ArrayList<Integer> ids = new ArrayList<Integer>();

		if(input == null || input.equals(""))
			return ids;

		String sids[] = input.split(",");
		for(String s : sids)
			ids.add(Integer.parseInt(s));

		return ids;
	}
	*/

	public synchronized void addUser(String username, String pwrd, String email) throws Exception {
		User user = new User(username,pwrd,email,users.size());

		// Caution: the list is sorted by User's natural order (used for the
		// ID lookup below), so this username binary search is only reliable
		// if the two orderings agree.
		int idx = Collections.binarySearch(users,new User(username,"","",0),new Comparator<User>() {
			public int compare(User o1, User o2) {
				return o1.getUserName().compareTo(o2.getUserName());
			}
		});

		// binarySearch returns a non-negative index (including 0) on a match
		if(idx >= 0)
			throw new IllegalArgumentException("Username already in use.");

		// Caution: appending with a fresh ObjectOutputStream writes a new
		// stream header each time, which the single ObjectInputStream in the
		// constructor cannot read past.
		ObjectOutputStream out = new ObjectOutputStream(new FileOutputStream(JWordsServer.codeBase + "users.dat",true));
		out.writeObject(user);
		out.flush();
		out.close();

		users.add(user);
	}

	public synchronized User getUser(int id) {
		// Relies on User's natural ordering being by ID.
		int idx = Collections.binarySearch(users,new User("","","",id));
		if(idx < 0)
			return null;
		return users.get(idx);
	}

	public synchronized User getUser(String username, String pwrd) {
		if((username == null || username.equals("")) || (pwrd == null || pwrd.equals("")))
			return null;

		int idx = Collections.binarySearch(users,new User(username,"","",0),new Comparator<User>() {
			public int compare(User o1, User o2) {
				return o1.getUserName().compareTo(o2.getUserName());
			}
		});
		if(idx < 0)
			return null;

		if(users.get(idx).getPassword().equals(pwrd))
			return users.get(idx);
		else
			return null;
	}
}
Jóhanna Sigurðardóttir (Icelandic pronunciation: [jou̯ːhana ˈsɪːɣʏrðartou̯htɪr]; born 4 October 1942) is an Icelandic politician and the former Prime Minister of Iceland.[1] (In this Icelandic name, the last name is a patronymic, not a family name; the person is properly referred to by the given name Jóhanna.)

She became active in the trade union movement, serving as an officer. An MP from 1978 to 2013, she was appointed Iceland's Minister of Social Affairs and Social Security, serving from 1987 to 1994 and from 2007 until 2009. In 1994, when she lost a bid to head the Social Democratic Party, she raised her fist and declared "Minn tími mun koma!" ("My time will come!"), a phrase that became a popular Icelandic expression.[2][3] She became Prime Minister on 1 February 2009, Iceland's first female Prime Minister and the world's first openly lesbian head of government. Forbes listed her among the 100 most powerful women in the world.[4] She was a member of the Althing (Iceland's parliament) for Reykjavík constituencies from 1978, winning re-election on eight successive occasions. In September 2012, Jóhanna announced she would not seek re-election and retired from politics as Iceland's longest-serving member of Parliament.[5]

Education and early career

Jóhanna was born in Reykjavík. Her father is Sigurður Egill Ingimundarson.[6] She studied at the Commercial College of Iceland, a vocational high school operated by the Chamber of Commerce. After graduating with her commercial diploma in 1960, she worked as a flight attendant with Icelandic Airlines (a predecessor of Icelandair) and as an office worker.[7] She was active in the trade union movement from early in her professional life, presiding over the Board of the Icelandic Cabin Crew Association in 1966 and 1969 and over the Board of Svölurnar, Association of Former Stewardesses, in 1975. She was also a member of the Board of the Commercial Workers' Union from 1976 to 1983.[8]

Political career

Jóhanna was elected to the Althing in 1978 on the list of the Social Democratic Party for the Reykjavík constituency.[9] She enjoyed early success in her parliamentary career, serving as deputy speaker of the Althing in 1979 and in 1983–84. She was elected vice-chairman of the Social Democratic Party in 1984, a post she held until 1993. She was also Minister of Social Affairs in four separate Cabinets from 1987 to 1994,[8] when she left the Social Democratic Party, after losing the leadership contest, to form a new party, National Awakening; the two parties remerged in 2000 to form the present Social Democratic Alliance. Her 1994 declaration Minn tími mun koma! ("My time will come!"), after she lost the contest for the leadership of the Social Democratic Party, has become an iconic phrase in the Icelandic language.[2][3] From 1994 to 2003, she was an active member of the opposition in the Althing, serving on numerous parliamentary committees. After the 2003 elections, in which she stood in the Reykjavík South constituency (after the split of the old Reykjavík constituency), she was re-elected deputy speaker of the Althing.
The 2007 elections, in which she stood in the Reykjavík North constituency, saw the return of the Social Democratic Alliance to government in coalition with the Independence Party, and Jóhanna was named Minister of Social Affairs and Social Security.[9]

Prime Minister

Icelandic financial crisis, protests and elections

On 26 January 2009, Prime Minister Geir Haarde tendered the coalition government's resignation to the President of Iceland, Ólafur Ragnar Grímsson.[10][11] The move followed fourteen weeks of protests over the government's handling of the financial crisis, protests that had intensified from 20 January. After talks with the leaders of the five parties represented in the Althing, the President asked the Social Democratic Alliance and the Left-Green Movement to form a new government and to prepare for elections in the spring.[12] Jóhanna was proposed as Prime Minister for the new government; two reasons for this were her popularity among the general public and her good relations with the Left-Green Movement. An opinion poll by Capacent Gallup in December 2008 found 73% approval of her actions as a minister, more than any other member of the Cabinet; she was also the only minister to have improved her approval ratings over 2008.[13]

The new government needed the support of the Progressive Party in the Althing. Negotiations continued up to the evening of 31 January, and the new Cabinet was appointed on 1 February. Independent polling showed that Jóhanna and Steingrímur J. Sigfússon, leader of the Left-Green Movement, the other party in the coalition government, enjoyed considerable support outside their own parties.[14]

On 25 April 2009, a parliamentary election was held in Iceland,[15] following the protests now known as the Kitchenware Revolution[16] that resulted from the Icelandic financial crisis.[17][18][19][20] The Social Democratic Alliance and the Left-Green Movement, which formed the outgoing coalition government under Jóhanna, both made gains and together had an overall majority of seats in the Althing. The Progressive Party also made gains, and the new Citizens' Movement, formed after the January 2009 protests, gained four seats. The Independence Party, which had been in power for eighteen years until January 2009, lost a third of its support and nine seats in the Althing. On 10 May 2009, the new government was announced, with Jóhanna staying on as Prime Minister.[21]

Overcoming the financial crisis

Several referendums were held to decide on the Icesave bank debts, which were at the center of the country's financial crisis. The first Icesave referendum (Icelandic: Þjóðaratkvæðagreiðsla um Icesave) was held on 6 March 2010.[22] The proposal was resoundingly defeated, with 93% voting against and less than 2% in favor. After the referendum, new negotiations commenced.
On 16 February 2011, the Althing agreed to a repayment deal to pay back the full amount starting in 2016, finalising before 2046, with a fixed interest rate of 3%.[23] The Icelandic president once again refused to sign the new deal on 20 February, calling for a new referendum.[24][25] A second referendum was thus held on 9 April 2011, also resulting in a "no" victory, though by a smaller margin.[26] After the referendum failed to pass, the British and Dutch governments said that they would take the case to the European courts.[27]

At a session on 28 September 2010, the Althing voted 33–30 to indict the former Prime Minister Geir Haarde, but not the other ministers, on charges of negligence in office.[28] He stood trial before the Landsdómur, a special court to hear cases alleging misconduct in government office, used for the first time since it was established in the 1905 Constitution.[29] He was convicted on one charge.

New Icelandic constitution process

(Image caption: Jóhanna Sigurðardóttir (second from right) and Jónína Leósdóttir (left), on an official visit to Slovenia)

Once in power, the left coalition led by Jóhanna—comprising the Social Democratic Alliance, the Left-Green Movement, the Progressive Party and the Liberal Party—agreed, inspired largely by the citizen protests, to convene a constitutional assembly to discuss changes to the Constitution, in use since 1905.[30] Taking their cue from nationwide protests and lobbying efforts by civil organisations, the new governing parties decided that Iceland's citizens should be involved in creating a new constitution, and began debating a bill to that purpose on 4 November 2009. Parallel to the protests and the parliamentary deliberations, citizens started to unite in grassroots-based think-tanks. A National Forum, Þjóðfundur 2009, was organised on 14 November 2009 in the form of an assembly of Icelandic citizens at the Laugardalshöll in Reykjavík by a group of grassroots citizen movements collectively called "the Anthill". 1,500 people were invited to participate in the assembly; of these, 1,200 were chosen at random from the national registry.

On 16 June 2010 the Constitutional Act was finally accepted by parliament and a new Forum was summoned.[31][32] The Constitutional Act prescribed that the participants of the Forum had to be randomly sampled from the National Population Register. The Forum 2010 came into being through the efforts of both governing parties and the Anthill group. A seven-member Constitutional Committee, appointed by the parliament, was charged with the supervision of the forum and the presentation of its results, while the organisation and facilitation of the National Forum 2010 was done by the Anthill group that had organised the first Forum in 2009. The process continued with the election, on 26 October 2010, of 25 people of no political affiliation.
The Supreme Court of Iceland later invalidated the results of the election on 25 January 2011, following complaints about several faults in how the election was conducted,[33][34] but the Parliament decided that it was the manner of the election, and not the results, that had been questioned, and that the 25 elected candidates would form a Constitutional Council; the constitutional process thus went on.[35] On 29 July 2011 the draft was presented to the Parliament,[36] which finally agreed, in a vote on 24 May 2012 with 35 in favor and 15 against, to organize an advisory referendum on the Constitutional Council's proposal for a new constitution no later than 20 October 2012. The only opposing members of parliament came from the former governing right-wing party, the Independence Party. A referendum proposed by some parliamentarians of the governing left coalition on discontinuing the accession talks with the European Union was also rejected, with 34 votes against and 25 in favor.[37]

Women's rights and ban on striptease

In 2010, her government banned strip clubs, paying for nudity in restaurants, and other means of employers profiting from employees' nudity – the first such ban in a Western democratic country. Jóhanna commented: "The Nordic countries are leading the way on women's equality, recognizing women as equal citizens rather than commodities for sale."[38] After the decision was made she was hailed by fellow feminists, with radical feminist Julie Bindel claiming Iceland had become the most feminist country in the world.[39] Asked what the most important gender issue today is, she answered, "To fight the pay gap between men and women".[40]

Personal life

Jóhanna married Þorvaldur Steinar Jóhannesson in 1970,[41] and the couple had two sons, Sigurður Egill Þorvaldsson (born 1972) and Davíð Steinar Þorvaldsson (born 1977).[42] After their divorce in 1987, she entered a civil union with Jónína Leósdóttir (born 1954), an author and playwright, in 2002.[2][3][8] In 2010, when same-sex marriage was legalised in Iceland, Jóhanna and Jónína converted their civil union into a marriage, becoming one of the first same-sex married couples in Iceland.[43]

In 2017, she released a biography entitled Minn tími (My Time). The biography covers one of the most contentious periods in Icelandic history: from the financial crash of autumn 2008, through the protests and emergency elections the following year, to the difficult recovery period that followed, during which she led Iceland's first left-wing government.[44][45]
/**
 * Abstract Player class, extended by NormallPlayer and BOTplayer.
 * @author Janek
 */
public abstract class Player extends Thread {
	Colors color;
	Player opponent;
	Socket socket;
	BufferedReader input;
	PrintWriter output;
	int X1,Y1;
	Game game;
	GameList gamelist;

	// constructor
	public Player(Socket socket, GameList gamelist) {
		//game.playerList.add(this);
		this.gamelist = gamelist;
		this.socket = socket;

		try {
			input = new BufferedReader(new InputStreamReader(socket.getInputStream()));
			output = new PrintWriter(socket.getOutputStream(), true);
			output.println("WELCOME " + color);
			output.println("MESSAGE Waiting for opponent to connect");
		} catch (IOException e) {
			System.out.println("Player died: " + e);
		}
	}

	// no-argument constructor
	public Player() {
	}

	/**
	 * Checks whether a move from (X1, Y1) to (X2, Y2) is legal, i.e. whether
	 * it is made by the player whose turn it is and the chosen location is
	 * valid. The move itself is performed separately by checkerMove().
	 * @param X1 source x-coordinate
	 * @param Y1 source y-coordinate
	 * @param X2 destination x-coordinate
	 * @param Y2 destination y-coordinate
	 * @param player the player attempting the move
	 * @return true if the move is legal, false if not
	 */
	protected boolean isMoveLegal(int X1, int Y1, int X2, int Y2, Player player) {
		return this.game.board.isMoveLegal(X1, Y1, X2, Y2, player.color);
	}

	/**
	 * Moves a checker from (X1, Y1) to (X2, Y2).
	 */
	protected void checkerMove(int X1, int Y1, int X2, int Y2) {
		this.game.board.doMove(X1, Y1, X2, Y2);
		// this.game.board.showBoard(this.game.board.board, this.game.boardSize);
	}

	/**
	 * Checks whether the given player has won the game.
	 */
	protected boolean didPlayerWon(Player player) {
		return this.game.board.didPlayerWin(player.color);
	}

	/**
	 * The run method of this thread.
	 */
	public abstract void run();
}
#pragma once #include "../base_def.hpp" namespace lol { struct LolLootLootGrantNotification { int64_t id; uint64_t gameId; uint64_t playerId; int32_t championId; std::string playerGrade; std::string lootName; std::string messageKey; std::string msgId; uint64_t accountId; }; inline void to_json(json& j, const LolLootLootGrantNotification& v) { j["id"] = v.id; j["gameId"] = v.gameId; j["playerId"] = v.playerId; j["championId"] = v.championId; j["playerGrade"] = v.playerGrade; j["lootName"] = v.lootName; j["messageKey"] = v.messageKey; j["msgId"] = v.msgId; j["accountId"] = v.accountId; } inline void from_json(const json& j, LolLootLootGrantNotification& v) { v.id = j.at("id").get<int64_t>(); v.gameId = j.at("gameId").get<uint64_t>(); v.playerId = j.at("playerId").get<uint64_t>(); v.championId = j.at("championId").get<int32_t>(); v.playerGrade = j.at("playerGrade").get<std::string>(); v.lootName = j.at("lootName").get<std::string>(); v.messageKey = j.at("messageKey").get<std::string>(); v.msgId = j.at("msgId").get<std::string>(); v.accountId = j.at("accountId").get<uint64_t>(); } }
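A brief usage sketch of the converters above. It assumes `json` is the nlohmann::json alias pulled in by base_def.hpp (an assumption), and the header name and field values are made up for illustration:

#include <iostream>
#include "lol_loot_loot_grant_notification.hpp" // hypothetical header name

int main() {
    lol::LolLootLootGrantNotification n{
        1,          // id
        42,         // gameId
        7,          // playerId
        103,        // championId
        "S+",       // playerGrade
        "CHEST_1",  // lootName
        "msg.key",  // messageKey
        "m-1",      // msgId
        9           // accountId
    };

    json j = n;                                               // to_json via ADL
    auto back = j.get<lol::LolLootLootGrantNotification>();   // from_json via ADL

    std::cout << j.dump(2) << std::endl;
    return 0;
}

Defining to_json/from_json in the type's own namespace lets nlohmann::json find them by argument-dependent lookup, so the struct serializes like any built-in type.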
import React from 'react';
import { Image, Text, View } from 'react-native';

import SunWeather from '~/assets/sun-cold.png';

import { styles } from './styles';

export const ContentMessage = () => {
  return (
    <View style={styles.container}>
      <Image style={styles.image} source={SunWeather} />
      <View style={styles.content}>
        <Text style={styles.title}>15 minutes ago</Text>
        <Text style={styles.description}>
          The wind is very strong today despite the sun, so take care when
          going out.
        </Text>
      </View>
    </View>
  );
};
Oxygen-rich graphene support could lead to durable fuel cell catalysts

(Nanowerk News) In the search for efficient, durable and commercially viable fuel cells, scientists at the University of Ulster's Nanotechnology Institute and collaborators from Peking University and the University of Oxford have discovered a new catalyst-support combination that could make fuel cells more efficient and more resistant to carbon monoxide (CO) poisoning. The research – described in The Journal of Physical Chemistry C ("Rapid Microwave Synthesis of CO Tolerant Reduced Graphene Oxide-Supported Platinum Electrocatalysts for Oxidation of Methanol") – may expedite the realisation of fuel-cell vehicles.

Fuel cells, which convert chemical energy directly into electrical energy by electrochemically decomposing a fuel such as hydrogen or methanol, are considered a promising new way of powering cars and portable devices. One major hurdle to the commercial use of fuel cells is CO poisoning of the active platinum catalyst sites, which renders them ineffective and prevents fuel oxidation. The CO poisoning problem is especially severe in direct methanol fuel cells (DMFCs) because CO is always present in critical amounts as an intermediate in the methanol oxidation reaction. So far the major approach to reducing the poisoning has been to alloy platinum with other expensive metals such as Ru, Pd or Au. Now, however, Ulster scientists have found a cheaper solution that could help bring fuel cell devices a step closer to the market.

To create a catalyst system that can tolerate more carbon monoxide, they deposited platinum nanocrystals on a support material of graphene oxide and reduced it slightly to increase its electrical conductivity. They used a simple, scalable, fast and eco-friendly microwave approach that has the advantage of reducing the graphene oxide (RGO) and forming the platinum nanoparticles simultaneously. To test the activity of the Pt/RGO, the team looked at the oxidation of methanol, a reaction that takes place at the anode of a methanol fuel cell. Their research shows that the new material displays an unprecedented tolerance to CO poisoning, much better long-term stability and a higher electrocatalytic activity than those exhibited by commercially available carbon-supported Pt (Pt/C) electrocatalysts.

(Image: Platinum nanocatalysts supported on lightly reduced graphene oxide could make fuel cells more stable and resistant to carbon monoxide poisoning.)

Structural and electronic properties of the electrocatalysts were determined using high-resolution X-ray photoelectron spectroscopy at the National Centre for Electron Spectroscopy and Surface Analysis (NCESS) at Daresbury, combined with transmission electron microscopy analysis at the EPSRC-funded TEM facility at Oxford University.

"Our studies of the structure and activity of this catalyst – and comparisons with commercial Pt/C catalysts currently in use – illustrate that the lightly reduced graphene oxide support 'protects' the fine platinum nanocrystals from CO poisoning, enabling them to exhibit long-term operation stability," explained Ulster Professor of Advanced Materials, Pagona Papakonstantinou, who leads the research team at Ulster University. The abundance of residual oxygen groups on lightly reduced graphene oxide (RGO) plays a major role in the removal of carbonaceous species.
When an electrochemical potential is applied to the electrode, water molecules on the RGO support dissociate to form hydroxyl (-OH) groups, which readily oxidize the CO adsorbed on adjacent Pt sites. "This is one probable mechanism that we believe is operating to provide CO-free Pt sites."

It is important to emphasize that the team has come up with a new electrocatalyst design that can be considered a promising alternative for improving the durability of fuel cells and eliminating the use of costly bimetallic or ternary metal systems.
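The quoted mechanism can be written out explicitly. The scheme below is the standard "bifunctional" formulation from the electrocatalysis literature, reconstructed here for clarity rather than taken from the paper itself, so treat the exact steps as an assumption:

\begin{align}
\mathrm{Pt} + \mathrm{CH_3OH} &\rightarrow \mathrm{Pt{-}CO_{ads}} + 4\mathrm{H^+} + 4e^- && \text{(methanol dehydrogenation; CO poisons the Pt site)}\\
\mathrm{RGO} + \mathrm{H_2O} &\rightarrow \mathrm{RGO{-}OH_{ads}} + \mathrm{H^+} + e^- && \text{(water dissociation on the oxygen-rich support)}\\
\mathrm{Pt{-}CO_{ads}} + \mathrm{RGO{-}OH_{ads}} &\rightarrow \mathrm{CO_2} + \mathrm{H^+} + e^- + \mathrm{Pt} + \mathrm{RGO} && \text{(CO stripping frees the Pt site)}
\end{align}

Summing the three steps recovers the overall anode reaction, $\mathrm{CH_3OH} + \mathrm{H_2O} \rightarrow \mathrm{CO_2} + 6\mathrm{H^+} + 6e^-$.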
It was, perhaps, the sign of a candidate under strain. Or it might have just been Doug being Doug.

At a mayoral debate at the Runnymede Community Church Friday, Doug Ford broke the political fourth wall and went after the audience for giving him a hard time.

“I’ve stood up for you folks,” he said after receiving some jeers. “I’m surprised at your reaction to me tonight, to be very frank with you.”

At that point, he was drowned out by audience laughter. “I know you guys think it’s funny, but I stood up and fought for you folks for four years when they were doing the high rises.”

Ford then referenced a $1,000 donation he says he made to nearby High Park. “I’m not this guy that wants to cut zoos and go wild,” he said. “I’ve got to find efficiencies.

“I’ve spent a lot of time in High Park, doing some walks, doing some runs. So I’m — to be honest, friends — I’m a little taken aback.”

Later, when he was again interrupted by the crowd, Mr. Ford doubled down. “I could have sworn at the beginning the moderator said we’re going to be nice and polite. It doesn’t show with some of these folks,” he said. “A lot of them are — they’re pretty ignorant, some of them.”

The debate, which touched on everything from snow removal to policing and the Ontario Municipal Board, was by turns convivial and combative. Only the three leading candidates, Mr. Ford along with John Tory and Olivia Chow, were allowed on the main panel. An audience of more than 250 filled the church basement in the west end of the old City of Toronto past capacity almost half an hour before the scheduled 7:30 start. Organizers were forced to turn some supporters and residents away at the door.

The sharpest exchanges of the night came when the candidates were asked about temperament and their respective abilities to work with council. Mr. Ford, under fire on the topic for weeks, said Mr. Tory had City Council confused with “a church picnic.”

“I do not believe City Hall is a church picnic,” Mr. Tory responded. “But nor do I believe that it’s armed combat or a circus.”

National Post • Email: [email protected] | Twitter: richardwarnica
from datetime import datetime

from alembic import op


def upgrade():
    description = (
        "Send an Assessment updated notification to "
        "Assessors, Creators and Verifiers."
    )
    now = datetime.utcnow().strftime("%Y-%m-%d %H-%M-%S")
    sql = """
        INSERT INTO notification_types (
            name, description, template, advance_notice, instant,
            created_at, updated_at
        )
        VALUES (
            "assessment_updated", "{description}", "assessment_updated",
            0, FALSE, '{now}', '{now}'
        )
    """.format(description=description, now=now)
    op.execute(sql)
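The snippet above only shows the forward migration. Alembic migrations normally pair it with a downgrade; the original does not include one here, so the following is a sketch under that assumption, reusing only the table and name visible in upgrade():

def downgrade():
    """Remove the notification type added in upgrade().

    Illustrative sketch, not code from the original migration; it assumes
    no other rows reference this notification type.
    """
    op.execute(
        "DELETE FROM notification_types WHERE name = 'assessment_updated'"
    )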
package main

import (
    "log"
    "sync"
    "time"
)

func main() {
    // Explore concurrent writes to a slice and a map.
    var s []int
    var m = make(map[int]bool, 10)

    // Prefer a channel: put the results produced in independent goroutines
    // into the channel instead of assigning through a shared variable.
    res := make(chan int, 10)

    var wg sync.WaitGroup
    var lock sync.RWMutex // a read-write lock keeps concurrent map writes free of data races

    wg.Add(20)
    for i := 0; i < 10; i++ {
        //i := i
        go func(i int) {
            defer wg.Done()
            log.Println("i = ", i)
            s = append(s, i) // not recommended: s is shared and mutated by several goroutines

            // write to m
            lock.Lock()
            log.Println("hello")
            m[i] = true
            lock.Unlock()

            // recommended: just send the value into the res channel
            res <- i
        }(i)
        go func(i int) {
            defer wg.Done()
            log.Println("current i = ", i)
            lock.Lock()
            log.Println("hai")
            m[i] = true // concurrent map writes are a data race;
            // without the lock, `go run -race t.go` reports a race here
            lock.Unlock()
        }(i)
    }
    wg.Wait()

    // Close the channel for writes, i.e. shut down the sender side.
    close(res)

    log.Println("s =", s)
    for v := range res {
        log.Println("current v = ", v)
    }
    log.Println("m = ", m)
    log.Println("exec end")
    log.Println(111)
    log.Println(123)
    time.Sleep(time.Second)
}

/*
Without a lock protecting m, concurrent map reads and writes easily produce a data race:

2020/04/29 23:30:26 i = 1
==================
WARNING: DATA RACE
Read at 0x00c00012a020 by goroutine 9:
  main.main.func1()
      /Users/heige/web/go/data-race.go:26 +0x10b

Two ways to fix it:
  1. use a mutex (sync.Mutex / sync.RWMutex)
  2. use a channel
A channel is more efficient than a mutex here and also matches Go's design
philosophy (share memory by communicating).

Sample run:

2020/04/29 23:40:05 current i = 4
2020/04/29 23:40:05 hai
2020/04/29 23:40:05 i = 0
2020/04/29 23:40:05 hello
2020/04/29 23:40:05 current i = 9
2020/04/29 23:40:05 hai
2020/04/29 23:40:05 current i = 2
2020/04/29 23:40:05 i = 1
2020/04/29 23:40:05 current i = 1
2020/04/29 23:40:05 current i = 0
2020/04/29 23:40:05 i = 2
2020/04/29 23:40:05 i = 8
2020/04/29 23:40:05 i = 4
2020/04/29 23:40:05 hai
2020/04/29 23:40:05 hello
2020/04/29 23:40:05 hello
2020/04/29 23:40:05 hai
2020/04/29 23:40:05 current i = 7
2020/04/29 23:40:05 hai
2020/04/29 23:40:05 current i = 5
2020/04/29 23:40:05 hai
2020/04/29 23:40:05 i = 3
2020/04/29 23:40:05 hai
2020/04/29 23:40:05 current i = 3
2020/04/29 23:40:05 hello
2020/04/29 23:40:05 i = 5
2020/04/29 23:40:05 hai
2020/04/29 23:40:05 i = 6
2020/04/29 23:40:05 hello
2020/04/29 23:40:05 hello
2020/04/29 23:40:05 current i = 6
2020/04/29 23:40:05 hello
2020/04/29 23:40:05 hello
2020/04/29 23:40:05 current i = 8
2020/04/29 23:40:05 hai
2020/04/29 23:40:05 i = 9
2020/04/29 23:40:05 i = 7
2020/04/29 23:40:05 hai
2020/04/29 23:40:05 hello
2020/04/29 23:40:05 hello
2020/04/29 23:40:05 s = [0 1 2 8 4 3 5 6 9 7]
2020/04/29 23:40:05 current v = 0
2020/04/29 23:40:05 current v = 4
2020/04/29 23:40:05 current v = 1
2020/04/29 23:40:05 current v = 3
2020/04/29 23:40:05 current v = 2
2020/04/29 23:40:05 current v = 8
2020/04/29 23:40:05 current v = 5
2020/04/29 23:40:05 current v = 6
2020/04/29 23:40:05 current v = 9
2020/04/29 23:40:05 current v = 7
2020/04/29 23:40:05 m = map[0:true 1:true 2:true 3:true 4:true 5:true 6:true 7:true 8:true 9:true]
2020/04/29 23:40:05 exec end
*/
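The lock-versus-channel trade-off described in the comment above is not Go-specific. As a cross-language illustration (a sketch of mine, not part of the original repository), here is the same pair of fixes in Python: a lock guarding a shared dict, and a queue playing the role of the channel.

import threading
from queue import Queue

m = {}                   # shared map, guarded by a lock
lock = threading.Lock()
res = Queue()            # channel-style alternative: no shared state to guard

def worker(i):
    with lock:           # fix 1: mutex around the shared write
        m[i] = True
    res.put(i)           # fix 2: hand the result to a consumer via the queue

threads = [threading.Thread(target=worker, args=(i,)) for i in range(10)]
for t in threads:
    t.start()
for t in threads:
    t.join()

while not res.empty():   # all producers have finished, so empty() is reliable here
    print("current v =", res.get())
print("m =", m)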
from os import listdir


def files2c(readdir, ext):
    """Convert every file under `readdir` with extension `ext` via file2c();
    return the C strings, the matching file names, and the data lengths."""
    names = listdir(readdir)
    files = [name for name in names if ext == name.split('.')[-1]]

    c_strs = []
    lens = []
    for file_name in files:
        c_str, length = file2c(readdir + '/' + file_name, 'mp3_data')
        c_strs.append(c_str)
        lens.append(length)
    return c_strs, files, lens
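file2c() is called above but not defined in this snippet. To make the intent concrete, here is a hypothetical sketch of such a helper; the exact C formatting and the role of the 'mp3_data' variable name are assumptions, not the original implementation:

def file2c(path, var_name):
    """Hypothetical helper: render a file as a C byte-array initializer."""
    with open(path, 'rb') as fh:
        data = fh.read()
    # Emit one hex literal per byte; the real file2c may format differently.
    body = ', '.join('0x%02x' % b for b in data)
    c_str = 'const unsigned char %s[] = { %s };' % (var_name, body)
    return c_str, len(data)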
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate component.yaml from templates"""
import json
import os
import shutil
import sys

import yaml

CURRENT_FOLDER = os.path.dirname(os.path.abspath(__file__))
PIPELINES_HOME = os.path.join(CURRENT_FOLDER.split("pipelines")[0], "pipelines")
TEMPLATE_PATH = os.path.join(
    PIPELINES_HOME, "components/PyTorch/pytorch-kfp-components/templates"
)
OUTPUT_YAML_FOLDER = "yaml"


def create_output_folder():
    """Removes the `yaml` folder and recreates it"""
    if os.path.exists(OUTPUT_YAML_FOLDER):
        shutil.rmtree(OUTPUT_YAML_FOLDER)
    os.mkdir(OUTPUT_YAML_FOLDER)


def get_templates_list():
    """Get the list of template files from the `templates` directory"""
    assert os.path.exists(TEMPLATE_PATH)
    templates_list = os.listdir(TEMPLATE_PATH)
    return templates_list


def read_template(template_path: str):
    """Read the `component.yaml` template"""
    template_dict = None  # stays None if the yaml fails to parse
    with open(template_path, "r") as stream:
        try:
            template_dict = yaml.safe_load(stream)
        except yaml.YAMLError as exc:
            print(exc)
    return template_dict


def replace_keys_in_template(template_dict: dict, mapping: dict):
    """Replace the keys, values in `component.yaml` based on `mapping` dict"""
    # Sample mapping will be as below
    # { "implementation.container.image" : "image_name" }
    for nested_key, value in mapping.items():
        # parse through each nested key
        keys = nested_key.split(".")
        accessable = template_dict
        for k in keys[:-1]:
            accessable = accessable[k]
        accessable[keys[-1]] = value
    return template_dict


def write_to_yaml_file(template_dict: dict, yaml_path: str):
    """Write yaml output into file"""
    with open(yaml_path, "w") as pointer:
        yaml.dump(template_dict, pointer)


def generate_component_yaml(mapping_template_path: str):
    """Generate component.yaml files based on the templates"""
    mapping: dict = {}
    if os.path.exists(mapping_template_path):
        with open(mapping_template_path) as pointer:
            mapping = json.load(pointer)

    create_output_folder()
    template_list = get_templates_list()
    for template_name in template_list:
        print("Processing {}".format(template_name))
        # If the template name is not present in the mapping dictionary,
        # there is no change in the template; we can simply copy the template
        # into the output yaml folder.
        src = os.path.join(TEMPLATE_PATH, template_name)
        dest = os.path.join(OUTPUT_YAML_FOLDER, template_name)
        if not mapping or template_name not in mapping:
            shutil.copy(src, dest)
        else:
            # If a mapping is specified, replace the key/value pairs
            # and then save the file.
            template_dict = read_template(template_path=src)
            template_dict = replace_keys_in_template(
                template_dict=template_dict, mapping=mapping[template_name]
            )
            write_to_yaml_file(template_dict=template_dict, yaml_path=dest)


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(
            "\n\nUsage: "
            "python utils/generate_templates.py "
            "cifar10/template_mapping.json\n\n"
        )
    input_template_path = sys.argv[1]
    generate_component_yaml(mapping_template_path=input_template_path)
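To see the replacement logic in isolation, the sample mapping from the docstring can be fed straight into replace_keys_in_template() (assuming that function is in scope). The image name below is made up for illustration:

import yaml

template = yaml.safe_load("""
implementation:
  container:
    image: placeholder
""")
mapping = {"implementation.container.image": "my-registry/train:latest"}
print(replace_keys_in_template(template_dict=template, mapping=mapping))
# {'implementation': {'container': {'image': 'my-registry/train:latest'}}}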
#pragma once

#include <stdlib.h>
#include <iostream>

class GameBoard {
private:
    int* board;

public:
    static const int BOARD_HEIGHT = 6;
    static const int BOARD_WIDTH = 7;
    static const int BOARD_SIZE = BOARD_HEIGHT * BOARD_WIDTH;

    GameBoard();
    ~GameBoard();

    void printBoard();
    int getPiece(int x, int y);
    void putPiece(int x, int player);
};
import sys

input = sys.stdin.readline

t = int(input())
for _ in range(t):
    n, p, k = map(int, input().split())
    price = [int(x) for x in input().split()]
    price.append(0)  # sentinel so price[0] == 0 after sorting
    price.sort()
    ans = 0
    # price[i] accumulates the minimum cost of owning the i cheapest goods:
    # the first k-1 goods can only be bought individually...
    for i in range(1, k):
        price[i] += price[i - 1]
    #print(price)
    # ...and beyond that each step adds a group of k goods, paying only
    # for the most expensive item in the group.
    for i in range(1, n - k + 1):
        price[i + k] += price[i]
    #print(price)
    for i in range(n + 1):
        if price[i] <= p:
            ans = i
    print(ans)
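This reads like the classic "k goods for the price of one" offer: after sorting, a stride-k prefix sum turns price[i] into the cheapest way to own the i lowest-priced goods. A hand-worked case (my own check, not from the original) shows the mechanics:

# n=5, p=6, k=2, prices [2, 4, 3, 5, 7]
# After appending the 0 sentinel and sorting: [0, 2, 3, 4, 5, 7]
# First loop (i < k):      price[1] = 2             # buy the cheapest item alone
# Second loop (stride k):  price[3] = 4 + 2 = 6     # item of cost 2 alone, then pay 4 for the pair {3, 4}
#                          price[4] = 5 + 3 = 8
#                          price[5] = 7 + 6 = 13
# The largest i with price[i] <= p=6 is i=3, so 3 goods can be bought.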
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.namenode; import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption.IMPORT; import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI; import static org.hamcrest.CoreMatchers.allOf; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.is; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import java.io.File; import java.io.IOException; import java.lang.management.ManagementFactory; import java.net.InetAddress; import java.net.URI; import java.nio.file.Paths; import java.util.Collection; import java.util.Iterator; import java.util.List; import java.util.Random; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.LogVerificationAppender; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil; import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException; import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.hdfs.util.HostsFileWriter; import org.apache.hadoop.hdfs.util.MD5FileUtils; import org.apache.hadoop.io.MD5Hash; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.util.ExitUtil.ExitException; import org.apache.hadoop.util.ExitUtil; import org.apache.hadoop.util.StringUtils; import org.apache.log4j.Logger; import org.junit.After; import org.junit.Before; import org.junit.Test; import javax.management.MBeanServer; import javax.management.ObjectName; /** * Startup 
and checkpoint tests * */ public class TestStartup { public static final String NAME_NODE_HOST = "localhost:"; public static final String WILDCARD_HTTP_HOST = "0.0.0.0:"; private static final Log LOG = LogFactory.getLog(TestStartup.class.getName()); private Configuration config; private File hdfsDir=null; static final long seed = 0xAAAAEEFL; static final int blockSize = 4096; static final int fileSize = 8192; private long editsLength=0, fsimageLength=0; private void writeFile(FileSystem fileSys, Path name, int repl) throws IOException { FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf() .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), (short) repl, blockSize); byte[] buffer = new byte[fileSize]; Random rand = new Random(seed); rand.nextBytes(buffer); stm.write(buffer); stm.close(); } @Before public void setUp() throws Exception { ExitUtil.disableSystemExit(); ExitUtil.resetFirstExitException(); config = new HdfsConfiguration(); hdfsDir = new File(MiniDFSCluster.getBaseDirectory()); if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) { throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'"); } LOG.info("--hdfsdir is " + hdfsDir.getAbsolutePath()); config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, fileAsURI(new File(hdfsDir, "name")).toString()); config.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, new File(hdfsDir, "data").getPath()); config.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, "0.0.0.0:0"); config.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0"); config.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, "0.0.0.0:0"); config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY, fileAsURI(new File(hdfsDir, "secondary")).toString()); config.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, WILDCARD_HTTP_HOST + "0"); FileSystem.setDefaultUri(config, "hdfs://"+NAME_NODE_HOST + "0"); } /** * clean up */ @After public void tearDown() throws Exception { if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) { throw new IOException("Could not delete hdfs directory in tearDown '" + hdfsDir + "'"); } } /** * Create a number of fsimage checkpoints * @param count number of checkpoints to create * @throws IOException */ public void createCheckPoint(int count) throws IOException { LOG.info("--starting mini cluster"); // manage dirs parameter set to false MiniDFSCluster cluster = null; SecondaryNameNode sn = null; try { cluster = new MiniDFSCluster.Builder(config) .manageDataDfsDirs(false) .manageNameDfsDirs(false).build(); cluster.waitActive(); LOG.info("--starting Secondary Node"); // start secondary node sn = new SecondaryNameNode(config); assertNotNull(sn); // Create count new files and checkpoints for (int i=0; i<count; i++) { // create a file FileSystem fileSys = cluster.getFileSystem(); Path p = new Path("t" + i); this.writeFile(fileSys, p, 1); LOG.info("--file " + p.toString() + " created"); LOG.info("--doing checkpoint"); sn.doCheckpoint(); // this shouldn't fail LOG.info("--done checkpoint"); } } catch (IOException e) { fail(StringUtils.stringifyException(e)); System.err.println("checkpoint failed"); throw e; } finally { if(sn!=null) sn.shutdown(); if(cluster!=null) cluster.shutdown(); LOG.info("--cluster shutdown"); } } /** * Corrupts the MD5 sum of the fsimage. 
* * @param corruptAll * whether to corrupt one or all of the MD5 sums in the configured * namedirs * @throws IOException */ private void corruptFSImageMD5(boolean corruptAll) throws IOException { List<URI> nameDirs = (List<URI>)FSNamesystem.getNamespaceDirs(config); // Corrupt the md5 files in all the namedirs for (URI uri: nameDirs) { // Directory layout looks like: // test/data/dfs/nameN/current/{fsimage,edits,...} File nameDir = new File(uri.getPath()); File dfsDir = nameDir.getParentFile(); assertEquals(dfsDir.getName(), "dfs"); // make sure we got right dir // Set the md5 file to all zeros File imageFile = new File(nameDir, Storage.STORAGE_DIR_CURRENT + "/" + NNStorage.getImageFileName(0)); MD5FileUtils.saveMD5File(imageFile, new MD5Hash(new byte[16])); // Only need to corrupt one if !corruptAll if (!corruptAll) { break; } } } /* * corrupt files by removing and recreating the directory */ private void corruptNameNodeFiles() throws IOException { // now corrupt/delete the directrory List<URI> nameDirs = (List<URI>)FSNamesystem.getNamespaceDirs(config); List<URI> nameEditsDirs = FSNamesystem.getNamespaceEditsDirs(config); // get name dir and its length, then delete and recreate the directory File dir = new File(nameDirs.get(0).getPath()); // has only one this.fsimageLength = new File(new File(dir, Storage.STORAGE_DIR_CURRENT), NameNodeFile.IMAGE.getName()).length(); if(dir.exists() && !(FileUtil.fullyDelete(dir))) throw new IOException("Cannot remove directory: " + dir); LOG.info("--removed dir "+dir + ";len was ="+ this.fsimageLength); if (!dir.mkdirs()) throw new IOException("Cannot create directory " + dir); dir = new File( nameEditsDirs.get(0).getPath()); //has only one this.editsLength = new File(new File(dir, Storage.STORAGE_DIR_CURRENT), NameNodeFile.EDITS.getName()).length(); if(dir.exists() && !(FileUtil.fullyDelete(dir))) throw new IOException("Cannot remove directory: " + dir); if (!dir.mkdirs()) throw new IOException("Cannot create directory " + dir); LOG.info("--removed dir and recreated "+dir + ";len was ="+ this.editsLength); } /** * start with -importCheckpoint option and verify that the files are in separate directories and of the right length * @throws IOException */ private void checkNameNodeFiles() throws IOException{ // start namenode with import option LOG.info("-- about to start DFS cluster"); MiniDFSCluster cluster = null; try { cluster = new MiniDFSCluster.Builder(config) .format(false) .manageDataDfsDirs(false) .manageNameDfsDirs(false) .startupOption(IMPORT).build(); cluster.waitActive(); LOG.info("--NN started with checkpoint option"); NameNode nn = cluster.getNameNode(); assertNotNull(nn); // Verify that image file sizes did not change. 
FSImage image = nn.getFSImage(); verifyDifferentDirs(image, this.fsimageLength, this.editsLength); } finally { if(cluster != null) cluster.shutdown(); } } /** * verify that edits log and fsimage are in different directories and of a correct size */ private void verifyDifferentDirs(FSImage img, long expectedImgSize, long expectedEditsSize) { StorageDirectory sd =null; for (Iterator<StorageDirectory> it = img.getStorage().dirIterator(); it.hasNext();) { sd = it.next(); if(sd.getStorageDirType().isOfType(NameNodeDirType.IMAGE)) { img.getStorage(); File imf = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE, 0); LOG.info("--image file " + imf.getAbsolutePath() + "; len = " + imf.length() + "; expected = " + expectedImgSize); assertEquals(expectedImgSize, imf.length()); } else if(sd.getStorageDirType().isOfType(NameNodeDirType.EDITS)) { img.getStorage(); File edf = NNStorage.getStorageFile(sd, NameNodeFile.EDITS, 0); LOG.info("-- edits file " + edf.getAbsolutePath() + "; len = " + edf.length() + "; expected = " + expectedEditsSize); assertEquals(expectedEditsSize, edf.length()); } else { fail("Image/Edits directories are not different"); } } } /** * secnn-6 * checkpoint for edits and image is the same directory * @throws IOException */ @Test public void testChkpointStartup2() throws IOException{ LOG.info("--starting checkpointStartup2 - same directory for checkpoint"); // different name dirs config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, fileAsURI(new File(hdfsDir, "name")).toString()); config.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, fileAsURI(new File(hdfsDir, "edits")).toString()); // same checkpoint dirs config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY, fileAsURI(new File(hdfsDir, "chkpt")).toString()); config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY, fileAsURI(new File(hdfsDir, "chkpt")).toString()); createCheckPoint(1); corruptNameNodeFiles(); checkNameNodeFiles(); } /** * seccn-8 * checkpoint for edits and image are different directories * @throws IOException */ @Test public void testChkpointStartup1() throws IOException{ //setUpConfig(); LOG.info("--starting testStartup Recovery"); // different name dirs config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, fileAsURI(new File(hdfsDir, "name")).toString()); config.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, fileAsURI(new File(hdfsDir, "edits")).toString()); // same checkpoint dirs config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY, fileAsURI(new File(hdfsDir, "chkpt_edits")).toString()); config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY, fileAsURI(new File(hdfsDir, "chkpt")).toString()); createCheckPoint(1); corruptNameNodeFiles(); checkNameNodeFiles(); } /** * secnn-7 * secondary node copies fsimage and edits into correct separate directories. 
* @throws IOException */ @Test public void testSNNStartup() throws IOException{ //setUpConfig(); LOG.info("--starting SecondNN startup test"); // different name dirs config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, fileAsURI(new File(hdfsDir, "name")).toString()); config.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, fileAsURI(new File(hdfsDir, "name")).toString()); // same checkpoint dirs config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY, fileAsURI(new File(hdfsDir, "chkpt_edits")).toString()); config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY, fileAsURI(new File(hdfsDir, "chkpt")).toString()); LOG.info("--starting NN "); MiniDFSCluster cluster = null; SecondaryNameNode sn = null; NameNode nn = null; try { cluster = new MiniDFSCluster.Builder(config).manageDataDfsDirs(false) .manageNameDfsDirs(false) .build(); cluster.waitActive(); nn = cluster.getNameNode(); assertNotNull(nn); // start secondary node LOG.info("--starting SecondNN"); sn = new SecondaryNameNode(config); assertNotNull(sn); LOG.info("--doing checkpoint"); sn.doCheckpoint(); // this shouldn't fail LOG.info("--done checkpoint"); // now verify that image and edits are created in the different directories FSImage image = nn.getFSImage(); StorageDirectory sd = image.getStorage().getStorageDir(0); //only one assertEquals(sd.getStorageDirType(), NameNodeDirType.IMAGE_AND_EDITS); image.getStorage(); File imf = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE, 0); image.getStorage(); File edf = NNStorage.getStorageFile(sd, NameNodeFile.EDITS, 0); LOG.info("--image file " + imf.getAbsolutePath() + "; len = " + imf.length()); LOG.info("--edits file " + edf.getAbsolutePath() + "; len = " + edf.length()); FSImage chkpImage = sn.getFSImage(); verifyDifferentDirs(chkpImage, imf.length(), edf.length()); } catch (IOException e) { fail(StringUtils.stringifyException(e)); System.err.println("checkpoint failed"); throw e; } finally { if(sn!=null) sn.shutdown(); if(cluster!=null) cluster.shutdown(); } } @Test(timeout = 30000) public void testSNNStartupWithRuntimeException() throws Exception { String[] argv = new String[] { "-checkpoint" }; try { SecondaryNameNode.main(argv); fail("Failed to handle runtime exceptions during SNN startup!"); } catch (ExitException ee) { GenericTestUtils.assertExceptionContains( ExitUtil.EXIT_EXCEPTION_MESSAGE, ee); assertTrue("Didn't terminate properly ", ExitUtil.terminateCalled()); } } @Test public void testCompression() throws IOException { LOG.info("Test compressing image."); Configuration conf = new Configuration(); FileSystem.setDefaultUri(conf, "hdfs://localhost:0"); conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:0"); File base_dir = new File(PathUtils.getTestDir(getClass()), "dfs/"); conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, new File(base_dir, "name").getPath()); conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false); DFSTestUtil.formatNameNode(conf); // create an uncompressed image LOG.info("Create an uncompressed fsimage"); NameNode namenode = new NameNode(conf); namenode.getNamesystem().mkdirs("/test", new PermissionStatus("hairong", null, FsPermission.getDefault()), true); NamenodeProtocols nnRpc = namenode.getRpcServer(); assertTrue(nnRpc.getFileInfo("/test").isDir()); nnRpc.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false); nnRpc.saveNamespace(); namenode.stop(); namenode.join(); namenode.joinHttpServer(); // compress image using default codec LOG.info("Read an uncomressed image and store it compressed using default codec."); 
conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true); checkNameSpace(conf); // read image compressed using the default and compress it using Gzip codec LOG.info("Read a compressed image and store it using a different codec."); conf.set(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY, "org.apache.hadoop.io.compress.GzipCodec"); checkNameSpace(conf); // read an image compressed in Gzip and store it uncompressed LOG.info("Read a compressed image and store it as uncompressed."); conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, false); checkNameSpace(conf); // read an uncompressed image and store it uncompressed LOG.info("Read an uncompressed image and store it as uncompressed."); checkNameSpace(conf); } private void checkNameSpace(Configuration conf) throws IOException { NameNode namenode = new NameNode(conf); NamenodeProtocols nnRpc = namenode.getRpcServer(); assertTrue(nnRpc.getFileInfo("/test").isDir()); nnRpc.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false); nnRpc.saveNamespace(); namenode.stop(); namenode.join(); namenode.joinHttpServer(); } @Test public void testImageChecksum() throws Exception { LOG.info("Test uncompressed image checksum"); testImageChecksum(false); LOG.info("Test compressed image checksum"); testImageChecksum(true); } private void testImageChecksum(boolean compress) throws Exception { MiniDFSCluster cluster = null; if (compress) { config.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true); } try { LOG.info("\n===========================================\n" + "Starting empty cluster"); cluster = new MiniDFSCluster.Builder(config) .numDataNodes(0) .format(true) .build(); cluster.waitActive(); FileSystem fs = cluster.getFileSystem(); fs.mkdirs(new Path("/test")); LOG.info("Shutting down cluster #1"); cluster.shutdown(); cluster = null; // Corrupt the md5 files in all the namedirs corruptFSImageMD5(true); // Attach our own log appender so we can verify output final LogVerificationAppender appender = new LogVerificationAppender(); final Logger logger = Logger.getRootLogger(); logger.addAppender(appender); // Try to start a new cluster LOG.info("\n===========================================\n" + "Starting same cluster after simulated crash"); try { cluster = new MiniDFSCluster.Builder(config) .numDataNodes(0) .format(false) .build(); fail("Should not have successfully started with corrupt image"); } catch (IOException ioe) { GenericTestUtils.assertExceptionContains( "Failed to load FSImage file", ioe); int md5failures = appender.countExceptionsWithMessage( " is corrupt with MD5 checksum of "); // Two namedirs, so should have seen two failures assertEquals(2, md5failures); } } finally { if (cluster != null) { cluster.shutdown(); } } } @Test(timeout=30000) public void testCorruptImageFallback() throws IOException { // Create two checkpoints createCheckPoint(2); // Delete a single md5sum corruptFSImageMD5(false); // Should still be able to start MiniDFSCluster cluster = new MiniDFSCluster.Builder(config) .format(false) .manageDataDfsDirs(false) .manageNameDfsDirs(false) .build(); try { cluster.waitActive(); } finally { cluster.shutdown(); } } /** * This test verifies that the hosts include list can contain host names. After the namenode * restarts, the still-alive datanodes should not have any trouble registering again. 
*/ @Test public void testNNRestart() throws IOException, InterruptedException { MiniDFSCluster cluster = null; int HEARTBEAT_INTERVAL = 1; // heartbeat interval in seconds HostsFileWriter hostsFileWriter = new HostsFileWriter(); hostsFileWriter.initialize(config, "work-dir/restartnn"); byte b[] = {127, 0, 0, 1}; InetAddress inetAddress = InetAddress.getByAddress(b); hostsFileWriter.initIncludeHosts(new String[] {inetAddress.getHostName()}); int numDatanodes = 1; try { cluster = new MiniDFSCluster.Builder(config) .numDataNodes(numDatanodes).setupHostsFile(true).build(); cluster.waitActive(); cluster.restartNameNode(); NamenodeProtocols nn = cluster.getNameNodeRpc(); assertNotNull(nn); assertTrue(cluster.isDataNodeUp()); DatanodeInfo[] info = nn.getDatanodeReport(DatanodeReportType.LIVE); for (int i = 0 ; i < 5 && info.length != numDatanodes; i++) { Thread.sleep(HEARTBEAT_INTERVAL * 1000); info = nn.getDatanodeReport(DatanodeReportType.LIVE); } assertEquals("Number of live nodes should be "+numDatanodes, numDatanodes, info.length); } catch (IOException e) { fail(StringUtils.stringifyException(e)); throw e; } finally { if (cluster != null) { cluster.shutdown(); } hostsFileWriter.cleanup(); } } @Test(timeout = 120000) public void testXattrConfiguration() throws Exception { Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = null; try { conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY, -1); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build(); fail("Expected exception with negative xattr size"); } catch (IllegalArgumentException e) { GenericTestUtils.assertExceptionContains( "The maximum size of an xattr should be > 0", e); } finally { conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY, DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_DEFAULT); if (cluster != null) { cluster.shutdown(); } } try { conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY, -1); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build(); fail("Expected exception with negative # xattrs per inode"); } catch (IllegalArgumentException e) { GenericTestUtils.assertExceptionContains( "Cannot set a negative limit on the number of xattrs per inode", e); } finally { conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY, DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_DEFAULT); if (cluster != null) { cluster.shutdown(); } } } @Test(timeout = 30000) public void testNNFailToStartOnReadOnlyNNDir() throws Exception { /* set NN dir */ final String nnDirStr = Paths.get( hdfsDir.toString(), GenericTestUtils.getMethodName(), "name").toString(); config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nnDirStr); try(MiniDFSCluster cluster = new MiniDFSCluster.Builder(config) .numDataNodes(1) .manageNameDfsDirs(false) .build()) { cluster.waitActive(); /* get and verify NN dir */ final Collection<URI> nnDirs = FSNamesystem.getNamespaceDirs(config); assertNotNull(nnDirs); assertTrue(nnDirs.iterator().hasNext()); assertEquals( "NN dir should be created after NN startup.", nnDirStr, nnDirs.iterator().next().getPath()); final File nnDir = new File(nnDirStr); assertTrue(nnDir.exists()); assertTrue(nnDir.isDirectory()); try { /* set read only */ assertTrue( "Setting NN dir read only should succeed.", nnDir.setReadOnly()); cluster.restartNameNodes(); fail("Restarting NN should fail on read only NN dir."); } catch (InconsistentFSStateException e) { assertThat(e.toString(), is(allOf( containsString("InconsistentFSStateException"), containsString(nnDirStr), 
containsString("in an inconsistent state"), containsString( "storage directory does not exist or is not accessible.")))); } finally { /* set back to writable in order to clean it */ assertTrue("Setting NN dir should succeed.", nnDir.setWritable(true)); } } } /** * Verify the following scenario. * 1. NN restarts. * 2. Heartbeat RPC will retry and succeed. NN asks DN to reregister. * 3. After reregistration completes, DN will send Heartbeat, followed by * Blockreport. * 4. NN will mark DatanodeStorageInfo#blockContentsStale to false. * @throws Exception */ @Test(timeout = 60000) public void testStorageBlockContentsStaleAfterNNRestart() throws Exception { MiniDFSCluster dfsCluster = null; try { Configuration config = new Configuration(); dfsCluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build(); dfsCluster.waitActive(); dfsCluster.restartNameNode(true); BlockManagerTestUtil.checkHeartbeat( dfsCluster.getNamesystem().getBlockManager()); MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); ObjectName mxbeanNameFsns = new ObjectName( "Hadoop:service=NameNode,name=FSNamesystemState"); Integer numStaleStorages = (Integer) (mbs.getAttribute( mxbeanNameFsns, "NumStaleStorages")); assertEquals(0, numStaleStorages.intValue()); } finally { if (dfsCluster != null) { dfsCluster.shutdown(); } } return; } }
#include <all_far.h> #pragma hdrstop #include "Int.h" int FTP::GetHostFiles(struct PluginPanelItem *PanelItem,int ItemsNumber,int Move,String& DestPath,int OpMode) { PROC(("FTP::GetHostFiles","%d [%s] %s %08X",ItemsNumber,DestPath.c_str(),Move?"MOVE":"COPY",OpMode)) InitDialogItem InitItems[]= { {DI_DOUBLEBOX, 3, 1,72,6, 0,0,0,0, NULL}, {DI_TEXT,5, 2,0,0,0,0,0,0, NULL}, {DI_EDIT,5, 3,70, 3,0,0,0,0,NULL}, {DI_TEXT,3, 4,3, 4,0,0,DIF_BOXCOLOR|DIF_SEPARATOR,0,NULL }, {DI_BUTTON,0, 5,0,0,0,0,DIF_CENTERGROUP, 1, FMSG(MCopy)}, {DI_BUTTON,0, 5,0,0,0,0,DIF_CENTERGROUP, 0, FMSG(MCancel)}, }; FarDialogItem DialogItems[ARRAYSIZE(InitItems)]; FP_SizeItemList il; if(!IS_SILENT(OpMode)) { if(Move) { InitItems[0].Data = FMSG(MMoveHostTitle); InitItems[1].Data = FMSG(MMoveHostTo); } else { InitItems[0].Data = FMSG(MCopyHostTitle); InitItems[1].Data = FMSG(MCopyHostTo); } InitDialogItems(InitItems,DialogItems,ARRAYSIZE(DialogItems)); StrCpy(DialogItems[2].Data, DestPath.c_str(), sizeof(DialogItems[2].Data)); int AskCode = FDialog(76,8,"FTPCmd",DialogItems,ARRAYSIZE(DialogItems)); if(AskCode != 4) return -1; DestPath = DialogItems[2].Data; } if(!DestPath.Length()) return -1; if(!ExpandList(PanelItem,ItemsNumber,&il,TRUE)) return 0; int OverwriteAll = FALSE, SkipAll = FALSE, Rename = FALSE; int n; FTPHost* p; FTPHost h; char CheckKey[ sizeof(HostsPath)+1 ]; char DestName[ sizeof(HostsPath)+1 ]; if(DestPath.Cmp("..")) { if(*HostsPath==0) return 0; else { StrCpy(CheckKey,HostsPath,sizeof(CheckKey)); char *m = strrchr(CheckKey,'\\'); if(m) m[1] = 0; else CheckKey[0] = 0; Rename = TRUE; } } else { StrCpy(CheckKey, DestPath.c_str(), ARRAYSIZE(CheckKey)); if(strpbrk(DestPath.c_str(),":\\")==NULL && !FP_CheckRegKey(CheckKey)) { Rename=TRUE; } else if(FP_GetRegKey(CheckKey,"Folder",0)) { AddEndSlash(CheckKey,'\\',ARRAYSIZE(CheckKey)); Rename=TRUE; } } AddEndSlash(DestPath, '\\'); //Rename if(Rename) { for(n=0; n < il.Count(); n++) { p = FTPHost::Convert(&il.List[n]); if(!p) continue; //Check for folders if(p->Folder) { SayMsg(FMSG(MCanNotMoveFolder)); return TRUE; } h.Assign(p); h.RegKey[0] = 0; if(!h.Write(CheckKey)) return FALSE; if(Move && !p->Folder) { FP_DeleteRegKey(p->RegKey); if(n < ItemsNumber) PanelItem[n].Flags &= ~PPIF_SELECTED; } } }//Rename else //INI for(n = 0; n < il.Count(); n++) { p = FTPHost::Convert(&il.List[n]); if(!p) continue; if(p->Folder) { continue; } p->MkINIFile(DestName, HostsPath, DestPath.c_str()); DWORD DestAttr=GetFileAttributes(DestName); if(!IS_SILENT(OpMode) && !OverwriteAll && DestAttr != 0xFFFFFFFF) { if(SkipAll) continue; LPCSTR MsgItems[] = { FMSG(Move ? MMoveHostTitle:MCopyHostTitle), FMSG(IS_FLAG(DestAttr,FILE_ATTRIBUTE_READONLY) ? 
MAlreadyExistRO : MAlreadyExist), DestName, /*0*/FMSG(MOverwrite), /*1*/FMSG(MOverwriteAll), /*2*/FMSG(MCopySkip), /*3*/FMSG(MCopySkipAll), /*4*/FMSG(MCopyCancel) }; int MsgCode = FMessage(FMSG_WARNING,NULL,MsgItems,ARRAYSIZE(MsgItems),5); switch(MsgCode) { case 1: OverwriteAll=TRUE; break; case 3: SkipAll=TRUE; case 2: continue; case -1: case 4: return(-1); } } int WriteFailed = FALSE; if(DestAttr!=0xFFFFFFFF) { if(!DeleteFile(DestName)) if(!SetFileAttributes(DestName,FILE_ATTRIBUTE_NORMAL) && !DeleteFile(DestName)) WriteFailed=TRUE; } if(!WriteFailed) { if(!p->WriteINI(DestName)) { WriteFailed=TRUE; DeleteFile(DestName); } else if(Move) FP_DeleteRegKey(p->RegKey); } if(WriteFailed) { LPCSTR MsgItems[] = { FMSG(MError), FMSG(MCannotCopyHost), DestName, FMSG(MOk) }; FMessage(FMSG_WARNING|FMSG_DOWN|FMSG_ERRORTYPE,NULL,MsgItems,ARRAYSIZE(MsgItems),1); return(0); } }//INI if(Move) for(n = il.Count()-1; n >= 0; n--) { if(CheckForEsc(FALSE)) return -1; p = FTPHost::Convert(&il.List[n]); if(p && p->Folder) { FP_DeleteRegKey(p->RegKey); if(n < ItemsNumber) PanelItem[n].Flags &= ~PPIF_SELECTED; } } return 1; }
/** * @author Morten Olav Hansen <[email protected]> */ @Controller @RequestMapping( value = DocumentController.RESOURCE_PATH ) public class DocumentController { public static final String RESOURCE_PATH = "/documents"; @Autowired private DocumentService documentService; @Autowired private LocationManager locationManager; //------------------------------------------------------------------------------------------------------- // GET //------------------------------------------------------------------------------------------------------- @RequestMapping( method = RequestMethod.GET ) public String getDocuments( IdentifiableObjectParams params, Model model, HttpServletRequest request ) { Documents documents = new Documents(); if ( params.isPaging() ) { int total = documentService.getDocumentCount(); Pager pager = new Pager( params.getPage(), total ); documents.setPager( pager ); List<Document> documentList = new ArrayList<Document>( documentService.getDocumentsBetween( pager.getOffset(), pager.getPageSize() ) ); documents.setDocuments( documentList ); } else { documents.setDocuments( new ArrayList<Document>( documentService.getAllDocuments() ) ); } if ( params.hasLinks() ) { WebLinkPopulator listener = new WebLinkPopulator( request ); listener.addLinks( documents ); } model.addAttribute( "model", documents ); return "documents"; } @RequestMapping( value = "/{uid}", method = RequestMethod.GET ) public String getDocument( @PathVariable( "uid" ) String uid, IdentifiableObjectParams params, Model model, HttpServletRequest request ) { Document document = documentService.getDocument( uid ); if ( params.hasLinks() ) { WebLinkPopulator listener = new WebLinkPopulator( request ); listener.addLinks( document ); } model.addAttribute( "model", document ); return "document"; } @RequestMapping( value = "/{uid}/data", method = RequestMethod.GET ) public void getDocumentContent( @PathVariable( "uid" ) String uid, HttpServletResponse response ) throws Exception { Document document = documentService.getDocument( uid ); if ( document.isExternal() ) { response.sendRedirect( response.encodeRedirectURL( document.getUrl() ) ); } else { ContextUtils.configureResponse( response, document.getContentType(), true, document.getUrl(), true ); InputStream in = locationManager.getInputStream( document.getUrl(), DocumentService.DIR ); IOUtils.copy( in, response.getOutputStream() ); } } //------------------------------------------------------------------------------------------------------- // POST //------------------------------------------------------------------------------------------------------- @RequestMapping( method = RequestMethod.POST, headers = {"Content-Type=application/xml, text/xml"} ) @ResponseStatus( value = HttpStatus.CREATED ) public void postDocumentXML( HttpServletResponse response, InputStream input ) throws Exception { throw new HttpRequestMethodNotSupportedException( RequestMethod.POST.toString() ); } @RequestMapping( method = RequestMethod.POST, headers = {"Content-Type=application/json"} ) @ResponseStatus( value = HttpStatus.CREATED ) public void postDocumentJSON( HttpServletResponse response, InputStream input ) throws Exception { throw new HttpRequestMethodNotSupportedException( RequestMethod.POST.toString() ); } //------------------------------------------------------------------------------------------------------- // PUT //------------------------------------------------------------------------------------------------------- @RequestMapping( value = "/{uid}", method = RequestMethod.PUT, headers 
= {"Content-Type=application/xml, text/xml"} ) @ResponseStatus( value = HttpStatus.NO_CONTENT ) public void putDocumentXML( @PathVariable( "uid" ) String uid, InputStream input ) throws Exception { throw new HttpRequestMethodNotSupportedException( RequestMethod.PUT.toString() ); } @RequestMapping( value = "/{uid}", method = RequestMethod.PUT, headers = {"Content-Type=application/json"} ) @ResponseStatus( value = HttpStatus.NO_CONTENT ) public void putDocumentJSON( @PathVariable( "uid" ) String uid, InputStream input ) throws Exception { throw new HttpRequestMethodNotSupportedException( RequestMethod.PUT.toString() ); } //------------------------------------------------------------------------------------------------------- // DELETE //------------------------------------------------------------------------------------------------------- @RequestMapping( value = "/{uid}", method = RequestMethod.DELETE ) @ResponseStatus( value = HttpStatus.NO_CONTENT ) public void deleteDocument( @PathVariable( "uid" ) String uid ) throws Exception { throw new HttpRequestMethodNotSupportedException( RequestMethod.DELETE.toString() ); } }
import { join } from 'path';

import { Fixtures } from '../fixtures';

export class StorageFixtures {
    static async load() {
        const dataDir = join(__dirname, '../../', 'data');
        //const storageTypeDir = join(dataDir, 'storage-type');
        const providerDir = join(dataDir, 'provider');
        const customStorageDir = join(providerDir, 'custom');

        const files = Fixtures.getFilesMatchingExtensionsInDirectories(
            [providerDir, customStorageDir],
            '.json'
        );
        await Fixtures.loadFiles(files);
    }
}
package dao

// LinkedList is a singly linked list with O(1) append at head and tail.
type LinkedList struct {
    head *LinkedListValue
    tail *LinkedListValue
    len  int
}

type LinkedListValue struct {
    Value interface{}
    next  *LinkedListValue
}

func NewLinkedList() *LinkedList {
    return new(LinkedList)
}

// Add appends a value at the tail.
func (l *LinkedList) Add(value interface{}) {
    v := &LinkedListValue{
        Value: value,
    }
    if l.head == nil {
        l.head = v
        l.tail = v
    } else {
        l.tail.next = v
        l.tail = v
    }
    l.len++
}

// AddHead prepends a value at the head.
func (l *LinkedList) AddHead(value interface{}) {
    if l.head == nil {
        // Add already sets head/tail and increments len; returning here
        // avoids incrementing len twice for the first element.
        l.Add(value)
        return
    }
    v := &LinkedListValue{
        Value: value,
        next:  l.head,
    }
    l.head = v
    l.len++
}

// PopValue removes and returns the head, or nil when the list is empty.
func (l *LinkedList) PopValue() *LinkedListValue {
    pop := l.head
    if pop == nil {
        return nil
    }
    l.head = l.head.next
    if l.head == nil {
        l.tail = nil // don't leave tail pointing at the removed node
    }
    l.len--
    return pop
}

// Each calls f on every element from head to tail.
func (l *LinkedList) Each(f func(val *LinkedListValue)) {
    for curr := l.head; curr != nil; curr = curr.next {
        f(curr)
    }
}

func (l *LinkedList) Len() int {
    return l.len
}